15 #ifdef __SYCL_DEVICE_ONLY__
24 #include <type_traits>
27 inline namespace _V1 {
35 (std::is_same_v<T, int> || std::is_same_v<T, unsigned int> ||
36 std::is_same_v<T, long> || std::is_same_v<T, unsigned long> ||
37 std::is_same_v<T, long long> || std::is_same_v<T, unsigned long long> ||
38 std::is_same_v<T, float> || std::is_same_v<T, double> ||
39 std::is_pointer_v<T>);
51 template <memory_order Order>
// bit_equal: comparator used by compare_exchange to test whether two values
// have the same bit pattern. The primary template is left undefined; only the
// type categories with a specialization are supported.
template <typename T, typename = void> struct bit_equal;

// Integral types: ordinary equality already compares the full bit pattern
// (no padding, no multiple representations of the same value), so plain
// operator== is sufficient.
template <typename T>
struct bit_equal<T, typename std::enable_if_t<std::is_integral_v<T>>> {
  bool operator()(const T &a, const T &b) { return a == b; }
};
98 auto LhsInt = sycl::bit_cast<uint32_t>(lhs);
99 auto RhsInt = sycl::bit_cast<uint32_t>(rhs);
100 return LhsInt == RhsInt;
106 auto LhsInt = sycl::bit_cast<uint64_t>(lhs);
107 auto RhsInt = sycl::bit_cast<uint64_t>(rhs);
108 return LhsInt == RhsInt;
118 "Invalid atomic type. Valid types are int, unsigned int, long, "
119 "unsigned long, long long, unsigned long long, float, double "
120 "and pointer types");
122 "Invalid atomic address_space. Valid address spaces are: "
123 "global_space, local_space, ext_intel_global_device_space, "
127 "Invalid default memory_order for atomics. Valid defaults are: "
128 "relaxed, acq_rel, seq_cst");
131 static_assert(DefaultOrder != sycl::memory_order::seq_cst,
132 "seq_cst memory order is not supported on AMDGPU");
152 #ifdef __SYCL_DEVICE_ONLY__
154 :
ptr(address_space_cast<AddressSpace, access::
decorated::no>(&ref)) {}
158 :
ptr(reinterpret_cast<
std::atomic<T> *>(&ref)) {}
167 #ifdef __SYCL_DEVICE_ONLY__
168 detail::spirv::AtomicStore(
ptr, scope, order, operand);
182 #ifdef __SYCL_DEVICE_ONLY__
183 return detail::spirv::AtomicLoad(
ptr, scope, order);
194 #ifdef __SYCL_DEVICE_ONLY__
195 return detail::spirv::AtomicExchange(
ptr, scope, order, operand);
206 #ifdef __SYCL_DEVICE_ONLY__
207 T value = detail::spirv::AtomicCompareExchange(
ptr, scope,
success, failure,
216 return ptr->compare_exchange_strong(expected, desired,
236 #ifdef __SYCL_DEVICE_ONLY__
240 return ptr->compare_exchange_weak(expected, desired,
254 #ifdef __SYCL_DEVICE_ONLY__
262 template <
typename T,
bool IsAspectAtomic64AttrUsed,
memory_order DefaultOrder,
266 :
public atomic_ref_base<T, DefaultOrder, DefaultScope, AddressSpace> {
273 template <
typename T,
bool IsAspectAtomic64AttrUsed,
memory_order DefaultOrder,
277 typename
std::enable_if_t<std::is_integral_v<T>>>
278 :
public atomic_ref_base<T, DefaultOrder, DefaultScope, AddressSpace> {
302 #ifdef __SYCL_DEVICE_ONLY__
303 return detail::spirv::AtomicIAdd(
ptr, scope, order, operand);
311 return fetch_add(operand) + operand;
321 return fetch_add(1) + 1;
326 #ifdef __SYCL_DEVICE_ONLY__
327 return detail::spirv::AtomicISub(
ptr, scope, order, operand);
335 return fetch_sub(operand) - operand;
345 return fetch_sub(1) - 1;
350 #ifdef __SYCL_DEVICE_ONLY__
351 return detail::spirv::AtomicAnd(
ptr, scope, order, operand);
359 return fetch_and(operand) & operand;
364 #ifdef __SYCL_DEVICE_ONLY__
365 return detail::spirv::AtomicOr(
ptr, scope, order, operand);
376 #ifdef __SYCL_DEVICE_ONLY__
377 return detail::spirv::AtomicXor(
ptr, scope, order, operand);
385 return fetch_xor(operand) ^ operand;
390 #ifdef __SYCL_DEVICE_ONLY__
391 return detail::spirv::AtomicMin(
ptr, scope, order, operand);
394 T old =
load(load_order, scope);
395 while (operand < old &&
404 #ifdef __SYCL_DEVICE_ONLY__
405 return detail::spirv::AtomicMax(
ptr, scope, order, operand);
408 T old =
load(load_order, scope);
409 while (operand > old &&
421 template <
typename T,
bool IsAspectAtomic64AttrUsed,
memory_order DefaultOrder,
425 typename
std::enable_if_t<std::is_floating_point_v<T>>>
426 :
public atomic_ref_base<T, DefaultOrder, DefaultScope, AddressSpace> {
452 #if defined(__SYCL_DEVICE_ONLY__) && defined(SYCL_USE_NATIVE_FP_ATOMICS)
453 return detail::spirv::AtomicFAdd(
ptr, scope, order, operand);
460 load(load_order, scope);
461 desired = expected + operand;
468 return fetch_add(operand) + operand;
475 #if defined(__SYCL_DEVICE_ONLY__) && defined(SYCL_USE_NATIVE_FP_ATOMICS)
476 return detail::spirv::AtomicFAdd(
ptr, scope, order, -operand);
479 T expected =
load(load_order, scope);
482 desired = expected - operand;
489 return fetch_sub(operand) - operand;
496 #if defined(__SYCL_DEVICE_ONLY__) && defined(SYCL_USE_NATIVE_FP_ATOMICS)
497 return detail::spirv::AtomicMin(
ptr, scope, order, operand);
500 T old =
load(load_order, scope);
501 while (operand < old &&
512 #if defined(__SYCL_DEVICE_ONLY__) && defined(SYCL_USE_NATIVE_FP_ATOMICS)
513 return detail::spirv::AtomicMax(
ptr, scope, order, operand);
516 T old =
load(load_order, scope);
517 while (operand > old &&
532 #ifndef __SYCL_DEVICE_ONLY__
535 class [[__sycl_detail__::__uses_aspects__(aspect::atomic64)]]
atomic_ref_impl<
537 T, true, DefaultOrder, DefaultScope,
538 AddressSpace, typename std::enable_if_t<std::is_integral_v<T>>>
540 DefaultOrder, DefaultScope, AddressSpace> {
545 DefaultScope, AddressSpace>::atomic_ref_impl::operator=;
552 #ifndef __SYCL_DEVICE_ONLY__
555 class [[__sycl_detail__::__uses_aspects__(aspect::atomic64)]]
atomic_ref_impl<
557 T, true, DefaultOrder, DefaultScope,
558 AddressSpace, typename std::enable_if_t<std::is_floating_point_v<T>>>
560 DefaultOrder, DefaultScope, AddressSpace> {
565 DefaultScope, AddressSpace>::atomic_ref_impl::operator=;
571 template <
typename T,
bool IsAspectAtomic64AttrUsed,
memory_order DefaultOrder,
573 #ifndef __SYCL_DEVICE_ONLY__
576 class [[__sycl_detail__::__uses_aspects__(aspect::atomic64)]]
atomic_ref_impl<
578 T *, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace>
599 using base_type::is_lock_free;
602 :
base_type(reinterpret_cast<uintptr_t &>(ref)) {}
606 base_type::store(
reinterpret_cast<uintptr_t
>(operand), order, scope);
616 return reinterpret_cast<T *
>(base_type::load(order, scope));
623 return reinterpret_cast<T *
>(base_type::exchange(
624 reinterpret_cast<uintptr_t
>(operand), order, scope));
635 expected =
load(load_order, scope);
636 desired = expected + operand;
642 return fetch_add(operand) + operand;
656 T *expected =
load(load_order, scope);
659 desired = expected - operand;
665 return fetch_sub(operand) - operand;
678 return base_type::compare_exchange_strong(
679 reinterpret_cast<uintptr_t &
>(expected),
680 reinterpret_cast<uintptr_t
>(desired),
success, failure, scope);
694 return base_type::compare_exchange_weak(
695 reinterpret_cast<uintptr_t &
>(expected),
696 reinterpret_cast<uintptr_t
>(desired),
success, failure, scope);
707 using base_type::ptr;
720 DefaultScope, AddressSpace> {
723 AddressSpace>::atomic_ref_impl;
725 AddressSpace>::
operator=;
=8, DefaultOrder, DefaultScope, AddressSpace >::atomic_ref_impl T
=8, DefaultOrder, DefaultScope, AddressSpace >::operator= T
static constexpr memory_order default_write_order
static constexpr size_t required_alignment
bool compare_exchange_strong(T &expected, T desired, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
static constexpr memory_scope default_scope
atomic_ref_base(const atomic_ref_base &ref) noexcept
bool compare_exchange_strong(T &expected, T desired, memory_order success, memory_order failure, memory_scope scope=default_scope) const noexcept
static constexpr bool is_always_lock_free
bool is_lock_free() const noexcept
T load(memory_order order=default_read_order, memory_scope scope=default_scope) const noexcept
bool compare_exchange_weak(T &expected, T desired, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
atomic_ref_base & operator=(const atomic_ref_base &)=delete
T operator=(T desired) const noexcept
void store(T operand, memory_order order=default_write_order, memory_scope scope=default_scope) const noexcept
static constexpr memory_order default_read_modify_write_order
static constexpr memory_order default_read_order
bool compare_exchange_weak(T &expected, T desired, memory_order success, memory_order failure, memory_scope scope=default_scope) const noexcept
T exchange(T operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
T operator|=(T operand) const noexcept
T fetch_sub(T operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
T operator+=(T operand) const noexcept
T operator++() const noexcept
T fetch_or(T operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
T fetch_min(T operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
T operator&=(T operand) const noexcept
T operator--() const noexcept
T operator-=(T operand) const noexcept
value_type difference_type
T fetch_xor(T operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
T fetch_and(T operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
T fetch_max(T operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
T operator^=(T operand) const noexcept
T operator--(int) const noexcept
T fetch_add(T operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
T operator++(int) const noexcept
T fetch_sub(T operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
value_type difference_type
T fetch_max(T operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
T operator+=(T operand) const noexcept
T fetch_min(T operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
T fetch_add(T operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
T operator-=(T operand) const noexcept
bool compare_exchange_weak(T *&expected, T *desired, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
T * exchange(T *operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
bool compare_exchange_weak(T *&expected, T *desired, memory_order success, memory_order failure, memory_scope scope=default_scope) const noexcept
T * operator++() const noexcept
bool compare_exchange_strong(T *&expected, T *desired, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
T * operator++(int) const noexcept
T * operator=(T *desired) const noexcept
T * operator-=(difference_type operand) const noexcept
T * fetch_sub(difference_type operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
T * load(memory_order order=default_read_order, memory_scope scope=default_scope) const noexcept
bool compare_exchange_strong(T *&expected, T *desired, memory_order success, memory_order failure, memory_scope scope=default_scope) const noexcept
ptrdiff_t difference_type
void store(T *operand, memory_order order=default_write_order, memory_scope scope=default_scope) const noexcept
T * operator--() const noexcept
T * operator--(int) const noexcept
T * operator+=(difference_type operand) const noexcept
T * fetch_add(difference_type operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
@ ext_intel_global_device_space
sycl::memory_order memory_order
constexpr memory_order getLoadOrder(memory_order order)
std::bool_constant< Order==memory_order::relaxed||Order==memory_order::acq_rel||Order==memory_order::seq_cst > IsValidDefaultOrder
std::memory_order getStdMemoryOrder(__spv::MemorySemanticsMask::Flag)
std::ptrdiff_t difference_type
_Abi const simd< _Tp, _Abi > & noexcept
static constexpr bool value
static constexpr bool value
bool operator()(const T &lhs, const T &rhs)
bool operator()(const double &lhs, const double &rhs)
bool operator()(const float &lhs, const float &rhs)