DPC++ Runtime
Runtime libraries for oneAPI DPC++
atomic_ref.hpp
Go to the documentation of this file.
1 //==----- atomic_ref.hpp - SYCL 2020 atomic_ref ----------------------------==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #pragma once
10 
11 #include <sycl/access/access.hpp>
12 #include <sycl/aspects.hpp>
13 #include <sycl/atomic.hpp>
14 #include <sycl/detail/defines.hpp>
15 #include <sycl/memory_enums.hpp>
16 #ifdef __SYCL_DEVICE_ONLY__
17 #include <sycl/detail/spirv.hpp>
18 #include <sycl/multi_ptr.hpp>
19 #endif
20 
21 #ifndef __SYCL_DEVICE_ONLY__
22 #include <atomic>
23 #endif
24 #include <type_traits>
25 
26 namespace sycl {
28 namespace detail {
29 
32 
// Predicate: true iff T is one of the types sycl::atomic_ref may wrap.
// Valid types are the fixed set of integral/floating-point types below plus
// any pointer type; this mirrors the static_assert message emitted from
// atomic_ref_base. (Doxygen line-number residue stripped from this block.)
template <typename T> struct IsValidAtomicRefType {
  static constexpr bool value =
      (std::is_same<T, int>::value || std::is_same<T, unsigned int>::value ||
       std::is_same<T, long>::value || std::is_same<T, unsigned long>::value ||
       std::is_same<T, long long>::value ||
       std::is_same<T, unsigned long long>::value ||
       std::is_same<T, float>::value || std::is_same<T, double>::value ||
       std::is_pointer<T>::value);
};
42 
43 template <sycl::access::address_space AS> struct IsValidAtomicRefAddressSpace {
44  static constexpr bool value =
45  (AS == access::address_space::global_space ||
46  AS == access::address_space::local_space ||
47  AS == access::address_space::ext_intel_global_device_space ||
48  AS == access::address_space::generic_space);
49 };
50 
51 // DefaultOrder parameter is limited to read-modify-write orders
52 template <memory_order Order>
53 using IsValidDefaultOrder = bool_constant<Order == memory_order::relaxed ||
54  Order == memory_order::acq_rel ||
55  Order == memory_order::seq_cst>;
56 
57 template <memory_order ReadModifyWriteOrder> struct memory_order_traits;
58 
60  static constexpr memory_order read_order = memory_order::relaxed;
61  static constexpr memory_order write_order = memory_order::relaxed;
62 };
63 
65  static constexpr memory_order read_order = memory_order::acquire;
66  static constexpr memory_order write_order = memory_order::release;
67 };
68 
70  static constexpr memory_order read_order = memory_order::seq_cst;
71  static constexpr memory_order write_order = memory_order::seq_cst;
72 };
73 
74 inline constexpr memory_order getLoadOrder(memory_order order) {
75  switch (order) {
76  case memory_order::relaxed:
77  return memory_order::relaxed;
78 
79  case memory_order::acquire:
80  case memory_order::__consume_unsupported:
81  case memory_order::acq_rel:
82  case memory_order::release:
83  return memory_order::acquire;
84 
85  case memory_order::seq_cst:
86  return memory_order::seq_cst;
87  }
88 }
89 
90 template <typename T, typename = void> struct bit_equal;
91 
92 template <typename T>
93 struct bit_equal<T, typename detail::enable_if_t<std::is_integral<T>::value>> {
94  bool operator()(const T &lhs, const T &rhs) { return lhs == rhs; }
95 };
96 
97 template <> struct bit_equal<float> {
98  bool operator()(const float &lhs, const float &rhs) {
99  auto LhsInt = sycl::bit_cast<uint32_t>(lhs);
100  auto RhsInt = sycl::bit_cast<uint32_t>(rhs);
101  return LhsInt == RhsInt;
102  }
103 };
104 
105 template <> struct bit_equal<double> {
106  bool operator()(const double &lhs, const double &rhs) {
107  auto LhsInt = sycl::bit_cast<uint64_t>(lhs);
108  auto RhsInt = sycl::bit_cast<uint64_t>(rhs);
109  return LhsInt == RhsInt;
110  }
111 };
112 
113 // Functionality for any atomic of type T, reused by partial specializations
// NOTE(review): this listing is a documentation-extractor dump. Several lines
// of this class were dropped by the extractor (visible as jumps in the
// embedded numbering): the class head "class atomic_ref_base {" (line 116),
// the condition expressions of the three static_asserts (118, 122, 127), the
// initializers of is_always_lock_free / default_read_order /
// default_write_order (141, 143, 145), the body of is_lock_free() (150), the
// success-order arguments of the host compare_exchange calls (218, 242) and
// the device-side ptr member declaration (256). Restore them from the
// original header before compiling.
114 template <typename T, memory_order DefaultOrder, memory_scope DefaultScope,
115  access::address_space AddressSpace>
// Compile-time validation of the template parameters; only the message text
// of each static_assert survived extraction.
117  static_assert(
119  "Invalid atomic type. Valid types are int, unsigned int, long, "
120  "unsigned long, long long, unsigned long long, float, double "
121  "and pointer types");
123  "Invalid atomic address_space. Valid address spaces are: "
124  "global_space, local_space, ext_intel_global_device_space, "
125  "generic_space");
126  static_assert(
128  "Invalid default memory_order for atomics. Valid defaults are: "
129  "relaxed, acq_rel, seq_cst");
130 #ifdef __AMDGPU__
131  // FIXME should this query device's memory capabilities at runtime?
132  static_assert(DefaultOrder != sycl::memory_order::seq_cst,
133  "seq_cst memory order is not supported on AMDGPU")
134 #endif
135 
136 
137 public:
// Public type aliases and constants mandated by the SYCL 2020 atomic_ref
// interface. The read/write default orders are derived from DefaultOrder
// (their initializers were lost in extraction).
138  using value_type = T;
139  static constexpr size_t required_alignment = sizeof(T);
140  static constexpr bool is_always_lock_free =
142  static constexpr memory_order default_read_order =
144  static constexpr memory_order default_write_order =
146  static constexpr memory_order default_read_modify_write_order = DefaultOrder;
147  static constexpr memory_scope default_scope = DefaultScope;
148 
149  bool is_lock_free() const noexcept {
151  }
152 
// Construction: on device, ptr is obtained by casting the referenced object
// into the requested address space; on host, the object is reinterpreted as a
// std::atomic<T> (acknowledged UB below).
153 #ifdef __SYCL_DEVICE_ONLY__
154  explicit atomic_ref_base(T &ref)
155  : ptr(address_space_cast<AddressSpace, access::decorated::no>(&ref)) {}
156 #else
157  // FIXME: This reinterpret_cast is UB, but happens to work for now
158  explicit atomic_ref_base(T &ref)
159  : ptr(reinterpret_cast<std::atomic<T> *>(&ref)) {}
160 #endif
161  // Our implementation of copy constructor could be trivial
162  // Defined this way for consistency with standard atomic_ref
163  atomic_ref_base(const atomic_ref_base &ref) noexcept { ptr = ref.ptr; };
164  atomic_ref_base &operator=(const atomic_ref_base &) = delete;
165 
// Atomic store. Device: SPIR-V AtomicStore with the given scope; host:
// std::atomic::store (the scope argument has no host equivalent and is
// discarded).
166  void store(T operand, memory_order order = default_write_order,
167  memory_scope scope = default_scope) const noexcept {
168 #ifdef __SYCL_DEVICE_ONLY__
169  detail::spirv::AtomicStore(ptr, scope, order, operand);
170 #else
171  (void)scope;
172  ptr->store(operand, detail::getStdMemoryOrder(order));
173 #endif
174  }
175 
// Assignment is a store with the default write order, returning the value
// assigned (per the standard atomic_ref contract).
176  T operator=(T desired) const noexcept {
177  store(desired);
178  return desired;
179  }
180 
// Atomic load; same device/host split as store.
181  T load(memory_order order = default_read_order,
182  memory_scope scope = default_scope) const noexcept {
183 #ifdef __SYCL_DEVICE_ONLY__
184  return detail::spirv::AtomicLoad(ptr, scope, order)
185 #else
186  (void)scope;
187  return ptr->load(detail::getStdMemoryOrder(order));
188 #endif
189  }
190 
191  operator T() const noexcept { return load(); }
192 
// Atomic exchange: returns the previous value.
193  T exchange(T operand, memory_order order = default_read_modify_write_order,
194  memory_scope scope = default_scope) const noexcept {
195 #ifdef __SYCL_DEVICE_ONLY__
196  return detail::spirv::AtomicExchange(ptr, scope, order, operand);
197 #else
198  (void)scope;
199  return ptr->exchange(operand, detail::getStdMemoryOrder(order));
200 #endif
201  }
202 
// Strong CAS. Device: SPIR-V AtomicCompareExchange, then bit_equal decides
// success so that NaN payloads and signed zeros are compared bitwise; on
// failure `expected` is updated with the observed value.
// NOTE(review): on the host path the embedded numbering jumps 217 -> 219;
// the dropped line carried the success-order argument
// (detail::getStdMemoryOrder(success),) — restore before compiling.
203  bool
204  compare_exchange_strong(T &expected, T desired, memory_order success,
205  memory_order failure,
206  memory_scope scope = default_scope) const noexcept {
207 #ifdef __SYCL_DEVICE_ONLY__
208  T value = detail::spirv::AtomicCompareExchange(ptr, scope, success, failure,
209  desired, expected);
210  bool succeeded = detail::bit_equal<T>()(value, expected);
211  if (!succeeded) {
212  expected = value;
213  }
214  return succeeded;
215 #else
216  (void)scope;
217  return ptr->compare_exchange_strong(expected, desired,
219  detail::getStdMemoryOrder(failure));
220 #endif
221  }
222 
// Convenience overload: one order used for both success and failure.
223  bool
224  compare_exchange_strong(T &expected, T desired,
225  memory_order order = default_read_modify_write_order,
226  memory_scope scope = default_scope) const noexcept {
227  return compare_exchange_strong(expected, desired, order, order, scope);
228  }
229 
230  bool
231  compare_exchange_weak(T &expected, T desired, memory_order success,
232  memory_order failure,
233  memory_scope scope = default_scope) const noexcept {
234  // SPIR-V AtomicCompareExchangeWeak is deprecated and equivalent to
235  // AtomicCompareExchange. For now, use AtomicCompareExchange on device and
236  // compare_exchange_weak on host
// NOTE(review): as above, the numbering jumps 241 -> 243 on the host path —
// the dropped line carried the success-order argument.
237 #ifdef __SYCL_DEVICE_ONLY__
238  return compare_exchange_strong(expected, desired, success, failure, scope);
239 #else
240  (void)scope;
241  return ptr->compare_exchange_weak(expected, desired,
243  detail::getStdMemoryOrder(failure));
244 #endif
245  }
246 
// Convenience overload: one order for both success and failure.
247  bool
248  compare_exchange_weak(T &expected, T desired,
249  memory_order order = default_read_modify_write_order,
250  memory_scope scope = default_scope) const noexcept {
251  return compare_exchange_weak(expected, desired, order, order, scope);
252  }
253 
// Underlying storage: host side is a std::atomic<T>*; the device-side member
// declaration (line 256, presumably a decorated multi_ptr) was lost in
// extraction — confirm against the original header.
254 protected:
255 #ifdef __SYCL_DEVICE_ONLY__
257 #else
258  std::atomic<T> *ptr;
259 #endif
260 };
261 
262 // Hook allowing partial specializations to inherit atomic_ref_base
// Primary template of the implementation-selection hook; the trailing
// `typename = void` parameter is the SFINAE slot used by the enable_if-based
// partial specializations that follow.
// NOTE(review): the class-head line ("class atomic_ref_impl") was dropped by
// the documentation extractor (embedded numbering jumps 265 -> 267); only the
// base-class clause survives below.
263 template <typename T, bool IsAspectAtomic64AttrUsed, memory_order DefaultOrder,
264  memory_scope DefaultScope, access::address_space AddressSpace,
265  typename = void>
267  : public atomic_ref_base<T, DefaultOrder, DefaultScope, AddressSpace> {
268 public:
// Inherit all base constructors unchanged.
269  using atomic_ref_base<T, DefaultOrder, DefaultScope,
270  AddressSpace>::atomic_ref_base;
271 };
272 
273 // Partial specialization for integral types
// Adds the integer fetch_* operations and the compound-assignment /
// increment / decrement operators on top of atomic_ref_base.
// NOTE(review): the documentation extractor dropped several lines here
// (embedded numbering gaps): the initializers of is_always_lock_free and the
// default read/write orders (283, 286, 288, 290), additional
// using-declarations re-exporting base members (296, 299 — presumably load
// and compare_exchange_strong; confirm against the original header), and the
// private section's content (418). Restore before compiling.
274 template <typename T, bool IsAspectAtomic64AttrUsed, memory_order DefaultOrder,
275  memory_scope DefaultScope, access::address_space AddressSpace>
276 class atomic_ref_impl<T, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope,
277  AddressSpace,
278  typename detail::enable_if_t<std::is_integral<T>::value>>
279  : public atomic_ref_base<T, DefaultOrder, DefaultScope, AddressSpace> {
280 
281 public:
282  using value_type = T;
284  static constexpr size_t required_alignment = sizeof(T);
285  static constexpr bool is_always_lock_free =
287  static constexpr memory_order default_read_order =
289  static constexpr memory_order default_write_order =
291  static constexpr memory_order default_read_modify_write_order = DefaultOrder;
292  static constexpr memory_scope default_scope = DefaultScope;
293 
294  using atomic_ref_base<T, DefaultOrder, DefaultScope,
295  AddressSpace>::atomic_ref_base;
297  using atomic_ref_base<T, DefaultOrder, DefaultScope,
298  AddressSpace>::compare_exchange_weak;
300 
// Atomic addition returning the PREVIOUS value. Device: SPIR-V AtomicIAdd;
// host: std::atomic::fetch_add (scope discarded, no host equivalent).
301  T fetch_add(T operand, memory_order order = default_read_modify_write_order,
302  memory_scope scope = default_scope) const noexcept {
303 #ifdef __SYCL_DEVICE_ONLY__
304  return detail::spirv::AtomicIAdd(ptr, scope, order, operand);
305 #else
306  (void)scope;
307  return ptr->fetch_add(operand, detail::getStdMemoryOrder(order));
308 #endif
309  }
310 
// Compound operators return the NEW value (fetch_* result adjusted locally).
311  T operator+=(T operand) const noexcept {
312  return fetch_add(operand) + operand;
313  }
314 
315  T operator++(int) const noexcept {
316  // TODO: use AtomicIIncrement as an optimization
317  return fetch_add(1);
318  }
319 
320  T operator++() const noexcept {
321  // TODO: use AtomicIIncrement as an optimization
322  return fetch_add(1) + 1;
323  }
324 
325  T fetch_sub(T operand, memory_order order = default_read_modify_write_order,
326  memory_scope scope = default_scope) const noexcept {
327 #ifdef __SYCL_DEVICE_ONLY__
328  return detail::spirv::AtomicISub(ptr, scope, order, operand);
329 #else
330  (void)scope;
331  return ptr->fetch_sub(operand, detail::getStdMemoryOrder(order));
332 #endif
333  }
334 
335  T operator-=(T operand) const noexcept {
336  return fetch_sub(operand) - operand;
337  }
338 
339  T operator--(int) const noexcept {
340  // TODO: use AtomicIDecrement as an optimization
341  return fetch_sub(1);
342  }
343 
344  T operator--() const noexcept {
345  // TODO: use AtomicIDecrement as an optimization
346  return fetch_sub(1) - 1;
347  }
348 
// Bitwise AND/OR/XOR follow the same device/host pattern as fetch_add.
349  T fetch_and(T operand, memory_order order = default_read_modify_write_order,
350  memory_scope scope = default_scope) const noexcept {
351 #ifdef __SYCL_DEVICE_ONLY__
352  return detail::spirv::AtomicAnd(ptr, scope, order, operand);
353 #else
354  (void)scope;
355  return ptr->fetch_and(operand, detail::getStdMemoryOrder(order));
356 #endif
357  }
358 
359  T operator&=(T operand) const noexcept {
360  return fetch_and(operand) & operand;
361  }
362 
363  T fetch_or(T operand, memory_order order = default_read_modify_write_order,
364  memory_scope scope = default_scope) const noexcept {
365 #ifdef __SYCL_DEVICE_ONLY__
366  return detail::spirv::AtomicOr(ptr, scope, order, operand);
367 #else
368  (void)scope;
369  return ptr->fetch_or(operand, detail::getStdMemoryOrder(order));
370 #endif
371  }
372 
373  T operator|=(T operand) const noexcept { return fetch_or(operand) | operand; }
374 
375  T fetch_xor(T operand, memory_order order = default_read_modify_write_order,
376  memory_scope scope = default_scope) const noexcept {
377 #ifdef __SYCL_DEVICE_ONLY__
378  return detail::spirv::AtomicXor(ptr, scope, order, operand);
379 #else
380  (void)scope;
381  return ptr->fetch_xor(operand, detail::getStdMemoryOrder(order));
382 #endif
383  }
384 
385  T operator^=(T operand) const noexcept {
386  return fetch_xor(operand) ^ operand;
387  }
388 
// Min/max: native SPIR-V on device; on host, emulated with a weak-CAS loop
// that only attempts the exchange while `operand` would actually lower
// (resp. raise) the stored value. Returns the previous value either way.
389  T fetch_min(T operand, memory_order order = default_read_modify_write_order,
390  memory_scope scope = default_scope) const noexcept {
391 #ifdef __SYCL_DEVICE_ONLY__
392  return detail::spirv::AtomicMin(ptr, scope, order, operand);
393 #else
394  auto load_order = detail::getLoadOrder(order);
395  T old = load(load_order, scope);
396  while (operand < old &&
397  !compare_exchange_weak(old, operand, order, scope)) {
398  }
399  return old;
400 #endif
401  }
402 
403  T fetch_max(T operand, memory_order order = default_read_modify_write_order,
404  memory_scope scope = default_scope) const noexcept {
405 #ifdef __SYCL_DEVICE_ONLY__
406  return detail::spirv::AtomicMax(ptr, scope, order, operand);
407 #else
408  auto load_order = detail::getLoadOrder(order);
409  T old = load(load_order, scope);
410  while (operand > old &&
411  !compare_exchange_weak(old, operand, order, scope)) {
412  }
413  return old;
414 #endif
415  }
416 
417 private:
419 };
420 
421 // Partial specialization for floating-point types
// Adds fetch_add/fetch_sub/fetch_min/fetch_max for float/double. Native FP
// atomics are used only when both compiling for device AND
// SYCL_USE_NATIVE_FP_ATOMICS is defined; otherwise every operation is
// emulated with a weak-CAS loop.
// NOTE(review): the documentation extractor dropped lines here (embedded
// numbering gaps): the class-head line (424), the initializers of
// is_always_lock_free and the default read/write orders (431, 434, 436, 438),
// additional using-declarations (444, 447) and the private section's content
// (526). Restore before compiling.
422 template <typename T, bool IsAspectAtomic64AttrUsed, memory_order DefaultOrder,
423  memory_scope DefaultScope, access::address_space AddressSpace>
425  T, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace,
426  typename detail::enable_if_t<std::is_floating_point<T>::value>>
427  : public atomic_ref_base<T, DefaultOrder, DefaultScope, AddressSpace> {
428 
429 public:
430  using value_type = T;
432  static constexpr size_t required_alignment = sizeof(T);
433  static constexpr bool is_always_lock_free =
435  static constexpr memory_order default_read_order =
437  static constexpr memory_order default_write_order =
439  static constexpr memory_order default_read_modify_write_order = DefaultOrder;
440  static constexpr memory_scope default_scope = DefaultScope;
441 
442  using atomic_ref_base<T, DefaultOrder, DefaultScope,
443  AddressSpace>::atomic_ref_base;
445  using atomic_ref_base<T, DefaultOrder, DefaultScope,
446  AddressSpace>::compare_exchange_weak;
448 
// Returns the PREVIOUS value; CAS loop re-loads on each retry.
449  T fetch_add(T operand, memory_order order = default_read_modify_write_order,
450  memory_scope scope = default_scope) const noexcept {
451 // TODO: Remove the "native atomics" macro check once implemented for all
452 // backends
453 #if defined(__SYCL_DEVICE_ONLY__) && defined(SYCL_USE_NATIVE_FP_ATOMICS)
454  return detail::spirv::AtomicFAdd(ptr, scope, order, operand);
455 #else
456  auto load_order = detail::getLoadOrder(order);
457  T expected;
458  T desired;
459  do {
460  expected =
461  load(load_order, scope); // performs better with load in CAS loop.
462  desired = expected + operand;
463  } while (!compare_exchange_weak(expected, desired, order, scope));
464  return expected;
465 #endif
466  }
467 
// Compound operators return the NEW value.
468  T operator+=(T operand) const noexcept {
469  return fetch_add(operand) + operand;
470  }
471 
// fetch_sub has no dedicated SPIR-V op: the native path adds the negated
// operand via AtomicFAdd. Note the host CAS loop here relies on
// compare_exchange_weak refreshing `expected` on failure rather than
// re-loading explicitly (unlike fetch_add above).
472  T fetch_sub(T operand, memory_order order = default_read_modify_write_order,
473  memory_scope scope = default_scope) const noexcept {
474 // TODO: Remove the "native atomics" macro check once implemented for all
475 // backends
476 #if defined(__SYCL_DEVICE_ONLY__) && defined(SYCL_USE_NATIVE_FP_ATOMICS)
477  return detail::spirv::AtomicFAdd(ptr, scope, order, -operand);
478 #else
479  auto load_order = detail::getLoadOrder(order);
480  T expected = load(load_order, scope);
481  T desired;
482  do {
483  desired = expected - operand;
484  } while (!compare_exchange_weak(expected, desired, order, scope));
485  return expected;
486 #endif
487  }
488 
489  T operator-=(T operand) const noexcept {
490  return fetch_sub(operand) - operand;
491  }
492 
// Min/max emulation: only attempt the exchange while the operand would
// actually change the stored value; returns the previous value.
493  T fetch_min(T operand, memory_order order = default_read_modify_write_order,
494  memory_scope scope = default_scope) const noexcept {
495 // TODO: Remove the "native atomics" macro check once implemented for all
496 // backends
497 #if defined(__SYCL_DEVICE_ONLY__) && defined(SYCL_USE_NATIVE_FP_ATOMICS)
498  return detail::spirv::AtomicMin(ptr, scope, order, operand);
499 #else
500  auto load_order = detail::getLoadOrder(order);
501  T old = load(load_order, scope);
502  while (operand < old &&
503  !compare_exchange_weak(old, operand, order, scope)) {
504  }
505  return old;
506 #endif
507  }
508 
509  T fetch_max(T operand, memory_order order = default_read_modify_write_order,
510  memory_scope scope = default_scope) const noexcept {
511 // TODO: Remove the "native atomics" macro check once implemented for all
512 // backends
513 #if defined(__SYCL_DEVICE_ONLY__) && defined(SYCL_USE_NATIVE_FP_ATOMICS)
514  return detail::spirv::AtomicMax(ptr, scope, order, operand);
515 #else
516  auto load_order = detail::getLoadOrder(order);
517  T old = load(load_order, scope);
518  while (operand > old &&
519  !compare_exchange_weak(old, operand, order, scope)) {
520  }
521  return old;
522 #endif
523  }
524 
525 private:
527 };
528 
529 // Partial specialization for 64-bit integral types needed for optional kernel
530 // features
// On device the class carries the __uses_aspects__(aspect::atomic64)
// attribute so kernels using 64-bit integer atomics are marked as requiring
// the atomic64 aspect (an optional kernel feature). Behavior is otherwise
// inherited unchanged from the undecorated (false) implementation.
531 template <typename T, memory_order DefaultOrder, memory_scope DefaultScope,
532  access::address_space AddressSpace>
533 #ifndef __SYCL_DEVICE_ONLY__
// NOTE(review): the host-side class-head line (an undecorated
// "class atomic_ref_impl<") was dropped by the documentation extractor
// (embedded numbering jumps 533 -> 535). Restore before compiling.
535 #else
536 class [[__sycl_detail__::__uses_aspects__(aspect::atomic64)]] atomic_ref_impl<
537 #endif
538  T, /*IsAspectAtomic64AttrUsed = */ true, DefaultOrder, DefaultScope,
539  AddressSpace, typename detail::enable_if_t<std::is_integral<T>::value>>
540  : public atomic_ref_impl<T, /*IsAspectAtomic64AttrUsed = */ false,
541  DefaultOrder, DefaultScope, AddressSpace> {
542 public:
// Forward constructors and assignment from the undecorated implementation.
543  using atomic_ref_impl<T, /*IsAspectAtomic64AttrUsed = */ false, DefaultOrder,
544  DefaultScope, AddressSpace>::atomic_ref_impl;
545  using atomic_ref_impl<T, /*IsAspectAtomic64AttrUsed = */ false, DefaultOrder,
546  DefaultScope, AddressSpace>::atomic_ref_impl::operator=;
547 };
548 
549 // Partial specialization for 64-bit floating-point types needed for optional
550 // kernel features
// Device-side twin of the 64-bit integral specialization above: decorates the
// class with the atomic64 aspect attribute and forwards everything to the
// undecorated (false) implementation.
551 template <typename T, memory_order DefaultOrder, memory_scope DefaultScope,
552  access::address_space AddressSpace>
553 #ifndef __SYCL_DEVICE_ONLY__
// NOTE(review): the host-side class-head line was dropped by the
// documentation extractor (embedded numbering jumps 553 -> 555). Restore
// before compiling.
555 #else
556 class [[__sycl_detail__::__uses_aspects__(aspect::atomic64)]] atomic_ref_impl<
557 #endif
558  T, /*IsAspectAtomic64AttrUsed = */ true, DefaultOrder, DefaultScope,
559  AddressSpace,
560  typename detail::enable_if_t<std::is_floating_point<T>::value>>
561  : public atomic_ref_impl<T, /*IsAspectAtomic64AttrUsed = */ false,
562  DefaultOrder, DefaultScope, AddressSpace> {
563 public:
// Forward constructors and assignment from the undecorated implementation.
564  using atomic_ref_impl<T, /*IsAspectAtomic64AttrUsed = */ false, DefaultOrder,
565  DefaultScope, AddressSpace>::atomic_ref_impl;
566  using atomic_ref_impl<T, /*IsAspectAtomic64AttrUsed = */ false, DefaultOrder,
567  DefaultScope, AddressSpace>::atomic_ref_impl::operator=;
568 };
569 
570 // Partial specialization for pointer types
571 // Arithmetic is emulated because target's representation of T* is unknown
572 // TODO: Find a way to use intptr_t or uintptr_t atomics instead
// The pointer is stored as a uintptr_t in the atomic_ref_base base class; all
// pointer-typed operations cast to/from uintptr_t at the boundary, and
// fetch_add/fetch_sub are emulated with CAS loops over pointer arithmetic.
// NOTE(review): the documentation extractor dropped lines here (embedded
// numbering gaps): the target of the base_type alias (581), the initializers
// of is_always_lock_free and the default read/write orders (588, 590, 592),
// and the declaration heads of fetch_add and fetch_sub (624, 648 — the
// "T *fetch_add(difference_type operand," lines). Restore before compiling.
573 template <typename T, bool IsAspectAtomic64AttrUsed, memory_order DefaultOrder, memory_scope DefaultScope,
574  access::address_space AddressSpace>
575 class atomic_ref_impl<T *, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace>
576  : public atomic_ref_base<uintptr_t, DefaultOrder, DefaultScope,
577  AddressSpace> {
578 
579 private:
580  using base_type =
582 
583 public:
584  using value_type = T *;
585  using difference_type = ptrdiff_t;
586  static constexpr size_t required_alignment = sizeof(T *);
587  static constexpr bool is_always_lock_free =
589  static constexpr memory_order default_read_order =
591  static constexpr memory_order default_write_order =
593  static constexpr memory_order default_read_modify_write_order = DefaultOrder;
594  static constexpr memory_scope default_scope = DefaultScope;
595 
596  using base_type::is_lock_free;
597 
// Reinterpret the referenced pointer object as a uintptr_t for the base.
598  explicit atomic_ref_impl(T *&ref)
599  : base_type(reinterpret_cast<uintptr_t &>(ref)) {}
600 
// store/load/exchange simply convert between T* and uintptr_t around the
// base-class operation.
601  void store(T *operand, memory_order order = default_write_order,
602  memory_scope scope = default_scope) const noexcept {
603  base_type::store(reinterpret_cast<uintptr_t>(operand), order, scope);
604  }
605 
606  T *operator=(T *desired) const noexcept {
607  store(desired);
608  return desired;
609  }
610 
611  T *load(memory_order order = default_read_order,
612  memory_scope scope = default_scope) const noexcept {
613  return reinterpret_cast<T *>(base_type::load(order, scope));
614  }
615 
616  operator T *() const noexcept { return load(); }
617 
618  T *exchange(T *operand, memory_order order = default_read_modify_write_order,
619  memory_scope scope = default_scope) const noexcept {
620  return reinterpret_cast<T *>(base_type::exchange(
621  reinterpret_cast<uintptr_t>(operand), order, scope));
622  }
623 
// fetch_add: CAS loop performing pointer arithmetic in T* space (so the
// element size is applied correctly); returns the PREVIOUS pointer.
625  memory_order order = default_read_modify_write_order,
626  memory_scope scope = default_scope) const noexcept {
627  // TODO: Find a way to avoid compare_exchange here
628  auto load_order = detail::getLoadOrder(order);
629  T *expected;
630  T *desired;
631  do {
632  expected = load(load_order, scope);
633  desired = expected + operand;
634  } while (!compare_exchange_weak(expected, desired, order, scope));
635  return expected;
636  }
637 
// Compound operators return the NEW pointer value.
638  T *operator+=(difference_type operand) const noexcept {
639  return fetch_add(operand) + operand;
640  }
641 
642  T *operator++(int) const noexcept { return fetch_add(difference_type(1)); }
643 
644  T *operator++() const noexcept {
645  return fetch_add(difference_type(1)) + difference_type(1);
646  }
647 
// fetch_sub: like fetch_add but relies on compare_exchange_weak refreshing
// `expected` on failure instead of re-loading explicitly.
649  memory_order order = default_read_modify_write_order,
650  memory_scope scope = default_scope) const noexcept {
651  // TODO: Find a way to avoid compare_exchange here
652  auto load_order = detail::getLoadOrder(order);
653  T *expected = load(load_order, scope);
654  T *desired;
655  do {
656  desired = expected - operand;
657  } while (!compare_exchange_weak(expected, desired, order, scope));
658  return expected;
659  }
660 
661  T *operator-=(difference_type operand) const noexcept {
662  return fetch_sub(operand) - operand;
663  }
664 
665  T *operator--(int) const noexcept { return fetch_sub(difference_type(1)); }
666 
667  T *operator--() const noexcept {
668  return fetch_sub(difference_type(1)) - difference_type(1);
669  }
670 
// CAS wrappers: forward to the uintptr_t base; on failure the base updates
// the uintptr_t image of `expected`, which aliases the caller's T*.
671  bool
672  compare_exchange_strong(T *&expected, T *desired, memory_order success,
673  memory_order failure,
674  memory_scope scope = default_scope) const noexcept {
675  return base_type::compare_exchange_strong(
676  reinterpret_cast<uintptr_t &>(expected),
677  reinterpret_cast<uintptr_t>(desired), success, failure, scope);
678  }
679 
680  bool
681  compare_exchange_strong(T *&expected, T *desired,
682  memory_order order = default_read_modify_write_order,
683  memory_scope scope = default_scope) const noexcept {
684  return compare_exchange_strong(expected, desired, order, order, scope);
685  }
686 
687  bool
688  compare_exchange_weak(T *&expected, T *desired, memory_order success,
689  memory_order failure,
690  memory_scope scope = default_scope) const noexcept {
691  return base_type::compare_exchange_weak(
692  reinterpret_cast<uintptr_t &>(expected),
693  reinterpret_cast<uintptr_t>(desired), success, failure, scope);
694  }
695 
696  bool
697  compare_exchange_weak(T *&expected, T *desired,
698  memory_order order = default_read_modify_write_order,
699  memory_scope scope = default_scope) const noexcept {
700  return compare_exchange_weak(expected, desired, order, order, scope);
701  }
702 
703 private:
704  using base_type::ptr;
705 };
706 
707 } // namespace detail
708 
// Public SYCL 2020 atomic_ref: thin facade over detail::atomic_ref_impl.
// The `sizeof(T) == 8` argument routes 64-bit types to the aspect-decorated
// specializations so they are tracked as an optional kernel feature.
709 template <typename T, memory_order DefaultOrder, memory_scope DefaultScope,
710  access::address_space AddressSpace =
711  access::address_space::generic_space>
712 // if sizeof(T) == 8 bytes, then the type T is optional kernel feature, so it
713 // was decorated with [[__sycl_detail__::__uses_aspects__(aspect::atomic64)]]
714 // attribute in detail::atomic_ref_impl partial specializations above
// NOTE(review): the class-head line ("class atomic_ref") was dropped by the
// documentation extractor (embedded numbering jumps 714 -> 716); only the
// base-class clause survives below. Restore before compiling.
716  : public detail::atomic_ref_impl<T, sizeof(T) == 8, DefaultOrder,
717  DefaultScope, AddressSpace> {
718 public:
// Re-export the implementation's constructors and assignment operators.
719  using detail::atomic_ref_impl<T, sizeof(T) == 8, DefaultOrder, DefaultScope,
720  AddressSpace>::atomic_ref_impl;
721  using detail::atomic_ref_impl<T, sizeof(T) == 8, DefaultOrder, DefaultScope,
722  AddressSpace>::operator=;
723 };
724 
725 } // __SYCL_INLINE_VER_NAMESPACE(_V1)
726 } // namespace sycl
aspects.hpp
sycl::_V1::detail::atomic_ref_base::is_lock_free
bool is_lock_free() const noexcept
Definition: atomic_ref.hpp:149
sycl::_V1::detail::atomic_ref_impl< T *, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace >::operator++
T * operator++() const noexcept
Definition: atomic_ref.hpp:644
sycl::_V1::detail::IsValidAtomicRefAddressSpace
Definition: atomic_ref.hpp:43
sycl::_V1::detail::atomic_ref_base::atomic_ref_base
atomic_ref_base(T &ref)
Definition: atomic_ref.hpp:158
sycl::_V1::detail::atomic_ref_impl< T *, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace >::compare_exchange_strong
bool compare_exchange_strong(T *&expected, T *desired, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:681
sycl::_V1::detail::bit_equal< T, typename detail::enable_if_t< std::is_integral< T >::value > >::operator()
bool operator()(const T &lhs, const T &rhs)
Definition: atomic_ref.hpp:94
T
sycl::_V1::atomic_ref
Definition: atomic_ref.hpp:715
sycl::_V1::detail::atomic_ref_impl< T, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_floating_point< T >::value > >::fetch_sub
T fetch_sub(T operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:472
sycl::_V1::ext::oneapi::detail::memory_order
sycl::ext::oneapi::memory_order memory_order
Definition: atomic_ref.hpp:32
sycl::_V1::detail::atomic_ref_impl< T, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_integral< T >::value > >::fetch_add
T fetch_add(T operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:301
sycl::_V1::detail::atomic_ref_impl< T *, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace >::operator-=
T * operator-=(difference_type operand) const noexcept
Definition: atomic_ref.hpp:661
__SYCL_INLINE_VER_NAMESPACE
#define __SYCL_INLINE_VER_NAMESPACE(X)
Definition: defines_elementary.hpp:11
sycl::_V1::detail::atomic_ref_base::compare_exchange_weak
bool compare_exchange_weak(T &expected, T desired, memory_order success, memory_order failure, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:231
sycl::_V1::detail::atomic_ref_impl< T, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_integral< T >::value > >::operator++
T operator++(int) const noexcept
Definition: atomic_ref.hpp:315
sycl::_V1::detail::atomic_ref_impl< T *, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace >::load
T * load(memory_order order=default_read_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:611
sycl::_V1::detail::atomic_ref_impl< T, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_integral< T >::value > >::fetch_and
T fetch_and(T operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:349
sycl::_V1::detail::atomic_ref_impl< T *, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace >::value_type
T * value_type
Definition: atomic_ref.hpp:584
sycl::_V1::detail::atomic_ref_impl< T, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_integral< T >::value > >::operator--
T operator--() const noexcept
Definition: atomic_ref.hpp:344
sycl::_V1::detail::atomic_ref_impl< T, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_integral< T >::value > >::operator--
T operator--(int) const noexcept
Definition: atomic_ref.hpp:339
sycl::_V1::memory_order::relaxed
@ relaxed
sycl::_V1::detail::bit_equal
Definition: atomic_ref.hpp:90
sycl
---— Error handling, matching OpenCL plugin semantics.
Definition: access.hpp:14
bool_constant
sycl::_V1::memory_order::acq_rel
@ acq_rel
access.hpp
sycl::_V1::detail::bit_equal< float >::operator()
bool operator()(const float &lhs, const float &rhs)
Definition: atomic_ref.hpp:98
sycl::_V1::detail::memory_scope
sycl::memory_scope memory_scope
Definition: atomic_ref.hpp:31
sycl::_V1::detail::atomic_ref_impl< T, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_integral< T >::value > >::fetch_max
T fetch_max(T operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:403
sycl::_V1::detail::atomic_ref_impl< T, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_integral< T >::value > >::fetch_min
T fetch_min(T operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:389
sycl::_V1::detail::atomic_ref_impl< T, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_floating_point< T >::value > >::operator-=
T operator-=(T operand) const noexcept
Definition: atomic_ref.hpp:489
sycl::_V1::detail::enable_if_t
typename std::enable_if< B, T >::type enable_if_t
Definition: stl_type_traits.hpp:24
sycl::_V1::detail::atomic_ref_base::ptr
std::atomic< T > * ptr
Definition: atomic_ref.hpp:258
sycl::_V1::detail::atomic_ref_impl< T *, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace >::fetch_add
T * fetch_add(difference_type operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:624
sycl::_V1::detail::atomic_ref_impl< T, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_floating_point< T >::value > >::fetch_add
T fetch_add(T operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:449
sycl::_V1::detail::atomic_ref_impl< T *, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace >::fetch_sub
T * fetch_sub(difference_type operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:648
sycl::_V1::multi_ptr
Provides constructors for address space qualified and non address space qualified pointers to allow i...
Definition: atomic.hpp:34
sycl::_V1::detail::atomic_ref_impl< T *, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace >::operator+=
T * operator+=(difference_type operand) const noexcept
Definition: atomic_ref.hpp:638
sycl::_V1::detail::atomic_ref_impl< T *, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace >::exchange
T * exchange(T *operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:618
sycl::_V1::detail::atomic_ref_impl< T *, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace >::compare_exchange_weak
bool compare_exchange_weak(T *&expected, T *desired, memory_order success, memory_order failure, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:688
sycl::_V1::detail::atomic_ref_impl< T *, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace >::compare_exchange_weak
bool compare_exchange_weak(T *&expected, T *desired, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:697
sycl::_V1::detail::atomic_ref_impl
Definition: atomic_ref.hpp:266
sycl::_V1::detail::atomic_ref_impl< T, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_integral< T >::value > >::operator+=
T operator+=(T operand) const noexcept
Definition: atomic_ref.hpp:311
sycl::_V1::detail::atomic_ref_base::exchange
T exchange(T operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:193
sycl::_V1::detail::atomic_ref_impl< T, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_integral< T >::value > >::operator&=
T operator&=(T operand) const noexcept
Definition: atomic_ref.hpp:359
sycl::_V1::ext::oneapi::experimental::operator=
annotated_arg & operator=(annotated_arg &)=default
sycl::_V1::detail::atomic_ref_base::atomic_ref_base
atomic_ref_base(const atomic_ref_base &ref) noexcept
Definition: atomic_ref.hpp:163
multi_ptr.hpp
spirv.hpp
defines.hpp
sycl::_V1::detail::atomic_ref_base::store
void store(T operand, memory_order order=default_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:166
sycl::_V1::detail::atomic_ref_base::load
T load(memory_order order=default_read_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:181
sycl::_V1::errc::success
@ success
sycl::_V1::detail::memory_order
sycl::memory_order memory_order
Definition: atomic.hpp:38
sycl::_V1::detail::atomic_ref_impl< T *, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace >::store
void store(T *operand, memory_order order=default_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:601
sycl::_V1::detail::atomic_ref_impl< T, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_integral< T >::value > >::operator++
T operator++() const noexcept
Definition: atomic_ref.hpp:320
sycl::_V1::detail::atomic_ref_impl< T, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_integral< T >::value > >::operator-=
T operator-=(T operand) const noexcept
Definition: atomic_ref.hpp:335
sycl::_V1::ext::oneapi::detail::memory_scope
sycl::ext::oneapi::memory_scope memory_scope
Definition: atomic_ref.hpp:33
sycl::_V1::detail::atomic_ref_base< uintptr_t, DefaultOrder, DefaultScope, AddressSpace >::value_type
uintptr_t value_type
Definition: atomic_ref.hpp:138
sycl::_V1::detail::atomic_ref_base::compare_exchange_weak
bool compare_exchange_weak(T &expected, T desired, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:248
sycl::_V1::detail::atomic_ref_impl< T, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_floating_point< T >::value > >::operator+=
T operator+=(T operand) const noexcept
Definition: atomic_ref.hpp:468
sycl::_V1::detail::atomic_ref_impl< T *, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace >::operator=
T * operator=(T *desired) const noexcept
Definition: atomic_ref.hpp:606
sycl::_V1::detail::atomic_ref_base::compare_exchange_strong
bool compare_exchange_strong(T &expected, T desired, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:224
sycl::_V1::detail::atomic_ref_impl< T, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_integral< T >::value > >::fetch_or
T fetch_or(T operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:363
sycl::_V1::access::decorated
decorated
Definition: access.hpp:59
sycl::_V1::memory_order::seq_cst
@ seq_cst
sycl::_V1::ext::oneapi::detail::getLoadOrder
constexpr memory_order getLoadOrder(memory_order order)
Definition: atomic_ref.hpp:74
sycl::_V1::address_space_cast
multi_ptr< ElementType, Space, DecorateAddress > address_space_cast(ElementType *pointer)
Definition: multi_ptr.hpp:1300
sycl::_V1::detail::atomic_ref_base::compare_exchange_strong
bool compare_exchange_strong(T &expected, T desired, memory_order success, memory_order failure, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:204
sycl::_V1::detail::getStdMemoryOrder
static std::memory_order getStdMemoryOrder(__spv::MemorySemanticsMask::Flag)
Definition: atomic.hpp:84
sycl::_V1::memory_scope
memory_scope
Definition: memory_enums.hpp:26
sycl::_V1::detail::IsValidAtomicRefType
Definition: atomic_ref.hpp:33
sycl::_V1::detail::atomic_ref_impl< T, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_integral< T >::value > >::operator^=
T operator^=(T operand) const noexcept
Definition: atomic_ref.hpp:385
sycl::_V1::detail::bit_equal< double >::operator()
bool operator()(const double &lhs, const double &rhs)
Definition: atomic_ref.hpp:106
sycl::_V1::detail::memory_order_traits
Definition: atomic_ref.hpp:57
std
Definition: accessor.hpp:3230
sycl::_V1::detail::atomic_ref_base::operator=
T operator=(T desired) const noexcept
Definition: atomic_ref.hpp:176
sycl::_V1::detail::IsValidDefaultOrder
bool_constant< Order==memory_order::relaxed||Order==memory_order::acq_rel||Order==memory_order::seq_cst > IsValidDefaultOrder
Definition: atomic_ref.hpp:55
memory_enums.hpp
sycl::_V1::memory_order
memory_order
Definition: memory_enums.hpp:16
sycl::_V1::detail::atomic_ref_impl< T, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_floating_point< T >::value > >::fetch_min
T fetch_min(T operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:493
sycl::_V1::detail::atomic_ref_impl< T *, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace >::operator--
T * operator--() const noexcept
Definition: atomic_ref.hpp:667
sycl::_V1::detail::atomic_ref_impl< T, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_integral< T >::value > >::fetch_xor
T fetch_xor(T operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:375
sycl::_V1::detail::atomic_ref_base
Definition: atomic_ref.hpp:116
sycl::_V1::detail::atomic_ref_impl< T *, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace >::atomic_ref_impl
atomic_ref_impl(T *&ref)
Definition: atomic_ref.hpp:598
sycl::_V1::detail::atomic_ref_impl< T *, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace >::operator++
T * operator++(int) const noexcept
Definition: atomic_ref.hpp:642
sycl::_V1::detail::atomic_ref_impl< T, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_integral< T >::value > >::operator|=
T operator|=(T operand) const noexcept
Definition: atomic_ref.hpp:373
sycl::_V1::detail::atomic_ref_impl< T, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_floating_point< T >::value > >::difference_type
value_type difference_type
Definition: atomic_ref.hpp:431
sycl::_V1::detail::atomic_ref_impl< T *, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace >::compare_exchange_strong
bool compare_exchange_strong(T *&expected, T *desired, memory_order success, memory_order failure, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:672
sycl::_V1::detail::atomic_ref_impl< T, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_floating_point< T >::value > >::fetch_max
T fetch_max(T operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:509
sycl::_V1::detail::atomic_ref_impl< T, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_integral< T >::value > >::fetch_sub
T fetch_sub(T operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:325
atomic.hpp
sycl::_V1::detail::atomic_ref_impl< T, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_integral< T >::value > >::difference_type
value_type difference_type
Definition: atomic_ref.hpp:283
sycl::_V1::detail::atomic_ref_impl< T *, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace >::difference_type
ptrdiff_t difference_type
Definition: atomic_ref.hpp:585
sycl::_V1::detail::atomic_ref_impl< T *, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace >::operator--
T * operator--(int) const noexcept
Definition: atomic_ref.hpp:665
sycl::_V1::access::address_space
address_space
Definition: access.hpp:47