DPC++ Runtime
Runtime libraries for oneAPI DPC++
atomic_ref.hpp
1 //==----- atomic_ref.hpp - SYCL 2020 atomic_ref ----------------------------==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #pragma once
10 
11 #include <sycl/access/access.hpp> // for address_space
12 #include <sycl/bit_cast.hpp> // for bit_cast
13 #include <sycl/memory_enums.hpp> // for getStdMemoryOrder, memory_order
14 
15 #ifdef __SYCL_DEVICE_ONLY__
16 #include <sycl/detail/spirv.hpp>
17 #include <sycl/multi_ptr.hpp>
18 #else
19 #include <atomic> // for atomic
20 #endif
21 
22 #include <stddef.h> // for size_t, ptrdiff_t
23 #include <stdint.h> // for uintptr_t, uint32_t, uint64_t
24 #include <type_traits> // for enable_if_t, bool_constant
25 
26 namespace sycl {
27 inline namespace _V1 {
28 namespace detail {
29 
32 
33 template <typename T> struct IsValidAtomicRefType {
34  static constexpr bool value =
35  (std::is_same_v<T, int> || std::is_same_v<T, unsigned int> ||
36  std::is_same_v<T, long> || std::is_same_v<T, unsigned long> ||
37  std::is_same_v<T, long long> || std::is_same_v<T, unsigned long long> ||
38  std::is_same_v<T, float> || std::is_same_v<T, double> ||
39  std::is_pointer_v<T>);
40 };
41 
42 template <sycl::access::address_space AS> struct IsValidAtomicRefAddressSpace {
43  static constexpr bool value =
44  (AS == access::address_space::global_space ||
45  AS == access::address_space::local_space ||
46  AS == access::address_space::ext_intel_global_device_space ||
47  AS == access::address_space::generic_space);
48 };
49 
50 // DefaultOrder parameter is limited to read-modify-write orders
51 template <memory_order Order>
52 using IsValidDefaultOrder = std::bool_constant<Order == memory_order::relaxed ||
53  Order == memory_order::acq_rel ||
54  Order == memory_order::seq_cst>;
55 
56 template <memory_order ReadModifyWriteOrder> struct memory_order_traits;
57 
58 template <> struct memory_order_traits<memory_order::relaxed> {
59  static constexpr memory_order read_order = memory_order::relaxed;
60  static constexpr memory_order write_order = memory_order::relaxed;
61 };
62 
63 template <> struct memory_order_traits<memory_order::acq_rel> {
64  static constexpr memory_order read_order = memory_order::acquire;
65  static constexpr memory_order write_order = memory_order::release;
66 };
67 
68 template <> struct memory_order_traits<memory_order::seq_cst> {
69  static constexpr memory_order read_order = memory_order::seq_cst;
70  static constexpr memory_order write_order = memory_order::seq_cst;
71 };
72 
73 inline constexpr memory_order getLoadOrder(memory_order order) {
74  switch (order) {
75  case memory_order::relaxed:
76  return memory_order::relaxed;
77 
78  case memory_order::acquire:
79  case memory_order::__consume_unsupported:
80  case memory_order::acq_rel:
81  case memory_order::release:
82  return memory_order::acquire;
83 
84  case memory_order::seq_cst:
85  return memory_order::seq_cst;
86  }
87 }
88 
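The helper above picks the memory order used for the plain load at the start of the compare-exchange emulation loops further down: the "acquire half" of a read-modify-write order. A minimal illustration (these are internal detail:: names, shown only for clarity):

static_assert(sycl::detail::getLoadOrder(sycl::memory_order::relaxed) ==
              sycl::memory_order::relaxed);
static_assert(sycl::detail::getLoadOrder(sycl::memory_order::acq_rel) ==
              sycl::memory_order::acquire);
static_assert(sycl::detail::getLoadOrder(sycl::memory_order::seq_cst) ==
              sycl::memory_order::seq_cst);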
89 template <typename T, typename = void> struct bit_equal;
90 
91 template <typename T>
92 struct bit_equal<T, typename std::enable_if_t<std::is_integral_v<T>>> {
93  bool operator()(const T &lhs, const T &rhs) { return lhs == rhs; }
94 };
95 
96 template <> struct bit_equal<float> {
97  bool operator()(const float &lhs, const float &rhs) {
98  auto LhsInt = sycl::bit_cast<uint32_t>(lhs);
99  auto RhsInt = sycl::bit_cast<uint32_t>(rhs);
100  return LhsInt == RhsInt;
101  }
102 };
103 
104 template <> struct bit_equal<double> {
105  bool operator()(const double &lhs, const double &rhs) {
106  auto LhsInt = sycl::bit_cast<uint64_t>(lhs);
107  auto RhsInt = sycl::bit_cast<uint64_t>(rhs);
108  return LhsInt == RhsInt;
109  }
110 };
111 
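bit_equal compares object representations instead of values. That distinction matters for the device compare_exchange path below: with operator== a stored NaN would never compare equal to the value just read back (so a successful exchange could be reported as a failure), while +0.0 and -0.0 compare equal even though their bit patterns differ. A standalone host-side sketch of the difference (not part of this header; bits_equal is a made-up name):

#include <cmath>
#include <cstdint>
#include <cstring>
#include <iostream>

// Bitwise comparison analogous to sycl::detail::bit_equal<float>.
static bool bits_equal(float a, float b) {
  std::uint32_t ua, ub;
  std::memcpy(&ua, &a, sizeof(ua));
  std::memcpy(&ub, &b, sizeof(ub));
  return ua == ub;
}

int main() {
  float nan = std::nanf("");
  std::cout << (nan == nan) << '\n';            // 0: NaN never compares equal to itself
  std::cout << bits_equal(nan, nan) << '\n';    // 1: same bit pattern
  std::cout << (0.0f == -0.0f) << '\n';         // 1: values compare equal
  std::cout << bits_equal(0.0f, -0.0f) << '\n'; // 0: sign bit differs
  return 0;
}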
112 // Functionality for any atomic of type T, reused by partial specializations
113 template <typename T, memory_order DefaultOrder, memory_scope DefaultScope,
114  access::address_space AddressSpace>
115 class atomic_ref_base {
116  static_assert(
117  detail::IsValidAtomicRefType<T>::value,
118  "Invalid atomic type. Valid types are int, unsigned int, long, "
119  "unsigned long, long long, unsigned long long, float, double "
120  "and pointer types");
121  static_assert(detail::IsValidAtomicRefAddressSpace<AddressSpace>::value,
122  "Invalid atomic address_space. Valid address spaces are: "
123  "global_space, local_space, ext_intel_global_device_space, "
124  "generic_space");
125  static_assert(
126  detail::IsValidDefaultOrder<DefaultOrder>::value,
127  "Invalid default memory_order for atomics. Valid defaults are: "
128  "relaxed, acq_rel, seq_cst");
129 #ifdef __AMDGPU__
130  // FIXME should this query device's memory capabilities at runtime?
131  static_assert(DefaultOrder != sycl::memory_order::seq_cst,
132  "seq_cst memory order is not supported on AMDGPU");
133 #endif
134 
135 
136 public:
137  using value_type = T;
138  static constexpr size_t required_alignment = sizeof(T);
139  static constexpr bool is_always_lock_free =
140  detail::IsValidAtomicRefType<T>::value;
141  static constexpr memory_order default_read_order =
142  memory_order_traits<DefaultOrder>::read_order;
143  static constexpr memory_order default_write_order =
144  memory_order_traits<DefaultOrder>::write_order;
145  static constexpr memory_order default_read_modify_write_order = DefaultOrder;
146  static constexpr memory_scope default_scope = DefaultScope;
147 
148  bool is_lock_free() const noexcept {
149  return detail::IsValidAtomicRefType<T>::value;
150  }
151 
152 #ifdef __SYCL_DEVICE_ONLY__
153  explicit atomic_ref_base(T &ref)
154  : ptr(address_space_cast<AddressSpace, access::decorated::no>(&ref)) {}
155 #else
156  // FIXME: This reinterpret_cast is UB, but happens to work for now
157  explicit atomic_ref_base(T &ref)
158  : ptr(reinterpret_cast<std::atomic<T> *>(&ref)) {}
159 #endif
160  // Our implementation of the copy constructor could be trivial; it is
161  // defined this way for consistency with the standard atomic_ref.
162  atomic_ref_base(const atomic_ref_base &ref) noexcept { ptr = ref.ptr; };
163  atomic_ref_base &operator=(const atomic_ref_base &) = delete;
164 
165  void store(T operand, memory_order order = default_write_order,
166  memory_scope scope = default_scope) const noexcept {
167 #ifdef __SYCL_DEVICE_ONLY__
168  detail::spirv::AtomicStore(ptr, scope, order, operand);
169 #else
170  (void)scope;
171  ptr->store(operand, detail::getStdMemoryOrder(order));
172 #endif
173  }
174 
175  T operator=(T desired) const noexcept {
176  store(desired);
177  return desired;
178  }
179 
180  T load(memory_order order = default_read_order,
181  memory_scope scope = default_scope) const noexcept {
182 #ifdef __SYCL_DEVICE_ONLY__
183  return detail::spirv::AtomicLoad(ptr, scope, order);
184 #else
185  (void)scope;
186  return ptr->load(detail::getStdMemoryOrder(order));
187 #endif
188  }
189 
190  operator T() const noexcept { return load(); }
191 
192  T exchange(T operand, memory_order order = default_read_modify_write_order,
193  memory_scope scope = default_scope) const noexcept {
194 #ifdef __SYCL_DEVICE_ONLY__
195  return detail::spirv::AtomicExchange(ptr, scope, order, operand);
196 #else
197  (void)scope;
198  return ptr->exchange(operand, detail::getStdMemoryOrder(order));
199 #endif
200  }
201 
202  bool
203  compare_exchange_strong(T &expected, T desired, memory_order success,
204  memory_order failure,
205  memory_scope scope = default_scope) const noexcept {
206 #ifdef __SYCL_DEVICE_ONLY__
207  T value = detail::spirv::AtomicCompareExchange(ptr, scope, success, failure,
208  desired, expected);
209  bool succeeded = detail::bit_equal<T>()(value, expected);
210  if (!succeeded) {
211  expected = value;
212  }
213  return succeeded;
214 #else
215  (void)scope;
216  return ptr->compare_exchange_strong(expected, desired,
217  detail::getStdMemoryOrder(success),
218  detail::getStdMemoryOrder(failure));
219 #endif
220  }
221 
222  bool
223  compare_exchange_strong(T &expected, T desired,
224  memory_order order = default_read_modify_write_order,
225  memory_scope scope = default_scope) const noexcept {
226  return compare_exchange_strong(expected, desired, order, order, scope);
227  }
228 
229  bool
230  compare_exchange_weak(T &expected, T desired, memory_order success,
231  memory_order failure,
232  memory_scope scope = default_scope) const noexcept {
233  // SPIR-V AtomicCompareExchangeWeak is deprecated and equivalent to
234  // AtomicCompareExchange. For now, use AtomicCompareExchange on device and
235  // compare_exchange_weak on host
236 #ifdef __SYCL_DEVICE_ONLY__
237  return compare_exchange_strong(expected, desired, success, failure, scope);
238 #else
239  (void)scope;
240  return ptr->compare_exchange_weak(expected, desired,
241  detail::getStdMemoryOrder(success),
242  detail::getStdMemoryOrder(failure));
243 #endif
244  }
245 
246  bool
247  compare_exchange_weak(T &expected, T desired,
248  memory_order order = default_read_modify_write_order,
249  memory_scope scope = default_scope) const noexcept {
250  return compare_exchange_weak(expected, desired, order, order, scope);
251  }
252 
253 protected:
254 #ifdef __SYCL_DEVICE_ONLY__
255  multi_ptr<T, AddressSpace, access::decorated::no> ptr;
256 #else
257  std::atomic<T> *ptr;
258 #endif
259 };
260 
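The base class above provides the full load/store/exchange/compare_exchange surface; everything else in this file is layered on top of it. A typical retry loop against this interface looks the same as with std::atomic_ref, with memory_scope as the only SYCL-specific addition. A hedged sketch (user code would normally reach this through the public sycl::atomic_ref defined at the end of the file; atomic_double_it is a made-up name):

// Atomically replace the referenced value with twice its current value and
// return the newly installed result.
template <typename AtomicRef> // e.g. sycl::atomic_ref<int, ...>
int atomic_double_it(const AtomicRef &ref) {
  int expected = ref.load();
  // On failure, compare_exchange_strong refreshes 'expected' with the value
  // currently stored, so each retry works with up-to-date data.
  while (!ref.compare_exchange_strong(expected, expected * 2)) {
  }
  return expected * 2;
}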
261 // Hook allowing partial specializations to inherit atomic_ref_base
262 template <typename T, bool IsAspectAtomic64AttrUsed, memory_order DefaultOrder,
263  memory_scope DefaultScope, access::address_space AddressSpace,
264  typename = void>
265 class atomic_ref_impl
266  : public atomic_ref_base<T, DefaultOrder, DefaultScope, AddressSpace> {
267 public:
268  using atomic_ref_base<T, DefaultOrder, DefaultScope,
269  AddressSpace>::atomic_ref_base;
270 };
271 
272 // Partial specialization for integral types
273 template <typename T, bool IsAspectAtomic64AttrUsed, memory_order DefaultOrder,
274  memory_scope DefaultScope, access::address_space AddressSpace>
275 class atomic_ref_impl<T, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope,
276  AddressSpace,
277  typename std::enable_if_t<std::is_integral_v<T>>>
278  : public atomic_ref_base<T, DefaultOrder, DefaultScope, AddressSpace> {
279 
280 public:
281  using value_type = T;
282  using difference_type = value_type;
283  static constexpr size_t required_alignment = sizeof(T);
284  static constexpr bool is_always_lock_free =
285  detail::IsValidAtomicRefType<T>::value;
286  static constexpr memory_order default_read_order =
287  memory_order_traits<DefaultOrder>::read_order;
288  static constexpr memory_order default_write_order =
289  memory_order_traits<DefaultOrder>::write_order;
290  static constexpr memory_order default_read_modify_write_order = DefaultOrder;
291  static constexpr memory_scope default_scope = DefaultScope;
292 
293  using atomic_ref_base<T, DefaultOrder, DefaultScope,
294  AddressSpace>::atomic_ref_base;
295  using atomic_ref_base<T, DefaultOrder, DefaultScope, AddressSpace>::load;
296  using atomic_ref_base<T, DefaultOrder, DefaultScope,
297  AddressSpace>::compare_exchange_weak;
298  using atomic_ref_base<T, DefaultOrder, DefaultScope, AddressSpace>::operator=;
299 
300  T fetch_add(T operand, memory_order order = default_read_modify_write_order,
301  memory_scope scope = default_scope) const noexcept {
302 #ifdef __SYCL_DEVICE_ONLY__
303  return detail::spirv::AtomicIAdd(ptr, scope, order, operand);
304 #else
305  (void)scope;
306  return ptr->fetch_add(operand, detail::getStdMemoryOrder(order));
307 #endif
308  }
309 
310  T operator+=(T operand) const noexcept {
311  return fetch_add(operand) + operand;
312  }
313 
314  T operator++(int) const noexcept {
315  // TODO: use AtomicIIncrement as an optimization
316  return fetch_add(1);
317  }
318 
319  T operator++() const noexcept {
320  // TODO: use AtomicIIncrement as an optimization
321  return fetch_add(1) + 1;
322  }
323 
324  T fetch_sub(T operand, memory_order order = default_read_modify_write_order,
325  memory_scope scope = default_scope) const noexcept {
326 #ifdef __SYCL_DEVICE_ONLY__
327  return detail::spirv::AtomicISub(ptr, scope, order, operand);
328 #else
329  (void)scope;
330  return ptr->fetch_sub(operand, detail::getStdMemoryOrder(order));
331 #endif
332  }
333 
334  T operator-=(T operand) const noexcept {
335  return fetch_sub(operand) - operand;
336  }
337 
338  T operator--(int) const noexcept {
339  // TODO: use AtomicIDecrement as an optimization
340  return fetch_sub(1);
341  }
342 
343  T operator--() const noexcept {
344  // TODO: use AtomicIDecrement as an optimization
345  return fetch_sub(1) - 1;
346  }
347 
348  T fetch_and(T operand, memory_order order = default_read_modify_write_order,
349  memory_scope scope = default_scope) const noexcept {
350 #ifdef __SYCL_DEVICE_ONLY__
351  return detail::spirv::AtomicAnd(ptr, scope, order, operand);
352 #else
353  (void)scope;
354  return ptr->fetch_and(operand, detail::getStdMemoryOrder(order));
355 #endif
356  }
357 
358  T operator&=(T operand) const noexcept {
359  return fetch_and(operand) & operand;
360  }
361 
362  T fetch_or(T operand, memory_order order = default_read_modify_write_order,
363  memory_scope scope = default_scope) const noexcept {
364 #ifdef __SYCL_DEVICE_ONLY__
365  return detail::spirv::AtomicOr(ptr, scope, order, operand);
366 #else
367  (void)scope;
368  return ptr->fetch_or(operand, detail::getStdMemoryOrder(order));
369 #endif
370  }
371 
372  T operator|=(T operand) const noexcept { return fetch_or(operand) | operand; }
373 
374  T fetch_xor(T operand, memory_order order = default_read_modify_write_order,
375  memory_scope scope = default_scope) const noexcept {
376 #ifdef __SYCL_DEVICE_ONLY__
377  return detail::spirv::AtomicXor(ptr, scope, order, operand);
378 #else
379  (void)scope;
380  return ptr->fetch_xor(operand, detail::getStdMemoryOrder(order));
381 #endif
382  }
383 
384  T operator^=(T operand) const noexcept {
385  return fetch_xor(operand) ^ operand;
386  }
387 
388  T fetch_min(T operand, memory_order order = default_read_modify_write_order,
389  memory_scope scope = default_scope) const noexcept {
390 #ifdef __SYCL_DEVICE_ONLY__
391  return detail::spirv::AtomicMin(ptr, scope, order, operand);
392 #else
393  auto load_order = detail::getLoadOrder(order);
394  T old = load(load_order, scope);
395  while (operand < old &&
396  !compare_exchange_weak(old, operand, order, scope)) {
397  }
398  return old;
399 #endif
400  }
401 
402  T fetch_max(T operand, memory_order order = default_read_modify_write_order,
403  memory_scope scope = default_scope) const noexcept {
404 #ifdef __SYCL_DEVICE_ONLY__
405  return detail::spirv::AtomicMax(ptr, scope, order, operand);
406 #else
407  auto load_order = detail::getLoadOrder(order);
408  T old = load(load_order, scope);
409  while (operand > old &&
410  !compare_exchange_weak(old, operand, order, scope)) {
411  }
412  return old;
413 #endif
414  }
415 
416 private:
417  using atomic_ref_base<T, DefaultOrder, DefaultScope, AddressSpace>::ptr;
418 };
419 
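On host, fetch_min and fetch_max above fall back to a compare-exchange loop, since std::atomic offers no fetch_min/fetch_max here. The loop only attempts the exchange while the operand would actually lower (or raise) the stored value; when the CAS fails, the local copy is refreshed and the test is evaluated again. The same pattern on a plain std::atomic<int>, as a standalone sketch (fetch_min_emulated is an assumed name):

#include <atomic>

int fetch_min_emulated(std::atomic<int> &a, int operand) {
  int old = a.load(std::memory_order_acquire);
  // Keep trying to install 'operand' while it is still smaller than the
  // stored value; a failed CAS reloads 'old' with the current contents.
  while (operand < old &&
         !a.compare_exchange_weak(old, operand, std::memory_order_acq_rel)) {
  }
  return old; // previous value, matching the fetch_* convention
}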
420 // Partial specialization for floating-point types
421 template <typename T, bool IsAspectAtomic64AttrUsed, memory_order DefaultOrder,
422  memory_scope DefaultScope, access::address_space AddressSpace>
423 class atomic_ref_impl<T, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope,
424  AddressSpace,
425  typename std::enable_if_t<std::is_floating_point_v<T>>>
426  : public atomic_ref_base<T, DefaultOrder, DefaultScope, AddressSpace> {
427 
428 public:
429  using value_type = T;
430  using difference_type = value_type;
431  static constexpr size_t required_alignment = sizeof(T);
432  static constexpr bool is_always_lock_free =
433  detail::IsValidAtomicRefType<T>::value;
434  static constexpr memory_order default_read_order =
435  memory_order_traits<DefaultOrder>::read_order;
436  static constexpr memory_order default_write_order =
437  memory_order_traits<DefaultOrder>::write_order;
438  static constexpr memory_order default_read_modify_write_order = DefaultOrder;
439  static constexpr memory_scope default_scope = DefaultScope;
440 
441  using atomic_ref_base<T, DefaultOrder, DefaultScope,
442  AddressSpace>::atomic_ref_base;
443  using atomic_ref_base<T, DefaultOrder, DefaultScope, AddressSpace>::load;
444  using atomic_ref_base<T, DefaultOrder, DefaultScope,
445  AddressSpace>::compare_exchange_weak;
446  using atomic_ref_base<T, DefaultOrder, DefaultScope, AddressSpace>::operator=;
447 
448  T fetch_add(T operand, memory_order order = default_read_modify_write_order,
449  memory_scope scope = default_scope) const noexcept {
450 // TODO: Remove the "native atomics" macro check once implemented for all
451 // backends
452 #if defined(__SYCL_DEVICE_ONLY__) && defined(SYCL_USE_NATIVE_FP_ATOMICS)
453  return detail::spirv::AtomicFAdd(ptr, scope, order, operand);
454 #else
455  auto load_order = detail::getLoadOrder(order);
456  T expected;
457  T desired;
458  do {
459  expected =
460  load(load_order, scope); // performs better with load in CAS loop.
461  desired = expected + operand;
462  } while (!compare_exchange_weak(expected, desired, order, scope));
463  return expected;
464 #endif
465  }
466 
467  T operator+=(T operand) const noexcept {
468  return fetch_add(operand) + operand;
469  }
470 
471  T fetch_sub(T operand, memory_order order = default_read_modify_write_order,
472  memory_scope scope = default_scope) const noexcept {
473 // TODO: Remove the "native atomics" macro check once implemented for all
474 // backends
475 #if defined(__SYCL_DEVICE_ONLY__) && defined(SYCL_USE_NATIVE_FP_ATOMICS)
476  return detail::spirv::AtomicFAdd(ptr, scope, order, -operand);
477 #else
478  auto load_order = detail::getLoadOrder(order);
479  T expected = load(load_order, scope);
480  T desired;
481  do {
482  desired = expected - operand;
483  } while (!compare_exchange_weak(expected, desired, order, scope));
484  return expected;
485 #endif
486  }
487 
488  T operator-=(T operand) const noexcept {
489  return fetch_sub(operand) - operand;
490  }
491 
492  T fetch_min(T operand, memory_order order = default_read_modify_write_order,
493  memory_scope scope = default_scope) const noexcept {
494 // TODO: Remove the "native atomics" macro check once implemented for all
495 // backends
496 #if defined(__SYCL_DEVICE_ONLY__) && defined(SYCL_USE_NATIVE_FP_ATOMICS)
497  return detail::spirv::AtomicMin(ptr, scope, order, operand);
498 #else
499  auto load_order = detail::getLoadOrder(order);
500  T old = load(load_order, scope);
501  while (operand < old &&
502  !compare_exchange_weak(old, operand, order, scope)) {
503  }
504  return old;
505 #endif
506  }
507 
508  T fetch_max(T operand, memory_order order = default_read_modify_write_order,
509  memory_scope scope = default_scope) const noexcept {
510 // TODO: Remove the "native atomics" macro check once implemented for all
511 // backends
512 #if defined(__SYCL_DEVICE_ONLY__) && defined(SYCL_USE_NATIVE_FP_ATOMICS)
513  return detail::spirv::AtomicMax(ptr, scope, order, operand);
514 #else
515  auto load_order = detail::getLoadOrder(order);
516  T old = load(load_order, scope);
517  while (operand > old &&
518  !compare_exchange_weak(old, operand, order, scope)) {
519  }
520  return old;
521 #endif
522  }
523 
524 private:
525  using atomic_ref_base<T, DefaultOrder, DefaultScope, AddressSpace>::ptr;
526 };
527 
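Without native floating-point atomics, fetch_add above is emulated the same way: read the current value, compute the sum, and publish it with compare_exchange_weak, retrying whenever another work-item updated the location in between. A standalone sketch of that pattern on std::atomic<double> (fetch_add_emulated is an assumed name):

#include <atomic>

double fetch_add_emulated(std::atomic<double> &a, double operand) {
  double expected = a.load(std::memory_order_acquire);
  double desired;
  do {
    desired = expected + operand;
    // On failure, 'expected' is refreshed with the value another thread
    // installed and the sum is recomputed before the next attempt.
  } while (!a.compare_exchange_weak(expected, desired,
                                    std::memory_order_acq_rel));
  return expected; // value observed immediately before our update
}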
528 // Partial specialization for 64-bit integral types needed for optional kernel
529 // features
530 template <typename T, memory_order DefaultOrder, memory_scope DefaultScope,
531  access::address_space AddressSpace>
532 #ifndef __SYCL_DEVICE_ONLY__
533 class atomic_ref_impl<
534 #else
535 class [[__sycl_detail__::__uses_aspects__(aspect::atomic64)]] atomic_ref_impl<
536 #endif
537  T, /*IsAspectAtomic64AttrUsed = */ true, DefaultOrder, DefaultScope,
538  AddressSpace, typename std::enable_if_t<std::is_integral_v<T>>>
539  : public atomic_ref_impl<T, /*IsAspectAtomic64AttrUsed = */ false,
540  DefaultOrder, DefaultScope, AddressSpace> {
541 public:
542  using atomic_ref_impl<T, /*IsAspectAtomic64AttrUsed = */ false, DefaultOrder,
543  DefaultScope, AddressSpace>::atomic_ref_impl;
544  using atomic_ref_impl<T, /*IsAspectAtomic64AttrUsed = */ false, DefaultOrder,
545  DefaultScope, AddressSpace>::atomic_ref_impl::operator=;
546 };
547 
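The __uses_aspects__(aspect::atomic64) markup above is what lets the runtime reject kernels that use 64-bit atomics on devices without support. Applications can test for the aspect before submitting such a kernel; a hedged sketch using only standard SYCL 2020 API (the function and pointer names are assumptions, and 'counter' must be device-accessible memory such as USM):

#include <sycl/sycl.hpp>

void add_to_counter(sycl::queue &q, unsigned long long *counter) {
  if (!q.get_device().has(sycl::aspect::atomic64))
    return; // 64-bit atomic_ref is an optional feature on this device
  q.parallel_for(sycl::range<1>{64}, [=](sycl::id<1>) {
     sycl::atomic_ref<unsigned long long, sycl::memory_order::relaxed,
                      sycl::memory_scope::device,
                      sycl::access::address_space::global_space>
         ref(*counter);
     ref.fetch_add(1ULL);
   }).wait();
}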
548 // Partial specialization for 64-bit floating-point types needed for optional
549 // kernel features
550 template <typename T, memory_order DefaultOrder, memory_scope DefaultScope,
551  access::address_space AddressSpace>
552 #ifndef __SYCL_DEVICE_ONLY__
553 class atomic_ref_impl<
554 #else
555 class [[__sycl_detail__::__uses_aspects__(aspect::atomic64)]] atomic_ref_impl<
556 #endif
557  T, /*IsAspectAtomic64AttrUsed = */ true, DefaultOrder, DefaultScope,
558  AddressSpace, typename std::enable_if_t<std::is_floating_point_v<T>>>
559  : public atomic_ref_impl<T, /*IsAspectAtomic64AttrUsed = */ false,
560  DefaultOrder, DefaultScope, AddressSpace> {
561 public:
562  using atomic_ref_impl<T, /*IsAspectAtomic64AttrUsed = */ false, DefaultOrder,
563  DefaultScope, AddressSpace>::atomic_ref_impl;
564  using atomic_ref_impl<T, /*IsAspectAtomic64AttrUsed = */ false, DefaultOrder,
565  DefaultScope, AddressSpace>::atomic_ref_impl::operator=;
566 };
567 
568 // Partial specialization for pointer types
569 // Arithmetic is emulated because target's representation of T* is unknown
570 // TODO: Find a way to use intptr_t or uintptr_t atomics instead
571 template <typename T, bool IsAspectAtomic64AttrUsed, memory_order DefaultOrder,
572  memory_scope DefaultScope, access::address_space AddressSpace>
573 #ifndef __SYCL_DEVICE_ONLY__
574 class atomic_ref_impl<
575 #else
576 class [[__sycl_detail__::__uses_aspects__(aspect::atomic64)]] atomic_ref_impl<
577 #endif
578  T *, IsAspectAtomic64AttrUsed, DefaultOrder, DefaultScope, AddressSpace>
579  : public atomic_ref_base<uintptr_t, DefaultOrder, DefaultScope,
580  AddressSpace> {
581 
582 private:
583  using base_type =
584  atomic_ref_base<uintptr_t, DefaultOrder, DefaultScope, AddressSpace>;
585 
586 public:
587  using value_type = T *;
588  using difference_type = ptrdiff_t;
589  static constexpr size_t required_alignment = sizeof(T *);
590  static constexpr bool is_always_lock_free =
591  detail::IsValidAtomicRefType<T *>::value;
592  static constexpr memory_order default_read_order =
593  memory_order_traits<DefaultOrder>::read_order;
594  static constexpr memory_order default_write_order =
595  memory_order_traits<DefaultOrder>::write_order;
596  static constexpr memory_order default_read_modify_write_order = DefaultOrder;
597  static constexpr memory_scope default_scope = DefaultScope;
598 
599  using base_type::is_lock_free;
600 
601  explicit atomic_ref_impl(T *&ref)
602  : base_type(reinterpret_cast<uintptr_t &>(ref)) {}
603 
604  void store(T *operand, memory_order order = default_write_order,
605  memory_scope scope = default_scope) const noexcept {
606  base_type::store(reinterpret_cast<uintptr_t>(operand), order, scope);
607  }
608 
609  T *operator=(T *desired) const noexcept {
610  store(desired);
611  return desired;
612  }
613 
614  T *load(memory_order order = default_read_order,
615  memory_scope scope = default_scope) const noexcept {
616  return reinterpret_cast<T *>(base_type::load(order, scope));
617  }
618 
619  operator T *() const noexcept { return load(); }
620 
621  T *exchange(T *operand, memory_order order = default_read_modify_write_order,
622  memory_scope scope = default_scope) const noexcept {
623  return reinterpret_cast<T *>(base_type::exchange(
624  reinterpret_cast<uintptr_t>(operand), order, scope));
625  }
626 
627  T *fetch_add(difference_type operand,
628  memory_order order = default_read_modify_write_order,
629  memory_scope scope = default_scope) const noexcept {
630  // TODO: Find a way to avoid compare_exchange here
631  auto load_order = detail::getLoadOrder(order);
632  T *expected;
633  T *desired;
634  do {
635  expected = load(load_order, scope);
636  desired = expected + operand;
637  } while (!compare_exchange_weak(expected, desired, order, scope));
638  return expected;
639  }
640 
641  T *operator+=(difference_type operand) const noexcept {
642  return fetch_add(operand) + operand;
643  }
644 
645  T *operator++(int) const noexcept { return fetch_add(difference_type(1)); }
646 
647  T *operator++() const noexcept {
648  return fetch_add(difference_type(1)) + difference_type(1);
649  }
650 
651  T *fetch_sub(difference_type operand,
652  memory_order order = default_read_modify_write_order,
653  memory_scope scope = default_scope) const noexcept {
654  // TODO: Find a way to avoid compare_exchange here
655  auto load_order = detail::getLoadOrder(order);
656  T *expected = load(load_order, scope);
657  T *desired;
658  do {
659  desired = expected - operand;
660  } while (!compare_exchange_weak(expected, desired, order, scope));
661  return expected;
662  }
663 
664  T *operator-=(difference_type operand) const noexcept {
665  return fetch_sub(operand) - operand;
666  }
667 
668  T *operator--(int) const noexcept { return fetch_sub(difference_type(1)); }
669 
670  T *operator--() const noexcept {
671  return fetch_sub(difference_type(1)) - difference_type(1);
672  }
673 
674  bool
675  compare_exchange_strong(T *&expected, T *desired, memory_order success,
676  memory_order failure,
677  memory_scope scope = default_scope) const noexcept {
678  return base_type::compare_exchange_strong(
679  reinterpret_cast<uintptr_t &>(expected),
680  reinterpret_cast<uintptr_t>(desired), success, failure, scope);
681  }
682 
683  bool
684  compare_exchange_strong(T *&expected, T *desired,
685  memory_order order = default_read_modify_write_order,
686  memory_scope scope = default_scope) const noexcept {
687  return compare_exchange_strong(expected, desired, order, order, scope);
688  }
689 
690  bool
691  compare_exchange_weak(T *&expected, T *desired, memory_order success,
692  memory_order failure,
693  memory_scope scope = default_scope) const noexcept {
694  return base_type::compare_exchange_weak(
695  reinterpret_cast<uintptr_t &>(expected),
696  reinterpret_cast<uintptr_t>(desired), success, failure, scope);
697  }
698 
699  bool
700  compare_exchange_weak(T *&expected, T *desired,
701  memory_order order = default_read_modify_write_order,
702  memory_scope scope = default_scope) const noexcept {
703  return compare_exchange_weak(expected, desired, order, order, scope);
704  }
705 
706 private:
707  using base_type::ptr;
708 };
709 
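The pointer specialization stores the referenced pointer as a uintptr_t atomic but performs the +/- arithmetic on T* itself, so fetch_add and fetch_sub move by whole elements rather than bytes. A minimal bump-allocation sketch built on the public class (claim and cursor are made-up names; cursor must live in memory matching the chosen address space, e.g. USM):

#include <cstddef>
#include <sycl/sycl.hpp>

// Claim 'count' consecutive floats from a shared pool by atomically advancing
// the pool cursor; the return value is the start of the claimed range.
inline float *claim(float *&cursor, std::ptrdiff_t count) {
  sycl::atomic_ref<float *, sycl::memory_order::relaxed,
                   sycl::memory_scope::device,
                   sycl::access::address_space::global_space>
      ref(cursor);
  return ref.fetch_add(count); // advances by 'count' elements, not bytes
}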
710 } // namespace detail
711 
712 template <typename T, memory_order DefaultOrder, memory_scope DefaultScope,
713  access::address_space AddressSpace =
714  access::address_space::generic_space>
715 // if sizeof(T) == 8 bytes, then the type T is an optional kernel feature, so it
716 // was decorated with the [[__sycl_detail__::__uses_aspects__(aspect::atomic64)]]
717 // attribute in the detail::atomic_ref_impl partial specializations above
718 class atomic_ref
719  : public detail::atomic_ref_impl<T, sizeof(T) == 8, DefaultOrder,
720  DefaultScope, AddressSpace> {
721 public:
722  using detail::atomic_ref_impl<T, sizeof(T) == 8, DefaultOrder, DefaultScope,
723  AddressSpace>::atomic_ref_impl;
724  using detail::atomic_ref_impl<T, sizeof(T) == 8, DefaultOrder, DefaultScope,
725  AddressSpace>::operator=;
726 };
727 
728 } // namespace _V1
729 } // namespace sycl
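For reference, a complete usage example of the public class defined above: every work-item increments one shared counter through sycl::atomic_ref. Only standard SYCL 2020 API is used; the queue selection and range size are arbitrary.

#include <iostream>
#include <sycl/sycl.hpp>

int main() {
  sycl::queue q;
  int *counter = sycl::malloc_shared<int>(1, q);
  *counter = 0;

  q.parallel_for(sycl::range<1>{1024}, [=](sycl::id<1>) {
     // Relaxed order is sufficient for a pure count; device scope makes the
     // update visible to all work-items executing the kernel.
     sycl::atomic_ref<int, sycl::memory_order::relaxed,
                      sycl::memory_scope::device,
                      sycl::access::address_space::global_space>
         ref(*counter);
     ref += 1; // equivalent to ref.fetch_add(1)
   }).wait();

  std::cout << *counter << "\n"; // expected: 1024
  sycl::free(counter, q);
  return 0;
}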