DPC++ Runtime
Runtime libraries for oneAPI DPC++
atomic_ref.hpp
Go to the documentation of this file.
1 //==----- atomic_ref.hpp - SYCL_ONEAPI_extended_atomics atomic_ref ---------==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #pragma once
10 
12 #include <CL/sycl/atomic.hpp>
14 #ifdef __SYCL_DEVICE_ONLY__
15 #include <CL/sycl/detail/spirv.hpp>
16 #include <CL/sycl/multi_ptr.hpp>
17 #endif
18 
19 #ifndef __SYCL_DEVICE_ONLY__
20 #include <atomic>
21 #endif
22 #include <type_traits>
23 
25 namespace sycl {
26 namespace ext {
27 namespace oneapi {
28 namespace detail {
29 
30 // Import from detail:: into ext::oneapi::detail:: to improve readability later
31 using namespace ::cl::sycl::detail;
32 
35 
// Trait selecting the object types atomic_ref may wrap: the listed integral
// types, float, double, and any pointer type.
template <typename T> struct IsValidAtomicRefType {
private:
  // Integral types with a supported atomic representation.
  static constexpr bool IsSupportedIntegral =
      std::is_same<T, int>::value || std::is_same<T, unsigned int>::value ||
      std::is_same<T, long>::value || std::is_same<T, unsigned long>::value ||
      std::is_same<T, long long>::value ||
      std::is_same<T, unsigned long long>::value;

  // Floating-point types with a supported atomic representation.
  static constexpr bool IsSupportedFloating =
      std::is_same<T, float>::value || std::is_same<T, double>::value;

public:
  static constexpr bool value =
      IsSupportedIntegral || IsSupportedFloating || std::is_pointer<T>::value;
};
45 
46 template <cl::sycl::access::address_space AS>
48  bool_constant<AS == access::address_space::global_space ||
49  AS == access::address_space::local_space ||
50  AS == access::address_space::global_device_space>;
51 
// DefaultOrder parameter is limited to read-modify-write orders
// (acquire-only / release-only orders cannot serve as a default because the
// default must also apply to read-modify-write operations).
template <memory_order Order>
using IsValidDefaultOrder = bool_constant<Order == memory_order::relaxed ||
                                          Order == memory_order::acq_rel ||
                                          Order == memory_order::seq_cst>;

// Maps a read-modify-write memory order to the orders used for plain loads
// (read_order) and plain stores (write_order); specialized per order below.
template <memory_order ReadModifyWriteOrder> struct memory_order_traits;
59 
61  static constexpr memory_order read_order = memory_order::relaxed;
62  static constexpr memory_order write_order = memory_order::relaxed;
63 };
64 
66  static constexpr memory_order read_order = memory_order::acquire;
67  static constexpr memory_order write_order = memory_order::release;
68 };
69 
71  static constexpr memory_order read_order = memory_order::seq_cst;
72  static constexpr memory_order write_order = memory_order::seq_cst;
73 };
74 
75 inline constexpr memory_order getLoadOrder(memory_order order) {
76  switch (order) {
77  case memory_order_relaxed:
78  return memory_order_relaxed;
79 
80  case memory_order_acquire:
81  case memory_order::__consume_unsupported:
82  case memory_order_acq_rel:
83  case memory_order_release:
84  return memory_order_acquire;
85 
86  case memory_order_seq_cst:
87  return memory_order_seq_cst;
88  }
89 }
90 
// Comparison functor used by compare_exchange to decide whether the value
// observed by the hardware compare-exchange matches the expected value,
// comparing by bit pattern rather than by operator== where they differ.
template <typename T, typename = void> struct bit_equal;

// For integral types ordinary equality already compares the full bit
// pattern, so operator== is sufficient.
template <typename T>
struct bit_equal<T, typename detail::enable_if_t<std::is_integral<T>::value>> {
  bool operator()(const T &lhs, const T &rhs) { return lhs == rhs; }
};
97 
98 template <> struct bit_equal<float> {
99  bool operator()(const float &lhs, const float &rhs) {
100  auto LhsInt = sycl::bit_cast<uint32_t>(lhs);
101  auto RhsInt = sycl::bit_cast<uint32_t>(rhs);
102  return LhsInt == RhsInt;
103  }
104 };
105 
106 template <> struct bit_equal<double> {
107  bool operator()(const double &lhs, const double &rhs) {
108  auto LhsInt = sycl::bit_cast<uint64_t>(lhs);
109  auto RhsInt = sycl::bit_cast<uint64_t>(rhs);
110  return LhsInt == RhsInt;
111  }
112 };
113 
114 // Functionality for any atomic of type T, reused by partial specializations
115 template <typename T, memory_order DefaultOrder, memory_scope DefaultScope,
116  access::address_space AddressSpace>
118  static_assert(
120  "Invalid atomic type. Valid types are int, unsigned int, long, "
121  "unsigned long, long long, unsigned long long, float, double "
122  "and pointer types");
124  "Invalid atomic address_space. Valid address spaces are: "
125  "global_space, local_space, global_device_space");
126  static_assert(
128  "Invalid default memory_order for atomics. Valid defaults are: "
129  "relaxed, acq_rel, seq_cst");
130 
131 public:
132  using value_type = T;
133  static constexpr size_t required_alignment = sizeof(T);
134  static constexpr bool is_always_lock_free =
136  static constexpr memory_order default_read_order =
138  static constexpr memory_order default_write_order =
140  static constexpr memory_order default_read_modify_write_order = DefaultOrder;
141  static constexpr memory_scope default_scope = DefaultScope;
142 
143  bool is_lock_free() const noexcept {
145  }
146 
#ifdef __SYCL_DEVICE_ONLY__
  // Device: wrap the referenced object in an address-space qualified
  // multi_ptr so it can be passed to the SPIR-V atomic intrinsics.
  explicit atomic_ref_base(T &ref) : ptr(multi_ptr<T, AddressSpace>(&ref)) {}
#else
  // FIXME: This reinterpret_cast is UB, but happens to work for now
  // Host: view the referenced object as a std::atomic<T> and defer to it.
  explicit atomic_ref_base(T &ref)
      : ptr(reinterpret_cast<std::atomic<T> *>(&ref)) {}
#endif
  // Our implementation of copy constructor could be trivial
  // Defined this way for consistency with standard atomic_ref
  atomic_ref_base(const atomic_ref_base &ref) noexcept { ptr = ref.ptr; };
  // Copy assignment is deleted; operator=(T) performs an atomic store
  // instead, mirroring std::atomic_ref.
  atomic_ref_base &operator=(const atomic_ref_base &) = delete;
158 
  // Atomically replaces the referenced value with operand.
  void store(T operand, memory_order order = default_write_order,
             memory_scope scope = default_scope) const noexcept {
#ifdef __SYCL_DEVICE_ONLY__
    detail::spirv::AtomicStore(ptr, scope, order, operand);
#else
    // Host std::atomic has no memory_scope notion; scope is ignored.
    (void)scope;
    ptr->store(operand, detail::getStdMemoryOrder(order));
#endif
  }

  // Assignment performs an atomic store with the default write order and
  // returns the stored value, as std::atomic_ref does.
  T operator=(T desired) const noexcept {
    store(desired);
    return desired;
  }

  // Atomically reads and returns the referenced value.
  T load(memory_order order = default_read_order,
         memory_scope scope = default_scope) const noexcept {
#ifdef __SYCL_DEVICE_ONLY__
    return detail::spirv::AtomicLoad(ptr, scope, order);
#else
    (void)scope; // no scope concept on host std::atomic
    return ptr->load(detail::getStdMemoryOrder(order));
#endif
  }

  // Implicit conversion is an atomic load with default ordering.
  operator T() const noexcept { return load(); }

  // Atomically replaces the value with operand and returns the old value.
  T exchange(T operand, memory_order order = default_read_modify_write_order,
             memory_scope scope = default_scope) const noexcept {
#ifdef __SYCL_DEVICE_ONLY__
    return detail::spirv::AtomicExchange(ptr, scope, order, operand);
#else
    (void)scope; // no scope concept on host std::atomic
    return ptr->exchange(operand, detail::getStdMemoryOrder(order));
#endif
  }
195 
  // Strong compare-and-swap: if the referenced value bit-equals expected,
  // stores desired; otherwise copies the observed value back into expected.
  // Returns true on success.
  bool
  compare_exchange_strong(T &expected, T desired, memory_order success,
                          memory_order failure,
                          memory_scope scope = default_scope) const noexcept {
#ifdef __SYCL_DEVICE_ONLY__
    T value = detail::spirv::AtomicCompareExchange(ptr, scope, success, failure,
                                                   desired, expected);
    // bit_equal compares by bit pattern so NaNs / signed zeros are judged
    // the same way the hardware compare-exchange judged them.
    bool succeeded = detail::bit_equal<T>()(value, expected);
    if (!succeeded) {
      expected = value;
    }
    return succeeded;
#else
    (void)scope;
    // NOTE(review): the success-order argument appears to have been dropped
    // by extraction here — upstream passes getStdMemoryOrder(success)
    // before the failure order. Restore before compiling.
    return ptr->compare_exchange_strong(expected, desired,
                                        detail::getStdMemoryOrder(failure));
#endif
  }

  // Convenience overload: uses the same order for success and failure.
  bool
  compare_exchange_strong(T &expected, T desired,
                          memory_order order = default_read_modify_write_order,
                          memory_scope scope = default_scope) const noexcept {
    return compare_exchange_strong(expected, desired, order, order, scope);
  }

  // Weak compare-and-swap: may fail spuriously on host; on device it reuses
  // the strong version (see the comment below).
  bool
  compare_exchange_weak(T &expected, T desired, memory_order success,
                        memory_order failure,
                        memory_scope scope = default_scope) const noexcept {
    // SPIR-V AtomicCompareExchangeWeak is deprecated and equivalent to
    // AtomicCompareExchange. For now, use AtomicCompareExchange on device and
    // compare_exchange_weak on host
#ifdef __SYCL_DEVICE_ONLY__
    return compare_exchange_strong(expected, desired, success, failure, scope);
#else
    (void)scope;
    // NOTE(review): as in compare_exchange_strong above, the success-order
    // argument looks dropped by extraction; upstream passes
    // getStdMemoryOrder(success) before the failure order.
    return ptr->compare_exchange_weak(expected, desired,
                                      detail::getStdMemoryOrder(failure));
#endif
  }

  // Convenience overload: uses the same order for success and failure.
  bool
  compare_exchange_weak(T &expected, T desired,
                        memory_order order = default_read_modify_write_order,
                        memory_scope scope = default_scope) const noexcept {
    return compare_exchange_weak(expected, desired, order, order, scope);
  }
246 
247 protected:
248 #ifdef __SYCL_DEVICE_ONLY__
250 #else
251  std::atomic<T> *ptr;
252 #endif
253 };
254 
255 // Hook allowing partial specializations to inherit atomic_ref_base
256 template <typename T, memory_order DefaultOrder, memory_scope DefaultScope,
257  access::address_space AddressSpace, typename = void>
259  : public atomic_ref_base<T, DefaultOrder, DefaultScope, AddressSpace> {
260 public:
261  using atomic_ref_base<T, DefaultOrder, DefaultScope,
262  AddressSpace>::atomic_ref_base;
263 };
264 
265 // Partial specialization for integral types
266 template <typename T, memory_order DefaultOrder, memory_scope DefaultScope,
267  access::address_space AddressSpace>
268 class atomic_ref_impl<T, DefaultOrder, DefaultScope, AddressSpace,
269  typename detail::enable_if_t<std::is_integral<T>::value>>
270  : public atomic_ref_base<T, DefaultOrder, DefaultScope, AddressSpace> {
271 
272 public:
273  using value_type = T;
275  static constexpr size_t required_alignment = sizeof(T);
276  static constexpr bool is_always_lock_free =
278  static constexpr memory_order default_read_order =
280  static constexpr memory_order default_write_order =
282  static constexpr memory_order default_read_modify_write_order = DefaultOrder;
283  static constexpr memory_scope default_scope = DefaultScope;
284 
285  using atomic_ref_base<T, DefaultOrder, DefaultScope,
286  AddressSpace>::atomic_ref_base;
288  using atomic_ref_base<T, DefaultOrder, DefaultScope,
289  AddressSpace>::compare_exchange_weak;
291 
  // Atomically adds operand; returns the value held before the addition.
  T fetch_add(T operand, memory_order order = default_read_modify_write_order,
              memory_scope scope = default_scope) const noexcept {
#ifdef __SYCL_DEVICE_ONLY__
    return detail::spirv::AtomicIAdd(ptr, scope, order, operand);
#else
    (void)scope; // no scope concept on host std::atomic
    return ptr->fetch_add(operand, detail::getStdMemoryOrder(order));
#endif
  }

  // Compound assignment returns the NEW value, per the atomic_ref spec.
  T operator+=(T operand) const noexcept {
    return fetch_add(operand) + operand;
  }

  // Post-increment: returns the old value.
  T operator++(int) const noexcept {
    // TODO: use AtomicIIncrement as an optimization
    return fetch_add(1);
  }

  // Pre-increment: returns the new value.
  T operator++() const noexcept {
    // TODO: use AtomicIIncrement as an optimization
    return fetch_add(1) + 1;
  }

  // Atomically subtracts operand; returns the value held before.
  T fetch_sub(T operand, memory_order order = default_read_modify_write_order,
              memory_scope scope = default_scope) const noexcept {
#ifdef __SYCL_DEVICE_ONLY__
    return detail::spirv::AtomicISub(ptr, scope, order, operand);
#else
    (void)scope;
    return ptr->fetch_sub(operand, detail::getStdMemoryOrder(order));
#endif
  }

  // Compound assignment returns the NEW value.
  T operator-=(T operand) const noexcept {
    return fetch_sub(operand) - operand;
  }

  // Post-decrement: returns the old value.
  T operator--(int) const noexcept {
    // TODO: use AtomicIDecrement as an optimization
    return fetch_sub(1);
  }

  // Pre-decrement: returns the new value.
  T operator--() const noexcept {
    // TODO: use AtomicIDecrement as an optimization
    return fetch_sub(1) - 1;
  }
339 
  // Atomically ANDs operand into the value; returns the old value.
  T fetch_and(T operand, memory_order order = default_read_modify_write_order,
              memory_scope scope = default_scope) const noexcept {
#ifdef __SYCL_DEVICE_ONLY__
    return detail::spirv::AtomicAnd(ptr, scope, order, operand);
#else
    (void)scope; // no scope concept on host std::atomic
    return ptr->fetch_and(operand, detail::getStdMemoryOrder(order));
#endif
  }

  // Compound assignment returns the NEW value.
  T operator&=(T operand) const noexcept {
    return fetch_and(operand) & operand;
  }

  // Atomically ORs operand into the value; returns the old value.
  T fetch_or(T operand, memory_order order = default_read_modify_write_order,
             memory_scope scope = default_scope) const noexcept {
#ifdef __SYCL_DEVICE_ONLY__
    return detail::spirv::AtomicOr(ptr, scope, order, operand);
#else
    (void)scope;
    return ptr->fetch_or(operand, detail::getStdMemoryOrder(order));
#endif
  }

  T operator|=(T operand) const noexcept { return fetch_or(operand) | operand; }

  // Atomically XORs operand into the value; returns the old value.
  T fetch_xor(T operand, memory_order order = default_read_modify_write_order,
              memory_scope scope = default_scope) const noexcept {
#ifdef __SYCL_DEVICE_ONLY__
    return detail::spirv::AtomicXor(ptr, scope, order, operand);
#else
    (void)scope;
    return ptr->fetch_xor(operand, detail::getStdMemoryOrder(order));
#endif
  }

  T operator^=(T operand) const noexcept {
    return fetch_xor(operand) ^ operand;
  }

  // Atomically stores min(current, operand); returns the old value.
  T fetch_min(T operand, memory_order order = default_read_modify_write_order,
              memory_scope scope = default_scope) const noexcept {
#ifdef __SYCL_DEVICE_ONLY__
    return detail::spirv::AtomicMin(ptr, scope, order, operand);
#else
    // Host emulation: CAS loop that only writes when operand is smaller.
    // On CAS failure compare_exchange_weak refreshes `old` with the current
    // value, so the loop terminates once the stored value <= operand.
    auto load_order = detail::getLoadOrder(order);
    T old = load(load_order, scope);
    while (operand < old &&
           !compare_exchange_weak(old, operand, order, scope)) {
    }
    return old;
#endif
  }

  // Atomically stores max(current, operand); returns the old value.
  T fetch_max(T operand, memory_order order = default_read_modify_write_order,
              memory_scope scope = default_scope) const noexcept {
#ifdef __SYCL_DEVICE_ONLY__
    return detail::spirv::AtomicMax(ptr, scope, order, operand);
#else
    // Mirror of fetch_min: write only when operand is larger.
    auto load_order = detail::getLoadOrder(order);
    T old = load(load_order, scope);
    while (operand > old &&
           !compare_exchange_weak(old, operand, order, scope)) {
    }
    return old;
#endif
  }
407 
408 private:
410 };
411 
412 // Partial specialization for floating-point types
413 template <typename T, memory_order DefaultOrder, memory_scope DefaultScope,
414  access::address_space AddressSpace>
416  T, DefaultOrder, DefaultScope, AddressSpace,
417  typename detail::enable_if_t<std::is_floating_point<T>::value>>
418  : public atomic_ref_base<T, DefaultOrder, DefaultScope, AddressSpace> {
419 
420 public:
421  using value_type = T;
423  static constexpr size_t required_alignment = sizeof(T);
424  static constexpr bool is_always_lock_free =
426  static constexpr memory_order default_read_order =
428  static constexpr memory_order default_write_order =
430  static constexpr memory_order default_read_modify_write_order = DefaultOrder;
431  static constexpr memory_scope default_scope = DefaultScope;
432 
433  using atomic_ref_base<T, DefaultOrder, DefaultScope,
434  AddressSpace>::atomic_ref_base;
436  using atomic_ref_base<T, DefaultOrder, DefaultScope,
437  AddressSpace>::compare_exchange_weak;
439 
  // Atomically adds operand; returns the previous value. Uses the native
  // SPIR-V FP atomic when available, otherwise a compare-exchange loop.
  T fetch_add(T operand, memory_order order = default_read_modify_write_order,
              memory_scope scope = default_scope) const noexcept {
// TODO: Remove the "native atomics" macro check once implemented for all
// backends
#if defined(__SYCL_DEVICE_ONLY__) && defined(SYCL_USE_NATIVE_FP_ATOMICS)
    return detail::spirv::AtomicFAdd(ptr, scope, order, operand);
#else
    auto load_order = detail::getLoadOrder(order);
    T expected;
    T desired;
    do {
      expected =
          load(load_order, scope); // performs better with load in CAS loop.
      desired = expected + operand;
    } while (!compare_exchange_weak(expected, desired, order, scope));
    return expected;
#endif
  }

  // Compound assignment returns the NEW value.
  T operator+=(T operand) const noexcept {
    return fetch_add(operand) + operand;
  }

  // Atomically subtracts operand; returns the previous value. The native
  // path expresses subtraction as adding the negated operand (presumably
  // because no FSub atomic intrinsic is used here — confirm upstream).
  T fetch_sub(T operand, memory_order order = default_read_modify_write_order,
              memory_scope scope = default_scope) const noexcept {
// TODO: Remove the "native atomics" macro check once implemented for all
// backends
#if defined(__SYCL_DEVICE_ONLY__) && defined(SYCL_USE_NATIVE_FP_ATOMICS)
    return detail::spirv::AtomicFAdd(ptr, scope, order, -operand);
#else
    // Unlike fetch_add, the initial value is loaded once; on CAS failure
    // compare_exchange_weak refreshes `expected` with the current value.
    auto load_order = detail::getLoadOrder(order);
    T expected = load(load_order, scope);
    T desired;
    do {
      desired = expected - operand;
    } while (!compare_exchange_weak(expected, desired, order, scope));
    return expected;
#endif
  }

  // Compound assignment returns the NEW value.
  T operator-=(T operand) const noexcept {
    return fetch_sub(operand) - operand;
  }
483 
  // Atomically stores min(current, operand); returns the old value.
  T fetch_min(T operand, memory_order order = default_read_modify_write_order,
              memory_scope scope = default_scope) const noexcept {
// TODO: Remove the "native atomics" macro check once implemented for all
// backends
#if defined(__SYCL_DEVICE_ONLY__) && defined(SYCL_USE_NATIVE_FP_ATOMICS)
    return detail::spirv::AtomicMin(ptr, scope, order, operand);
#else
    // CAS-loop emulation: write only when operand is smaller; on failure
    // compare_exchange_weak refreshes `old` with the current value.
    auto load_order = detail::getLoadOrder(order);
    T old = load(load_order, scope);
    while (operand < old &&
           !compare_exchange_weak(old, operand, order, scope)) {
    }
    return old;
#endif
  }

  // Atomically stores max(current, operand); returns the old value.
  T fetch_max(T operand, memory_order order = default_read_modify_write_order,
              memory_scope scope = default_scope) const noexcept {
// TODO: Remove the "native atomics" macro check once implemented for all
// backends
#if defined(__SYCL_DEVICE_ONLY__) && defined(SYCL_USE_NATIVE_FP_ATOMICS)
    return detail::spirv::AtomicMax(ptr, scope, order, operand);
#else
    // Mirror of fetch_min: write only when operand is larger.
    auto load_order = detail::getLoadOrder(order);
    T old = load(load_order, scope);
    while (operand > old &&
           !compare_exchange_weak(old, operand, order, scope)) {
    }
    return old;
#endif
  }
515 
516 private:
518 };
519 
520 // Partial specialization for pointer types
521 // Arithmetic is emulated because target's representation of T* is unknown
522 // TODO: Find a way to use intptr_t or uintptr_t atomics instead
523 template <typename T, memory_order DefaultOrder, memory_scope DefaultScope,
524  access::address_space AddressSpace>
525 class atomic_ref_impl<T *, DefaultOrder, DefaultScope, AddressSpace>
526  : public atomic_ref_base<uintptr_t, DefaultOrder, DefaultScope,
527  AddressSpace> {
528 
529 private:
530  using base_type =
532 
533 public:
534  using value_type = T *;
535  using difference_type = ptrdiff_t;
536  static constexpr size_t required_alignment = sizeof(T *);
537  static constexpr bool is_always_lock_free =
539  static constexpr memory_order default_read_order =
541  static constexpr memory_order default_write_order =
543  static constexpr memory_order default_read_modify_write_order = DefaultOrder;
544  static constexpr memory_scope default_scope = DefaultScope;
545 
546  using base_type::is_lock_free;
547 
  // Views the pointer object as a uintptr_t and defers all atomic access to
  // the integer base class; values are cast back to T* at the interface.
  atomic_ref_impl(T *&ref) : base_type(reinterpret_cast<uintptr_t &>(ref)) {}

  // Atomically replaces the stored pointer with operand.
  void store(T *operand, memory_order order = default_write_order,
             memory_scope scope = default_scope) const noexcept {
    base_type::store(reinterpret_cast<uintptr_t>(operand), order, scope);
  }

  // Assignment performs an atomic store and returns the stored pointer.
  T *operator=(T *desired) const noexcept {
    store(desired);
    return desired;
  }

  // Atomically reads and returns the stored pointer.
  T *load(memory_order order = default_read_order,
          memory_scope scope = default_scope) const noexcept {
    return reinterpret_cast<T *>(base_type::load(order, scope));
  }

  // Implicit conversion is an atomic load with default ordering.
  operator T *() const noexcept { return load(); }

  // Atomically replaces the stored pointer; returns the previous one.
  T *exchange(T *operand, memory_order order = default_read_modify_write_order,
              memory_scope scope = default_scope) const noexcept {
    return reinterpret_cast<T *>(base_type::exchange(
        reinterpret_cast<uintptr_t>(operand), order, scope));
  }
572 
574  memory_order order = default_read_modify_write_order,
575  memory_scope scope = default_scope) const noexcept {
576  // TODO: Find a way to avoid compare_exchange here
577  auto load_order = detail::getLoadOrder(order);
578  T *expected;
579  T *desired;
580  do {
581  expected = load(load_order, scope);
582  desired = expected + operand;
583  } while (!compare_exchange_weak(expected, desired, order, scope));
584  return expected;
585  }
586 
  // Compound assignment returns the NEW (advanced) pointer.
  T *operator+=(difference_type operand) const noexcept {
    return fetch_add(operand) + operand;
  }

  // Post-increment: returns the old pointer.
  T *operator++(int) const noexcept { return fetch_add(difference_type(1)); }

  // Pre-increment: returns the new pointer.
  T *operator++() const noexcept {
    return fetch_add(difference_type(1)) + difference_type(1);
  }
596 
598  memory_order order = default_read_modify_write_order,
599  memory_scope scope = default_scope) const noexcept {
600  // TODO: Find a way to avoid compare_exchange here
601  auto load_order = detail::getLoadOrder(order);
602  T *expected = load(load_order, scope);
603  T *desired;
604  do {
605  desired = expected - operand;
606  } while (!compare_exchange_weak(expected, desired, order, scope));
607  return expected;
608  }
609 
  // Compound assignment returns the NEW (retreated) pointer.
  T *operator-=(difference_type operand) const noexcept {
    return fetch_sub(operand) - operand;
  }

  // Post-decrement: returns the old pointer.
  T *operator--(int) const noexcept { return fetch_sub(difference_type(1)); }

  // Pre-decrement: returns the new pointer.
  T *operator--() const noexcept {
    return fetch_sub(difference_type(1)) - difference_type(1);
  }
619 
  // Strong compare-and-swap on the pointer bits, forwarded to the
  // uintptr_t base; on failure `expected` is updated through the
  // reinterpreted reference.
  bool
  compare_exchange_strong(T *&expected, T *desired, memory_order success,
                          memory_order failure,
                          memory_scope scope = default_scope) const noexcept {
    return base_type::compare_exchange_strong(
        reinterpret_cast<uintptr_t &>(expected),
        reinterpret_cast<uintptr_t>(desired), success, failure, scope);
  }

  // Convenience overload: uses the same order for success and failure.
  bool
  compare_exchange_strong(T *&expected, T *desired,
                          memory_order order = default_read_modify_write_order,
                          memory_scope scope = default_scope) const noexcept {
    return compare_exchange_strong(expected, desired, order, order, scope);
  }

  // Weak compare-and-swap on the pointer bits, forwarded to the base.
  bool
  compare_exchange_weak(T *&expected, T *desired, memory_order success,
                        memory_order failure,
                        memory_scope scope = default_scope) const noexcept {
    return base_type::compare_exchange_weak(
        reinterpret_cast<uintptr_t &>(expected),
        reinterpret_cast<uintptr_t>(desired), success, failure, scope);
  }

  // Convenience overload: uses the same order for success and failure.
  bool
  compare_exchange_weak(T *&expected, T *desired,
                        memory_order order = default_read_modify_write_order,
                        memory_scope scope = default_scope) const noexcept {
    return compare_exchange_weak(expected, desired, order, order, scope);
  }
651 
652 private:
653  using base_type::ptr;
654 };
655 
656 } // namespace detail
657 
658 template <typename T, memory_order DefaultOrder, memory_scope DefaultScope,
659  access::address_space AddressSpace>
661  : public detail::atomic_ref_impl<T, DefaultOrder, DefaultScope,
662  AddressSpace> {
663 public:
664  using detail::atomic_ref_impl<T, DefaultOrder, DefaultScope,
665  AddressSpace>::atomic_ref_impl;
666  using detail::atomic_ref_impl<T, DefaultOrder, DefaultScope,
667  AddressSpace>::operator=;
668 };
669 
670 } // namespace oneapi
671 } // namespace ext
672 
673 } // namespace sycl
674 } // __SYCL_INLINE_NAMESPACE(cl)
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T *, DefaultOrder, DefaultScope, AddressSpace >::store
void store(T *operand, memory_order order=default_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:550
cl::sycl::ext::oneapi::detail::atomic_ref_base::compare_exchange_strong
bool compare_exchange_strong(T &expected, T desired, memory_order success, memory_order failure, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:197
cl::sycl::ext::oneapi::detail::atomic_ref_base::operator=
T operator=(T desired) const noexcept
Definition: atomic_ref.hpp:169
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T *, DefaultOrder, DefaultScope, AddressSpace >::exchange
T * exchange(T *operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:567
cl::sycl::ext::oneapi::detail::atomic_ref_base::is_lock_free
bool is_lock_free() const noexcept
Definition: atomic_ref.hpp:143
cl::sycl::memory_order
memory_order
Definition: memory_enums.hpp:16
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_floating_point< T >::value > >::fetch_add
T fetch_add(T operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:440
T
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T *, DefaultOrder, DefaultScope, AddressSpace >::value_type
T * value_type
Definition: atomic_ref.hpp:534
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_integral< T >::value > >::operator++
T operator++(int) const noexcept
Definition: atomic_ref.hpp:306
__SYCL2020_DEPRECATED
#define __SYCL2020_DEPRECATED(message)
Definition: defines_elementary.hpp:56
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T *, DefaultOrder, DefaultScope, AddressSpace >::operator--
T * operator--() const noexcept
Definition: atomic_ref.hpp:616
cl::sycl::detail::atomic_ref_base
Definition: atomic_ref.hpp:116
cl::sycl::detail::IsValidAtomicAddressSpace
Definition: atomic.hpp:47
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T *, DefaultOrder, DefaultScope, AddressSpace >::difference_type
ptrdiff_t difference_type
Definition: atomic_ref.hpp:535
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_integral< T >::value > >::fetch_max
T fetch_max(T operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:394
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T *, DefaultOrder, DefaultScope, AddressSpace >::load
T * load(memory_order order=default_read_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:560
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_floating_point< T >::value > >::fetch_max
T fetch_max(T operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:500
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_integral< T >::value > >::fetch_sub
T fetch_sub(T operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:316
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_floating_point< T >::value > >::fetch_min
T fetch_min(T operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:484
cl::sycl::ext::oneapi::detail::atomic_ref_base::load
T load(memory_order order=default_read_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:174
cl::sycl::ext::oneapi::detail::atomic_ref_base::atomic_ref_base
atomic_ref_base(const atomic_ref_base &ref) noexcept
Definition: atomic_ref.hpp:156
cl::sycl::ext::oneapi::detail::atomic_ref_base::compare_exchange_weak
bool compare_exchange_weak(T &expected, T desired, memory_order success, memory_order failure, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:224
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T *, DefaultOrder, DefaultScope, AddressSpace >::compare_exchange_weak
bool compare_exchange_weak(T *&expected, T *desired, memory_order success, memory_order failure, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:637
cl::sycl::detail::atomic_ref_base::value_type
T value_type
Definition: atomic_ref.hpp:132
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_integral< T >::value > >::operator^=
T operator^=(T operand) const noexcept
Definition: atomic_ref.hpp:376
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_integral< T >::value > >::fetch_xor
T fetch_xor(T operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:366
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T *, DefaultOrder, DefaultScope, AddressSpace >::operator+=
T * operator+=(difference_type operand) const noexcept
Definition: atomic_ref.hpp:587
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_integral< T >::value > >::operator--
T operator--() const noexcept
Definition: atomic_ref.hpp:335
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_floating_point< T >::value > >::difference_type
value_type difference_type
Definition: atomic_ref.hpp:422
cl::sycl::ext::oneapi::detail::bit_equal< double >::operator()
bool operator()(const double &lhs, const double &rhs)
Definition: atomic_ref.hpp:107
cl::sycl::ext::oneapi::detail::getLoadOrder
constexpr memory_order getLoadOrder(memory_order order)
Definition: atomic_ref.hpp:75
sycl
Definition: invoke_simd.hpp:68
cl::sycl::ext::oneapi::detail::atomic_ref_base::ptr
std::atomic< T > * ptr
Definition: atomic_ref.hpp:251
bool_constant
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T *, DefaultOrder, DefaultScope, AddressSpace >::atomic_ref_impl
atomic_ref_impl(T *&ref)
Definition: atomic_ref.hpp:548
cl::sycl::multi_ptr
Provides constructors for address space qualified and non address space qualified pointers to allow i...
Definition: atomic.hpp:32
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_integral< T >::value > >::operator-=
T operator-=(T operand) const noexcept
Definition: atomic_ref.hpp:326
access.hpp
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_integral< T >::value > >::fetch_min
T fetch_min(T operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:380
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_integral< T >::value > >::operator+=
T operator+=(T operand) const noexcept
Definition: atomic_ref.hpp:302
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T *, DefaultOrder, DefaultScope, AddressSpace >::operator++
T * operator++() const noexcept
Definition: atomic_ref.hpp:593
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T *, DefaultOrder, DefaultScope, AddressSpace >::compare_exchange_strong
bool compare_exchange_strong(T *&expected, T *desired, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:630
cl::sycl::ext::oneapi::detail::atomic_ref_base::store
void store(T operand, memory_order order=default_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:159
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T *, DefaultOrder, DefaultScope, AddressSpace >::compare_exchange_strong
bool compare_exchange_strong(T *&expected, T *desired, memory_order success, memory_order failure, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:621
cl::sycl::memory_order::acq_rel
@ acq_rel
cl::sycl::ext::oneapi::detail::memory_scope
cl::sycl::ext::oneapi::memory_scope memory_scope
Definition: atomic_ref.hpp:34
cl::sycl::ext::oneapi::detail::memory_order
cl::sycl::ext::oneapi::memory_order memory_order
Definition: atomic_ref.hpp:33
cl::sycl::memory_scope
memory_scope
Definition: memory_enums.hpp:26
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_integral< T >::value > >::fetch_or
T fetch_or(T operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:354
cl::sycl::detail::getStdMemoryOrder
static std::memory_order getStdMemoryOrder(__spv::MemorySemanticsMask::Flag)
Definition: atomic.hpp:81
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_integral< T >::value > >::fetch_and
T fetch_and(T operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:340
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T *, DefaultOrder, DefaultScope, AddressSpace >::operator--
T * operator--(int) const noexcept
Definition: atomic_ref.hpp:614
cl::sycl::ext::oneapi::detail::bit_equal< T, typename detail::enable_if_t< std::is_integral< T >::value > >::operator()
bool operator()(const T &lhs, const T &rhs)
Definition: atomic_ref.hpp:95
cl::sycl::detail::bit_equal
Definition: atomic_ref.hpp:90
atomic_enums.hpp
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_integral< T >::value > >::operator&=
T operator&=(T operand) const noexcept
Definition: atomic_ref.hpp:350
multi_ptr.hpp
spirv.hpp
cl::sycl::ext::oneapi::detail::atomic_ref_base::value_type
T value_type
Definition: atomic_ref.hpp:132
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_integral< T >::value > >::fetch_add
T fetch_add(T operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:292
cl
We provide new interfaces for matrix muliply in this patch:
Definition: access.hpp:13
cl::sycl::ext::oneapi::detail::atomic_ref_base::compare_exchange_weak
bool compare_exchange_weak(T &expected, T desired, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:241
cl::sycl::memory_order::seq_cst
@ seq_cst
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_floating_point< T >::value > >::operator-=
T operator-=(T operand) const noexcept
Definition: atomic_ref.hpp:480
cl::sycl::ext::oneapi::detail::atomic_ref_base::compare_exchange_strong
bool compare_exchange_strong(T &expected, T desired, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:217
cl::sycl::detail::IsValidDefaultOrder
bool_constant< Order==memory_order::relaxed||Order==memory_order::acq_rel||Order==memory_order::seq_cst > IsValidDefaultOrder
Definition: atomic_ref.hpp:55
cl::sycl::detail::IsValidAtomicRefType
Definition: atomic_ref.hpp:32
cl::sycl::atomic_ref
Definition: atomic_ref.hpp:661
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_integral< T >::value > >::operator++
T operator++() const noexcept
Definition: atomic_ref.hpp:311
cl::sycl::memory_order::relaxed
@ relaxed
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T *, DefaultOrder, DefaultScope, AddressSpace >::operator=
T * operator=(T *desired) const noexcept
Definition: atomic_ref.hpp:555
cl::sycl::access::address_space
address_space
Definition: access.hpp:45
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T *, DefaultOrder, DefaultScope, AddressSpace >::fetch_sub
T * fetch_sub(difference_type operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:597
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_integral< T >::value > >::difference_type
value_type difference_type
Definition: atomic_ref.hpp:274
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T *, DefaultOrder, DefaultScope, AddressSpace >::operator-=
T * operator-=(difference_type operand) const noexcept
Definition: atomic_ref.hpp:610
atomic.hpp
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T *, DefaultOrder, DefaultScope, AddressSpace >::fetch_add
T * fetch_add(difference_type operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:573
cl::sycl::ext::oneapi::detail::bit_equal< float >::operator()
bool operator()(const float &lhs, const float &rhs)
Definition: atomic_ref.hpp:99
std
Definition: accessor.hpp:2616
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_integral< T >::value > >::operator|=
T operator|=(T operand) const noexcept
Definition: atomic_ref.hpp:364
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_floating_point< T >::value > >::operator+=
T operator+=(T operand) const noexcept
Definition: atomic_ref.hpp:459
cl::sycl::detail::memory_order_traits
Definition: atomic_ref.hpp:57
cl::sycl::errc::success
@ success
cl::sycl::detail::atomic_ref_impl
Definition: atomic_ref.hpp:258
cl::sycl::instead
std::uint8_t instead
Definition: aliases.hpp:68
cl::sycl::ext::oneapi::detail::atomic_ref_base::atomic_ref_base
atomic_ref_base(T &ref)
Definition: atomic_ref.hpp:151
cl::sycl::detail
Definition: access.hpp:100
cl::sycl::detail::enable_if_t
typename std::enable_if< B, T >::type enable_if_t
Definition: stl_type_traits.hpp:24
cl::sycl::ext::oneapi::detail::atomic_ref_base::exchange
T exchange(T operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:186
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_integral< T >::value > >::operator--
T operator--(int) const noexcept
Definition: atomic_ref.hpp:330
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T *, DefaultOrder, DefaultScope, AddressSpace >::operator++
T * operator++(int) const noexcept
Definition: atomic_ref.hpp:591
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T *, DefaultOrder, DefaultScope, AddressSpace >::compare_exchange_weak
bool compare_exchange_weak(T *&expected, T *desired, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:646
__SYCL_INLINE_NAMESPACE
#define __SYCL_INLINE_NAMESPACE(X)
Definition: defines_elementary.hpp:12
cl::sycl::ext::oneapi::detail::atomic_ref_impl< T, DefaultOrder, DefaultScope, AddressSpace, typename detail::enable_if_t< std::is_floating_point< T >::value > >::fetch_sub
T fetch_sub(T operand, memory_order order=default_read_modify_write_order, memory_scope scope=default_scope) const noexcept
Definition: atomic_ref.hpp:463