DPC++ Runtime
Runtime libraries for oneAPI DPC++
atomic.hpp
1 //==---------------- atomic.hpp - SYCL atomics -----------------------------==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #pragma once
10 
11 #include <CL/__spirv/spirv_types.hpp> // for Scope, MemorySemanticsMask
12 #include <sycl/access/access.hpp> // for address_space, decorated
13 #include <sycl/detail/defines_elementary.hpp> // for __SYCL2020_DEPRECATED
14 #include <sycl/detail/helpers.hpp> // for getSPIRVMemorySemanticsMask
15 #include <sycl/memory_enums.hpp> // for memory_order, getStdMemoryO...
16 #include <sycl/multi_ptr.hpp> // for multi_ptr
17 
18 #include <type_traits> // for is_same
19 
20 #ifndef __SYCL_DEVICE_ONLY__
21 #include <atomic> // for atomic, memory_order
22 #endif
23 
24 #define __SYCL_STATIC_ASSERT_NOT_FLOAT(T) \
25  static_assert(!std::is_same<T, float>::value, \
26  "SYCL atomic function not available for float type")
27 
28 namespace sycl {
29 inline namespace _V1 {
30 
31 // Forward declaration
32 template <typename pointerT, access::address_space addressSpace,
33  access::decorated isDecorated>
34 class multi_ptr;
35 
36 namespace detail {
37 
38 using memory_order = sycl::memory_order;
39 
40 template <typename T> struct IsValidAtomicType {
41  static constexpr bool value =
42  (std::is_same_v<T, int> || std::is_same_v<T, unsigned int> ||
43  std::is_same_v<T, long> || std::is_same_v<T, unsigned long> ||
44  std::is_same_v<T, long long> || std::is_same_v<T, unsigned long long> ||
45  std::is_same_v<T, float>);
46 };
47 
48 template <sycl::access::address_space AS> struct IsValidAtomicAddressSpace {
49  static constexpr bool value =
50  (AS == access::address_space::global_space ||
51  AS == access::address_space::local_space ||
52  AS == access::address_space::ext_intel_global_device_space);
53 };
54 
55 // Type trait to translate a sycl::access::address_space to
56 // a SPIR-V memory scope
57 template <access::address_space AS> struct GetSpirvMemoryScope {};
58 template <> struct GetSpirvMemoryScope<access::address_space::global_space> {
59  static constexpr auto scope = __spv::Scope::Device;
60 };
61 template <>
62 struct GetSpirvMemoryScope<
63  access::address_space::ext_intel_global_device_space> {
64  static constexpr auto scope = __spv::Scope::Device;
65 };
66 template <> struct GetSpirvMemoryScope<access::address_space::local_space> {
67  static constexpr auto scope = __spv::Scope::Workgroup;
68 };
69 
70 } // namespace detail
71 } // namespace _V1
72 } // namespace sycl
73 
74 #ifndef __SYCL_DEVICE_ONLY__
75 // host implementation of SYCL atomics
76 namespace sycl {
77 inline namespace _V1 {
78 namespace detail {
79 // Translate sycl::memory_order or __spv::MemorySemanticsMask::Flag
80 // into std::memory_order
81 // Only relaxed memory semantics are supported currently
82 static inline std::memory_order getStdMemoryOrder(__spv::MemorySemanticsMask::Flag) {
83  return std::memory_order_relaxed;
84 }
85 } // namespace detail
86 } // namespace _V1
87 } // namespace sycl
88 
89 // std::atomic version of atomic SPIR-V builtins
90 
91 template <typename T>
92 void __spirv_AtomicStore(std::atomic<T> *Ptr, __spv::Scope::Flag,
93  __spv::MemorySemanticsMask::Flag MS, T V) {
94  Ptr->store(V, ::sycl::detail::getStdMemoryOrder(MS));
95 }
96 
97 template <typename T>
98 T __spirv_AtomicLoad(const std::atomic<T> *Ptr, __spv::Scope::Flag,
99  __spv::MemorySemanticsMask::Flag MS) {
100  return Ptr->load(::sycl::detail::getStdMemoryOrder(MS));
101 }
102 
103 template <typename T>
104 T __spirv_AtomicExchange(std::atomic<T> *Ptr, __spv::Scope::Flag,
105  __spv::MemorySemanticsMask::Flag MS, T V) {
106  return Ptr->exchange(V, ::sycl::detail::getStdMemoryOrder(MS));
107 }
108 
109 template <typename T>
110 extern T __spirv_AtomicIAdd(std::atomic<T> *Ptr, __spv::Scope::Flag,
111  __spv::MemorySemanticsMask::Flag MS, T V) {
112  return Ptr->fetch_add(V, ::sycl::detail::getStdMemoryOrder(MS));
113 }
114 
115 template <typename T>
116 extern T __spirv_AtomicISub(std::atomic<T> *Ptr, __spv::Scope::Flag,
117  __spv::MemorySemanticsMask::Flag MS, T V) {
118  return Ptr->fetch_sub(V, ::sycl::detail::getStdMemoryOrder(MS));
119 }
120 
121 template <typename T>
122 extern T __spirv_AtomicAnd(std::atomic<T> *Ptr, __spv::Scope::Flag,
123  __spv::MemorySemanticsMask::Flag MS, T V) {
124  return Ptr->fetch_and(V, ::sycl::detail::getStdMemoryOrder(MS));
125 }
126 
127 template <typename T>
128 extern T __spirv_AtomicOr(std::atomic<T> *Ptr, __spv::Scope::Flag,
129  __spv::MemorySemanticsMask::Flag MS, T V) {
130  return Ptr->fetch_or(V, ::sycl::detail::getStdMemoryOrder(MS));
131 }
132 
133 template <typename T>
134 extern T __spirv_AtomicXor(std::atomic<T> *Ptr, __spv::Scope::Flag,
135  __spv::MemorySemanticsMask::Flag MS, T V) {
136  return Ptr->fetch_xor(V, ::sycl::detail::getStdMemoryOrder(MS));
137 }
138 
139 template <typename T>
140 extern T __spirv_AtomicMin(std::atomic<T> *Ptr, __spv::Scope::Flag,
141  __spv::MemorySemanticsMask::Flag MS, T V) {
142  std::memory_order MemoryOrder = ::sycl::detail::getStdMemoryOrder(MS);
143  T Val = Ptr->load(MemoryOrder);
144  while (V < Val) {
145  if (Ptr->compare_exchange_strong(Val, V, MemoryOrder, MemoryOrder))
146  break;
147  Val = Ptr->load(MemoryOrder);
148  }
149  return Val;
150 }
151 
152 template <typename T>
153 extern T __spirv_AtomicMax(std::atomic<T> *Ptr, __spv::Scope::Flag,
154  __spv::MemorySemanticsMask::Flag MS, T V) {
155  std::memory_order MemoryOrder = ::sycl::detail::getStdMemoryOrder(MS);
156  T Val = Ptr->load(MemoryOrder);
157  while (V > Val) {
158  if (Ptr->compare_exchange_strong(Val, V, MemoryOrder, MemoryOrder))
159  break;
160  Val = Ptr->load(MemoryOrder);
161  }
162  return Val;
163 }
164 
165 #endif // !defined(__SYCL_DEVICE_ONLY__)
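
On host (when __SYCL_DEVICE_ONLY__ is not defined), the min/max builtins above are emulated with a compare-exchange loop over std::atomic, since std::atomic has no fetch_min/fetch_max. The standalone sketch below (not part of atomic.hpp) exercises the same pattern directly; host_atomic_fetch_min is a hypothetical name used only for illustration, and only relaxed ordering is used, matching the "only relaxed memory semantics" note above.

// Standalone sketch: the CAS-loop pattern used by the host fallbacks above.
#include <atomic>
#include <cassert>

// Hypothetical helper; mirrors __spirv_AtomicMin's host emulation.
template <typename T> T host_atomic_fetch_min(std::atomic<T> &Obj, T V) {
  constexpr auto Order = std::memory_order_relaxed; // only relaxed is used
  T Val = Obj.load(Order);
  // Retry until V is no longer smaller than the stored value, or until the
  // compare-exchange publishes V; return the previous value, like fetch_min.
  while (V < Val) {
    if (Obj.compare_exchange_strong(Val, V, Order, Order))
      break;
    Val = Obj.load(Order);
  }
  return Val;
}

int main() {
  std::atomic<int> A{42};
  int Prev = host_atomic_fetch_min(A, 7);
  assert(Prev == 42 && A.load() == 7);
  return 0;
}
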
166 
167 namespace sycl {
168 inline namespace _V1 {
169 
170 template <typename T, access::address_space addressSpace =
171  access::address_space::global_space>
172 class __SYCL2020_DEPRECATED(
173  "sycl::atomic is deprecated since SYCL 2020") atomic {
174  friend class atomic<T, access::address_space::global_space>;
175  static_assert(detail::IsValidAtomicType<T>::value,
176  "Invalid SYCL atomic type. Valid types are: int, "
177  "unsigned int, long, unsigned long, long long, unsigned "
178  "long long, float");
179  static_assert(detail::IsValidAtomicAddressSpace<addressSpace>::value,
180  "Invalid SYCL atomic address_space. Valid address spaces are: "
181  "global_space, local_space, ext_intel_global_device_space");
182  static constexpr auto SpirvScope =
183  detail::GetSpirvMemoryScope<addressSpace>::scope;
184 
185  template <typename pointerT, access::decorated IsDecorated>
186  static auto
187  GetDecoratedPtr(multi_ptr<pointerT, addressSpace, IsDecorated> ptr) {
188  if constexpr (IsDecorated == access::decorated::legacy)
189  return ptr.get();
190  else
191  return ptr.get_decorated();
192  }
193 
194 public:
195  template <typename pointerT, access::decorated IsDecorated>
196 #ifdef __SYCL_DEVICE_ONLY__
197  atomic(multi_ptr<pointerT, addressSpace, IsDecorated> ptr)
198  : Ptr(GetDecoratedPtr(ptr))
199 #else
200  atomic(multi_ptr<pointerT, addressSpace, IsDecorated> ptr)
201  : Ptr(reinterpret_cast<std::atomic<T> *>(ptr.get()))
202 #endif
203  {
204  static_assert(sizeof(T) == sizeof(pointerT),
205  "T and pointerT must be same size");
206  }
207 
208 #ifdef __ENABLE_USM_ADDR_SPACE__
209  // Create atomic in global_space with one from ext_intel_global_device_space
210  template <access::address_space _Space = addressSpace,
211  typename = typename std::enable_if_t<
212  _Space == addressSpace &&
213  addressSpace == access::address_space::global_space>>
214  atomic(const atomic<T, access::address_space::ext_intel_global_device_space>
215  &RHS) {
216  Ptr = RHS.Ptr;
217  }
218 
219  template <access::address_space _Space = addressSpace,
220  typename = typename std::enable_if_t<
221  _Space == addressSpace &&
222  addressSpace == access::address_space::global_space>>
223  atomic(
224  atomic<T, access::address_space::ext_intel_global_device_space> &&RHS) {
225  Ptr = RHS.Ptr;
226  }
227 #endif // __ENABLE_USM_ADDR_SPACE__
228 
229  void store(T Operand, memory_order Order = memory_order::relaxed) {
230  __spirv_AtomicStore(Ptr, SpirvScope,
231  detail::getSPIRVMemorySemanticsMask(Order), Operand);
232  }
233 
234 #ifdef __SYCL_DEVICE_ONLY__
235  template <typename T2 = T>
236  std::enable_if_t<!std::is_same<cl_float, T2>::value, T>
237  load(memory_order Order = memory_order::relaxed) const {
238  return __spirv_AtomicLoad(Ptr, SpirvScope,
239  detail::getSPIRVMemorySemanticsMask(Order));
240  }
241  template <typename T2 = T>
242  std::enable_if_t<std::is_same<cl_float, T2>::value, T>
243  load(memory_order Order = memory_order::relaxed) const {
244  auto *TmpPtr = reinterpret_cast<typename multi_ptr<
245  cl_int, addressSpace, access::decorated::yes>::pointer>(Ptr);
246  cl_int TmpVal = __spirv_AtomicLoad(
247  TmpPtr, SpirvScope, detail::getSPIRVMemorySemanticsMask(Order));
248  cl_float ResVal = sycl::bit_cast<cl_float>(TmpVal);
249  return ResVal;
250  }
251 #else
252  T load(memory_order Order = memory_order::relaxed) const {
253  return __spirv_AtomicLoad(Ptr, SpirvScope,
254  detail::getSPIRVMemorySemanticsMask(Order));
255  }
256 #endif
257 
258  T exchange(T Operand, memory_order Order = memory_order::relaxed) {
259  return __spirv_AtomicExchange(
260  Ptr, SpirvScope, detail::getSPIRVMemorySemanticsMask(Order), Operand);
261  }
262 
263  bool
264  compare_exchange_strong(T &Expected, T Desired,
265  memory_order SuccessOrder = memory_order::relaxed,
266  memory_order FailOrder = memory_order::relaxed) {
267  __SYCL_STATIC_ASSERT_NOT_FLOAT(T);
268 #ifdef __SYCL_DEVICE_ONLY__
269  T Value = __spirv_AtomicCompareExchange(
270  Ptr, SpirvScope, detail::getSPIRVMemorySemanticsMask(SuccessOrder),
271  detail::getSPIRVMemorySemanticsMask(FailOrder), Desired, Expected);
272 
273  if (Value == Expected)
274  return true;
275 
276  Expected = Value;
277  return false;
278 #else
279  return Ptr->compare_exchange_strong(Expected, Desired,
280  detail::getStdMemoryOrder(SuccessOrder),
281  detail::getStdMemoryOrder(FailOrder));
282 #endif
283  }
284 
285  T fetch_add(T Operand, memory_order Order = memory_order::relaxed) {
286  __SYCL_STATIC_ASSERT_NOT_FLOAT(T);
287  return __spirv_AtomicIAdd(
288  Ptr, SpirvScope, detail::getSPIRVMemorySemanticsMask(Order), Operand);
289  }
290 
291  T fetch_sub(T Operand, memory_order Order = memory_order::relaxed) {
292  __SYCL_STATIC_ASSERT_NOT_FLOAT(T);
293  return __spirv_AtomicISub(
294  Ptr, SpirvScope, detail::getSPIRVMemorySemanticsMask(Order), Operand);
295  }
296 
297  T fetch_and(T Operand, memory_order Order = memory_order::relaxed) {
298  __SYCL_STATIC_ASSERT_NOT_FLOAT(T);
299  return __spirv_AtomicAnd(
300  Ptr, SpirvScope, detail::getSPIRVMemorySemanticsMask(Order), Operand);
301  }
302 
303  T fetch_or(T Operand, memory_order Order = memory_order::relaxed) {
304  __SYCL_STATIC_ASSERT_NOT_FLOAT(T);
305  return __spirv_AtomicOr(
306  Ptr, SpirvScope, detail::getSPIRVMemorySemanticsMask(Order), Operand);
307  }
308 
309  T fetch_xor(T Operand, memory_order Order = memory_order::relaxed) {
310  __SYCL_STATIC_ASSERT_NOT_FLOAT(T);
311  return __spirv_AtomicXor(
312  Ptr, SpirvScope, detail::getSPIRVMemorySemanticsMask(Order), Operand);
313  }
314 
315  T fetch_min(T Operand, memory_order Order = memory_order::relaxed) {
316  __SYCL_STATIC_ASSERT_NOT_FLOAT(T);
317  return __spirv_AtomicMin(
318  Ptr, SpirvScope, detail::getSPIRVMemorySemanticsMask(Order), Operand);
319  }
320 
321  T fetch_max(T Operand, memory_order Order = memory_order::relaxed) {
322  __SYCL_STATIC_ASSERT_NOT_FLOAT(T);
323  return __spirv_AtomicMax(
324  Ptr, SpirvScope, detail::getSPIRVMemorySemanticsMask(Order), Operand);
325  }
326 
327 private:
328 #ifdef __SYCL_DEVICE_ONLY__
329  typename detail::DecoratedType<T, addressSpace>::type *Ptr;
330 #else
331  std::atomic<T> *Ptr;
332 #endif
333 };
334 
335 template <typename T, access::address_space addressSpace>
336 void atomic_store(atomic<T, addressSpace> Object, T Operand,
337  memory_order MemoryOrder = memory_order::relaxed) {
338  Object.store(Operand, MemoryOrder);
339 }
340 
341 template <typename T, access::address_space addressSpace>
342 T atomic_load(atomic<T, addressSpace> Object,
343  memory_order MemoryOrder = memory_order::relaxed) {
344  return Object.load(MemoryOrder);
345 }
346 
347 template <typename T, access::address_space addressSpace>
348 T atomic_exchange(atomic<T, addressSpace> Object, T Operand,
349  memory_order MemoryOrder = memory_order::relaxed) {
350  return Object.exchange(Operand, MemoryOrder);
351 }
352 
353 template <typename T, access::address_space addressSpace>
354 bool atomic_compare_exchange_strong(
355  atomic<T, addressSpace> Object, T &Expected, T Desired,
356  memory_order SuccessOrder = memory_order::relaxed,
357  memory_order FailOrder = memory_order::relaxed) {
358  return Object.compare_exchange_strong(Expected, Desired, SuccessOrder,
359  FailOrder);
360 }
361 
362 template <typename T, access::address_space addressSpace>
363 T atomic_fetch_add(atomic<T, addressSpace> Object, T Operand,
364  memory_order MemoryOrder = memory_order::relaxed) {
365  return Object.fetch_add(Operand, MemoryOrder);
366 }
367 
368 template <typename T, access::address_space addressSpace>
369 T atomic_fetch_sub(atomic<T, addressSpace> Object, T Operand,
370  memory_order MemoryOrder = memory_order::relaxed) {
371  return Object.fetch_sub(Operand, MemoryOrder);
372 }
373 
374 template <typename T, access::address_space addressSpace>
375 T atomic_fetch_and(atomic<T, addressSpace> Object, T Operand,
376  memory_order MemoryOrder = memory_order::relaxed) {
377  return Object.fetch_and(Operand, MemoryOrder);
378 }
379 
380 template <typename T, access::address_space addressSpace>
381 T atomic_fetch_or(atomic<T, addressSpace> Object, T Operand,
382  memory_order MemoryOrder = memory_order::relaxed) {
383  return Object.fetch_or(Operand, MemoryOrder);
384 }
385 
386 template <typename T, access::address_space addressSpace>
387 T atomic_fetch_xor(atomic<T, addressSpace> Object, T Operand,
388  memory_order MemoryOrder = memory_order::relaxed) {
389  return Object.fetch_xor(Operand, MemoryOrder);
390 }
391 
392 template <typename T, access::address_space addressSpace>
393 T atomic_fetch_min(atomic<T, addressSpace> Object, T Operand,
394  memory_order MemoryOrder = memory_order::relaxed) {
395  return Object.fetch_min(Operand, MemoryOrder);
396 }
397 
398 template <typename T, access::address_space addressSpace>
399 T atomic_fetch_max(atomic<T, addressSpace> Object, T Operand,
400  memory_order MemoryOrder = memory_order::relaxed) {
401  return Object.fetch_max(Operand, MemoryOrder);
402 }
403 
404 } // namespace _V1
405 } // namespace sycl
406 
407 #undef __SYCL_STATIC_ASSERT_NOT_FLOAT
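
The class and free functions in this header wrap the SPIR-V atomic builtins on device and std::atomic on host. The following standalone usage sketch (not part of atomic.hpp) shows the deprecated sycl::atomic being constructed over a global-space multi_ptr and incremented through the atomic_fetch_add free function. It assumes a DPC++ compiler and a USM-capable device; Q and Counter are illustrative names, and in SYCL 2020 code sycl::atomic_ref is the recommended replacement (compiling this will emit the deprecation warning declared above).

// Standalone usage sketch for the deprecated sycl::atomic interface.
#include <sycl/sycl.hpp>

int main() {
  sycl::queue Q;
  int *Counter = sycl::malloc_shared<int>(1, Q);
  *Counter = 0;

  Q.parallel_for(sycl::range<1>{64}, [=](sycl::id<1>) {
     // Wrap the raw USM pointer in a global-space multi_ptr, construct the
     // (deprecated) sycl::atomic over it, then increment it atomically.
     auto MPtr = sycl::address_space_cast<
         sycl::access::address_space::global_space,
         sycl::access::decorated::no>(Counter);
     sycl::atomic<int> A(MPtr);
     sycl::atomic_fetch_add(A, 1); // relaxed order by default
   }).wait();

  // Expect *Counter == 64 after the kernel completes.
  sycl::free(Counter, Q);
  return 0;
}
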