DPC++ Runtime
Runtime libraries for oneAPI DPC++
atomic.hpp
Go to the documentation of this file.
1 //==---------------- atomic.hpp - SYCL atomics -----------------------------==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #pragma once
10 
11 #include <CL/__spirv/spirv_ops.hpp>
12 #include <sycl/access/access.hpp>
13 #include <sycl/detail/cl.h>
14 #include <sycl/detail/helpers.hpp>
15 #include <sycl/memory_enums.hpp>
16 
17 #ifndef __SYCL_DEVICE_ONLY__
18 #include <atomic>
19 #else
20 #include <cstring>
21 #endif
22 #include <type_traits>
23 
// Guard used by the arithmetic/bitwise/min/max atomic entry points below:
// SYCL 1.2.1 atomics support float only for store/load/exchange, so every
// other operation rejects T == float at compile time.
#define __SYCL_STATIC_ASSERT_NOT_FLOAT(T)                                      \
  static_assert(!std::is_same<T, float>::value,                                \
                "SYCL atomic function not available for float type")
27 
28 namespace sycl {
30 
31 // Forward declaration
32 template <typename pointerT, access::address_space addressSpace,
33  access::decorated isDecorated>
34 class multi_ptr;
35 
36 namespace detail {
37 
39 
// Trait: true exactly for the scalar types sycl::atomic<T> accepts
// (int, unsigned int, long, unsigned long, long long, unsigned long long,
// float) — see the static_assert message in the atomic class.
template <typename T> struct IsValidAtomicType {
  static constexpr bool value =
      (std::is_same<T, int>::value || std::is_same<T, unsigned int>::value ||
       std::is_same<T, long>::value || std::is_same<T, unsigned long>::value ||
       std::is_same<T, long long>::value ||
       std::is_same<T, unsigned long long>::value ||
       std::is_same<T, float>::value);
};
48 
49 template <sycl::access::address_space AS> struct IsValidAtomicAddressSpace {
50  static constexpr bool value =
51  (AS == access::address_space::global_space ||
52  AS == access::address_space::local_space ||
53  AS == access::address_space::ext_intel_global_device_space);
54 };
55 
56 // Type trait to translate a sycl::access::address_space to
57 // a SPIR-V memory scope
58 template <access::address_space AS> struct GetSpirvMemoryScope {};
59 template <> struct GetSpirvMemoryScope<access::address_space::global_space> {
60  static constexpr auto scope = __spv::Scope::Device;
61 };
62 template <>
64  access::address_space::ext_intel_global_device_space> {
65  static constexpr auto scope = __spv::Scope::Device;
66 };
67 template <> struct GetSpirvMemoryScope<access::address_space::local_space> {
68  static constexpr auto scope = __spv::Scope::Workgroup;
69 };
70 
71 } // namespace detail
72 } // __SYCL_INLINE_VER_NAMESPACE(_V1)
73 } // namespace sycl
74 
75 #ifndef __SYCL_DEVICE_ONLY__
76 // host implementation of SYCL atomics
77 namespace sycl {
79 namespace detail {
80 // Translate sycl::memory_order or __spv::MemorySemanticsMask::Flag
81 // into std::memory_order
82 // Only relaxed memory semantics are supported currently
83 static inline std::memory_order
85  return std::memory_order_relaxed;
86 }
87 } // namespace detail
88 } // __SYCL_INLINE_VER_NAMESPACE(_V1)
89 } // namespace sycl
90 
91 // std::atomic version of atomic SPIR-V builtins
92 
93 template <typename T>
94 void __spirv_AtomicStore(std::atomic<T> *Ptr, __spv::Scope::Flag,
96  Ptr->store(V, ::sycl::detail::getStdMemoryOrder(MS));
97 }
98 
99 template <typename T>
100 T __spirv_AtomicLoad(const std::atomic<T> *Ptr, __spv::Scope::Flag,
102  return Ptr->load(::sycl::detail::getStdMemoryOrder(MS));
103 }
104 
105 template <typename T>
108  return Ptr->exchange(V, ::sycl::detail::getStdMemoryOrder(MS));
109 }
110 
111 template <typename T>
112 extern T __spirv_AtomicIAdd(std::atomic<T> *Ptr, __spv::Scope::Flag,
114  return Ptr->fetch_add(V, ::sycl::detail::getStdMemoryOrder(MS));
115 }
116 
117 template <typename T>
118 extern T __spirv_AtomicISub(std::atomic<T> *Ptr, __spv::Scope::Flag,
120  return Ptr->fetch_sub(V, ::sycl::detail::getStdMemoryOrder(MS));
121 }
122 
123 template <typename T>
124 extern T __spirv_AtomicAnd(std::atomic<T> *Ptr, __spv::Scope::Flag,
126  return Ptr->fetch_and(V, ::sycl::detail::getStdMemoryOrder(MS));
127 }
128 
129 template <typename T>
130 extern T __spirv_AtomicOr(std::atomic<T> *Ptr, __spv::Scope::Flag,
132  return Ptr->fetch_or(V, ::sycl::detail::getStdMemoryOrder(MS));
133 }
134 
135 template <typename T>
136 extern T __spirv_AtomicXor(std::atomic<T> *Ptr, __spv::Scope::Flag,
138  return Ptr->fetch_xor(V, ::sycl::detail::getStdMemoryOrder(MS));
139 }
140 
141 template <typename T>
142 extern T __spirv_AtomicMin(std::atomic<T> *Ptr, __spv::Scope::Flag,
145  T Val = Ptr->load(MemoryOrder);
146  while (V < Val) {
147  if (Ptr->compare_exchange_strong(Val, V, MemoryOrder, MemoryOrder))
148  break;
149  Val = Ptr->load(MemoryOrder);
150  }
151  return Val;
152 }
153 
154 template <typename T>
155 extern T __spirv_AtomicMax(std::atomic<T> *Ptr, __spv::Scope::Flag,
158  T Val = Ptr->load(MemoryOrder);
159  while (V > Val) {
160  if (Ptr->compare_exchange_strong(Val, V, MemoryOrder, MemoryOrder))
161  break;
162  Val = Ptr->load(MemoryOrder);
163  }
164  return Val;
165 }
166 
167 #endif // !defined(__SYCL_DEVICE_ONLY__)
168 
169 namespace sycl {
171 
172 template <typename T, access::address_space addressSpace =
173  access::address_space::global_space>
175  "sycl::atomic is deprecated since SYCL 2020") atomic {
176  friend class atomic<T, access::address_space::global_space>;
177  static_assert(detail::IsValidAtomicType<T>::value,
178  "Invalid SYCL atomic type. Valid types are: int, "
179  "unsigned int, long, unsigned long, long long, unsigned "
180  "long long, float");
181  static_assert(detail::IsValidAtomicAddressSpace<addressSpace>::value,
182  "Invalid SYCL atomic address_space. Valid address spaces are: "
183  "global_space, local_space, ext_intel_global_device_space");
184  static constexpr auto SpirvScope =
185  detail::GetSpirvMemoryScope<addressSpace>::scope;
186 
187  template <typename pointerT, access::decorated IsDecorated>
188  static auto
189  GetDecoratedPtr(multi_ptr<pointerT, addressSpace, IsDecorated> ptr) {
190  if constexpr (IsDecorated == access::decorated::legacy)
191  return ptr.get();
192  else
193  return ptr.get_decorated();
194  }
195 
196 public:
197  template <typename pointerT, access::decorated IsDecorated>
198 #ifdef __SYCL_DEVICE_ONLY__
199  atomic(multi_ptr<pointerT, addressSpace, IsDecorated> ptr)
200  : Ptr(GetDecoratedPtr(ptr))
201 #else
202  atomic(multi_ptr<pointerT, addressSpace, IsDecorated> ptr)
203  : Ptr(reinterpret_cast<std::atomic<T> *>(ptr.get()))
204 #endif
205  {
206  static_assert(sizeof(T) == sizeof(pointerT),
207  "T and pointerT must be same size");
208  }
209 
210 #ifdef __ENABLE_USM_ADDR_SPACE__
211  // Create atomic in global_space with one from ext_intel_global_device_space
212  template <access::address_space _Space = addressSpace,
213  typename = typename std::enable_if_t<
214  _Space == addressSpace &&
215  addressSpace == access::address_space::global_space>>
216  atomic(const atomic<T, access::address_space::ext_intel_global_device_space>
217  &RHS) {
218  Ptr = RHS.Ptr;
219  }
220 
221  template <access::address_space _Space = addressSpace,
222  typename = typename std::enable_if_t<
223  _Space == addressSpace &&
224  addressSpace == access::address_space::global_space>>
225  atomic(
226  atomic<T, access::address_space::ext_intel_global_device_space> &&RHS) {
227  Ptr = RHS.Ptr;
228  }
229 #endif // __ENABLE_USM_ADDR_SPACE__
230 
231  void store(T Operand, memory_order Order = memory_order::relaxed) {
232  __spirv_AtomicStore(Ptr, SpirvScope,
233  detail::getSPIRVMemorySemanticsMask(Order), Operand);
234  }
235 
236 #ifdef __SYCL_DEVICE_ONLY__
237  template <typename T2 = T>
238  std::enable_if_t<!std::is_same<cl_float, T2>::value, T>
239  load(memory_order Order = memory_order::relaxed) const {
240  return __spirv_AtomicLoad(Ptr, SpirvScope,
242  }
243  template <typename T2 = T>
244  std::enable_if_t<std::is_same<cl_float, T2>::value, T>
245  load(memory_order Order = memory_order::relaxed) const {
246  auto *TmpPtr = reinterpret_cast<typename multi_ptr<
247  cl_int, addressSpace, access::decorated::yes>::pointer>(Ptr);
248  cl_int TmpVal = __spirv_AtomicLoad(
249  TmpPtr, SpirvScope, detail::getSPIRVMemorySemanticsMask(Order));
250  cl_float ResVal = bit_cast<cl_float>(TmpVal);
251  return ResVal;
252  }
253 #else
254  T load(memory_order Order = memory_order::relaxed) const {
255  return __spirv_AtomicLoad(Ptr, SpirvScope,
257  }
258 #endif
259 
260  T exchange(T Operand, memory_order Order = memory_order::relaxed) {
261  return __spirv_AtomicExchange(
262  Ptr, SpirvScope, detail::getSPIRVMemorySemanticsMask(Order), Operand);
263  }
264 
265  bool
266  compare_exchange_strong(T &Expected, T Desired,
267  memory_order SuccessOrder = memory_order::relaxed,
268  memory_order FailOrder = memory_order::relaxed) {
270 #ifdef __SYCL_DEVICE_ONLY__
271  T Value = __spirv_AtomicCompareExchange(
272  Ptr, SpirvScope, detail::getSPIRVMemorySemanticsMask(SuccessOrder),
273  detail::getSPIRVMemorySemanticsMask(FailOrder), Desired, Expected);
274 
275  if (Value == Expected)
276  return true;
277 
278  Expected = Value;
279  return false;
280 #else
281  return Ptr->compare_exchange_strong(Expected, Desired,
282  detail::getStdMemoryOrder(SuccessOrder),
283  detail::getStdMemoryOrder(FailOrder));
284 #endif
285  }
286 
287  T fetch_add(T Operand, memory_order Order = memory_order::relaxed) {
289  return __spirv_AtomicIAdd(
290  Ptr, SpirvScope, detail::getSPIRVMemorySemanticsMask(Order), Operand);
291  }
292 
293  T fetch_sub(T Operand, memory_order Order = memory_order::relaxed) {
295  return __spirv_AtomicISub(
296  Ptr, SpirvScope, detail::getSPIRVMemorySemanticsMask(Order), Operand);
297  }
298 
299  T fetch_and(T Operand, memory_order Order = memory_order::relaxed) {
301  return __spirv_AtomicAnd(
302  Ptr, SpirvScope, detail::getSPIRVMemorySemanticsMask(Order), Operand);
303  }
304 
305  T fetch_or(T Operand, memory_order Order = memory_order::relaxed) {
307  return __spirv_AtomicOr(
308  Ptr, SpirvScope, detail::getSPIRVMemorySemanticsMask(Order), Operand);
309  }
310 
311  T fetch_xor(T Operand, memory_order Order = memory_order::relaxed) {
313  return __spirv_AtomicXor(
314  Ptr, SpirvScope, detail::getSPIRVMemorySemanticsMask(Order), Operand);
315  }
316 
317  T fetch_min(T Operand, memory_order Order = memory_order::relaxed) {
319  return __spirv_AtomicMin(
320  Ptr, SpirvScope, detail::getSPIRVMemorySemanticsMask(Order), Operand);
321  }
322 
323  T fetch_max(T Operand, memory_order Order = memory_order::relaxed) {
325  return __spirv_AtomicMax(
326  Ptr, SpirvScope, detail::getSPIRVMemorySemanticsMask(Order), Operand);
327  }
328 
329 private:
330 #ifdef __SYCL_DEVICE_ONLY__
331  typename detail::DecoratedType<T, addressSpace>::type *Ptr;
332 #else
333  std::atomic<T> *Ptr;
334 #endif
335 };
336 
337 template <typename T, access::address_space addressSpace>
338 void atomic_store(atomic<T, addressSpace> Object, T Operand,
339  memory_order MemoryOrder = memory_order::relaxed) {
340  Object.store(Operand, MemoryOrder);
341 }
342 
343 template <typename T, access::address_space addressSpace>
344 T atomic_load(atomic<T, addressSpace> Object,
345  memory_order MemoryOrder = memory_order::relaxed) {
346  return Object.load(MemoryOrder);
347 }
348 
349 template <typename T, access::address_space addressSpace>
350 T atomic_exchange(atomic<T, addressSpace> Object, T Operand,
351  memory_order MemoryOrder = memory_order::relaxed) {
352  return Object.exchange(Operand, MemoryOrder);
353 }
354 
355 template <typename T, access::address_space addressSpace>
357  atomic<T, addressSpace> Object, T &Expected, T Desired,
358  memory_order SuccessOrder = memory_order::relaxed,
359  memory_order FailOrder = memory_order::relaxed) {
360  return Object.compare_exchange_strong(Expected, Desired, SuccessOrder,
361  FailOrder);
362 }
363 
364 template <typename T, access::address_space addressSpace>
365 T atomic_fetch_add(atomic<T, addressSpace> Object, T Operand,
366  memory_order MemoryOrder = memory_order::relaxed) {
367  return Object.fetch_add(Operand, MemoryOrder);
368 }
369 
370 template <typename T, access::address_space addressSpace>
371 T atomic_fetch_sub(atomic<T, addressSpace> Object, T Operand,
372  memory_order MemoryOrder = memory_order::relaxed) {
373  return Object.fetch_sub(Operand, MemoryOrder);
374 }
375 
376 template <typename T, access::address_space addressSpace>
377 T atomic_fetch_and(atomic<T, addressSpace> Object, T Operand,
378  memory_order MemoryOrder = memory_order::relaxed) {
379  return Object.fetch_and(Operand, MemoryOrder);
380 }
381 
382 template <typename T, access::address_space addressSpace>
383 T atomic_fetch_or(atomic<T, addressSpace> Object, T Operand,
384  memory_order MemoryOrder = memory_order::relaxed) {
385  return Object.fetch_or(Operand, MemoryOrder);
386 }
387 
388 template <typename T, access::address_space addressSpace>
389 T atomic_fetch_xor(atomic<T, addressSpace> Object, T Operand,
390  memory_order MemoryOrder = memory_order::relaxed) {
391  return Object.fetch_xor(Operand, MemoryOrder);
392 }
393 
394 template <typename T, access::address_space addressSpace>
395 T atomic_fetch_min(atomic<T, addressSpace> Object, T Operand,
396  memory_order MemoryOrder = memory_order::relaxed) {
397  return Object.fetch_min(Operand, MemoryOrder);
398 }
399 
400 template <typename T, access::address_space addressSpace>
401 T atomic_fetch_max(atomic<T, addressSpace> Object, T Operand,
402  memory_order MemoryOrder = memory_order::relaxed) {
403  return Object.fetch_max(Operand, MemoryOrder);
404 }
405 
} // __SYCL_INLINE_VER_NAMESPACE(_V1)
} // namespace sycl

#undef __SYCL_STATIC_ASSERT_NOT_FLOAT
T __spirv_AtomicOr(std::atomic< T > *Ptr, __spv::Scope::Flag, __spv::MemorySemanticsMask::Flag MS, T V)
Definition: atomic.hpp:130
void __spirv_AtomicStore(std::atomic< T > *Ptr, __spv::Scope::Flag, __spv::MemorySemanticsMask::Flag MS, T V)
Definition: atomic.hpp:94
T __spirv_AtomicExchange(std::atomic< T > *Ptr, __spv::Scope::Flag, __spv::MemorySemanticsMask::Flag MS, T V)
Definition: atomic.hpp:106
#define __SYCL_STATIC_ASSERT_NOT_FLOAT(T)
Definition: atomic.hpp:24
T __spirv_AtomicLoad(const std::atomic< T > *Ptr, __spv::Scope::Flag, __spv::MemorySemanticsMask::Flag MS)
Definition: atomic.hpp:100
T __spirv_AtomicMax(std::atomic< T > *Ptr, __spv::Scope::Flag, __spv::MemorySemanticsMask::Flag MS, T V)
Definition: atomic.hpp:155
T __spirv_AtomicIAdd(std::atomic< T > *Ptr, __spv::Scope::Flag, __spv::MemorySemanticsMask::Flag MS, T V)
Definition: atomic.hpp:112
T __spirv_AtomicAnd(std::atomic< T > *Ptr, __spv::Scope::Flag, __spv::MemorySemanticsMask::Flag MS, T V)
Definition: atomic.hpp:124
T __spirv_AtomicXor(std::atomic< T > *Ptr, __spv::Scope::Flag, __spv::MemorySemanticsMask::Flag MS, T V)
Definition: atomic.hpp:136
T __spirv_AtomicISub(std::atomic< T > *Ptr, __spv::Scope::Flag, __spv::MemorySemanticsMask::Flag MS, T V)
Definition: atomic.hpp:118
T __spirv_AtomicMin(std::atomic< T > *Ptr, __spv::Scope::Flag, __spv::MemorySemanticsMask::Flag MS, T V)
Definition: atomic.hpp:142
#define __SYCL_INLINE_VER_NAMESPACE(X)
#define __SYCL2020_DEPRECATED(message)
constexpr tuple_element< I, tuple< Types... > >::type & get(sycl::detail::tuple< Types... > &Arg) noexcept
Definition: tuple.hpp:199
sycl::memory_order memory_order
Definition: atomic.hpp:38
constexpr __spv::MemorySemanticsMask::Flag getSPIRVMemorySemanticsMask(memory_order)
Definition: helpers.hpp:197
static std::memory_order getStdMemoryOrder(__spv::MemorySemanticsMask::Flag)
Definition: atomic.hpp:84
T atomic_fetch_xor(atomic< T, addressSpace > Object, T Operand, memory_order MemoryOrder=memory_order::relaxed)
Definition: atomic.hpp:389
T atomic_load(atomic< T, addressSpace > Object, memory_order MemoryOrder=memory_order::relaxed)
Definition: atomic.hpp:344
T atomic_exchange(atomic< T, addressSpace > Object, T Operand, memory_order MemoryOrder=memory_order::relaxed)
Definition: atomic.hpp:350
T atomic_fetch_and(atomic< T, addressSpace > Object, T Operand, memory_order MemoryOrder=memory_order::relaxed)
Definition: atomic.hpp:377
bool atomic_compare_exchange_strong(atomic< T, addressSpace > Object, T &Expected, T Desired, memory_order SuccessOrder=memory_order::relaxed, memory_order FailOrder=memory_order::relaxed)
Definition: atomic.hpp:356
T atomic_fetch_min(atomic< T, addressSpace > Object, T Operand, memory_order MemoryOrder=memory_order::relaxed)
Definition: atomic.hpp:395
std::int32_t cl_int
Definition: aliases.hpp:83
T atomic_fetch_add(atomic< T, addressSpace > Object, T Operand, memory_order MemoryOrder=memory_order::relaxed)
Definition: atomic.hpp:365
float cl_float
Definition: aliases.hpp:88
T atomic_fetch_sub(atomic< T, addressSpace > Object, T Operand, memory_order MemoryOrder=memory_order::relaxed)
Definition: atomic.hpp:371
T atomic_fetch_max(atomic< T, addressSpace > Object, T Operand, memory_order MemoryOrder=memory_order::relaxed)
Definition: atomic.hpp:401
void atomic_store(atomic< T, addressSpace > Object, T Operand, memory_order MemoryOrder=memory_order::relaxed)
Definition: atomic.hpp:338
T atomic_fetch_or(atomic< T, addressSpace > Object, T Operand, memory_order MemoryOrder=memory_order::relaxed)
Definition: atomic.hpp:383
---— Error handling, matching OpenCL plugin semantics.
Definition: access.hpp:14