// DPC++ Runtime
// Runtime libraries for oneAPI DPC++
// device_architecture.hpp
// Go to the documentation of this file.
1 //===- device_architecture.hpp --------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #pragma once
10 
11 namespace sycl {
12 inline namespace _V1 {
13 namespace ext::oneapi::experimental {
14 
// Enumerates every device architecture that the
// sycl_ext_oneapi_device_architecture extension can name. The enumerator
// order must stay in sync with the __SYCL_TARGET_<ARCH>__ macros and the
// IsAOTForArchitectureClass table defined below in this header.
enum class architecture {
  // If new element is added to this enum:
  //
  // Update
  // - sycl_ext_oneapi_device_architecture specification doc
  // - "-fsycl-targets" description in sycl/doc/UsersManual.md
  //
  // Add
  // - __SYCL_TARGET_<ARCH>__ to the compiler driver and to all places below
  // - the unique ID of the new architecture in SYCL RT source code to support
  // querying the device architecture
  //
  x86_64,
  // NOTE(review): the listing extraction dropped the Intel GPU enumerators
  // (intel_gpu_bdw ... intel_gpu_pvc_vg, plus any aliases) that belong here —
  // restore them from the upstream header before compiling.
  // NVIDIA architectures
  // NOTE(review): nvidia_gpu_sm_50 ... nvidia_gpu_sm_90 were likewise dropped
  // by the extraction here.
  // AMD architectures
  // NOTE(review): amd_gpu_gfx700 ... amd_gpu_gfx1201 were likewise dropped by
  // the extraction here.
  // Update "detail::max_architecture" below if you add new elements here!
};
126 
127 } // namespace ext::oneapi::experimental
128 
129 namespace detail {
130 
133 
134 #ifndef __SYCL_TARGET_INTEL_X86_64__
135 #define __SYCL_TARGET_INTEL_X86_64__ 0
136 #endif
137 #ifndef __SYCL_TARGET_INTEL_GPU_BDW__
138 #define __SYCL_TARGET_INTEL_GPU_BDW__ 0
139 #endif
140 #ifndef __SYCL_TARGET_INTEL_GPU_SKL__
141 #define __SYCL_TARGET_INTEL_GPU_SKL__ 0
142 #endif
143 #ifndef __SYCL_TARGET_INTEL_GPU_KBL__
144 #define __SYCL_TARGET_INTEL_GPU_KBL__ 0
145 #endif
146 #ifndef __SYCL_TARGET_INTEL_GPU_CFL__
147 #define __SYCL_TARGET_INTEL_GPU_CFL__ 0
148 #endif
149 #ifndef __SYCL_TARGET_INTEL_GPU_APL__
150 #define __SYCL_TARGET_INTEL_GPU_APL__ 0
151 #endif
152 #ifndef __SYCL_TARGET_INTEL_GPU_GLK__
153 #define __SYCL_TARGET_INTEL_GPU_GLK__ 0
154 #endif
155 #ifndef __SYCL_TARGET_INTEL_GPU_WHL__
156 #define __SYCL_TARGET_INTEL_GPU_WHL__ 0
157 #endif
158 #ifndef __SYCL_TARGET_INTEL_GPU_AML__
159 #define __SYCL_TARGET_INTEL_GPU_AML__ 0
160 #endif
161 #ifndef __SYCL_TARGET_INTEL_GPU_CML__
162 #define __SYCL_TARGET_INTEL_GPU_CML__ 0
163 #endif
164 #ifndef __SYCL_TARGET_INTEL_GPU_ICLLP__
165 #define __SYCL_TARGET_INTEL_GPU_ICLLP__ 0
166 #endif
167 #ifndef __SYCL_TARGET_INTEL_GPU_EHL__
168 #define __SYCL_TARGET_INTEL_GPU_EHL__ 0
169 #endif
170 #ifndef __SYCL_TARGET_INTEL_GPU_TGLLP__
171 #define __SYCL_TARGET_INTEL_GPU_TGLLP__ 0
172 #endif
173 #ifndef __SYCL_TARGET_INTEL_GPU_RKL__
174 #define __SYCL_TARGET_INTEL_GPU_RKL__ 0
175 #endif
176 #ifndef __SYCL_TARGET_INTEL_GPU_ADL_S__
177 #define __SYCL_TARGET_INTEL_GPU_ADL_S__ 0
178 #endif
179 #ifndef __SYCL_TARGET_INTEL_GPU_ADL_P__
180 #define __SYCL_TARGET_INTEL_GPU_ADL_P__ 0
181 #endif
182 #ifndef __SYCL_TARGET_INTEL_GPU_ADL_N__
183 #define __SYCL_TARGET_INTEL_GPU_ADL_N__ 0
184 #endif
185 #ifndef __SYCL_TARGET_INTEL_GPU_DG1__
186 #define __SYCL_TARGET_INTEL_GPU_DG1__ 0
187 #endif
188 #ifndef __SYCL_TARGET_INTEL_GPU_ACM_G10__
189 #define __SYCL_TARGET_INTEL_GPU_ACM_G10__ 0
190 #endif
191 #ifndef __SYCL_TARGET_INTEL_GPU_ACM_G11__
192 #define __SYCL_TARGET_INTEL_GPU_ACM_G11__ 0
193 #endif
194 #ifndef __SYCL_TARGET_INTEL_GPU_ACM_G12__
195 #define __SYCL_TARGET_INTEL_GPU_ACM_G12__ 0
196 #endif
197 #ifndef __SYCL_TARGET_INTEL_GPU_PVC__
198 #define __SYCL_TARGET_INTEL_GPU_PVC__ 0
199 #endif
200 #ifndef __SYCL_TARGET_INTEL_GPU_PVC_VG__
201 #define __SYCL_TARGET_INTEL_GPU_PVC_VG__ 0
202 #endif
203 #ifndef __SYCL_TARGET_NVIDIA_GPU_SM50__
204 #define __SYCL_TARGET_NVIDIA_GPU_SM50__ 0
205 #endif
206 #ifndef __SYCL_TARGET_NVIDIA_GPU_SM52__
207 #define __SYCL_TARGET_NVIDIA_GPU_SM52__ 0
208 #endif
209 #ifndef __SYCL_TARGET_NVIDIA_GPU_SM53__
210 #define __SYCL_TARGET_NVIDIA_GPU_SM53__ 0
211 #endif
212 #ifndef __SYCL_TARGET_NVIDIA_GPU_SM60__
213 #define __SYCL_TARGET_NVIDIA_GPU_SM60__ 0
214 #endif
215 #ifndef __SYCL_TARGET_NVIDIA_GPU_SM61__
216 #define __SYCL_TARGET_NVIDIA_GPU_SM61__ 0
217 #endif
218 #ifndef __SYCL_TARGET_NVIDIA_GPU_SM62__
219 #define __SYCL_TARGET_NVIDIA_GPU_SM62__ 0
220 #endif
221 #ifndef __SYCL_TARGET_NVIDIA_GPU_SM70__
222 #define __SYCL_TARGET_NVIDIA_GPU_SM70__ 0
223 #endif
224 #ifndef __SYCL_TARGET_NVIDIA_GPU_SM72__
225 #define __SYCL_TARGET_NVIDIA_GPU_SM72__ 0
226 #endif
227 #ifndef __SYCL_TARGET_NVIDIA_GPU_SM75__
228 #define __SYCL_TARGET_NVIDIA_GPU_SM75__ 0
229 #endif
230 #ifndef __SYCL_TARGET_NVIDIA_GPU_SM80__
231 #define __SYCL_TARGET_NVIDIA_GPU_SM80__ 0
232 #endif
233 #ifndef __SYCL_TARGET_NVIDIA_GPU_SM86__
234 #define __SYCL_TARGET_NVIDIA_GPU_SM86__ 0
235 #endif
236 #ifndef __SYCL_TARGET_NVIDIA_GPU_SM87__
237 #define __SYCL_TARGET_NVIDIA_GPU_SM87__ 0
238 #endif
239 #ifndef __SYCL_TARGET_NVIDIA_GPU_SM89__
240 #define __SYCL_TARGET_NVIDIA_GPU_SM89__ 0
241 #endif
242 #ifndef __SYCL_TARGET_NVIDIA_GPU_SM90__
243 #define __SYCL_TARGET_NVIDIA_GPU_SM90__ 0
244 #endif
245 #ifndef __SYCL_TARGET_AMD_GPU_GFX700__
246 #define __SYCL_TARGET_AMD_GPU_GFX700__ 0
247 #endif
248 #ifndef __SYCL_TARGET_AMD_GPU_GFX701__
249 #define __SYCL_TARGET_AMD_GPU_GFX701__ 0
250 #endif
251 #ifndef __SYCL_TARGET_AMD_GPU_GFX702__
252 #define __SYCL_TARGET_AMD_GPU_GFX702__ 0
253 #endif
254 #ifndef __SYCL_TARGET_AMD_GPU_GFX801__
255 #define __SYCL_TARGET_AMD_GPU_GFX801__ 0
256 #endif
257 #ifndef __SYCL_TARGET_AMD_GPU_GFX802__
258 #define __SYCL_TARGET_AMD_GPU_GFX802__ 0
259 #endif
260 #ifndef __SYCL_TARGET_AMD_GPU_GFX803__
261 #define __SYCL_TARGET_AMD_GPU_GFX803__ 0
262 #endif
263 #ifndef __SYCL_TARGET_AMD_GPU_GFX805__
264 #define __SYCL_TARGET_AMD_GPU_GFX805__ 0
265 #endif
266 #ifndef __SYCL_TARGET_AMD_GPU_GFX810__
267 #define __SYCL_TARGET_AMD_GPU_GFX810__ 0
268 #endif
269 #ifndef __SYCL_TARGET_AMD_GPU_GFX900__
270 #define __SYCL_TARGET_AMD_GPU_GFX900__ 0
271 #endif
272 #ifndef __SYCL_TARGET_AMD_GPU_GFX902__
273 #define __SYCL_TARGET_AMD_GPU_GFX902__ 0
274 #endif
275 #ifndef __SYCL_TARGET_AMD_GPU_GFX904__
276 #define __SYCL_TARGET_AMD_GPU_GFX904__ 0
277 #endif
278 #ifndef __SYCL_TARGET_AMD_GPU_GFX906__
279 #define __SYCL_TARGET_AMD_GPU_GFX906__ 0
280 #endif
281 #ifndef __SYCL_TARGET_AMD_GPU_GFX908__
282 #define __SYCL_TARGET_AMD_GPU_GFX908__ 0
283 #endif
284 #ifndef __SYCL_TARGET_AMD_GPU_GFX909__
285 #define __SYCL_TARGET_AMD_GPU_GFX909__ 0
286 #endif
287 #ifndef __SYCL_TARGET_AMD_GPU_GFX90A__
288 #define __SYCL_TARGET_AMD_GPU_GFX90A__ 0
289 #endif
290 #ifndef __SYCL_TARGET_AMD_GPU_GFX90C__
291 #define __SYCL_TARGET_AMD_GPU_GFX90C__ 0
292 #endif
293 #ifndef __SYCL_TARGET_AMD_GPU_GFX940__
294 #define __SYCL_TARGET_AMD_GPU_GFX940__ 0
295 #endif
296 #ifndef __SYCL_TARGET_AMD_GPU_GFX941__
297 #define __SYCL_TARGET_AMD_GPU_GFX941__ 0
298 #endif
299 #ifndef __SYCL_TARGET_AMD_GPU_GFX942__
300 #define __SYCL_TARGET_AMD_GPU_GFX942__ 0
301 #endif
302 #ifndef __SYCL_TARGET_AMD_GPU_GFX1010__
303 #define __SYCL_TARGET_AMD_GPU_GFX1010__ 0
304 #endif
305 #ifndef __SYCL_TARGET_AMD_GPU_GFX1011__
306 #define __SYCL_TARGET_AMD_GPU_GFX1011__ 0
307 #endif
308 #ifndef __SYCL_TARGET_AMD_GPU_GFX1012__
309 #define __SYCL_TARGET_AMD_GPU_GFX1012__ 0
310 #endif
311 #ifndef __SYCL_TARGET_AMD_GPU_GFX1013__
312 #define __SYCL_TARGET_AMD_GPU_GFX1013__ 0
313 #endif
314 #ifndef __SYCL_TARGET_AMD_GPU_GFX1030__
315 #define __SYCL_TARGET_AMD_GPU_GFX1030__ 0
316 #endif
317 #ifndef __SYCL_TARGET_AMD_GPU_GFX1031__
318 #define __SYCL_TARGET_AMD_GPU_GFX1031__ 0
319 #endif
320 #ifndef __SYCL_TARGET_AMD_GPU_GFX1032__
321 #define __SYCL_TARGET_AMD_GPU_GFX1032__ 0
322 #endif
323 #ifndef __SYCL_TARGET_AMD_GPU_GFX1033__
324 #define __SYCL_TARGET_AMD_GPU_GFX1033__ 0
325 #endif
326 #ifndef __SYCL_TARGET_AMD_GPU_GFX1034__
327 #define __SYCL_TARGET_AMD_GPU_GFX1034__ 0
328 #endif
329 #ifndef __SYCL_TARGET_AMD_GPU_GFX1035__
330 #define __SYCL_TARGET_AMD_GPU_GFX1035__ 0
331 #endif
332 #ifndef __SYCL_TARGET_AMD_GPU_GFX1036__
333 #define __SYCL_TARGET_AMD_GPU_GFX1036__ 0
334 #endif
335 #ifndef __SYCL_TARGET_AMD_GPU_GFX1100__
336 #define __SYCL_TARGET_AMD_GPU_GFX1100__ 0
337 #endif
338 #ifndef __SYCL_TARGET_AMD_GPU_GFX1101__
339 #define __SYCL_TARGET_AMD_GPU_GFX1101__ 0
340 #endif
341 #ifndef __SYCL_TARGET_AMD_GPU_GFX1102__
342 #define __SYCL_TARGET_AMD_GPU_GFX1102__ 0
343 #endif
344 #ifndef __SYCL_TARGET_AMD_GPU_GFX1103__
345 #define __SYCL_TARGET_AMD_GPU_GFX1103__ 0
346 #endif
347 #ifndef __SYCL_TARGET_AMD_GPU_GFX1150__
348 #define __SYCL_TARGET_AMD_GPU_GFX1150__ 0
349 #endif
350 #ifndef __SYCL_TARGET_AMD_GPU_GFX1151__
351 #define __SYCL_TARGET_AMD_GPU_GFX1151__ 0
352 #endif
353 #ifndef __SYCL_TARGET_AMD_GPU_GFX1200__
354 #define __SYCL_TARGET_AMD_GPU_GFX1200__ 0
355 #endif
356 #ifndef __SYCL_TARGET_AMD_GPU_GFX1201__
357 #define __SYCL_TARGET_AMD_GPU_GFX1201__ 0
358 #endif
359 
360 // This is true when the translation unit is compiled in AOT mode with target
361 // names that supports the "if_architecture_is" features. If an unsupported
362 // target name is specified via "-fsycl-targets", the associated invocation of
363 // the device compiler will set this variable to false, and that will trigger
364 // an error for code that uses "if_architecture_is".
365 static constexpr bool is_allowable_aot_mode =
441 
443  // Allocate an array of size == size of
444  // ext::oneapi::experimental::architecture enum.
445  bool arr[static_cast<int>(max_architecture) + 1];
446 
448 
449  constexpr IsAOTForArchitectureClass() : arr() {
450  arr[static_cast<int>(arch::x86_64)] = __SYCL_TARGET_INTEL_X86_64__ == 1;
451  arr[static_cast<int>(arch::intel_gpu_bdw)] =
453  arr[static_cast<int>(arch::intel_gpu_skl)] =
455  arr[static_cast<int>(arch::intel_gpu_kbl)] =
457  arr[static_cast<int>(arch::intel_gpu_cfl)] =
459  arr[static_cast<int>(arch::intel_gpu_apl)] =
461  arr[static_cast<int>(arch::intel_gpu_glk)] =
463  arr[static_cast<int>(arch::intel_gpu_whl)] =
465  arr[static_cast<int>(arch::intel_gpu_aml)] =
467  arr[static_cast<int>(arch::intel_gpu_cml)] =
469  arr[static_cast<int>(arch::intel_gpu_icllp)] =
471  arr[static_cast<int>(arch::intel_gpu_ehl)] =
473  arr[static_cast<int>(arch::intel_gpu_tgllp)] =
475  arr[static_cast<int>(arch::intel_gpu_rkl)] =
477  arr[static_cast<int>(arch::intel_gpu_adl_s)] =
479  arr[static_cast<int>(arch::intel_gpu_adl_p)] =
481  arr[static_cast<int>(arch::intel_gpu_adl_n)] =
483  arr[static_cast<int>(arch::intel_gpu_dg1)] =
485  arr[static_cast<int>(arch::intel_gpu_acm_g10)] =
487  arr[static_cast<int>(arch::intel_gpu_acm_g11)] =
489  arr[static_cast<int>(arch::intel_gpu_acm_g12)] =
491  arr[static_cast<int>(arch::intel_gpu_pvc)] =
493  arr[static_cast<int>(arch::intel_gpu_pvc_vg)] =
495  arr[static_cast<int>(arch::nvidia_gpu_sm_50)] =
497  arr[static_cast<int>(arch::nvidia_gpu_sm_52)] =
499  arr[static_cast<int>(arch::nvidia_gpu_sm_53)] =
501  arr[static_cast<int>(arch::nvidia_gpu_sm_60)] =
503  arr[static_cast<int>(arch::nvidia_gpu_sm_61)] =
505  arr[static_cast<int>(arch::nvidia_gpu_sm_62)] =
507  arr[static_cast<int>(arch::nvidia_gpu_sm_70)] =
509  arr[static_cast<int>(arch::nvidia_gpu_sm_72)] =
511  arr[static_cast<int>(arch::nvidia_gpu_sm_75)] =
513  arr[static_cast<int>(arch::nvidia_gpu_sm_80)] =
515  arr[static_cast<int>(arch::nvidia_gpu_sm_86)] =
517  arr[static_cast<int>(arch::nvidia_gpu_sm_87)] =
519  arr[static_cast<int>(arch::nvidia_gpu_sm_89)] =
521  arr[static_cast<int>(arch::nvidia_gpu_sm_90)] =
523  arr[static_cast<int>(arch::amd_gpu_gfx700)] =
525  arr[static_cast<int>(arch::amd_gpu_gfx701)] =
527  arr[static_cast<int>(arch::amd_gpu_gfx702)] =
529  arr[static_cast<int>(arch::amd_gpu_gfx801)] =
531  arr[static_cast<int>(arch::amd_gpu_gfx802)] =
533  arr[static_cast<int>(arch::amd_gpu_gfx803)] =
535  arr[static_cast<int>(arch::amd_gpu_gfx805)] =
537  arr[static_cast<int>(arch::amd_gpu_gfx810)] =
539  arr[static_cast<int>(arch::amd_gpu_gfx900)] =
541  arr[static_cast<int>(arch::amd_gpu_gfx902)] =
543  arr[static_cast<int>(arch::amd_gpu_gfx904)] =
545  arr[static_cast<int>(arch::amd_gpu_gfx906)] =
547  arr[static_cast<int>(arch::amd_gpu_gfx908)] =
549  arr[static_cast<int>(arch::amd_gpu_gfx909)] =
551  arr[static_cast<int>(arch::amd_gpu_gfx90a)] =
553  arr[static_cast<int>(arch::amd_gpu_gfx90c)] =
555  arr[static_cast<int>(arch::amd_gpu_gfx940)] =
557  arr[static_cast<int>(arch::amd_gpu_gfx941)] =
559  arr[static_cast<int>(arch::amd_gpu_gfx942)] =
561  arr[static_cast<int>(arch::amd_gpu_gfx1010)] =
563  arr[static_cast<int>(arch::amd_gpu_gfx1011)] =
565  arr[static_cast<int>(arch::amd_gpu_gfx1012)] =
567  arr[static_cast<int>(arch::amd_gpu_gfx1030)] =
569  arr[static_cast<int>(arch::amd_gpu_gfx1031)] =
571  arr[static_cast<int>(arch::amd_gpu_gfx1032)] =
573  arr[static_cast<int>(arch::amd_gpu_gfx1033)] =
575  arr[static_cast<int>(arch::amd_gpu_gfx1034)] =
577  arr[static_cast<int>(arch::amd_gpu_gfx1035)] =
579  arr[static_cast<int>(arch::amd_gpu_gfx1036)] =
581  arr[static_cast<int>(arch::amd_gpu_gfx1100)] =
583  arr[static_cast<int>(arch::amd_gpu_gfx1101)] =
585  arr[static_cast<int>(arch::amd_gpu_gfx1102)] =
587  arr[static_cast<int>(arch::amd_gpu_gfx1103)] =
589  arr[static_cast<int>(arch::amd_gpu_gfx1150)] =
591  arr[static_cast<int>(arch::amd_gpu_gfx1151)] =
593  arr[static_cast<int>(arch::amd_gpu_gfx1200)] =
595  arr[static_cast<int>(arch::amd_gpu_gfx1201)] =
597  }
598 };
599 
600 // One entry for each enumerator in "architecture" telling whether the AOT
601 // target matches that architecture.
603 
604 // Reads the value of "is_allowable_aot_mode" via a template to defer triggering
605 // static_assert() until template instantiation time.
606 template <ext::oneapi::experimental::architecture... Archs>
607 constexpr static bool allowable_aot_mode() {
608  return is_allowable_aot_mode;
609 }
610 
611 // Tells if the current device has one of the architectures in the parameter
612 // pack.
613 template <ext::oneapi::experimental::architecture... Archs>
614 constexpr static bool device_architecture_is() {
615  return (is_aot_for_architecture.arr[static_cast<int>(Archs)] || ...);
616 }
617 
618 // Helper object used to implement "else_if_architecture_is" and "otherwise".
619 // The "MakeCall" template parameter tells whether a previous clause in the
620 // "if-elseif-elseif ..." chain was true. When "MakeCall" is false, some
621 // previous clause was true, so none of the subsequent
622 // "else_if_architecture_is" or "otherwise" member functions should call the
623 // user's function.
624 template <bool MakeCall> class if_architecture_helper {
625 public:
626  template <ext::oneapi::experimental::architecture... Archs, typename T>
627  constexpr auto else_if_architecture_is(T fnTrue) {
628  if constexpr (MakeCall && device_architecture_is<Archs...>()) {
629  fnTrue();
631  } else {
632  (void)fnTrue;
634  }
635  }
636 
637  template <typename T> constexpr void otherwise(T fn) {
638  if constexpr (MakeCall) {
639  fn();
640  }
641  }
642 };
643 } // namespace detail
644 
645 namespace ext::oneapi::experimental {
646 
647 template <architecture... Archs, typename T>
648 constexpr static auto if_architecture_is(T fnTrue) {
649  static_assert(sycl::detail::allowable_aot_mode<Archs...>(),
650  "The if_architecture_is function may only be used when AOT "
651  "compiling with '-fsycl-targets=spir64_x86_64' or "
652  "'-fsycl-targets=*_gpu_*'");
653  if constexpr (sycl::detail::device_architecture_is<Archs...>()) {
654  fnTrue();
655  return sycl::detail::if_architecture_helper<false>{};
656  } else {
657  (void)fnTrue;
658  return sycl::detail::if_architecture_helper<true>{};
659  }
660 }
661 
662 } // namespace ext::oneapi::experimental
663 } // namespace _V1
664 } // namespace sycl
/* NOTE(review): everything below is Doxygen cross-reference residue from the
   documentation listing (valueless macro echoes and bare declaration
   signatures), not part of the header source. Preserved in comment form so it
   cannot redefine the macros above or break compilation:

#define __SYCL_TARGET_INTEL_X86_64__
#define __SYCL_TARGET_INTEL_GPU_PVC_VG__
#define __SYCL_TARGET_AMD_GPU_GFX1102__
#define __SYCL_TARGET_AMD_GPU_GFX803__
#define __SYCL_TARGET_INTEL_GPU_GLK__
#define __SYCL_TARGET_AMD_GPU_GFX810__
#define __SYCL_TARGET_NVIDIA_GPU_SM61__
#define __SYCL_TARGET_INTEL_GPU_WHL__
#define __SYCL_TARGET_AMD_GPU_GFX802__
#define __SYCL_TARGET_AMD_GPU_GFX1100__
#define __SYCL_TARGET_NVIDIA_GPU_SM53__
#define __SYCL_TARGET_AMD_GPU_GFX90C__
#define __SYCL_TARGET_INTEL_GPU_CFL__
#define __SYCL_TARGET_AMD_GPU_GFX908__
#define __SYCL_TARGET_AMD_GPU_GFX90A__
#define __SYCL_TARGET_AMD_GPU_GFX1010__
#define __SYCL_TARGET_INTEL_GPU_DG1__
#define __SYCL_TARGET_AMD_GPU_GFX1031__
#define __SYCL_TARGET_AMD_GPU_GFX902__
#define __SYCL_TARGET_AMD_GPU_GFX1103__
#define __SYCL_TARGET_NVIDIA_GPU_SM89__
#define __SYCL_TARGET_INTEL_GPU_ACM_G11__
#define __SYCL_TARGET_INTEL_GPU_ACM_G10__
#define __SYCL_TARGET_INTEL_GPU_PVC__
#define __SYCL_TARGET_AMD_GPU_GFX1033__
#define __SYCL_TARGET_AMD_GPU_GFX805__
#define __SYCL_TARGET_AMD_GPU_GFX1201__
#define __SYCL_TARGET_AMD_GPU_GFX1035__
#define __SYCL_TARGET_AMD_GPU_GFX942__
#define __SYCL_TARGET_AMD_GPU_GFX1200__
#define __SYCL_TARGET_AMD_GPU_GFX1012__
#define __SYCL_TARGET_INTEL_GPU_KBL__
#define __SYCL_TARGET_AMD_GPU_GFX1151__
#define __SYCL_TARGET_AMD_GPU_GFX1101__
#define __SYCL_TARGET_INTEL_GPU_SKL__
#define __SYCL_TARGET_NVIDIA_GPU_SM50__
#define __SYCL_TARGET_AMD_GPU_GFX1011__
#define __SYCL_TARGET_AMD_GPU_GFX1030__
#define __SYCL_TARGET_AMD_GPU_GFX900__
#define __SYCL_TARGET_INTEL_GPU_ADL_S__
#define __SYCL_TARGET_INTEL_GPU_ADL_N__
#define __SYCL_TARGET_INTEL_GPU_EHL__
#define __SYCL_TARGET_AMD_GPU_GFX904__
#define __SYCL_TARGET_NVIDIA_GPU_SM52__
#define __SYCL_TARGET_AMD_GPU_GFX941__
#define __SYCL_TARGET_AMD_GPU_GFX1036__
#define __SYCL_TARGET_INTEL_GPU_ADL_P__
#define __SYCL_TARGET_AMD_GPU_GFX801__
#define __SYCL_TARGET_NVIDIA_GPU_SM80__
#define __SYCL_TARGET_INTEL_GPU_TGLLP__
#define __SYCL_TARGET_INTEL_GPU_RKL__
#define __SYCL_TARGET_NVIDIA_GPU_SM90__
#define __SYCL_TARGET_INTEL_GPU_AML__
#define __SYCL_TARGET_INTEL_GPU_ICLLP__
#define __SYCL_TARGET_NVIDIA_GPU_SM72__
#define __SYCL_TARGET_AMD_GPU_GFX906__
#define __SYCL_TARGET_INTEL_GPU_BDW__
#define __SYCL_TARGET_NVIDIA_GPU_SM86__
#define __SYCL_TARGET_AMD_GPU_GFX702__
#define __SYCL_TARGET_NVIDIA_GPU_SM60__
#define __SYCL_TARGET_INTEL_GPU_ACM_G12__
#define __SYCL_TARGET_AMD_GPU_GFX1013__
#define __SYCL_TARGET_AMD_GPU_GFX940__
#define __SYCL_TARGET_AMD_GPU_GFX1150__
#define __SYCL_TARGET_NVIDIA_GPU_SM62__
#define __SYCL_TARGET_INTEL_GPU_APL__
#define __SYCL_TARGET_AMD_GPU_GFX701__
#define __SYCL_TARGET_AMD_GPU_GFX1034__
#define __SYCL_TARGET_INTEL_GPU_CML__
#define __SYCL_TARGET_NVIDIA_GPU_SM70__
#define __SYCL_TARGET_AMD_GPU_GFX700__
#define __SYCL_TARGET_AMD_GPU_GFX1032__
#define __SYCL_TARGET_AMD_GPU_GFX909__
#define __SYCL_TARGET_NVIDIA_GPU_SM87__
#define __SYCL_TARGET_NVIDIA_GPU_SM75__
constexpr static bool allowable_aot_mode()
static constexpr IsAOTForArchitectureClass is_aot_for_architecture
constexpr static bool device_architecture_is()
static constexpr ext::oneapi::experimental::architecture max_architecture
static constexpr bool is_allowable_aot_mode
constexpr static auto if_architecture_is(T fnTrue)
Definition: access.hpp:18
bool arr[static_cast< int >(max_architecture)+1]
*/