DPC++ Runtime
Runtime libraries for oneAPI DPC++
device_architecture.hpp
1 //===- device_architecture.hpp --------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #pragma once
10 
11 #include <cstdint> // for uint64_t
12 #include <optional>
13 #include <utility> // for std::integer_sequence
14 
15 namespace sycl {
16 inline namespace _V1 {
17 namespace ext::oneapi::experimental {
18 
19 enum class architecture : uint64_t {
20 #define __SYCL_ARCHITECTURE(NAME, VAL) NAME = VAL,
21 #define __SYCL_ARCHITECTURE_ALIAS(NAME, VAL) NAME = VAL,
22 #include <sycl/ext/oneapi/experimental/architectures.def>
23 #undef __SYCL_ARCHITECTURE
24 #undef __SYCL_ARCHITECTURE_ALIAS
25 };
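// Illustrative sketch, not part of the original header: architectures.def is
// an X-macro list, so each entry of the form
//   __SYCL_ARCHITECTURE(intel_gpu_pvc, <value>)
// expands (via the __SYCL_ARCHITECTURE definition above) into an enumerator
//   intel_gpu_pvc = <value>,
// while __SYCL_ARCHITECTURE_ALIAS entries introduce alternate names for an
// existing value. The concrete values are defined only in architectures.def.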
26 
27 enum class arch_category {
28  // If a new element is added to this enum:
29  //
30  // Add
31  // - "detail::min_<new_category>_architecture" variable below
32  // - "detail::max_<new_category>_architecture" variable below
33  //
34  // Update
35  // - "detail::get_category_min_architecture()" function below
36  // - "detail::get_category_max_architecture()" function below
37  // - "detail::get_device_architecture_category()" function below
38  // - sycl_ext_oneapi_device_architecture specification doc
39  //
40  intel_gpu = 0,
41  nvidia_gpu = 1,
42  amd_gpu = 2,
43  // TODO: add intel_cpu = 3,
44 };
45 
46 } // namespace ext::oneapi::experimental
47 
48 namespace detail {
49 
50 static constexpr ext::oneapi::experimental::architecture
51  min_intel_gpu_architecture =
52  ext::oneapi::experimental::architecture::intel_gpu_bdw;
53 static constexpr ext::oneapi::experimental::architecture
54  max_intel_gpu_architecture =
55  ext::oneapi::experimental::architecture::intel_gpu_lnl_m;
56 
57 static constexpr ext::oneapi::experimental::architecture
58  min_nvidia_gpu_architecture =
59  ext::oneapi::experimental::architecture::nvidia_gpu_sm_50;
60 static constexpr ext::oneapi::experimental::architecture
61  max_nvidia_gpu_architecture =
62  ext::oneapi::experimental::architecture::nvidia_gpu_sm_90a;
63 
64 static constexpr ext::oneapi::experimental::architecture
65  min_amd_gpu_architecture =
66  ext::oneapi::experimental::architecture::amd_gpu_gfx700;
67 static constexpr ext::oneapi::experimental::architecture
68  max_amd_gpu_architecture =
69  ext::oneapi::experimental::architecture::amd_gpu_gfx1201;
70 
71 #ifndef __SYCL_TARGET_INTEL_X86_64__
72 #define __SYCL_TARGET_INTEL_X86_64__ 0
73 #endif
74 #ifndef __SYCL_TARGET_INTEL_GPU_BDW__
75 #define __SYCL_TARGET_INTEL_GPU_BDW__ 0
76 #endif
77 #ifndef __SYCL_TARGET_INTEL_GPU_SKL__
78 #define __SYCL_TARGET_INTEL_GPU_SKL__ 0
79 #endif
80 #ifndef __SYCL_TARGET_INTEL_GPU_KBL__
81 #define __SYCL_TARGET_INTEL_GPU_KBL__ 0
82 #endif
83 #ifndef __SYCL_TARGET_INTEL_GPU_CFL__
84 #define __SYCL_TARGET_INTEL_GPU_CFL__ 0
85 #endif
86 #ifndef __SYCL_TARGET_INTEL_GPU_APL__
87 #define __SYCL_TARGET_INTEL_GPU_APL__ 0
88 #endif
89 #ifndef __SYCL_TARGET_INTEL_GPU_GLK__
90 #define __SYCL_TARGET_INTEL_GPU_GLK__ 0
91 #endif
92 #ifndef __SYCL_TARGET_INTEL_GPU_WHL__
93 #define __SYCL_TARGET_INTEL_GPU_WHL__ 0
94 #endif
95 #ifndef __SYCL_TARGET_INTEL_GPU_AML__
96 #define __SYCL_TARGET_INTEL_GPU_AML__ 0
97 #endif
98 #ifndef __SYCL_TARGET_INTEL_GPU_CML__
99 #define __SYCL_TARGET_INTEL_GPU_CML__ 0
100 #endif
101 #ifndef __SYCL_TARGET_INTEL_GPU_ICLLP__
102 #define __SYCL_TARGET_INTEL_GPU_ICLLP__ 0
103 #endif
104 #ifndef __SYCL_TARGET_INTEL_GPU_EHL__
105 #define __SYCL_TARGET_INTEL_GPU_EHL__ 0
106 #endif
107 #ifndef __SYCL_TARGET_INTEL_GPU_TGLLP__
108 #define __SYCL_TARGET_INTEL_GPU_TGLLP__ 0
109 #endif
110 #ifndef __SYCL_TARGET_INTEL_GPU_RKL__
111 #define __SYCL_TARGET_INTEL_GPU_RKL__ 0
112 #endif
113 #ifndef __SYCL_TARGET_INTEL_GPU_ADL_S__
114 #define __SYCL_TARGET_INTEL_GPU_ADL_S__ 0
115 #endif
116 #ifndef __SYCL_TARGET_INTEL_GPU_ADL_P__
117 #define __SYCL_TARGET_INTEL_GPU_ADL_P__ 0
118 #endif
119 #ifndef __SYCL_TARGET_INTEL_GPU_ADL_N__
120 #define __SYCL_TARGET_INTEL_GPU_ADL_N__ 0
121 #endif
122 #ifndef __SYCL_TARGET_INTEL_GPU_DG1__
123 #define __SYCL_TARGET_INTEL_GPU_DG1__ 0
124 #endif
125 #ifndef __SYCL_TARGET_INTEL_GPU_ACM_G10__
126 #define __SYCL_TARGET_INTEL_GPU_ACM_G10__ 0
127 #endif
128 #ifndef __SYCL_TARGET_INTEL_GPU_ACM_G11__
129 #define __SYCL_TARGET_INTEL_GPU_ACM_G11__ 0
130 #endif
131 #ifndef __SYCL_TARGET_INTEL_GPU_ACM_G12__
132 #define __SYCL_TARGET_INTEL_GPU_ACM_G12__ 0
133 #endif
134 #ifndef __SYCL_TARGET_INTEL_GPU_PVC__
135 #define __SYCL_TARGET_INTEL_GPU_PVC__ 0
136 #endif
137 #ifndef __SYCL_TARGET_INTEL_GPU_PVC_VG__
138 #define __SYCL_TARGET_INTEL_GPU_PVC_VG__ 0
139 #endif
140 #ifndef __SYCL_TARGET_INTEL_GPU_MTL_U__
141 #define __SYCL_TARGET_INTEL_GPU_MTL_U__ 0
142 #endif
143 #ifndef __SYCL_TARGET_INTEL_GPU_MTL_H__
144 #define __SYCL_TARGET_INTEL_GPU_MTL_H__ 0
145 #endif
146 #ifndef __SYCL_TARGET_INTEL_GPU_ARL_H__
147 #define __SYCL_TARGET_INTEL_GPU_ARL_H__ 0
148 #endif
149 #ifndef __SYCL_TARGET_INTEL_GPU_BMG_G21__
150 #define __SYCL_TARGET_INTEL_GPU_BMG_G21__ 0
151 #endif
152 #ifndef __SYCL_TARGET_INTEL_GPU_LNL_M__
153 #define __SYCL_TARGET_INTEL_GPU_LNL_M__ 0
154 #endif
155 #ifndef __SYCL_TARGET_NVIDIA_GPU_SM50__
156 #define __SYCL_TARGET_NVIDIA_GPU_SM50__ 0
157 #endif
158 #ifndef __SYCL_TARGET_NVIDIA_GPU_SM52__
159 #define __SYCL_TARGET_NVIDIA_GPU_SM52__ 0
160 #endif
161 #ifndef __SYCL_TARGET_NVIDIA_GPU_SM53__
162 #define __SYCL_TARGET_NVIDIA_GPU_SM53__ 0
163 #endif
164 #ifndef __SYCL_TARGET_NVIDIA_GPU_SM60__
165 #define __SYCL_TARGET_NVIDIA_GPU_SM60__ 0
166 #endif
167 #ifndef __SYCL_TARGET_NVIDIA_GPU_SM61__
168 #define __SYCL_TARGET_NVIDIA_GPU_SM61__ 0
169 #endif
170 #ifndef __SYCL_TARGET_NVIDIA_GPU_SM62__
171 #define __SYCL_TARGET_NVIDIA_GPU_SM62__ 0
172 #endif
173 #ifndef __SYCL_TARGET_NVIDIA_GPU_SM70__
174 #define __SYCL_TARGET_NVIDIA_GPU_SM70__ 0
175 #endif
176 #ifndef __SYCL_TARGET_NVIDIA_GPU_SM72__
177 #define __SYCL_TARGET_NVIDIA_GPU_SM72__ 0
178 #endif
179 #ifndef __SYCL_TARGET_NVIDIA_GPU_SM75__
180 #define __SYCL_TARGET_NVIDIA_GPU_SM75__ 0
181 #endif
182 #ifndef __SYCL_TARGET_NVIDIA_GPU_SM80__
183 #define __SYCL_TARGET_NVIDIA_GPU_SM80__ 0
184 #endif
185 #ifndef __SYCL_TARGET_NVIDIA_GPU_SM86__
186 #define __SYCL_TARGET_NVIDIA_GPU_SM86__ 0
187 #endif
188 #ifndef __SYCL_TARGET_NVIDIA_GPU_SM87__
189 #define __SYCL_TARGET_NVIDIA_GPU_SM87__ 0
190 #endif
191 #ifndef __SYCL_TARGET_NVIDIA_GPU_SM89__
192 #define __SYCL_TARGET_NVIDIA_GPU_SM89__ 0
193 #endif
194 #ifndef __SYCL_TARGET_NVIDIA_GPU_SM90__
195 #define __SYCL_TARGET_NVIDIA_GPU_SM90__ 0
196 #endif
197 #ifndef __SYCL_TARGET_AMD_GPU_GFX700__
198 #define __SYCL_TARGET_AMD_GPU_GFX700__ 0
199 #endif
200 #ifndef __SYCL_TARGET_AMD_GPU_GFX701__
201 #define __SYCL_TARGET_AMD_GPU_GFX701__ 0
202 #endif
203 #ifndef __SYCL_TARGET_AMD_GPU_GFX702__
204 #define __SYCL_TARGET_AMD_GPU_GFX702__ 0
205 #endif
206 #ifndef __SYCL_TARGET_AMD_GPU_GFX801__
207 #define __SYCL_TARGET_AMD_GPU_GFX801__ 0
208 #endif
209 #ifndef __SYCL_TARGET_AMD_GPU_GFX802__
210 #define __SYCL_TARGET_AMD_GPU_GFX802__ 0
211 #endif
212 #ifndef __SYCL_TARGET_AMD_GPU_GFX803__
213 #define __SYCL_TARGET_AMD_GPU_GFX803__ 0
214 #endif
215 #ifndef __SYCL_TARGET_AMD_GPU_GFX805__
216 #define __SYCL_TARGET_AMD_GPU_GFX805__ 0
217 #endif
218 #ifndef __SYCL_TARGET_AMD_GPU_GFX810__
219 #define __SYCL_TARGET_AMD_GPU_GFX810__ 0
220 #endif
221 #ifndef __SYCL_TARGET_AMD_GPU_GFX900__
222 #define __SYCL_TARGET_AMD_GPU_GFX900__ 0
223 #endif
224 #ifndef __SYCL_TARGET_AMD_GPU_GFX902__
225 #define __SYCL_TARGET_AMD_GPU_GFX902__ 0
226 #endif
227 #ifndef __SYCL_TARGET_AMD_GPU_GFX904__
228 #define __SYCL_TARGET_AMD_GPU_GFX904__ 0
229 #endif
230 #ifndef __SYCL_TARGET_AMD_GPU_GFX906__
231 #define __SYCL_TARGET_AMD_GPU_GFX906__ 0
232 #endif
233 #ifndef __SYCL_TARGET_AMD_GPU_GFX908__
234 #define __SYCL_TARGET_AMD_GPU_GFX908__ 0
235 #endif
236 #ifndef __SYCL_TARGET_AMD_GPU_GFX909__
237 #define __SYCL_TARGET_AMD_GPU_GFX909__ 0
238 #endif
239 #ifndef __SYCL_TARGET_AMD_GPU_GFX90A__
240 #define __SYCL_TARGET_AMD_GPU_GFX90A__ 0
241 #endif
242 #ifndef __SYCL_TARGET_AMD_GPU_GFX90C__
243 #define __SYCL_TARGET_AMD_GPU_GFX90C__ 0
244 #endif
245 #ifndef __SYCL_TARGET_AMD_GPU_GFX940__
246 #define __SYCL_TARGET_AMD_GPU_GFX940__ 0
247 #endif
248 #ifndef __SYCL_TARGET_AMD_GPU_GFX941__
249 #define __SYCL_TARGET_AMD_GPU_GFX941__ 0
250 #endif
251 #ifndef __SYCL_TARGET_AMD_GPU_GFX942__
252 #define __SYCL_TARGET_AMD_GPU_GFX942__ 0
253 #endif
254 #ifndef __SYCL_TARGET_AMD_GPU_GFX1010__
255 #define __SYCL_TARGET_AMD_GPU_GFX1010__ 0
256 #endif
257 #ifndef __SYCL_TARGET_AMD_GPU_GFX1011__
258 #define __SYCL_TARGET_AMD_GPU_GFX1011__ 0
259 #endif
260 #ifndef __SYCL_TARGET_AMD_GPU_GFX1012__
261 #define __SYCL_TARGET_AMD_GPU_GFX1012__ 0
262 #endif
263 #ifndef __SYCL_TARGET_AMD_GPU_GFX1013__
264 #define __SYCL_TARGET_AMD_GPU_GFX1013__ 0
265 #endif
266 #ifndef __SYCL_TARGET_AMD_GPU_GFX1030__
267 #define __SYCL_TARGET_AMD_GPU_GFX1030__ 0
268 #endif
269 #ifndef __SYCL_TARGET_AMD_GPU_GFX1031__
270 #define __SYCL_TARGET_AMD_GPU_GFX1031__ 0
271 #endif
272 #ifndef __SYCL_TARGET_AMD_GPU_GFX1032__
273 #define __SYCL_TARGET_AMD_GPU_GFX1032__ 0
274 #endif
275 #ifndef __SYCL_TARGET_AMD_GPU_GFX1033__
276 #define __SYCL_TARGET_AMD_GPU_GFX1033__ 0
277 #endif
278 #ifndef __SYCL_TARGET_AMD_GPU_GFX1034__
279 #define __SYCL_TARGET_AMD_GPU_GFX1034__ 0
280 #endif
281 #ifndef __SYCL_TARGET_AMD_GPU_GFX1035__
282 #define __SYCL_TARGET_AMD_GPU_GFX1035__ 0
283 #endif
284 #ifndef __SYCL_TARGET_AMD_GPU_GFX1036__
285 #define __SYCL_TARGET_AMD_GPU_GFX1036__ 0
286 #endif
287 #ifndef __SYCL_TARGET_AMD_GPU_GFX1100__
288 #define __SYCL_TARGET_AMD_GPU_GFX1100__ 0
289 #endif
290 #ifndef __SYCL_TARGET_AMD_GPU_GFX1101__
291 #define __SYCL_TARGET_AMD_GPU_GFX1101__ 0
292 #endif
293 #ifndef __SYCL_TARGET_AMD_GPU_GFX1102__
294 #define __SYCL_TARGET_AMD_GPU_GFX1102__ 0
295 #endif
296 #ifndef __SYCL_TARGET_AMD_GPU_GFX1103__
297 #define __SYCL_TARGET_AMD_GPU_GFX1103__ 0
298 #endif
299 #ifndef __SYCL_TARGET_AMD_GPU_GFX1150__
300 #define __SYCL_TARGET_AMD_GPU_GFX1150__ 0
301 #endif
302 #ifndef __SYCL_TARGET_AMD_GPU_GFX1151__
303 #define __SYCL_TARGET_AMD_GPU_GFX1151__ 0
304 #endif
305 #ifndef __SYCL_TARGET_AMD_GPU_GFX1200__
306 #define __SYCL_TARGET_AMD_GPU_GFX1200__ 0
307 #endif
308 #ifndef __SYCL_TARGET_AMD_GPU_GFX1201__
309 #define __SYCL_TARGET_AMD_GPU_GFX1201__ 0
310 #endif
311 
312 // This is true when the translation unit is compiled in AOT mode with target
313 // names that support the "if_architecture_is" feature. If an unsupported
314 // target name is specified via "-fsycl-targets", the associated invocation of
315 // the device compiler will set this variable to false, and that will trigger
316 // an error for code that uses "if_architecture_is".
317 static constexpr bool is_allowable_aot_mode =
398 
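// Illustrative sketch, not part of the original header: an AOT device
// compilation is expected to predefine the __SYCL_TARGET_* macro that matches
// its target, e.g.
//   clang++ -fsycl -fsycl-targets=intel_gpu_pvc ...
// would set __SYCL_TARGET_INTEL_GPU_PVC__ to 1, while the #ifndef guards above
// default every other macro to 0. "is_allowable_aot_mode" is then true exactly
// when the translation unit was AOT-compiled for a target that supports
// "if_architecture_is".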
399 constexpr static std::optional<ext::oneapi::experimental::architecture>
400 get_current_architecture_aot() {
401  // TODO: re-write the logic below once sycl_ext_oneapi_device_architecture
402  // supports more than one target in -fsycl-targets
403 #if __SYCL_TARGET_INTEL_X86_64__
404  return ext::oneapi::experimental::architecture::x86_64;
405 #endif
406 #if __SYCL_TARGET_INTEL_GPU_BDW__
407  return ext::oneapi::experimental::architecture::intel_gpu_bdw;
408 #endif
409 #if __SYCL_TARGET_INTEL_GPU_SKL__
410  return ext::oneapi::experimental::architecture::intel_gpu_skl;
411 #endif
412 #if __SYCL_TARGET_INTEL_GPU_KBL__
413  return ext::oneapi::experimental::architecture::intel_gpu_kbl;
414 #endif
415 #if __SYCL_TARGET_INTEL_GPU_CFL__
416  return ext::oneapi::experimental::architecture::intel_gpu_cfl;
417 #endif
418 #if __SYCL_TARGET_INTEL_GPU_APL__
419  return ext::oneapi::experimental::architecture::intel_gpu_apl;
420 #endif
421 #if __SYCL_TARGET_INTEL_GPU_GLK__
422  return ext::oneapi::experimental::architecture::intel_gpu_glk;
423 #endif
424 #if __SYCL_TARGET_INTEL_GPU_WHL__
425  return ext::oneapi::experimental::architecture::intel_gpu_whl;
426 #endif
427 #if __SYCL_TARGET_INTEL_GPU_AML__
428  return ext::oneapi::experimental::architecture::intel_gpu_aml;
429 #endif
430 #if __SYCL_TARGET_INTEL_GPU_CML__
431  return ext::oneapi::experimental::architecture::intel_gpu_cml;
432 #endif
433 #if __SYCL_TARGET_INTEL_GPU_ICLLP__
434  return ext::oneapi::experimental::architecture::intel_gpu_icllp;
435 #endif
436 #if __SYCL_TARGET_INTEL_GPU_EHL__
437  return ext::oneapi::experimental::architecture::intel_gpu_ehl;
438 #endif
439 #if __SYCL_TARGET_INTEL_GPU_TGLLP__
440  return ext::oneapi::experimental::architecture::intel_gpu_tgllp;
441 #endif
442 #if __SYCL_TARGET_INTEL_GPU_RKL__
443  return ext::oneapi::experimental::architecture::intel_gpu_rkl;
444 #endif
445 #if __SYCL_TARGET_INTEL_GPU_ADL_S__
446  return ext::oneapi::experimental::architecture::intel_gpu_adl_s;
447 #endif
448 #if __SYCL_TARGET_INTEL_GPU_ADL_P__
449  return ext::oneapi::experimental::architecture::intel_gpu_adl_p;
450 #endif
454 #if __SYCL_TARGET_INTEL_GPU_ADL_N__
455  return ext::oneapi::experimental::architecture::intel_gpu_adl_n;
456 #endif
457 #if __SYCL_TARGET_INTEL_GPU_DG1__
458  return ext::oneapi::experimental::architecture::intel_gpu_dg1;
459 #endif
460 #if __SYCL_TARGET_INTEL_GPU_ACM_G10__
461  return ext::oneapi::experimental::architecture::intel_gpu_acm_g10;
462 #endif
463 #if __SYCL_TARGET_INTEL_GPU_ACM_G11__
464  return ext::oneapi::experimental::architecture::intel_gpu_acm_g11;
465 #endif
466 #if __SYCL_TARGET_INTEL_GPU_ACM_G12__
467  return ext::oneapi::experimental::architecture::intel_gpu_acm_g12;
468 #endif
469 #if __SYCL_TARGET_INTEL_GPU_PVC__
470  return ext::oneapi::experimental::architecture::intel_gpu_pvc;
471 #endif
472 #if __SYCL_TARGET_INTEL_GPU_PVC_VG__
473  return ext::oneapi::experimental::architecture::intel_gpu_pvc_vg;
474 #endif
475 #if __SYCL_TARGET_INTEL_GPU_MTL_U__
476  return ext::oneapi::experimental::architecture::intel_gpu_mtl_u;
477 #endif
478 #if __SYCL_TARGET_INTEL_GPU_MTL_H__
479  return ext::oneapi::experimental::architecture::intel_gpu_mtl_h;
480 #endif
481 #if __SYCL_TARGET_INTEL_GPU_ARL_H__
482  return ext::oneapi::experimental::architecture::intel_gpu_arl_h;
483 #endif
484 #if __SYCL_TARGET_INTEL_GPU_BMG_G21__
485  return ext::oneapi::experimental::architecture::intel_gpu_bmg_g21;
486 #endif
487 #if __SYCL_TARGET_INTEL_GPU_LNL_M__
488  return ext::oneapi::experimental::architecture::intel_gpu_lnl_m;
489 #endif
490 #if __SYCL_TARGET_NVIDIA_GPU_SM50__
491  return ext::oneapi::experimental::architecture::nvidia_gpu_sm_50;
492 #endif
493 #if __SYCL_TARGET_NVIDIA_GPU_SM52__
494  return ext::oneapi::experimental::architecture::nvidia_gpu_sm_52;
495 #endif
496 #if __SYCL_TARGET_NVIDIA_GPU_SM53__
497  return ext::oneapi::experimental::architecture::nvidia_gpu_sm_53;
498 #endif
499 #if __SYCL_TARGET_NVIDIA_GPU_SM60__
500  return ext::oneapi::experimental::architecture::nvidia_gpu_sm_60;
501 #endif
502 #if __SYCL_TARGET_NVIDIA_GPU_SM61__
503  return ext::oneapi::experimental::architecture::nvidia_gpu_sm_61;
504 #endif
505 #if __SYCL_TARGET_NVIDIA_GPU_SM62__
506  return ext::oneapi::experimental::architecture::nvidia_gpu_sm_62;
507 #endif
508 #if __SYCL_TARGET_NVIDIA_GPU_SM70__
509  return ext::oneapi::experimental::architecture::nvidia_gpu_sm_70;
510 #endif
511 #if __SYCL_TARGET_NVIDIA_GPU_SM72__
512  return ext::oneapi::experimental::architecture::nvidia_gpu_sm_72;
513 #endif
514 #if __SYCL_TARGET_NVIDIA_GPU_SM75__
515  return ext::oneapi::experimental::architecture::nvidia_gpu_sm_75;
516 #endif
517 #if __SYCL_TARGET_NVIDIA_GPU_SM80__
518  return ext::oneapi::experimental::architecture::nvidia_gpu_sm_80;
519 #endif
520 #if __SYCL_TARGET_NVIDIA_GPU_SM86__
521  return ext::oneapi::experimental::architecture::nvidia_gpu_sm_86;
522 #endif
523 #if __SYCL_TARGET_NVIDIA_GPU_SM87__
524  return ext::oneapi::experimental::architecture::nvidia_gpu_sm_87;
525 #endif
526 #if __SYCL_TARGET_NVIDIA_GPU_SM89__
527  return ext::oneapi::experimental::architecture::nvidia_gpu_sm_89;
528 #endif
529 #if __SYCL_TARGET_NVIDIA_GPU_SM90__
530  return ext::oneapi::experimental::architecture::nvidia_gpu_sm_90;
531 #endif
532 #if __SYCL_TARGET_AMD_GPU_GFX700__
533  return ext::oneapi::experimental::architecture::amd_gpu_gfx700;
534 #endif
535 #if __SYCL_TARGET_AMD_GPU_GFX701__
536  return ext::oneapi::experimental::architecture::amd_gpu_gfx701;
537 #endif
538 #if __SYCL_TARGET_AMD_GPU_GFX702__
539  return ext::oneapi::experimental::architecture::amd_gpu_gfx702;
540 #endif
541 #if __SYCL_TARGET_AMD_GPU_GFX801__
542  return ext::oneapi::experimental::architecture::amd_gpu_gfx801;
543 #endif
544 #if __SYCL_TARGET_AMD_GPU_GFX802__
545  return ext::oneapi::experimental::architecture::amd_gpu_gfx802;
546 #endif
547 #if __SYCL_TARGET_AMD_GPU_GFX803__
548  return ext::oneapi::experimental::architecture::amd_gpu_gfx803;
549 #endif
550 #if __SYCL_TARGET_AMD_GPU_GFX805__
551  return ext::oneapi::experimental::architecture::amd_gpu_gfx805;
552 #endif
553 #if __SYCL_TARGET_AMD_GPU_GFX810__
554  return ext::oneapi::experimental::architecture::amd_gpu_gfx810;
555 #endif
556 #if __SYCL_TARGET_AMD_GPU_GFX900__
557  return ext::oneapi::experimental::architecture::amd_gpu_gfx900;
558 #endif
559 #if __SYCL_TARGET_AMD_GPU_GFX902__
560  return ext::oneapi::experimental::architecture::amd_gpu_gfx902;
561 #endif
562 #if __SYCL_TARGET_AMD_GPU_GFX904__
563  return ext::oneapi::experimental::architecture::amd_gpu_gfx904;
564 #endif
565 #if __SYCL_TARGET_AMD_GPU_GFX906__
566  return ext::oneapi::experimental::architecture::amd_gpu_gfx906;
567 #endif
568 #if __SYCL_TARGET_AMD_GPU_GFX908__
569  return ext::oneapi::experimental::architecture::amd_gpu_gfx908;
570 #endif
571 #if __SYCL_TARGET_AMD_GPU_GFX909__
572  return ext::oneapi::experimental::architecture::amd_gpu_gfx909;
573 #endif
574 #if __SYCL_TARGET_AMD_GPU_GFX90A__
575  return ext::oneapi::experimental::architecture::amd_gpu_gfx90a;
576 #endif
577 #if __SYCL_TARGET_AMD_GPU_GFX90C__
578  return ext::oneapi::experimental::architecture::amd_gpu_gfx90c;
579 #endif
580 #if __SYCL_TARGET_AMD_GPU_GFX940__
581  return ext::oneapi::experimental::architecture::amd_gpu_gfx940;
582 #endif
583 #if __SYCL_TARGET_AMD_GPU_GFX941__
584  return ext::oneapi::experimental::architecture::amd_gpu_gfx941;
585 #endif
586 #if __SYCL_TARGET_AMD_GPU_GFX942__
587  return ext::oneapi::experimental::architecture::amd_gpu_gfx942;
588 #endif
589 #if __SYCL_TARGET_AMD_GPU_GFX1010__
590  return ext::oneapi::experimental::architecture::amd_gpu_gfx1010;
591 #endif
592 #if __SYCL_TARGET_AMD_GPU_GFX1011__
593  return ext::oneapi::experimental::architecture::amd_gpu_gfx1011;
594 #endif
595 #if __SYCL_TARGET_AMD_GPU_GFX1012__
596  return ext::oneapi::experimental::architecture::amd_gpu_gfx1012;
597 #endif
598 #if __SYCL_TARGET_AMD_GPU_GFX1030__
599  return ext::oneapi::experimental::architecture::amd_gpu_gfx1030;
600 #endif
601 #if __SYCL_TARGET_AMD_GPU_GFX1031__
602  return ext::oneapi::experimental::architecture::amd_gpu_gfx1031;
603 #endif
604 #if __SYCL_TARGET_AMD_GPU_GFX1032__
605  return ext::oneapi::experimental::architecture::amd_gpu_gfx1032;
606 #endif
607 #if __SYCL_TARGET_AMD_GPU_GFX1033__
608  return ext::oneapi::experimental::architecture::amd_gpu_gfx1033;
609 #endif
610 #if __SYCL_TARGET_AMD_GPU_GFX1034__
611  return ext::oneapi::experimental::architecture::amd_gpu_gfx1034;
612 #endif
613 #if __SYCL_TARGET_AMD_GPU_GFX1035__
614  return ext::oneapi::experimental::architecture::amd_gpu_gfx1035;
615 #endif
616 #if __SYCL_TARGET_AMD_GPU_GFX1036__
617  return ext::oneapi::experimental::architecture::amd_gpu_gfx1036;
618 #endif
619 #if __SYCL_TARGET_AMD_GPU_GFX1100__
620  return ext::oneapi::experimental::architecture::amd_gpu_gfx1100;
621 #endif
622 #if __SYCL_TARGET_AMD_GPU_GFX1101__
623  return ext::oneapi::experimental::architecture::amd_gpu_gfx1101;
624 #endif
625 #if __SYCL_TARGET_AMD_GPU_GFX1102__
626  return ext::oneapi::experimental::architecture::amd_gpu_gfx1102;
627 #endif
628 #if __SYCL_TARGET_AMD_GPU_GFX1103__
629  return ext::oneapi::experimental::architecture::amd_gpu_gfx1103;
630 #endif
631 #if __SYCL_TARGET_AMD_GPU_GFX1150__
632  return ext::oneapi::experimental::architecture::amd_gpu_gfx1150;
633 #endif
634 #if __SYCL_TARGET_AMD_GPU_GFX1151__
635  return ext::oneapi::experimental::architecture::amd_gpu_gfx1151;
636 #endif
637 #if __SYCL_TARGET_AMD_GPU_GFX1200__
638  return ext::oneapi::experimental::architecture::amd_gpu_gfx1200;
639 #endif
640 #if __SYCL_TARGET_AMD_GPU_GFX1201__
641  return ext::oneapi::experimental::architecture::amd_gpu_gfx1201;
642 #endif
643  return std::nullopt;
644 }
645 
646 // Tells if the AOT target matches that architecture.
647 constexpr static bool
648 is_aot_for_architecture(ext::oneapi::experimental::architecture arch) {
649  constexpr std::optional<ext::oneapi::experimental::architecture>
650  current_arch = get_current_architecture_aot();
651  if (current_arch.has_value())
652  return arch == *current_arch;
653  return false;
654 }
655 
656 // Reads the value of "is_allowable_aot_mode" via a template to defer triggering
657 // static_assert() until template instantiation time.
658 template <ext::oneapi::experimental::architecture... Archs>
659 constexpr static bool allowable_aot_mode() {
660  return is_allowable_aot_mode;
661 }
662 
663 // Tells if the current device has one of the architectures in the parameter
664 // pack.
665 template <ext::oneapi::experimental::architecture... Archs>
666 constexpr static bool device_architecture_is() {
667  return (is_aot_for_architecture(Archs) || ...);
668 }
669 
670 static constexpr std::optional<ext::oneapi::experimental::architecture>
671 get_category_min_architecture(
672  ext::oneapi::experimental::arch_category Category) {
673  if (Category == ext::oneapi::experimental::arch_category::intel_gpu) {
674  return min_intel_gpu_architecture;
675  } else if (Category == ext::oneapi::experimental::arch_category::nvidia_gpu) {
676  return min_nvidia_gpu_architecture;
677  } else if (Category == ext::oneapi::experimental::arch_category::amd_gpu) {
678  return min_amd_gpu_architecture;
679  } // add "else if " when adding new category, "else" not needed
680  return std::nullopt;
681 }
682 
683 static constexpr std::optional<ext::oneapi::experimental::architecture>
684 get_category_max_architecture(
685  ext::oneapi::experimental::arch_category Category) {
686  if (Category == ext::oneapi::experimental::arch_category::intel_gpu) {
687  return max_intel_gpu_architecture;
688  } else if (Category == ext::oneapi::experimental::arch_category::nvidia_gpu) {
689  return max_nvidia_gpu_architecture;
690  } else if (Category == ext::oneapi::experimental::arch_category::amd_gpu) {
691  return max_amd_gpu_architecture;
692  } // add "else if " when adding new category, "else" not needed
693  return std::nullopt;
694 }
695 
696 template <ext::oneapi::experimental::arch_category Category>
697 constexpr static bool device_architecture_is_in_category_aot() {
698  constexpr std::optional<ext::oneapi::experimental::architecture>
699  category_min_arch = get_category_min_architecture(Category);
700  constexpr std::optional<ext::oneapi::experimental::architecture>
701  category_max_arch = get_category_max_architecture(Category);
702  constexpr std::optional<ext::oneapi::experimental::architecture>
703  current_arch = get_current_architecture_aot();
704 
705  if (category_min_arch.has_value() && category_max_arch.has_value() &&
706  current_arch.has_value())
707  if ((*category_min_arch <= *current_arch) &&
708  (*current_arch <= *category_max_arch))
709  return true;
710 
711  return false;
712 }
713 
714 template <ext::oneapi::experimental::arch_category... Categories>
715 constexpr static bool device_architecture_is_in_categories() {
716  return (device_architecture_is_in_category_aot<Categories>() || ...);
717 }
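// Illustrative example (namespace qualifiers shortened, not part of the
// original header): if this translation unit is AOT-compiled for
// nvidia_gpu_sm_80, then
//   device_architecture_is_in_categories<arch_category::intel_gpu,
//                                        arch_category::nvidia_gpu>()
// is true, because sm_80 lies inside the
// [min_nvidia_gpu_architecture, max_nvidia_gpu_architecture] range checked by
// device_architecture_is_in_category_aot().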
718 
719 constexpr static std::optional<ext::oneapi::experimental::arch_category>
720 get_device_architecture_category(ext::oneapi::experimental::architecture arch) {
721  auto arch_is_in_segment =
722  [&arch](ext::oneapi::experimental::architecture min,
723  ext::oneapi::experimental::architecture max) {
724  if ((min <= arch) && (arch <= max))
725  return true;
726  return false;
727  };
728 
729  if (arch_is_in_segment(min_intel_gpu_architecture,
730  max_intel_gpu_architecture))
731  return ext::oneapi::experimental::arch_category::intel_gpu;
732  if (arch_is_in_segment(min_nvidia_gpu_architecture,
733  max_nvidia_gpu_architecture))
734  return ext::oneapi::experimental::arch_category::nvidia_gpu;
735  if (arch_is_in_segment(min_amd_gpu_architecture, max_amd_gpu_architecture))
736  return ext::oneapi::experimental::arch_category::amd_gpu;
737  // add "if " when adding new category
738 
739  return std::nullopt;
740 }
741 
742 template <ext::oneapi::experimental::architecture Arch, typename Compare>
743 constexpr static bool device_architecture_comparison_aot(Compare comp) {
744  constexpr std::optional<ext::oneapi::experimental::arch_category>
745  input_arch_category = get_device_architecture_category(Arch);
746  constexpr std::optional<ext::oneapi::experimental::architecture>
747  current_arch = get_current_architecture_aot();
748 
749  if (input_arch_category.has_value() && current_arch.has_value()) {
750  std::optional<ext::oneapi::experimental::arch_category>
751  current_arch_category = get_device_architecture_category(*current_arch);
752  if (current_arch_category.has_value() &&
753  (*input_arch_category == *current_arch_category))
754  return comp(*current_arch, Arch);
755  }
756  return false;
757 }
758 
759 constexpr auto device_arch_compare_op_lt =
760  [](ext::oneapi::experimental::architecture a,
761  ext::oneapi::experimental::architecture b) constexpr { return a < b; };
762 constexpr auto device_arch_compare_op_le =
763  [](ext::oneapi::experimental::architecture a,
764  ext::oneapi::experimental::architecture b) constexpr { return a <= b; };
765 constexpr auto device_arch_compare_op_gt =
766  [](ext::oneapi::experimental::architecture a,
767  ext::oneapi::experimental::architecture b) constexpr { return a > b; };
768 constexpr auto device_arch_compare_op_ge =
769  [](ext::oneapi::experimental::architecture a,
770  ext::oneapi::experimental::architecture b) constexpr { return a >= b; };
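// Illustrative example, not part of the original header: with an AOT target of
// intel_gpu_pvc,
//   device_architecture_comparison_aot<
//       ext::oneapi::experimental::architecture::intel_gpu_dg1>(
//       device_arch_compare_op_ge)
// is true, assuming the usual architectures.def ordering in which newer Intel
// GPUs have larger enum values; comparing against an architecture from a
// different arch_category always yields false.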
771 
772 // Helper object used to implement "else_if_architecture_is",
773 // "else_if_architecture_is_*" and "otherwise". The "MakeCall" template
774 // parameter tells whether a previous clause in the "if-elseif-elseif ..." chain
775 // was true. When "MakeCall" is false, some previous clause was true, so none
776 // of the subsequent "else_if_architecture_is", "else_if_architecture_is_*" or
777 // "otherwise" member functions should call the user's function.
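// Illustrative usage sketch, not part of the original header (assumes device
// code, e.g. inside a SYCL kernel; "q" is a sycl::queue):
//
//   q.single_task([=] {
//     namespace syclex = sycl::ext::oneapi::experimental;
//     syclex::if_architecture_is<syclex::architecture::intel_gpu_pvc>([&] {
//       // path specialized for PVC
//     }).else_if_architecture_is<syclex::architecture::intel_gpu_dg1>([&] {
//       // path specialized for DG1
//     }).otherwise([&] {
//       // generic fallback
//     });
//   });
//
// Only the first clause whose condition holds invokes its callable; the
// if_architecture_helper<MakeCall> object returned by each clause records
// whether a later clause may still fire.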
778 template <bool MakeCall> class if_architecture_helper {
779 public:
784  template <ext::oneapi::experimental::architecture... Archs, typename T>
785  constexpr auto else_if_architecture_is(T fn) {
786  if constexpr (MakeCall && device_architecture_is<Archs...>()) {
787  fn();
788  return sycl::detail::if_architecture_helper<false>{};
789  } else {
790  (void)fn;
791  return sycl::detail::if_architecture_helper<MakeCall>{};
792  }
793  }
794 
799  template <ext::oneapi::experimental::arch_category... Categories, typename T>
800  constexpr auto else_if_architecture_is(T fn) {
801  if constexpr (MakeCall &&
802  device_architecture_is_in_categories<Categories...>()) {
803  fn();
804  return sycl::detail::if_architecture_helper<false>{};
805  } else {
806  (void)fn;
807  return sycl::detail::if_architecture_helper<MakeCall>{};
808  }
809  }
810 
815  template <ext::oneapi::experimental::architecture Arch, typename T>
816  constexpr auto else_if_architecture_is_lt(T fn) {
817  if constexpr (MakeCall &&
818  sycl::detail::device_architecture_comparison_aot<Arch>(
819  sycl::detail::device_arch_compare_op_lt)) {
820  fn();
821  return sycl::detail::if_architecture_helper<false>{};
822  } else {
823  (void)fn;
824  return sycl::detail::if_architecture_helper<MakeCall>{};
825  }
826  }
827 
833  template <ext::oneapi::experimental::architecture Arch, typename T>
834  constexpr auto else_if_architecture_is_le(T fn) {
835  if constexpr (MakeCall &&
836  sycl::detail::device_architecture_comparison_aot<Arch>(
837  sycl::detail::device_arch_compare_op_le)) {
838  fn();
839  return sycl::detail::if_architecture_helper<false>{};
840  } else {
841  (void)fn;
842  return sycl::detail::if_architecture_helper<MakeCall>{};
843  }
844  }
845 
850  template <ext::oneapi::experimental::architecture Arch, typename T>
851  constexpr auto else_if_architecture_is_gt(T fn) {
852  if constexpr (MakeCall &&
853  sycl::detail::device_architecture_comparison_aot<Arch>(
854  sycl::detail::device_arch_compare_op_gt)) {
855  fn();
856  return sycl::detail::if_architecture_helper<false>{};
857  } else {
858  (void)fn;
859  return sycl::detail::if_architecture_helper<MakeCall>{};
860  }
861  }
862 
868  template <ext::oneapi::experimental::architecture Arch, typename T>
869  constexpr auto else_if_architecture_is_ge(T fn) {
870  if constexpr (MakeCall &&
871  sycl::detail::device_architecture_comparison_aot<Arch>(
872  sycl::detail::device_arch_compare_op_ge)) {
873  fn();
874  return sycl::detail::if_architecture_helper<false>{};
875  } else {
876  (void)fn;
877  return sycl::detail::if_architecture_helper<MakeCall>{};
878  }
879  }
880 
886  template <ext::oneapi::experimental::architecture Arch1,
887  ext::oneapi::experimental::architecture Arch2, typename T>
888  constexpr auto else_if_architecture_is_between(T fn) {
889  if constexpr (MakeCall &&
890  sycl::detail::device_architecture_comparison_aot<Arch1>(
891  sycl::detail::device_arch_compare_op_ge) &&
892  sycl::detail::device_architecture_comparison_aot<Arch2>(
893  sycl::detail::device_arch_compare_op_le)) {
894  fn();
895  return sycl::detail::if_architecture_helper<false>{};
896  } else {
897  (void)fn;
898  return sycl::detail::if_architecture_helper<MakeCall>{};
899  }
900  }
901 
902  template <typename T> constexpr void otherwise(T fn) {
903  if constexpr (MakeCall) {
904  fn();
905  }
906  }
907 };
908 } // namespace detail
909 
910 namespace ext::oneapi::experimental {
911 
912 namespace detail {
913 // Call the callable object "fn" only when this code runs on a device which
914 // has a certain set of aspects or a particular architecture.
915 //
916 // Condition is a parameter pack of ints that defines a simple expression
917 // language describing the set of aspects or architectures the device
918 // must have in order to enable the call. See the "Condition*" values below.
919 template <typename T, typename... Condition>
920 #ifdef __SYCL_DEVICE_ONLY__
921 [[__sycl_detail__::add_ir_attributes_function(
922  "sycl-call-if-on-device-conditionally", true)]]
923 #endif
924 void call_if_on_device_conditionally(T fn, Condition...) {
925  fn();
926 }
927 
928 // The "Condition" parameter pack above is a sequence of ints that defines an
929 // expression tree. Each node represents a boolean subexpression:
930 //
931 // ConditionAspect - Next int is a value from "enum aspect". The
932 // subexpression is true if the device has this
933 // aspect.
934 // ConditionArchitecture - Next int is a value from "enum architecture". The
935 // subexpression is true if the device has this
936 // architecture.
937 // ConditionNot - Next int is the root of another subexpression S1.
938 // This subexpression is true if S1 is false.
939 // ConditionAnd - Next int is the root of another subexpression S1.
940 // The int following that subexpression is the root
941 // of another subexpression S2. This subexpression
942 // is true if both S1 and S2 are true.
943 // ConditionOr - Next int is the root of another subexpression S1.
944 // The int following that subexpression is the root
945 // of another subexpression S2. This subexpression
946 // is true if either S1 or S2 is true.
947 //
948 // These values are stored in the application's executable, so they are
949 // effectively part of the ABI. Therefore, any change to an existing value
950 // is an ABI break.
951 //
952 // There is no programmatic reason for the values to be negative. They are
953 // negative only by convention to make it easier for humans to distinguish them
954 // from aspect or architecture values (which are positive).
955 static constexpr int ConditionAspect = -1;
956 static constexpr int ConditionArchitecture = -2;
957 static constexpr int ConditionNot = -3;
958 static constexpr int ConditionAnd = -4;
959 static constexpr int ConditionOr = -5;
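// Worked example, not part of the original header: the condition "device
// architecture is intel_gpu_pvc or intel_gpu_dg1" is encoded as the flat
// pre-order sequence
//   ConditionOr,
//     ConditionArchitecture, (int)architecture::intel_gpu_pvc,
//     ConditionArchitecture, (int)architecture::intel_gpu_dg1
// which is exactly what ConditionAnyArchitectureBuilder below produces as an
// std::integer_sequence.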
960 
961 // Metaprogramming helper to construct a ConditionOr expression for a sequence
962 // of architectures. "ConditionAnyArchitectureBuilder<Archs...>::seq" is an
963 // "std::integer_sequence" representing the expression.
964 template <architecture... Archs> struct ConditionAnyArchitectureBuilder;
965 
966 template <architecture Arch, architecture... Archs>
967 struct ConditionAnyArchitectureBuilder<Arch, Archs...> {
968  template <int I1, int I2, int I3, int... Is>
969  static auto append(std::integer_sequence<int, Is...>) {
970  return std::integer_sequence<int, I1, I2, I3, Is...>{};
971  }
972  using rest = typename ConditionAnyArchitectureBuilder<Archs...>::seq;
973  static constexpr int arch = static_cast<int>(Arch);
974  using seq =
975  decltype(append<ConditionOr, ConditionArchitecture, arch>(rest{}));
976 };
977 
978 template <architecture Arch> struct ConditionAnyArchitectureBuilder<Arch> {
979  static constexpr int arch = static_cast<int>(Arch);
980  using seq = std::integer_sequence<int, ConditionArchitecture, arch>;
981 };
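// Illustrative check, not part of the original header: for two architectures
// the recursion above wraps one ConditionOr around two leaves, e.g.
//   static_assert(std::is_same_v<
//       ConditionAnyArchitectureBuilder<architecture::x86_64,
//                                       architecture::intel_gpu_pvc>::seq,
//       std::integer_sequence<int, ConditionOr, ConditionArchitecture,
//                             static_cast<int>(architecture::x86_64),
//                             ConditionArchitecture,
//                             static_cast<int>(architecture::intel_gpu_pvc)>>);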
982 
983 // Metaprogramming helper to construct a ConditionNot expression.
984 // "ConditionNotBuilder<Exp>::seq" is an "std::integer_sequence" representing
985 // the expression.
986 template <typename Exp> struct ConditionNotBuilder {
987  template <int I, int... Is>
988  static auto append(std::integer_sequence<int, Is...>) {
989  return std::integer_sequence<int, I, Is...>{};
990  }
991  using rest = typename Exp::seq;
992  using seq = decltype(append<ConditionNot>(rest{}));
993 };
994 
995 // Metaprogramming helper to construct a ConditionAnd expression.
996 // "ConditionAndBuilder<Exp1, Exp2>::seq" is an "std::integer_sequence"
997 // representing the expression.
998 template <typename Exp1, typename Exp2> struct ConditionAndBuilder {
999  template <int I, int... I1s, int... I2s>
1000  static auto append(std::integer_sequence<int, I1s...>,
1001  std::integer_sequence<int, I2s...>) {
1002  return std::integer_sequence<int, I, I1s..., I2s...>{};
1003  }
1004  using rest1 = typename Exp1::seq;
1005  using rest2 = typename Exp2::seq;
1006  using seq = decltype(append<ConditionAnd>(rest1{}, rest2{}));
1007 };
1008 
1009 // Metaprogramming helper to construct a ConditionOr expression.
1010 // "ConditionOrBuilder<Exp1, Exp2>::seq" is an "std::integer_sequence"
1011 // representing the expression.
1012 template <typename Exp1, typename Exp2> struct ConditionOrBuilder {
1013  template <int I, int... I1s, int... I2s>
1014  static auto append(std::integer_sequence<int, I1s...>,
1015  std::integer_sequence<int, I2s...>) {
1016  return std::integer_sequence<int, I, I1s..., I2s...>{};
1017  }
1018  using rest1 = typename Exp1::seq;
1019  using rest2 = typename Exp2::seq;
1020  using seq = decltype(append<ConditionOr>(rest1{}, rest2{}));
1021 };
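// Illustrative sketch, not part of the original header: if_architecture_is and
// if_architecture_is_helper below compose these builders so that a clause
// fires only when no earlier clause did. For
//   if_architecture_is<A>(f).else_if_architecture_is<B>(g)
// the condition attached to g is, conceptually,
//   ConditionAndBuilder<ConditionNotBuilder<ConditionAnyArchitectureBuilder<A>>,
//                       ConditionAnyArchitectureBuilder<B>>
// i.e. "not (architecture is A) and (architecture is B)".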
1022 
1023 // Helper function to call call_if_on_device_conditionally() while converting
1024 // the "std::integer_sequence" for a condition expression into individual
1025 // arguments of type int.
1026 template <typename T, int... Is>
1027 static void call_if_on_device_conditionally_helper(T fn,
1028  std::integer_sequence<int, Is...>) {
1029  call_if_on_device_conditionally(fn, Is...);
1030 }
1031 
1032 // Same sort of helper object for "else_if_architecture_is".
1033 template <typename MakeCallIf> class if_architecture_is_helper {
1034 public:
1035  template <architecture... Archs, typename T,
1036  typename = std::enable_if<std::is_invocable_v<T>>>
1037  auto else_if_architecture_is(T fn) {
1038  using make_call_if =
1039  ConditionAndBuilder<MakeCallIf,
1040  ConditionAnyArchitectureBuilder<Archs...>>;
1041  using make_else_call_if = ConditionAndBuilder<
1042  MakeCallIf,
1043  ConditionNotBuilder<ConditionAnyArchitectureBuilder<Archs...>>>;
1044 
1045  using cond = typename make_call_if::seq;
1046  call_if_on_device_conditionally_helper(fn, cond{});
1047  return if_architecture_is_helper<make_else_call_if>{};
1048  }
1049 
1050  template <typename T> void otherwise(T fn) {
1051  using cond = typename MakeCallIf::seq;
1052  call_if_on_device_conditionally_helper(fn, cond{});
1053  }
1054 };
1055 
1056 } // namespace detail
1057 
1058 #ifdef SYCL_EXT_ONEAPI_DEVICE_ARCHITECTURE_NEW_DESIGN_IMPL
1059 template <architecture... Archs, typename T>
1060 static auto if_architecture_is(T fn) {
1061  using make_call_if = detail::ConditionAnyArchitectureBuilder<Archs...>;
1062  using make_else_call_if = detail::ConditionNotBuilder<make_call_if>;
1063 
1064  using cond = typename make_call_if::seq;
1065  detail::call_if_on_device_conditionally_helper(fn, cond{});
1066  return detail::if_architecture_is_helper<make_else_call_if>{};
1067 }
1068 #else
1072 template <architecture... Archs, typename T>
1073 constexpr static auto if_architecture_is(T fn) {
1074  static_assert(sycl::detail::allowable_aot_mode<Archs...>(),
1075  "The if_architecture_is function may only be used when AOT "
1076  "compiling with '-fsycl-targets=spir64_x86_64' or "
1077  "'-fsycl-targets=*_gpu_*'");
1078  if constexpr (sycl::detail::device_architecture_is<Archs...>()) {
1079  fn();
1080  return sycl::detail::if_architecture_helper<false>{};
1081  } else {
1082  (void)fn;
1083  return sycl::detail::if_architecture_helper<true>{};
1084  }
1085 }
1086 #endif // SYCL_EXT_ONEAPI_DEVICE_ARCHITECTURE_NEW_DESIGN_IMPL
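// Illustrative note, not part of the original header: with the fallback
// implementation above, using if_architecture_is in a translation unit that is
// not AOT-compiled for a supported target is expected to fail at compile time
// via the static_assert, e.g.
//   clang++ -fsycl app.cpp                               // JIT: static_assert
//   clang++ -fsycl -fsycl-targets=intel_gpu_pvc app.cpp  // AOT: accepted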
1087 
1091 template <arch_category... Categories, typename T>
1092 constexpr static auto if_architecture_is(T fn) {
1093  if constexpr (sycl::detail::device_architecture_is_in_categories<
1094  Categories...>()) {
1095  fn();
1096  return sycl::detail::if_architecture_helper<false>{};
1097  } else {
1098  (void)fn;
1099  return sycl::detail::if_architecture_helper<true>{};
1100  }
1101 }
1102 
1106 template <architecture Arch, typename T>
1107 constexpr static auto if_architecture_is_lt(T fn) {
1108  if constexpr (sycl::detail::device_architecture_comparison_aot<Arch>(
1109  sycl::detail::device_arch_compare_op_lt)) {
1110  fn();
1111  return sycl::detail::if_architecture_helper<false>{};
1112  } else {
1113  (void)fn;
1114  return sycl::detail::if_architecture_helper<true>{};
1115  }
1116 }
1117 
1121 template <architecture Arch, typename T>
1122 constexpr static auto if_architecture_is_le(T fn) {
1123  if constexpr (sycl::detail::device_architecture_comparison_aot<Arch>(
1124  sycl::detail::device_arch_compare_op_le)) {
1125  fn();
1126  return sycl::detail::if_architecture_helper<false>{};
1127  } else {
1128  (void)fn;
1129  return sycl::detail::if_architecture_helper<true>{};
1130  }
1131 }
1132 
1136 template <architecture Arch, typename T>
1137 constexpr static auto if_architecture_is_gt(T fn) {
1138  if constexpr (sycl::detail::device_architecture_comparison_aot<Arch>(
1139  sycl::detail::device_arch_compare_op_gt)) {
1140  fn();
1141  return sycl::detail::if_architecture_helper<false>{};
1142  } else {
1143  (void)fn;
1144  return sycl::detail::if_architecture_helper<true>{};
1145  }
1146 }
1147 
1151 template <architecture Arch, typename T>
1152 constexpr static auto if_architecture_is_ge(T fn) {
1153  if constexpr (sycl::detail::device_architecture_comparison_aot<Arch>(
1154  sycl::detail::device_arch_compare_op_ge)) {
1155  fn();
1156  return sycl::detail::if_architecture_helper<false>{};
1157  } else {
1158  (void)fn;
1159  return sycl::detail::if_architecture_helper<true>{};
1160  }
1161 }
1162 
1167 template <architecture Arch1, architecture Arch2, typename T>
1168 constexpr static auto if_architecture_is_between(T fn) {
1169  if constexpr (sycl::detail::device_architecture_comparison_aot<Arch1>(
1170  sycl::detail::device_arch_compare_op_ge) &&
1171  sycl::detail::device_architecture_comparison_aot<Arch2>(
1172  sycl::detail::device_arch_compare_op_le)) {
1173  fn();
1174  return sycl::detail::if_architecture_helper<false>{};
1175  } else {
1176  (void)fn;
1177  return sycl::detail::if_architecture_helper<true>{};
1178  }
1179 }
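// Illustrative usage sketch, not part of the original header: the comparison
// forms chain with the same helper as the exact-match form, e.g.
//
//   namespace syclex = sycl::ext::oneapi::experimental;
//   syclex::if_architecture_is_ge<syclex::architecture::intel_gpu_pvc>([&] {
//     // PVC or a newer architecture within the Intel GPU category
//   }).otherwise([&] {
//     // older Intel GPUs, other vendors, or an unrecognized category
//   });
//
// Comparisons are only meaningful within a single arch_category; when the
// categories differ, device_architecture_comparison_aot() returns false and
// the "otherwise" branch runs.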
1180 
1181 } // namespace ext::oneapi::experimental
1182 } // namespace _V1
1183 } // namespace sycl