//===- NVPTX.cpp ----------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"
#include "TargetInfo.h"
#include "llvm/IR/IntrinsicsNVPTX.h"

using namespace clang;
using namespace clang::CodeGen;

//===----------------------------------------------------------------------===//
// NVPTX ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
class NVPTXTargetCodeGenInfo;

class NVPTXABIInfo : public ABIInfo {
  NVPTXTargetCodeGenInfo &CGInfo;

public:
  NVPTXABIInfo(CodeGenTypes &CGT, NVPTXTargetCodeGenInfo &Info)
      : ABIInfo(CGT), CGInfo(Info) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
  bool isUnsupportedType(QualType T) const;
  ABIArgInfo coerceToIntArrayWithLimit(QualType Ty, unsigned MaxSize) const;
};

class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<NVPTXABIInfo>(CGT, *this)) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
  bool shouldEmitStaticExternCAliases() const override;

  llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
                                 llvm::PointerType *T,
                                 QualType QT) const override;

  llvm::Type *getCUDADeviceBuiltinSurfaceDeviceType() const override {
    // On the device side, a surface reference is represented as an object
    // handle: a 64-bit integer.
    return llvm::Type::getInt64Ty(getABIInfo().getVMContext());
  }

  llvm::Type *getCUDADeviceBuiltinTextureDeviceType() const override {
    // On the device side, a texture reference is represented as an object
    // handle: a 64-bit integer.
    return llvm::Type::getInt64Ty(getABIInfo().getVMContext());
  }

  bool emitCUDADeviceBuiltinSurfaceDeviceCopy(CodeGenFunction &CGF, LValue Dst,
                                              LValue Src) const override {
    emitBuiltinSurfTexDeviceCopy(CGF, Dst, Src);
    return true;
  }

  bool emitCUDADeviceBuiltinTextureDeviceCopy(CodeGenFunction &CGF, LValue Dst,
                                              LValue Src) const override {
    emitBuiltinSurfTexDeviceCopy(CGF, Dst, Src);
    return true;
  }

  // Adds a NamedMDNode with GV, Name, and Operand as operands, and adds the
  // resulting MDNode to the nvvm.annotations MDNode.
  static void addNVVMMetadata(llvm::GlobalValue *GV, StringRef Name,
                              int Operand);

private:
  static void emitBuiltinSurfTexDeviceCopy(CodeGenFunction &CGF, LValue Dst,
                                           LValue Src) {
    llvm::Value *Handle = nullptr;
    llvm::Constant *C =
        llvm::dyn_cast<llvm::Constant>(Src.getAddress().emitRawPointer(CGF));
    // Look through an `addrspacecast` on the constant pointer, if any.
    if (auto *ASC = llvm::dyn_cast_or_null<llvm::AddrSpaceCastOperator>(C))
      C = llvm::cast<llvm::Constant>(ASC->getPointerOperand());
    if (auto *GV = llvm::dyn_cast_or_null<llvm::GlobalVariable>(C)) {
      // Load the handle from the specific global variable using the
      // `nvvm.texsurf.handle.internal` intrinsic.
      Handle = CGF.EmitRuntimeCall(
          CGF.CGM.getIntrinsic(llvm::Intrinsic::nvvm_texsurf_handle_internal,
                               {GV->getType()}),
          {GV}, "texsurf_handle");
    } else
      Handle = CGF.EmitLoadOfScalar(Src, SourceLocation());
    CGF.EmitStoreOfScalar(Handle, Dst);
  }
};
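
// For reference, emitBuiltinSurfTexDeviceCopy above lowers roughly to the
// following IR when the source resolves to a global texture/surface
// reference (illustrative only; `@tex` and `%dst` are hypothetical names):
//
//   %texsurf_handle = call i64
//       @llvm.nvvm.texsurf.handle.internal.p1(ptr addrspace(1) @tex)
//   store i64 %texsurf_handle, ptr %dst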

/// Checks if the type is unsupported directly by the current target.
bool NVPTXABIInfo::isUnsupportedType(QualType T) const {
  ASTContext &Context = getContext();
  if (!Context.getTargetInfo().hasFloat16Type() && T->isFloat16Type())
    return true;
  if (!Context.getTargetInfo().hasFloat128Type() &&
      (T->isFloat128Type() ||
       (T->isRealFloatingType() && Context.getTypeSize(T) == 128)))
    return true;
  if (const auto *EIT = T->getAs<BitIntType>())
    return EIT->getNumBits() >
           (Context.getTargetInfo().hasInt128Type() ? 128U : 64U);
  if (!Context.getTargetInfo().hasInt128Type() && T->isIntegerType() &&
      Context.getTypeSize(T) > 64U)
    return true;
  if (const auto *AT = T->getAsArrayTypeUnsafe())
    return isUnsupportedType(AT->getElementType());
  const auto *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const CXXBaseSpecifier &I : CXXRD->bases())
      if (isUnsupportedType(I.getType()))
        return true;

  for (const FieldDecl *I : RD->fields())
    if (isUnsupportedType(I->getType()))
      return true;
  return false;
}
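
// Example (illustrative): on a target where hasInt128Type() is false, both
// `__int128` itself and any record carrying an `__int128` field (or a base
// class that does) are reported as unsupported, since records are checked
// base-by-base and field-by-field above.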

/// Coerce the given type into an array with the maximum allowed element size.
ABIArgInfo NVPTXABIInfo::coerceToIntArrayWithLimit(QualType Ty,
                                                   unsigned MaxSize) const {
  // Alignment and Size are measured in bits.
  const uint64_t Size = getContext().getTypeSize(Ty);
  const uint64_t Alignment = getContext().getTypeAlign(Ty);
  const unsigned Div = std::min<unsigned>(MaxSize, Alignment);
  llvm::Type *IntType = llvm::Type::getIntNTy(getVMContext(), Div);
  const uint64_t NumElements = (Size + Div - 1) / Div;
  return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
}
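
// Example (illustrative): a 96-bit record aligned to 32 bits, coerced with
// MaxSize == 64, gives Div == min(64, 32) == 32 and
// NumElements == (96 + 31) / 32 == 3, i.e. the type is passed as [3 x i32].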

ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (getContext().getLangOpts().OpenMP &&
      getContext().getLangOpts().OpenMPIsTargetDevice &&
      isUnsupportedType(RetTy))
    return coerceToIntArrayWithLimit(RetTy, 64);

  // Note: this is different from the default ABI.
  if (!RetTy->isScalarType())
    return ABIArgInfo::getDirect();

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                               : ABIArgInfo::getDirect());
}

ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Return aggregate types as indirect by value.
  if (isAggregateTypeForABI(Ty)) {
    // Under CUDA device compilation, tex/surf builtin types are replaced with
    // object types and passed directly.
    if (getContext().getLangOpts().CUDAIsDevice) {
      if (Ty->isCUDADeviceBuiltinSurfaceType())
        return ABIArgInfo::getDirect(
            CGInfo.getCUDADeviceBuiltinSurfaceDeviceType());
      if (Ty->isCUDADeviceBuiltinTextureType())
        return ABIArgInfo::getDirect(
            CGInfo.getCUDADeviceBuiltinTextureDeviceType());
    }
    return getNaturalAlignIndirect(Ty, /* byval */ true);
  }

  if (const auto *EIT = Ty->getAs<BitIntType>()) {
    if ((EIT->getNumBits() > 128) ||
        (!getContext().getTargetInfo().hasInt128Type() &&
         EIT->getNumBits() > 64))
      return getNaturalAlignIndirect(Ty, /* byval */ true);
  }

  return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                            : ABIArgInfo::getDirect());
}
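
// Example (illustrative): with __int128 support, _BitInt(128) still falls
// through and is passed directly (or extended), while _BitInt(256) exceeds
// the 128-bit limit and is passed indirectly (byval) at natural alignment.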

void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);

  // Always honor user-specified calling convention.
  if (FI.getCallingConvention() != llvm::CallingConv::C)
    return;

  FI.setEffectiveCallingConvention(getRuntimeCC());
}

Address NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  llvm_unreachable("NVPTX does not support varargs");
}

void NVPTXTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  if (GV->isDeclaration())
    return;
  const VarDecl *VD = dyn_cast_or_null<VarDecl>(D);
  if (VD) {
    if (M.getLangOpts().CUDA) {
      if (VD->getType()->isCUDADeviceBuiltinSurfaceType())
        addNVVMMetadata(GV, "surface", 1);
      else if (VD->getType()->isCUDADeviceBuiltinTextureType())
        addNVVMMetadata(GV, "texture", 1);
      return;
    }
  }

  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (!FD)
    return;

  llvm::Function *F = cast<llvm::Function>(GV);

  // Perform special handling in OpenCL and SYCL device mode.
  if (M.getLangOpts().OpenCL || M.getLangOpts().SYCLIsDevice) {
    // Use OpenCL function attributes to check for kernel functions.
    // By default, all functions are device functions.
    if (FD->hasAttr<OpenCLKernelAttr>()) {
      // OpenCL __kernel functions get kernel metadata.
      // Create !{<func-ref>, metadata !"kernel", i32 1} node.
      addNVVMMetadata(F, "kernel", 1);
      // And kernel functions are not subject to inlining.
      F->addFnAttr(llvm::Attribute::NoInline);
    }
    bool HasMaxWorkGroupSize = false;
    bool HasMinWorkGroupPerCU = false;
    if (const auto *MWGS = FD->getAttr<SYCLIntelMaxWorkGroupSizeAttr>()) {
      auto MaxThreads = (*MWGS->getZDimVal()).getExtValue() *
                        (*MWGS->getYDimVal()).getExtValue() *
                        (*MWGS->getXDimVal()).getExtValue();
      if (MaxThreads > 0) {
        addNVVMMetadata(F, "maxntidx", MaxThreads);
        HasMaxWorkGroupSize = true;
      }
    }

    auto attrValue = [&](Expr *E) {
      const auto *CE = cast<ConstantExpr>(E);
      std::optional<llvm::APInt> Val = CE->getResultAsAPSInt();
      return Val->getZExtValue();
    };

    if (const auto *MWGPCU =
            FD->getAttr<SYCLIntelMinWorkGroupsPerComputeUnitAttr>()) {
      if (!HasMaxWorkGroupSize && FD->hasAttr<OpenCLKernelAttr>()) {
        M.getDiags().Report(D->getLocation(),
                            diag::warn_launch_bounds_missing_attr)
            << MWGPCU << 0;
      } else {
        // The value is guaranteed to be > 0; pass it to the metadata.
        addNVVMMetadata(F, "minnctapersm", attrValue(MWGPCU->getValue()));
        HasMinWorkGroupPerCU = true;
      }
    }

    if (const auto *MWGPMP =
            FD->getAttr<SYCLIntelMaxWorkGroupsPerMultiprocessorAttr>()) {
      if ((!HasMaxWorkGroupSize || !HasMinWorkGroupPerCU) &&
          FD->hasAttr<OpenCLKernelAttr>()) {
        M.getDiags().Report(D->getLocation(),
                            diag::warn_launch_bounds_missing_attr)
            << MWGPMP << 1;
      } else {
        // The value is guaranteed to be > 0; pass it to the metadata.
        addNVVMMetadata(F, "maxclusterrank", attrValue(MWGPMP->getValue()));
      }
    }
  }

  // Perform special handling in CUDA mode.
  if (M.getLangOpts().CUDA) {
    // CUDA __global__ functions get a kernel metadata entry. Since
    // __global__ functions cannot be called from the device, we do not
    // need to set the noinline attribute.
    if (FD->hasAttr<CUDAGlobalAttr>()) {
      // Create !{<func-ref>, metadata !"kernel", i32 1} node.
      addNVVMMetadata(F, "kernel", 1);
    }
    if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>())
      M.handleCUDALaunchBoundsAttr(F, Attr);
  }

  // Attach kernel metadata directly if compiling for NVPTX.
  if (FD->hasAttr<NVPTXKernelAttr>()) {
    addNVVMMetadata(F, "kernel", 1);
  }
}

void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::GlobalValue *GV,
                                             StringRef Name, int Operand) {
  llvm::Module *M = GV->getParent();
  llvm::LLVMContext &Ctx = M->getContext();

  // Get the "nvvm.annotations" metadata node.
  llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");

  llvm::Metadata *MDVals[] = {
      llvm::ConstantAsMetadata::get(GV), llvm::MDString::get(Ctx, Name),
      llvm::ConstantAsMetadata::get(
          llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
  // Append metadata to nvvm.annotations.
  MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
}
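
// For example, addNVVMMetadata(F, "kernel", 1) on a function @foo appends an
// entry to the module-level annotations, roughly (illustrative IR):
//
//   !nvvm.annotations = !{!0}
//   !0 = !{ptr @foo, !"kernel", i32 1}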

bool NVPTXTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
  return false;
}
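
// Note on getNullPointer below: for pointers into the OpenCL "local" address
// space it does not emit a plain zero in that space, but the generic null
// pointer addrspacecast'ed into the local address space, presumably because
// the all-zero bit pattern can be a valid local-space address on NVPTX.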

llvm::Constant *
NVPTXTargetCodeGenInfo::getNullPointer(const CodeGen::CodeGenModule &CGM,
                                       llvm::PointerType *PT,
                                       QualType QT) const {
  auto &Ctx = CGM.getContext();
  if (PT->getAddressSpace() != Ctx.getTargetAddressSpace(LangAS::opencl_local))
    return llvm::ConstantPointerNull::get(PT);

  auto NPT = llvm::PointerType::get(
      PT->getContext(), Ctx.getTargetAddressSpace(LangAS::opencl_generic));
  return llvm::ConstantExpr::getAddrSpaceCast(
      llvm::ConstantPointerNull::get(NPT), PT);
}
} // namespace

void CodeGenModule::handleCUDALaunchBoundsAttr(llvm::Function *F,
                                               const CUDALaunchBoundsAttr *Attr,
                                               int32_t *MaxThreadsVal,
                                               int32_t *MinBlocksVal,
                                               int32_t *MaxClusterRankVal) {
  llvm::APSInt MaxThreads(32);
  MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(getContext());
  if (MaxThreads > 0) {
    if (MaxThreadsVal)
      *MaxThreadsVal = MaxThreads.getExtValue();
    if (F) {
      // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node
      NVPTXTargetCodeGenInfo::addNVVMMetadata(F, "maxntidx",
                                              MaxThreads.getExtValue());
    }
  }

  // MinBlocks and MaxBlocks are optional arguments for CUDALaunchBoundsAttr.
  // If one was not specified in __launch_bounds__, or the user specified a 0
  // value, we don't have to add a PTX directive for it.
  if (Attr->getMinBlocks()) {
    llvm::APSInt MinBlocks(32);
    MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(getContext());
    if (MinBlocks > 0) {
      if (MinBlocksVal)
        *MinBlocksVal = MinBlocks.getExtValue();
      if (F) {
        // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node
        NVPTXTargetCodeGenInfo::addNVVMMetadata(F, "minctasm",
                                                MinBlocks.getExtValue());
      }
    }
  }
  if (Attr->getMaxBlocks()) {
    llvm::APSInt MaxBlocks(32);
    MaxBlocks = Attr->getMaxBlocks()->EvaluateKnownConstInt(getContext());
    if (MaxBlocks > 0) {
      if (MaxClusterRankVal)
        *MaxClusterRankVal = MaxBlocks.getExtValue();
      if (F) {
        // Create !{<func-ref>, metadata !"maxclusterrank", i32 <val>} node
        NVPTXTargetCodeGenInfo::addNVVMMetadata(F, "maxclusterrank",
                                                MaxBlocks.getExtValue());
      }
    }
  }
}
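
// Example (illustrative): __launch_bounds__(256, 4) produces the annotations
//   !{ptr @kern, !"maxntidx", i32 256} and !{ptr @kern, !"minctasm", i32 4};
// a third bound, where supported, is emitted as "maxclusterrank".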

std::unique_ptr<TargetCodeGenInfo>
CodeGen::createNVPTXTargetCodeGenInfo(CodeGenModule &CGM) {
  return std::make_unique<NVPTXTargetCodeGenInfo>(CGM.getTypes());
}