#include "TargetInfo.h"
#include "llvm/ADT/SmallBitVector.h"

using namespace clang;

bool IsX86_MMXType(llvm::Type *IRType) {
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
         cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
         IRType->getScalarSizeInBits() != 64;
}
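// IsX86_MMXType matches 64-bit vectors of integer elements narrower than
// i64 (e.g. <2 x i32>, <8 x i8>); the inline-asm constraint handling below
// treats such a value as an MMX operand for the "y" register-class
// constraints.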
  bool IsMMXCons = llvm::StringSwitch<bool>(Constraint)
                       .Cases("y", "&y", "^Ym", true)
                       .Default(false);
  if (IsMMXCons && Ty->isVectorTy()) {
    if (cast<llvm::VectorType>(Ty)->getPrimitiveSizeInBits().getFixedValue() !=
        64) {

  if (Constraint == "k") {
    return llvm::FixedVectorType::get(Int1Ty, Ty->getScalarSizeInBits());
  }
    if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {
      if (BT->getKind() == BuiltinType::LongDouble) {
            &llvm::APFloat::x87DoubleExtended())

    if (VecSize == 128 || VecSize == 256 || VecSize == 512)

static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
  return NumMembers <= 4;
}
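// vectorcall (and regcall) homogeneous vector aggregates are limited to four
// members; this helper enforces that cap for both the 32-bit and 64-bit
// vectorcall implementations below.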
static ABIArgInfo getDirectX86Hva(llvm::Type *T = nullptr) {
  AI.setCanBeFlattened(false);

      : IsPreassigned(FI.arg_size()), CC(FI.getCallingConvention()),

  llvm::SmallBitVector IsPreassigned;
  unsigned FreeRegs = 0;
  unsigned FreeSSERegs = 0;
class X86_32ABIInfo : public ABIInfo {
  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsRetSmallStructInRegABI;
  bool IsWin32StructABI;
  unsigned DefaultNumRegisterParameters;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }

  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;
                                  unsigned ArgIndex) const;
  bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
                                bool &NeedsPadding) const;
  bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;
  bool canExpandIndirectArgument(QualType Ty) const;

                bool RetSmallStructInRegABI, bool Win32StructABI,
                unsigned NumRegisterParameters, bool SoftFloatABI)
      : ABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
        IsRetSmallStructInRegABI(RetSmallStructInRegABI),
        IsWin32StructABI(Win32StructABI), IsSoftFloatABI(SoftFloatABI),
        IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
        IsLinuxABI(CGT.getTarget().getTriple().isOSLinux() ||
                   CGT.getTarget().getTriple().isOSCygMing()),
        DefaultNumRegisterParameters(NumRegisterParameters) {}
                            bool AsReturnValue) const override {
    return occupiesMoreThan(ComponentTys, 3);
  }

                           bool RetSmallStructInRegABI, bool Win32StructABI,
                           unsigned NumRegisterParameters, bool SoftFloatABI)
            CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
            NumRegisterParameters, SoftFloatABI)) {
    SwiftInfo = std::make_unique<X86_32SwiftABIInfo>(CGT);
  }

  static bool isStructReturnInRegABI(

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,

                               llvm::Value *Address) const override;

                                  StringRef Constraint,
                                  llvm::Type *Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

                                std::string &Constraints,
                                std::vector<llvm::Type *> &ResultRegTypes,
                                std::vector<llvm::Type *> &ResultTruncRegTypes,
                                std::vector<LValue> &ResultRegDests,
                                std::string &AsmString,
                                unsigned NumOutputs) const override;

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "movl\t%ebp, %ebp"
           "\t\t// marker for objc_retainAutoreleaseReturnValue";
  }
                                              std::string &AsmString) {
  llvm::raw_string_ostream OS(Buf);
  while (Pos < AsmString.size()) {
    size_t DollarStart = AsmString.find('$', Pos);
    if (DollarStart == std::string::npos)
      DollarStart = AsmString.size();
    size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
    if (DollarEnd == std::string::npos)
      DollarEnd = AsmString.size();
    OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
    size_t NumDollars = DollarEnd - DollarStart;
    if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
      size_t DigitStart = Pos;
      if (AsmString[DigitStart] == '{') {
      size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
      if (DigitEnd == std::string::npos)
        DigitEnd = AsmString.size();
      StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
      unsigned OperandIndex;
      if (!OperandStr.getAsInteger(10, OperandIndex)) {
        if (OperandIndex >= FirstIn)
          OperandIndex += NumNewOuts;

  AsmString = std::move(OS.str());
void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
    std::vector<llvm::Type *> &ResultRegTypes,
    std::vector<llvm::Type *> &ResultTruncRegTypes,
    std::vector<LValue> &ResultRegDests, std::string &AsmString,
    unsigned NumOutputs) const {
  if (!Constraints.empty())
  if (RetWidth <= 32) {
    Constraints += "={eax}";
    ResultRegTypes.push_back(CGF.Int32Ty);
    ResultRegTypes.push_back(CGF.Int64Ty);

  llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
  ResultTruncRegTypes.push_back(CoerceTy);
  ResultRegDests.push_back(ReturnSlot);
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
  if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
  if (Size == 64 || Size == 128)
    return shouldReturnTypeInRegister(AT->getElementType(), Context);
  if (!RT)
    return false;
    if (!shouldReturnTypeInRegister(FD->getType(), Context))

    Ty = CTy->getElementType();
  return Size == 32 || Size == 64;

  for (const auto *FD : RD->fields()) {
    if (FD->isBitField())

bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    if (!IsWin32StructABI) {
      if (!CXXRD->isCLike())
    if (CXXRD->isDynamicClass())
  return Size == getContext().getTypeSize(Ty);
  if (State.FreeRegs) {
      return getNaturalAlignIndirectInReg(RetTy);
  return getNaturalAlignIndirect(RetTy, false);

                                             CCState &State) const {
  if ((State.CC == llvm::CallingConv::X86_VectorCall ||
       State.CC == llvm::CallingConv::X86_RegCall) &&
      isHomogeneousAggregate(RetTy, Base, NumElts)) {

    if (IsDarwinVectorABI) {
          llvm::Type::getInt64Ty(getVMContext()), 2));
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
      return getIndirectReturnResult(RetTy, State);
    return getIndirectReturnResult(RetTy, State);
      return getIndirectReturnResult(RetTy, State);
          llvm::Type::getHalfTy(getVMContext()), 2));

    if (shouldReturnTypeInRegister(RetTy, getContext())) {
        if ((!IsWin32StructABI && SeltTy->isRealFloatingType()) ||
            SeltTy->hasPointerRepresentation())
    return getIndirectReturnResult(RetTy, State);

    RetTy = EnumTy->getDecl()->getIntegerType();
    if (EIT->getNumBits() > 64)
      return getIndirectReturnResult(RetTy, State);
unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  if (Align <= MinABIStackAlignInBytes)
  if (Ty->isVectorType() && (Align == 16 || Align == 32 || Align == 64))
  if (!IsDarwinVectorABI) {
    return MinABIStackAlignInBytes;
  return MinABIStackAlignInBytes;

                                      CCState &State) const {
    if (State.FreeRegs) {
      return getNaturalAlignIndirectInReg(Ty);
    return getNaturalAlignIndirect(Ty, false);

  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  bool Realign = TypeAlign > StackAlign;
X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {

bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const {
  if (!IsSoftFloatABI) {
  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = (Size + 31) / 32;
    if (SizeInRegs > State.FreeRegs) {
    if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
  State.FreeRegs -= SizeInRegs;
bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
                                             bool &InReg,
                                             bool &NeedsPadding) const {
  NeedsPadding = false;
  if (!updateFreeRegs(Ty, State))
  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall ||
      State.CC == llvm::CallingConv::X86_RegCall) {
    if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)

bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty,
                                            CCState &State) const {
  bool IsPtrOrInt = (getContext().getTypeSize(Ty) <= 32) &&
  if (!IsPtrOrInt && (State.CC == llvm::CallingConv::X86_FastCall ||
                      State.CC == llvm::CallingConv::X86_VectorCall))
  if (!updateFreeRegs(Ty, State))
  if (!IsPtrOrInt && State.CC == llvm::CallingConv::X86_RegCall)
  for (int I = 0, E = Args.size(); I < E; ++I) {
        isHomogeneousAggregate(Ty, Base, NumElts)) {
      if (State.FreeSSERegs >= NumElts) {
        State.FreeSSERegs -= NumElts;
        State.IsPreassigned.set(I);

                                               unsigned ArgIndex) const {
  bool IsFastCall = State.CC == llvm::CallingConv::X86_FastCall;
  bool IsRegCall = State.CC == llvm::CallingConv::X86_RegCall;
  bool IsVectorCall = State.CC == llvm::CallingConv::X86_VectorCall;
  TypeInfo TI = getContext().getTypeInfo(Ty);
      return getIndirectResult(Ty, false, State);
    } else if (State.IsDelegateCall) {

  if ((IsRegCall || IsVectorCall) &&
      isHomogeneousAggregate(Ty, Base, NumElts)) {
    if (State.FreeSSERegs >= NumElts) {
      State.FreeSSERegs -= NumElts;
        return getDirectX86Hva();
      return getIndirectResult(Ty, false, State);
      return getIndirectResult(Ty, true, State);

    if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true))
    llvm::LLVMContext &LLVMContext = getVMContext();
    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
    bool NeedsPadding = false;
    if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
      unsigned SizeInRegs = (TI.Width + 31) / 32;
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
    llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;

    if (IsWin32StructABI && State.Required.isRequiredArg(ArgIndex)) {
      unsigned AlignInBits = 0;
            getContext().getASTRecordLayout(RT->getDecl());
        AlignInBits = TI.Align;
      if (AlignInBits > 32)
        return getIndirectResult(Ty, false, State);

    if (TI.Width <= 4 * 32 && (!IsMCUABI || State.FreeRegs == 0) &&
        canExpandIndirectArgument(Ty))
          IsFastCall || IsVectorCall || IsRegCall, PaddingType);
    return getIndirectResult(Ty, true, State);

  if (IsWin32StructABI) {
    if (TI.Width <= 512 && State.FreeSSERegs > 0) {
    return getIndirectResult(Ty, false, State);

  if (IsDarwinVectorABI) {
        (TI.Width == 64 && VT->getNumElements() == 1))
            llvm::IntegerType::get(getVMContext(), TI.Width));
    if (IsX86_MMXType(CGT.ConvertType(Ty)))

    Ty = EnumTy->getDecl()->getIntegerType();
  bool InReg = shouldPrimitiveUseInReg(Ty, State);
  if (isPromotableIntegerTypeForABI(Ty)) {
    if (EIT->getNumBits() <= 64) {
    return getIndirectResult(Ty, false, State);
    Ty = EnumTy->getDecl()->getIntegerType();

  } else if (State.CC == llvm::CallingConv::X86_FastCall) {
    State.FreeSSERegs = 3;
  } else if (State.CC == llvm::CallingConv::X86_VectorCall) {
    State.FreeSSERegs = 6;
  } else if (State.CC == llvm::CallingConv::X86_RegCall) {
    State.FreeSSERegs = 8;
  } else if (IsWin32StructABI) {
    State.FreeRegs = DefaultNumRegisterParameters;
    State.FreeSSERegs = 3;
    State.FreeRegs = DefaultNumRegisterParameters;
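  // Register budgets for the 32-bit conventions: fastcall and vectorcall pass
  // the first eligible integer arguments in ECX and EDX, regcall uses a larger
  // register set, and the SSE counts set above (3, 6, and 8 XMM registers)
  // follow each convention's rules.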
  if (State.FreeRegs) {

  if (State.CC == llvm::CallingConv::X86_VectorCall)
    runVectorCallFirstPass(FI, State);

  bool UsedInAlloca = false;
  for (unsigned I = 0, E = Args.size(); I < E; ++I) {
    if (State.IsPreassigned.test(I))
    rewriteWithInAlloca(FI);

  assert(StackOffset.isMultipleOf(WordSize) && "unaligned inalloca struct");
  bool IsIndirect = false;
  llvm::Type *LLTy = CGT.ConvertTypeForMem(Type);
    LLTy = llvm::PointerType::getUnqual(getVMContext());
  FrameFields.push_back(LLTy);
  StackOffset += IsIndirect ? WordSize : getContext().getTypeSizeInChars(Type);

  StackOffset = FieldEnd.alignTo(WordSize);
  if (StackOffset != FieldEnd) {
    CharUnits NumBytes = StackOffset - FieldEnd;
    llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
    Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity());
    FrameFields.push_back(Ty);
  llvm_unreachable("invalid enum");

void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
  assert(IsWin32StructABI && "inalloca only supported on win32");

  if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
    addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);

  if (Ret.isIndirect() && !Ret.getInReg()) {
    Ret.setInAllocaSRet(IsWin32StructABI);

  for (; I != E; ++I) {
    addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);

  FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,

  auto TypeInfo = getContext().getTypeInfoInChars(Ty);
      getTypeStackAlignInBytes(Ty, TypeInfo.Align.getQuantity()));
bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
  assert(Triple.getArch() == llvm::Triple::x86);
  switch (Opts.getStructReturnConvention()) {
  if (Triple.isOSDarwin() || Triple.isOSIAMCU())
  switch (Triple.getOS()) {
  case llvm::Triple::DragonFly:
  case llvm::Triple::FreeBSD:
  case llvm::Triple::OpenBSD:
  case llvm::Triple::Win32:

  if (!FD->hasAttr<AnyX86InterruptAttr>())
  llvm::Function *Fn = cast<llvm::Function>(GV);
  Fn->setCallingConv(llvm::CallingConv::X86_INTR);
  llvm::Attribute NewAttr = llvm::Attribute::getWithByValType(
      Fn->getContext(), ByValTy);
  Fn->addParamAttr(0, NewAttr);

void X86_32TargetCodeGenInfo::setTargetAttributes(
  if (GV->isDeclaration())
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      llvm::Function *Fn = cast<llvm::Function>(GV);
      Fn->addFnAttr("stackrealign");

bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
  llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
  Builder.CreateAlignedStore(
      Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9),
  llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);

static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
  llvm_unreachable("Unknown AVXLevel");
class X86_64ABIInfo : public ABIInfo {
  static Class merge(Class Accum, Class Field);

  void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;

                bool isNamedArg, bool IsRegCall = false) const;

  llvm::Type *GetByteVectorType(QualType Ty) const;
  llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                                 QualType SourceTy,
                                 unsigned SourceOffset) const;
  llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                                     QualType SourceTy,
                                     unsigned SourceOffset) const;

                                  unsigned &neededInt, unsigned &neededSSE,
                                  bool IsRegCall = false) const;

                                       unsigned &NeededSSE,
                                       unsigned &MaxVectorWidth) const;
                                   unsigned &NeededSSE,
                                   unsigned &MaxVectorWidth) const;

  bool IsIllegalVectorType(QualType Ty) const;

  bool honorsRevision0_98() const {
    return !getTarget().getTriple().isOSDarwin();
  }

  bool classifyIntegerMMXAsSSE() const {
    if (getContext().getLangOpts().getClangABICompat() <=
    const llvm::Triple &Triple = getTarget().getTriple();
    if (Triple.isOSDarwin() || Triple.isPS() || Triple.isOSFreeBSD())

  bool passInt128VectorsInMem() const {
    if (getContext().getLangOpts().getClangABICompat() <=
    const llvm::Triple &T = getTarget().getTriple();
    return T.isOSLinux() || T.isOSNetBSD();
  }

  bool Has64BitPointers;

      : ABIInfo(CGT), AVXLevel(AVXLevel),
        Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {}

    unsigned neededInt, neededSSE;
    if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
      return vectorTy->getPrimitiveSizeInBits().getFixedValue() > 128;

  bool has64BitPointers() const { return Has64BitPointers; }
class WinX86_64ABIInfo : public ABIInfo {
      : ABIInfo(CGT), AVXLevel(AVXLevel),
        IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }

                bool IsVectorCall, bool IsRegCall) const;

        std::make_unique<SwiftABIInfo>(CGT, true);

  bool markARCOptimizedReturnCallsAsNoTail() const override { return true; }
                               llvm::Value *Address) const override {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

                                  StringRef Constraint,
                                  llvm::Type *Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  bool isNoProtoCallVariadic(const CallArgList &args,
    bool HasAVXType = false;
    for (CallArgList::const_iterator it = args.begin(), ie = args.end();
         it != ie; ++it) {
      if (getABIInfo<X86_64ABIInfo>().isPassedUsingAVXType(it->Ty)) {

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
    if (GV->isDeclaration())
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
        llvm::Function *Fn = cast<llvm::Function>(GV);
        Fn->addFnAttr("stackrealign");

                            QualType ReturnType) const override;

                            llvm::StringMap<bool> &CallerMap,
                            llvm::StringMap<bool> &CalleeMap,
  if (CalleeMap.empty() && CallerMap.empty()) {

                                 const llvm::StringMap<bool> &CallerMap,
                                 const llvm::StringMap<bool> &CalleeMap,
  bool CallerHasFeat = CallerMap.lookup(Feature);
  bool CalleeHasFeat = CalleeMap.lookup(Feature);
  if (!CallerHasFeat && !CalleeHasFeat)
    return Diag.Report(CallLoc, diag::warn_avx_calling_convention)
           << IsArgument << Ty << Feature;
  if (!CallerHasFeat || !CalleeHasFeat)
    return Diag.Report(CallLoc, diag::err_avx_calling_convention)
           << IsArgument << Ty << Feature;

                                    const llvm::StringMap<bool> &CallerMap,
                                    const llvm::StringMap<bool> &CalleeMap,
  bool Caller256 = CallerMap.lookup("avx512f") && !CallerMap.lookup("evex512");
  bool Callee256 = CalleeMap.lookup("avx512f") && !CalleeMap.lookup("evex512");
  if (Caller256 || Callee256)
    return Diag.Report(CallLoc, diag::err_avx_calling_convention)
           << IsArgument << Ty << "evex512";
                               "avx512f", IsArgument);

                          const llvm::StringMap<bool> &CallerMap,
                          const llvm::StringMap<bool> &CalleeMap, QualType Ty,
void X86_64TargetCodeGenInfo::checkFunctionCallABI(CodeGenModule &CGM,
  llvm::StringMap<bool> CallerMap;
  llvm::StringMap<bool> CalleeMap;
  unsigned ArgIndex = 0;

  for (const CallArg &Arg : Args) {
    if (Arg.getType()->isVectorType() &&
      if (ArgIndex < Callee->getNumParams())
        Ty = Callee->getParamDecl(ArgIndex)->getType();
                        CalleeMap, Ty, true))

  if (Callee->getReturnType()->isVectorType() &&
                  CalleeMap, Callee->getReturnType(),

  bool Quote = Lib.contains(' ');
  std::string ArgStr = Quote ? "\"" : "";
  if (!Lib.ends_with_insensitive(".lib") && !Lib.ends_with_insensitive(".a"))
  ArgStr += Quote ? "\"" : "";
class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
                             bool DarwinVectorABI, bool RetSmallStructInRegABI,
                             bool Win32StructABI,
                             unsigned NumRegisterParameters)
      : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
                                Win32StructABI, NumRegisterParameters, false) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,

  void getDependentLibraryOption(llvm::StringRef Lib,
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);

  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";

void WinX86_32TargetCodeGenInfo::setTargetAttributes(
  X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
  addStackProbeTargetAttributes(D, GV, CGM);

        std::make_unique<SwiftABIInfo>(CGT, true);
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,

                               llvm::Value *Address) const override {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

  void getDependentLibraryOption(llvm::StringRef Lib,
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);

  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";

void WinX86_64TargetCodeGenInfo::setTargetAttributes(
  if (GV->isDeclaration())
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      llvm::Function *Fn = cast<llvm::Function>(GV);
      Fn->addFnAttr("stackrealign");
  addStackProbeTargetAttributes(D, GV, CGM);
void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
  if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
  if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
  if (Hi == SSEUp && Lo != SSE)

X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
  assert((Accum != Memory && Accum != ComplexX87) &&
         "Invalid accumulated classification during merge.");
  if (Accum == Field || Field == NoClass)
  if (Field == Memory)
  if (Accum == NoClass)
  if (Accum == Integer || Field == Integer)
  if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
      Accum == X87 || Accum == X87Up)
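// The checks above implement the SysV AMD64 psABI merge rule: equal classes
// (or NoClass) keep the accumulated class, Memory always wins, Integer wins
// over SSE, any x87-related class forces the aggregate to Memory, and
// everything else merges to SSE.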
                            Class &Hi, bool isNamedArg, bool IsRegCall) const {
  Class &Current = OffsetBase < 64 ? Lo : Hi;

    if (k == BuiltinType::Void) {
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
               k == BuiltinType::Float16 || k == BuiltinType::BFloat16) {
    } else if (k == BuiltinType::Float128) {
    } else if (k == BuiltinType::LongDouble) {
      const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
      if (LDF == &llvm::APFloat::IEEEquad()) {
      } else if (LDF == &llvm::APFloat::x87DoubleExtended()) {
      } else if (LDF == &llvm::APFloat::IEEEdouble()) {
        llvm_unreachable("unexpected long double representation!");

    classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);

    if (Has64BitPointers) {
      uint64_t EB_FuncPtr = (OffsetBase) / 64;
      uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
      if (EB_FuncPtr != EB_ThisAdj) {

    if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
      uint64_t EB_Lo = (OffsetBase) / 64;
    } else if (Size == 64) {
      QualType ElementType = VT->getElementType();
      if (!classifyIntegerMMXAsSSE() &&
      if (OffsetBase && OffsetBase != 64)
    } else if (Size == 128 ||
               (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
      QualType ElementType = VT->getElementType();
      if (passInt128VectorsInMem() && Size != 128 &&

      else if (Size <= 128)
    } else if (ET->isFloat16Type() || ET == getContext().FloatTy ||
    } else if (ET == getContext().DoubleTy) {
    } else if (ET == getContext().LongDoubleTy) {
      const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
      if (LDF == &llvm::APFloat::IEEEquad())
      else if (LDF == &llvm::APFloat::x87DoubleExtended())
        Current = ComplexX87;
      else if (LDF == &llvm::APFloat::IEEEdouble())
        llvm_unreachable("unexpected long double representation!");

    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)

    if (EITy->getNumBits() <= 64)
    else if (EITy->getNumBits() <= 128)

    if (!IsRegCall && Size > 512)
    if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
    uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
    uint64_t ArraySize = AT->getZExtSize();
        (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))

    Class FieldLo, FieldHi;
      classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)

    postMerge(Size, Lo, Hi);
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
               "Unexpected base class!");
        Class FieldLo, FieldHi;
        classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
        Lo = merge(Lo, FieldLo);
        Hi = merge(Hi, FieldHi);
        if (Lo == Memory || Hi == Memory) {
          postMerge(Size, Lo, Hi);

    bool UseClang11Compat = getContext().getLangOpts().getClangABICompat() <=
                            getContext().getTargetInfo().getTriple().isPS();
    bool IsUnion = RT->isUnionType() && !UseClang11Compat;

         i != e; ++i, ++idx) {
      bool BitField = i->isBitField();
      if (BitField && i->isUnnamedBitField())
          ((!IsUnion && Size != getContext().getTypeSize(i->getType())) ||
           Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
        postMerge(Size, Lo, Hi);
          Offset % getContext().getTypeAlign(i->getType().getCanonicalType());
      if (!BitField && IsInMemory) {
        postMerge(Size, Lo, Hi);

      Class FieldLo, FieldHi;
        assert(!i->isUnnamedBitField());
        assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
        FieldHi = EB_Hi ? Integer : NoClass;
        classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)

    postMerge(Size, Lo, Hi);
    Ty = EnumTy->getDecl()->getIntegerType();
  return getNaturalAlignIndirect(Ty);
  return getNaturalAlignIndirect(Ty);

bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
    unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
    if (Size <= 64 || Size > LargestVector)
    QualType EltTy = VecTy->getElementType();
    if (passInt128VectorsInMem() &&

                                            unsigned freeIntRegs) const {
    Ty = EnumTy->getDecl()->getIntegerType();
  unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
  if (freeIntRegs == 0) {
    if (Align == 8 && Size <= 64)

llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
  llvm::Type *IRType = CGT.ConvertType(Ty);
  if (isa<llvm::VectorType>(IRType)) {
    if (passInt128VectorsInMem() &&
        cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy(128)) {
      return llvm::FixedVectorType::get(llvm::Type::getInt64Ty(getVMContext()),
  if (IRType->getTypeID() == llvm::Type::FP128TyID)
  assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!");
  return llvm::FixedVectorType::get(llvm::Type::getDoubleTy(getVMContext()),
  if (TySize <= StartBit)

    unsigned NumElts = (unsigned)AT->getZExtSize();
    for (unsigned i = 0; i != NumElts; ++i) {
      unsigned EltOffset = i * EltSize;
      if (EltOffset >= EndBit)
        break;
      unsigned EltStart = EltOffset < StartBit ? StartBit - EltOffset : 0;
                                 EndBit - EltOffset, Context))

    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
               "Unexpected base class!");
        if (BaseOffset >= EndBit)
          continue;
        unsigned BaseStart = BaseOffset < StartBit ? StartBit - BaseOffset : 0;
                                   EndBit - BaseOffset, Context))

         i != e; ++i, ++idx) {
      if (FieldOffset >= EndBit)
        break;
      unsigned FieldStart = FieldOffset < StartBit ? StartBit - FieldOffset : 0;
                                       const llvm::DataLayout &TD) {
  if (IROffset == 0 && IRType->isFloatingPointTy())

  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    if (!STy->getNumContainedTypes())
    const llvm::StructLayout *SL = TD.getStructLayout(STy);
    unsigned Elt = SL->getElementContainingOffset(IROffset);
    IROffset -= SL->getElementOffset(Elt);

  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = TD.getTypeAllocSize(EltTy);
    IROffset -= IROffset / EltSize * EltSize;

llvm::Type *X86_64ABIInfo::GetSSETypeAtOffset(llvm::Type *IRType,
                                              unsigned IROffset,
                                              QualType SourceTy,
                                              unsigned SourceOffset) const {
  const llvm::DataLayout &TD = getDataLayout();
  unsigned SourceSize =
      (unsigned)getContext().getTypeSize(SourceTy) / 8 - SourceOffset;
  if (!T0 || T0->isDoubleTy())
    return llvm::Type::getDoubleTy(getVMContext());

  llvm::Type *T1 = nullptr;
  unsigned T0Size = TD.getTypeAllocSize(T0);
  if (SourceSize > T0Size)
  if (T1 == nullptr) {
    if (T0->is16bitFPTy() && SourceSize > 4)

  if (T0->isFloatTy() && T1->isFloatTy())
    return llvm::FixedVectorType::get(T0, 2);

  if (T0->is16bitFPTy() && T1->is16bitFPTy()) {
    llvm::Type *T2 = nullptr;
      return llvm::FixedVectorType::get(T0, 2);
    return llvm::FixedVectorType::get(T0, 4);

  if (T0->is16bitFPTy() || T1->is16bitFPTy())
    return llvm::FixedVectorType::get(llvm::Type::getHalfTy(getVMContext()), 4);

  return llvm::Type::getDoubleTy(getVMContext());
llvm::Type *X86_64ABIInfo::GetINTEGERTypeAtOffset(llvm::Type *IRType,
                                                  unsigned IROffset,
                                                  QualType SourceTy,
                                                  unsigned SourceOffset) const {
  if (IROffset == 0) {
    if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
        IRType->isIntegerTy(64))

    if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
        IRType->isIntegerTy(32) ||
        (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
      unsigned BitWidth = isa<llvm::PointerType>(IRType)
                              ? 32
                              : cast<llvm::IntegerType>(IRType)->getBitWidth();
                                SourceOffset * 8 + 64, getContext()))

  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
    if (IROffset < SL->getSizeInBytes()) {
      unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
      IROffset -= SL->getElementOffset(FieldIdx);
      return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
                                    SourceTy, SourceOffset);

  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
    unsigned EltOffset = IROffset / EltSize * EltSize;
    return GetINTEGERTypeAtOffset(EltTy, IROffset - EltOffset, SourceTy,

  unsigned TySizeInBytes =
      (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
  assert(TySizeInBytes != SourceOffset && "Empty field?");

  return llvm::IntegerType::get(getVMContext(),
                                std::min(TySizeInBytes - SourceOffset, 8U) * 8);
                                           const llvm::DataLayout &TD) {
  unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
  llvm::Align HiAlign = TD.getABITypeAlign(Hi);
  unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
  assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");

    if (Lo->isHalfTy() || Lo->isFloatTy())
      Lo = llvm::Type::getDoubleTy(Lo->getContext());
      assert((Lo->isIntegerTy() || Lo->isPointerTy()) &&
             "Invalid/unknown lo type");
      Lo = llvm::Type::getInt64Ty(Lo->getContext());

  llvm::StructType *Result = llvm::StructType::get(Lo, Hi);
  assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
         "Invalid x86-64 argument pair!");
  X86_64ABIInfo::Class Lo, Hi;
  classify(RetTy, 0, Lo, Hi, true);

  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  llvm::Type *ResType = nullptr;
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    llvm_unreachable("Invalid classification for lo word.");
    return getIndirectReturnResult(RetTy);
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
        RetTy = EnumTy->getDecl()->getIntegerType();
          isPromotableIntegerTypeForABI(RetTy))
    ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
    ResType = llvm::Type::getX86_FP80Ty(getVMContext());
    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
    ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
                                    llvm::Type::getX86_FP80Ty(getVMContext()));

  llvm::Type *HighPart = nullptr;
    llvm_unreachable("Invalid classification for hi word.");
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = GetByteVectorType(RetTy);
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
                                             unsigned &neededInt,
                                             unsigned &neededSSE,
                                             bool isNamedArg,
                                             bool IsRegCall) const {
  X86_64ABIInfo::Class Lo, Hi;
  classify(Ty, 0, Lo, Hi, isNamedArg, IsRegCall);

  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  llvm::Type *ResType = nullptr;
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    return getIndirectResult(Ty, freeIntRegs);
    llvm_unreachable("Invalid classification for lo word.");
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
        Ty = EnumTy->getDecl()->getIntegerType();
          isPromotableIntegerTypeForABI(Ty))
    llvm::Type *IRType = CGT.ConvertType(Ty);
    ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);

  llvm::Type *HighPart = nullptr;
    llvm_unreachable("Invalid classification for hi word.");
  case NoClass:
    break;
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
    assert(Lo == SSE && "Unexpected SSEUp classification");
    ResType = GetByteVectorType(Ty);
X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
                                             unsigned &NeededSSE,
                                             unsigned &MaxVectorWidth) const {
  assert(RT && "classifyRegCallStructType only valid with struct types");
    return getIndirectReturnResult(Ty);

  if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
    if (CXXRD->isDynamicClass()) {
      NeededInt = NeededSSE = 0;
      return getIndirectReturnResult(Ty);

    for (const auto &I : CXXRD->bases())
      if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE,
        NeededInt = NeededSSE = 0;
        return getIndirectReturnResult(Ty);

      if (classifyRegCallStructTypeImpl(MTy, NeededInt, NeededSSE,
        NeededInt = NeededSSE = 0;
        return getIndirectReturnResult(Ty);
      unsigned LocalNeededInt, LocalNeededSSE;
        NeededInt = NeededSSE = 0;
        return getIndirectReturnResult(Ty);
      if (const auto *AT = getContext().getAsConstantArrayType(MTy))
        MTy = AT->getElementType();
        if (getContext().getTypeSize(VT) > MaxVectorWidth)
          MaxVectorWidth = getContext().getTypeSize(VT);
      NeededInt += LocalNeededInt;
      NeededSSE += LocalNeededSSE;

X86_64ABIInfo::classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
                                         unsigned &NeededSSE,
                                         unsigned &MaxVectorWidth) const {
  return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE,
    WinX86_64ABIInfo Win64ABIInfo(CGT, AVXLevel);
    Win64ABIInfo.computeInfo(FI);

  bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall;

  unsigned FreeIntRegs = IsRegCall ? 11 : 6;
  unsigned FreeSSERegs = IsRegCall ? 16 : 8;
  unsigned NeededInt = 0, NeededSSE = 0, MaxVectorWidth = 0;

    if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
      FreeIntRegs -= NeededInt;
      FreeSSERegs -= NeededSSE;
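  // The SysV x86-64 budget is six integer registers (RDI, RSI, RDX, RCX, R8,
  // R9) and eight SSE registers (XMM0-XMM7) for argument passing; __regcall
  // raises both limits, which is what the 11/16 counts above reflect.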
          getContext().LongDoubleTy)
    else if (NeededSSE && MaxVectorWidth > 0)

       it != ie; ++it, ++ArgNo) {
    bool IsNamedArg = ArgNo < NumRequiredArgs;

    if (IsRegCall && it->type->isStructureOrClassType())
      it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE,
                                        NeededSSE, IsNamedArg);

    if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
      FreeIntRegs -= NeededInt;
      FreeSSERegs -= NeededSSE;
      it->info = getIndirectResult(it->type, FreeIntRegs);
  llvm::Value *overflow_arg_area =
  llvm::Value *Res = overflow_arg_area;
      llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
                                            Offset, "overflow_arg_area.next");
  return Address(Res, LTy, Align);

  unsigned neededInt, neededSSE;
  if (!neededInt && !neededSSE)

  llvm::Value *InRegs = nullptr;
  llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;
    InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
    InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
    llvm::Value *FitsInFP =
        llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
    FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
    InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);

  if (neededInt && neededSSE) {
    assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
    assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
    llvm::Type *TyLo = ST->getElementType(0);
    llvm::Type *TyHi = ST->getElementType(1);
    assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
           "Unexpected ABI info for mixed regs");
    llvm::Value *GPAddr =
    llvm::Value *FPAddr =
    llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
    llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
  } else if (neededInt) {
    auto TInfo = getContext().getTypeInfoInChars(Ty);
    uint64_t TySize = TInfo.Width.getQuantity();
  } else if (neededSSE == 1) {
    assert(neededSSE == 2 && "Invalid number of needed registers!");

    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
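  // In the va_arg lowering above, 48 is the size of the six GPR save slots
  // (6 * 8 bytes) and 176 is the end of the XMM save area (48 + 8 * 16 bytes)
  // in the SysV register save area, so the comparisons check whether enough
  // register slots remain for this argument.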
  uint64_t Width = getContext().getTypeSize(Ty);
  bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
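  // MS x64 rule: any argument wider than 8 bytes, or whose size is not a
  // power of two (1, 2, 4, or 8 bytes), is passed by reference; the same test
  // is reused by the Win64 argument classification and va_arg code below.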
ABIArgInfo WinX86_64ABIInfo::reclassifyHvaArgForVectorCall(
      isHomogeneousAggregate(Ty, Base, NumElts) && FreeSSERegs >= NumElts) {
    FreeSSERegs -= NumElts;
    return getDirectX86Hva();

                                      bool IsReturnType, bool IsVectorCall,
                                      bool IsRegCall) const {
    Ty = EnumTy->getDecl()->getIntegerType();
  TypeInfo Info = getContext().getTypeInfo(Ty);
    if (!IsReturnType) {
    return getNaturalAlignIndirect(Ty, false);

  if ((IsVectorCall || IsRegCall) &&
      isHomogeneousAggregate(Ty, Base, NumElts)) {
      if (FreeSSERegs >= NumElts) {
        FreeSSERegs -= NumElts;
    } else if (IsVectorCall) {
      if (FreeSSERegs >= NumElts &&
        FreeSSERegs -= NumElts;
    } else if (IsReturnType) {

    llvm::Type *LLTy = CGT.ConvertType(Ty);
    if (LLTy->isPointerTy() || LLTy->isIntegerTy())
    if (Width > 64 || !llvm::isPowerOf2_64(Width))
      return getNaturalAlignIndirect(Ty, false);

    switch (BT->getKind()) {
    case BuiltinType::Bool:
    case BuiltinType::LongDouble:
      const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
      if (LDF == &llvm::APFloat::x87DoubleExtended())
    case BuiltinType::Int128:
    case BuiltinType::UInt128:
            llvm::Type::getInt64Ty(getVMContext()), 2));

  bool IsVectorCall = CC == llvm::CallingConv::X86_VectorCall;
  bool IsRegCall = CC == llvm::CallingConv::X86_RegCall;

  if (CC == llvm::CallingConv::X86_64_SysV) {
    X86_64ABIInfo SysVABIInfo(CGT, AVXLevel);
    SysVABIInfo.computeInfo(FI);
  unsigned FreeSSERegs = 0;
  } else if (IsRegCall) {
                                  IsVectorCall, IsRegCall);
  } else if (IsRegCall) {

    unsigned ArgNum = 0;
    unsigned ZeroSSERegs = 0;
      unsigned *MaybeFreeSSERegs =
          (IsVectorCall && ArgNum >= 6) ? &ZeroSSERegs : &FreeSSERegs;
          classify(I.type, *MaybeFreeSSERegs, false, IsVectorCall, IsRegCall);
      I.info = reclassifyHvaArgForVectorCall(I.type, FreeSSERegs, I.info);
  uint64_t Width = getContext().getTypeSize(Ty);
  bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);

    CodeGenModule &CGM, bool DarwinVectorABI, bool Win32StructABI,
    unsigned NumRegisterParameters, bool SoftFloatABI) {
  bool RetSmallStructInRegABI = X86_32TargetCodeGenInfo::isStructReturnInRegABI(
  return std::make_unique<X86_32TargetCodeGenInfo>(
      CGM.getTypes(), DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
      NumRegisterParameters, SoftFloatABI);

    CodeGenModule &CGM, bool DarwinVectorABI, bool Win32StructABI,
    unsigned NumRegisterParameters) {
  bool RetSmallStructInRegABI = X86_32TargetCodeGenInfo::isStructReturnInRegABI(
  return std::make_unique<WinX86_32TargetCodeGenInfo>(
      CGM.getTypes(), DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
      NumRegisterParameters);

std::unique_ptr<TargetCodeGenInfo>
  return std::make_unique<X86_64TargetCodeGenInfo>(CGM.getTypes(), AVXLevel);

std::unique_ptr<TargetCodeGenInfo>
  return std::make_unique<WinX86_64TargetCodeGenInfo>(CGM.getTypes(), AVXLevel);