33 #include "llvm/ADT/StringExtras.h"
34 #include "llvm/Analysis/ValueTracking.h"
35 #include "llvm/IR/Assumptions.h"
36 #include "llvm/IR/AttributeMask.h"
37 #include "llvm/IR/Attributes.h"
38 #include "llvm/IR/CallingConv.h"
39 #include "llvm/IR/DataLayout.h"
40 #include "llvm/IR/FPAccuracy.h"
41 #include "llvm/IR/InlineAsm.h"
42 #include "llvm/IR/IntrinsicInst.h"
43 #include "llvm/IR/Intrinsics.h"
44 #include "llvm/IR/Type.h"
45 #include "llvm/Transforms/Utils/Local.h"
47 using namespace clang;
48 using namespace CodeGen;
// NOTE(review): fragment of the AST->LLVM calling-convention switch; interior
// case labels (original lines 60, 63-74, 76) are elided in this view, and each
// content line carries a stray original line number from a mangled paste.
59 case CC_Win64:
return llvm::CallingConv::Win64;
61 case CC_AAPCS:
return llvm::CallingConv::ARM_AAPCS;
62 case CC_AAPCS_VFP:
return llvm::CallingConv::ARM_AAPCS_VFP;
75 case CC_Swift:
return llvm::CallingConv::Swift;
77 case CC_M68kRTD:
return llvm::CallingConv::M68k_RTD;
// NOTE(review): below is a fragment of a parameter-appending helper. It
// checks that prefix args plus the prototype's params fit in totalArgs,
// reserves/resizes paramInfos, and inserts an extra empty entry after any
// parameter flagged hasPassObjectSize() — presumably for a synthesized
// pass_object_size argument (see the assert message); confirm against the
// full source.
131 unsigned totalArgs) {
133 assert(paramInfos.size() <= prefixArgs);
134 assert(proto->
getNumParams() + prefixArgs <= totalArgs);
136 paramInfos.reserve(totalArgs);
139 paramInfos.resize(prefixArgs);
143 paramInfos.push_back(ParamInfo);
145 if (ParamInfo.hasPassObjectSize())
146 paramInfos.emplace_back();
149 assert(paramInfos.size() <= totalArgs &&
150 "Did we forget to insert pass_object_size args?");
152 paramInfos.resize(totalArgs);
// NOTE(review): fast path — a prototype without ExtParameterInfos just
// appends its parameter types wholesale; otherwise each param is pushed
// individually so pass_object_size params can be detected.
162 if (!FPT->hasExtParameterInfos()) {
163 assert(paramInfos.empty() &&
164 "We have paramInfos, but the prototype doesn't?");
165 prefix.append(FPT->param_type_begin(), FPT->param_type_end());
169 unsigned PrefixSize = prefix.size();
173 prefix.reserve(prefix.size() + FPT->getNumParams());
175 auto ExtInfos = FPT->getExtParameterInfos();
176 assert(ExtInfos.size() == FPT->getNumParams());
177 for (
unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
178 prefix.push_back(FPT->getParamType(I));
179 if (ExtInfos[I].hasPassObjectSize())
202 FTP->getExtInfo(), paramInfos,
Required);
// NOTE(review): fragment mapping declaration attributes (FastCall, ThisCall,
// VectorCall, Pcs, AArch64 vector/SVE PCS, AMDGPU kernel, Intel OpenCL
// built-in, Preserve{Most,All,None}, RISC-V vector CC) to calling
// conventions. The returned CC values (original lines between the ifs) are
// elided in this view.
220 if (D->
hasAttr<FastCallAttr>())
226 if (D->
hasAttr<ThisCallAttr>())
229 if (D->
hasAttr<VectorCallAttr>())
235 if (PcsAttr *PCS = D->
getAttr<PcsAttr>())
238 if (D->
hasAttr<AArch64VectorPcsAttr>())
241 if (D->
hasAttr<AArch64SVEPcsAttr>())
244 if (D->
hasAttr<AMDGPUKernelCallAttr>())
247 if (D->
hasAttr<IntelOclBiccAttr>())
256 if (D->
hasAttr<PreserveMostAttr>())
259 if (D->
hasAttr<PreserveAllAttr>())
265 if (D->
hasAttr<PreserveNoneAttr>())
268 if (D->
hasAttr<RISCVVectorCCAttr>())
290 *
this,
true, argTypes,
297 if (FD->
hasAttr<CUDAGlobalAttr>()) {
// NOTE(review): C++ method-arrangement fragment — constructors/destructors
// are asserted to take a different path than plain methods.
310 assert(!isa<CXXConstructorDecl>(MD) &&
"wrong method for constructors!");
311 assert(!isa<CXXDestructorDecl>(MD) &&
"wrong method for destructors!");
332 !
Target.getCXXABI().hasConstructorVariants();
// NOTE(review): structor arrangement — inherited constructors and
// ABI-added prefix/suffix args (AddedArgs.Prefix/Suffix) are spliced into
// paramInfos; interior lines are elided, so exact splice counts are not
// visible here.
337 auto *MD = cast<CXXMethodDecl>(GD.
getDecl());
345 bool PassParams =
true;
347 if (
auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
350 if (
auto Inherited = CD->getInheritedConstructor())
362 if (!paramInfos.empty()) {
365 paramInfos.insert(paramInfos.begin() + 1, AddedArgs.
Prefix,
368 paramInfos.append(AddedArgs.
Suffix,
// NOTE(review): variadic methods pin the required-arg count to the number
// of fixed argTypes; the non-variadic alternative branch is elided.
373 (PassParams && MD->isVariadic() ?
RequiredArgs(argTypes.size())
383 argTypes, extInfo, paramInfos, required);
// NOTE(review): fragments of call-argument collection loops (bodies elided)
// followed by a constructor-call arrangement: 'this' plus ExtraPrefixArgs
// form the prefix, and ExtParameterInfos are forwarded only when the
// prototype's args are actually passed (PassProtoArgs).
389 for (
auto &
arg : args)
397 for (
auto &
arg : args)
404 unsigned prefixArgs,
unsigned totalArgs) {
424 unsigned ExtraPrefixArgs,
425 unsigned ExtraSuffixArgs,
426 bool PassProtoArgs) {
429 for (
const auto &Arg : args)
// The implicit 'this' argument accounts for the +1 on the prefix count.
433 unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;
438 FPT, TotalPrefixArgs + ExtraSuffixArgs)
452 if (PassProtoArgs && FPT->hasExtParameterInfos()) {
459 ArgTypes, Info, ParamInfos,
Required);
// NOTE(review): method-declaration arrangement; implicit-object member
// functions take a distinct path (body elided).
467 if (MD->isImplicitObjectMemberFunction())
472 assert(isa<FunctionType>(FTy));
479 std::nullopt, noProto->getExtInfo(), {},
// NOTE(review): Objective-C fragment — per-parameter noescape is recorded,
// and ns_returns_retained interacts with ARC (surrounding logic elided).
514 I->hasAttr<NoEscapeAttr>());
515 extParamInfos.push_back(extParamInfo);
522 if (
getContext().getLangOpts().ObjCAutoRefCount &&
523 MD->
hasAttr<NSReturnsRetainedAttr>())
549 if (isa<CXXConstructorDecl>(GD.
getDecl()) ||
550 isa<CXXDestructorDecl>(GD.
getDecl()))
// Thunk arrangement: only virtual methods have thunks; the first proto
// param (the adjusted 'this') is kept, and Context.IntTy is appended —
// presumably a variadic-thunk marker; confirm against the full source.
563 assert(MD->
isVirtual() &&
"only methods have thunks");
580 ArgTys.push_back(*FTP->param_type_begin());
582 ArgTys.push_back(Context.
IntTy);
// NOTE(review): free-function-like call arrangement fragment. Variadic
// prototypes and ExtParameterInfos each get special handling (bodies
// elided); chainCall reserves one leading slot.
597 unsigned numExtraRequiredArgs,
599 assert(args.size() >= numExtraRequiredArgs);
609 if (proto->isVariadic())
612 if (proto->hasExtParameterInfos())
622 cast<FunctionNoProtoType>(fnType))) {
628 for (
const auto &
arg : args)
633 paramInfos, required);
645 chainCall ? 1 : 0, chainCall);
674 for (
const auto &Arg : args)
// Block/message-send-style call: numPrefixArgs plus the receiver must fit
// within the provided args (see the assert message).
707 unsigned numPrefixArgs) {
708 assert(numPrefixArgs + 1 <= args.size() &&
709 "Emitting a call with less args than the required prefix?");
721 paramInfos, required);
// Calls matching the signature's arity take the fast path; otherwise the
// signature's param infos are extended/truncated to the actual arg count.
733 assert(signature.
arg_size() <= args.size());
734 if (signature.
arg_size() == args.size())
739 if (!sigParamInfos.empty()) {
740 paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
741 paramInfos.resize(args.size());
// NOTE(review): arrangeLLVMFunctionInfo-style caching fragment — a
// FoldingSetNodeID profiles the arrangement, FunctionInfos is probed via
// insertPos, and FunctionsBeingProcessed guards against recursive
// arrangement (see asserts). SPIR_KERNEL gets dedicated handling twice;
// the elided bodies are not visible here.
773 assert(llvm::all_of(argTypes,
777 llvm::FoldingSetNodeID
ID;
782 bool isDelegateCall =
785 info, paramInfos, required, resultType, argTypes);
787 void *insertPos =
nullptr;
795 if (CC == llvm::CallingConv::SPIR_KERNEL &&
803 info, paramInfos, resultType, argTypes, required);
804 FunctionInfos.InsertNode(FI, insertPos);
806 bool inserted = FunctionsBeingProcessed.insert(FI).second;
808 assert(inserted &&
"Recursively being processed?");
811 if (CC == llvm::CallingConv::SPIR_KERNEL) {
829 if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() ==
nullptr)
832 bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
833 assert(erased &&
"Not in set?");
// NOTE(review): CGFunctionInfo::create-style fragment — trailing storage
// for ArgInfo and ExtParameterInfo is allocated in one shot
// (totalSizeToAlloc with argTypes.size() + 1 slots: result type occupies
// getArgsBuffer()[0], args follow at [i + 1]).
839 bool chainCall,
bool delegateCall,
845 assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
850 operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
851 argTypes.size() + 1, paramInfos.size()));
854 FI->CallingConvention = llvmCC;
855 FI->EffectiveCallingConvention = llvmCC;
856 FI->ASTCallingConvention = info.
getCC();
857 FI->InstanceMethod = instanceMethod;
858 FI->ChainCall = chainCall;
859 FI->DelegateCall = delegateCall;
865 FI->Required = required;
868 FI->ArgStruct =
nullptr;
869 FI->ArgStructAlign = 0;
870 FI->NumArgs = argTypes.size();
871 FI->HasExtParameterInfos = !paramInfos.empty();
872 FI->getArgsBuffer()[0].
type = resultType;
873 FI->MaxVectorWidth = 0;
874 for (
unsigned i = 0, e = argTypes.size(); i != e; ++i)
875 FI->getArgsBuffer()[i + 1].
type = argTypes[i];
876 for (
unsigned i = 0, e = paramInfos.size(); i != e; ++i)
877 FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
// NOTE(review): small class hierarchy describing how an aggregate type is
// expanded into individual IR arguments. Kind tags (TEK_ConstantArray,
// TEK_Record, TEK_None referenced below; the enum body at original lines
// 889-899 is elided) drive LLVM-style classof() dispatch. Member
// declarations and closing braces are partially elided in this view.
887 struct TypeExpansion {
888 enum TypeExpansionKind {
900 const TypeExpansionKind
Kind;
902 TypeExpansion(TypeExpansionKind K) :
Kind(K) {}
903 virtual ~TypeExpansion() {}
906 struct ConstantArrayExpansion : TypeExpansion {
911 : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
912 static bool classof(
const TypeExpansion *TE) {
913 return TE->Kind == TEK_ConstantArray;
917 struct RecordExpansion : TypeExpansion {
924 : TypeExpansion(TEK_Record), Bases(
std::move(Bases)),
925 Fields(
std::move(Fields)) {}
926 static bool classof(
const TypeExpansion *TE) {
927 return TE->Kind == TEK_Record;
931 struct ComplexExpansion : TypeExpansion {
935 static bool classof(
const TypeExpansion *TE) {
940 struct NoExpansion : TypeExpansion {
941 NoExpansion() : TypeExpansion(TEK_None) {}
942 static bool classof(
const TypeExpansion *TE) {
943 return TE->Kind == TEK_None;
// NOTE(review): factory fragment building a TypeExpansion for a QualType:
// constant arrays expand per-element; unions contribute only their largest
// field (UnionSize/LargestFD tracking); records reject flexible arrays,
// bit-fields, and dynamic classes (see assert messages), skip zero-length
// bit-fields, and collect bases + fields; complex types split into
// element-typed halves; everything else gets NoExpansion.
948 static std::unique_ptr<TypeExpansion>
951 return std::make_unique<ConstantArrayExpansion>(AT->getElementType(),
959 "Cannot expand structure with flexible array.");
966 for (
const auto *FD : RD->
fields()) {
967 if (FD->isZeroLengthBitField(Context))
969 assert(!FD->isBitField() &&
970 "Cannot expand structure with bit-field members.");
972 if (UnionSize < FieldSize) {
973 UnionSize = FieldSize;
978 Fields.push_back(LargestFD);
980 if (
const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
981 assert(!CXXRD->isDynamicClass() &&
982 "cannot expand vtable pointers in dynamic classes");
983 llvm::append_range(Bases, llvm::make_pointer_range(CXXRD->bases()));
986 for (
const auto *FD : RD->
fields()) {
987 if (FD->isZeroLengthBitField(Context))
989 assert(!FD->isBitField() &&
990 "Cannot expand structure with bit-field members.");
991 Fields.push_back(FD);
994 return std::make_unique<RecordExpansion>(std::move(Bases),
998 return std::make_unique<ComplexExpansion>(CT->getElementType());
1000 return std::make_unique<NoExpansion>();
// NOTE(review): fragments that walk a TypeExpansion. First, a size/type
// enumeration: constant arrays recurse per element, records recurse into
// bases then fields, complex counts as two, NoExpansion as the base case
// (recursive call bodies elided).
1005 if (
auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1008 if (
auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1010 for (
auto BS : RExp->Bases)
1012 for (
auto FD : RExp->Fields)
1016 if (isa<ComplexExpansion>(Exp.get()))
1018 assert(isa<NoExpansion>(Exp.get()));
1026 if (
auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1027 for (
int i = 0, n = CAExp->NumElts; i < n; i++) {
1030 }
else if (
auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1031 for (
auto BS : RExp->Bases)
1033 for (
auto FD : RExp->Fields)
1035 }
else if (
auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
1040 assert(isa<NoExpansion>(Exp.get()));
// Helper: invoke Fn on the address of each element of a constant-array
// expansion (element-address computation elided).
1046 ConstantArrayExpansion *CAE,
1048 llvm::function_ref<
void(
Address)> Fn) {
1049 for (
int i = 0, n = CAE->NumElts; i < n; i++) {
// NOTE(review): ExpandTypeFromArgs fragment — rebuilds an lvalue from a
// run of expanded IR function arguments (AI is advanced as values are
// consumed): arrays/records recurse per element/base/field, complex
// consumes real+imag, the scalar case consumes one value and has extra
// pointer handling (elided).
1056 llvm::Function::arg_iterator &AI) {
1058 "Unexpected non-simple lvalue during struct expansion.");
1061 if (
auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1064 LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
1065 ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
1067 }
else if (
auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1077 ExpandTypeFromArgs(BS->
getType(), SubLV, AI);
1079 for (
auto FD : RExp->Fields) {
1082 ExpandTypeFromArgs(FD->getType(), SubLV, AI);
1084 }
else if (isa<ComplexExpansion>(Exp.get())) {
1085 auto realValue = &*AI++;
1086 auto imagValue = &*AI++;
1091 assert(isa<NoExpansion>(Exp.get()));
1092 llvm::Value *Arg = &*AI++;
1099 if (Arg->getType()->isPointerTy()) {
// NOTE(review): ExpandTypeToArgs fragment — the inverse of
// ExpandTypeFromArgs: flattens an rvalue into IRCallArgs at IRCallArgPos.
// Arrays iterate elements via the constant-array helper, records recurse
// into bases then fields, complex emits both halves, and the scalar case
// bitcasts to the exact IR parameter type when it differs. Interior lines
// (signature tail, element-arg construction) are elided in this view.
1108 void CodeGenFunction::ExpandTypeToArgs(
1112 if (
auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1116 *
this, CAExp, Addr, [&](
Address EltAddr) {
1120 ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
1123 }
else if (
auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1134 ExpandTypeToArgs(BS->
getType(), BaseArg, IRFuncTy, IRCallArgs,
1139 for (
auto FD : RExp->Fields) {
1142 ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
1145 }
else if (isa<ComplexExpansion>(Exp.get())) {
1147 IRCallArgs[IRCallArgPos++] = CV.first;
1148 IRCallArgs[IRCallArgPos++] = CV.second;
1150 assert(isa<NoExpansion>(Exp.get()));
1152 assert(RV.isScalar() &&
1153 "Unexpected non-scalar rvalue during struct expansion.");
// Scalar case: only bitcast when a parameter slot exists and the types
// actually differ.
1156 llvm::Value *
V = RV.getScalarVal();
1157 if (IRCallArgPos < IRFuncTy->getNumParams() &&
1158 V->getType() != IRFuncTy->getParamType(IRCallArgPos))
1159 V =
Builder.CreateBitCast(
V, IRFuncTy->getParamType(IRCallArgPos));
1161 IRCallArgs[IRCallArgPos++] =
V;
// NOTE(review): coercion-helper fragments. First, a temp-alloca helper with
// a default "tmp" name, then an EnterStructPointerForCoercedAccess-style
// helper: empty structs pass the pointer through; a first element large
// enough relative to DstSize may be descended into (condition elided).
1169 const Twine &Name =
"tmp") {
1183 llvm::StructType *SrcSTy,
1186 if (SrcSTy->getNumElements() == 0)
return SrcPtr;
1196 if (FirstEltSize < DstSize &&
1205 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
// NOTE(review): int/pointer coercion fragment. Pointer->pointer is a plain
// bitcast; otherwise the value is funneled through an integer type. On
// big-endian targets the value is shifted so the coerced bits sit at the
// correct end (lshr+trunc when shrinking, zext+shl when growing); the
// little-endian path uses a straight IntCast. A final IntToPtr restores
// pointer results.
1221 if (Val->getType() == Ty)
1224 if (isa<llvm::PointerType>(Val->getType())) {
1226 if (isa<llvm::PointerType>(Ty))
1227 return CGF.
Builder.CreateBitCast(Val, Ty,
"coerce.val");
1233 llvm::Type *DestIntTy = Ty;
1234 if (isa<llvm::PointerType>(DestIntTy))
1237 if (Val->getType() != DestIntTy) {
1239 if (DL.isBigEndian()) {
1242 uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
1243 uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);
1245 if (SrcSize > DstSize) {
1246 Val = CGF.
Builder.CreateLShr(Val, SrcSize - DstSize,
"coerce.highbits");
1247 Val = CGF.
Builder.CreateTrunc(Val, DestIntTy,
"coerce.val.ii");
1249 Val = CGF.
Builder.CreateZExt(Val, DestIntTy,
"coerce.val.ii");
1250 Val = CGF.
Builder.CreateShl(Val, DstSize - SrcSize,
"coerce.highbits");
1254 Val = CGF.
Builder.CreateIntCast(Val, DestIntTy,
false,
"coerce.val.ii");
1258 if (isa<llvm::PointerType>(Ty))
1259 Val = CGF.
Builder.CreateIntToPtr(Val, Ty,
"coerce.val.ip");
// NOTE(review): CreateCoercedLoad-style fragment — struct sources may be
// entered for access; int/ptr pairs take the coercion above; a fixed
// vector loaded into a scalable destination is widened via
// llvm.vector.insert at index zero (InsertVector with a null i64), with a
// special i1xN -> i8x(N/8) reinterpretation when the element counts allow.
1282 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
1284 DstSize.getFixedValue(), CGF);
1292 if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
1293 (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
1299 if (!SrcSize.isScalable() && !DstSize.isScalable() &&
1300 SrcSize.getFixedValue() >= DstSize.getFixedValue()) {
1314 if (
auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(Ty)) {
1315 if (
auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
1318 if (ScalableDstTy->getElementType()->isIntegerTy(1) &&
1319 ScalableDstTy->getElementCount().isKnownMultipleOf(8) &&
1320 FixedSrcTy->getElementType()->isIntegerTy(8)) {
1321 ScalableDstTy = llvm::ScalableVectorType::get(
1322 FixedSrcTy->getElementType(),
1323 ScalableDstTy->getElementCount().getKnownMinValue() / 8);
1325 if (ScalableDstTy->getElementType() == FixedSrcTy->getElementType()) {
1327 auto *UndefVec = llvm::UndefValue::get(ScalableDstTy);
1328 auto *Zero = llvm::Constant::getNullValue(CGF.
CGM.
Int64Ty);
1329 llvm::Value *Result = CGF.
Builder.CreateInsertVector(
1330 ScalableDstTy, UndefVec,
Load, Zero,
"cast.scalable");
1331 if (ScalableDstTy != Ty)
1332 Result = CGF.
Builder.CreateBitCast(Result, Ty);
1344 llvm::ConstantInt::get(CGF.
IntPtrTy, SrcSize.getKnownMinValue()));
// NOTE(review): aggregate store fragment — struct values are stored
// element-by-element via ExtractValue; then a CreateCoercedStore-style
// fragment mirrors the load path (address-space-mismatched pointers,
// int/ptr coercion, scalable-vector cases), falling back to a
// memcpy-with-size of DstSize (the IntPtrTy constant at the end).
1353 bool DestIsVolatile) {
1355 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(Val->getType())) {
1356 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1358 llvm::Value *Elt =
Builder.CreateExtractValue(Val, i);
1376 llvm::Type *SrcTy = Src->getType();
1378 if (SrcTy == DstTy) {
1385 if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
1387 SrcSize.getFixedValue(), CGF);
1391 llvm::PointerType *SrcPtrTy = llvm::dyn_cast<llvm::PointerType>(SrcTy);
1392 llvm::PointerType *DstPtrTy = llvm::dyn_cast<llvm::PointerType>(DstTy);
1393 if (SrcPtrTy && DstPtrTy &&
1394 SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) {
1402 if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
1403 (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
1412 if (isa<llvm::ScalableVectorType>(SrcTy) ||
1413 isa<llvm::ScalableVectorType>(DstTy) ||
1414 SrcSize.getFixedValue() <= DstSize.getFixedValue()) {
1433 llvm::ConstantInt::get(CGF.
IntPtrTy, DstSize.getFixedValue()));
// NOTE(review): ClangToLLVMArgMapping — maps each Clang-level argument to
// its IR argument slot(s), plus optional inalloca/sret/padding slots.
// InvalidIndex (~0U) marks "absent". Member declarations, access
// specifiers, and several closing braces are elided in this view.
1452 class ClangToLLVMArgMapping {
1453 static const unsigned InvalidIndex = ~0
U;
1454 unsigned InallocaArgNo;
1456 unsigned TotalIRArgs;
// Per-Clang-argument record: padding slot, first IR slot, and how many IR
// slots the argument occupies.
1460 unsigned PaddingArgIndex;
1463 unsigned FirstArgIndex;
1464 unsigned NumberOfArgs;
1467 : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
1475 bool OnlyRequiredArgs =
false)
1476 : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
1477 ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
1478 construct(Context, FI, OnlyRequiredArgs);
// Accessors assert presence before returning an index.
1481 bool hasInallocaArg()
const {
return InallocaArgNo != InvalidIndex; }
1482 unsigned getInallocaArgNo()
const {
1483 assert(hasInallocaArg());
1484 return InallocaArgNo;
1487 bool hasSRetArg()
const {
return SRetArgNo != InvalidIndex; }
1488 unsigned getSRetArgNo()
const {
1489 assert(hasSRetArg());
1493 unsigned totalIRArgs()
const {
return TotalIRArgs; }
1495 bool hasPaddingArg(
unsigned ArgNo)
const {
1496 assert(ArgNo < ArgInfo.size());
1497 return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
1499 unsigned getPaddingArgNo(
unsigned ArgNo)
const {
1500 assert(hasPaddingArg(ArgNo));
1501 return ArgInfo[ArgNo].PaddingArgIndex;
1506 std::pair<unsigned, unsigned> getIRArgs(
unsigned ArgNo)
const {
1507 assert(ArgNo < ArgInfo.size());
1508 return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
1509 ArgInfo[ArgNo].NumberOfArgs);
1514 bool OnlyRequiredArgs);
// NOTE(review): construct() fragment — walks the CGFunctionInfo assigning
// IR slot numbers: sret may swap with 'this' (SwapThisWithSRet), padding
// slots are numbered inline, expanded struct coercions consume one slot
// per element, and an inalloca slot (if any) goes last. The ABI-kind
// switch that selects NumberOfArgs is mostly elided.
1517 void ClangToLLVMArgMapping::construct(
const ASTContext &Context,
1519 bool OnlyRequiredArgs) {
1520 unsigned IRArgNo = 0;
1521 bool SwapThisWithSRet =
false;
1526 SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
1537 auto &IRArgs = ArgInfo[ArgNo];
1540 IRArgs.PaddingArgIndex = IRArgNo++;
1546 llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.
getCoerceToType());
1548 IRArgs.NumberOfArgs = STy->getNumElements();
1550 IRArgs.NumberOfArgs = 1;
1556 IRArgs.NumberOfArgs = 1;
1561 IRArgs.NumberOfArgs = 0;
1571 if (IRArgs.NumberOfArgs > 0) {
1572 IRArgs.FirstArgIndex = IRArgNo;
1573 IRArgNo += IRArgs.NumberOfArgs;
1578 if (IRArgNo == 1 && SwapThisWithSRet)
1581 assert(ArgNo == ArgInfo.size());
1584 InallocaArgNo = IRArgNo++;
1586 TotalIRArgs = IRArgNo;
// Small helpers: "does the return use an sret-like mechanism", and a
// builtin float-kind switch (surrounding functions elided).
1594 return RI.
isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
1609 switch (BT->getKind()) {
1614 case BuiltinType::Double:
1616 case BuiltinType::LongDouble:
1627 if (BT->getKind() == BuiltinType::LongDouble)
// NOTE(review): GetFunctionType-style fragment — builds the llvm::
// FunctionType for a CGFunctionInfo. FunctionsBeingProcessed guards
// recursion; the return is translated per ABI kind (indirect returns
// become an opaque pointer in the right address space); then each Clang
// arg fills its IR slots via ClangToLLVMArgMapping: sret/inalloca slots,
// optional padding, indirect pointers, expanded struct elements, or the
// coerce-to type directly. Many case bodies are elided.
1640 llvm::FunctionType *
1643 bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1645 assert(Inserted &&
"Recursively being processed?");
1647 llvm::Type *resultType =
nullptr;
1652 llvm_unreachable(
"Invalid ABI kind for return argument");
1664 resultType = llvm::PointerType::get(
getLLVMContext(), addressSpace);
1680 ClangToLLVMArgMapping IRFunctionArgs(
getContext(), FI,
true);
1684 if (IRFunctionArgs.hasSRetArg()) {
1687 ArgTypes[IRFunctionArgs.getSRetArgNo()] =
1692 if (IRFunctionArgs.hasInallocaArg())
1693 ArgTypes[IRFunctionArgs.getInallocaArgNo()] =
1700 for (; it != ie; ++it, ++ArgNo) {
1704 if (IRFunctionArgs.hasPaddingArg(ArgNo))
1705 ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1708 unsigned FirstIRArg, NumIRArgs;
1709 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1714 assert(NumIRArgs == 0);
1718 assert(NumIRArgs == 1);
1720 ArgTypes[FirstIRArg] = llvm::PointerType::get(
1724 assert(NumIRArgs == 1);
1725 ArgTypes[FirstIRArg] = llvm::PointerType::get(
// Coerced struct types spread one IR arg per element; otherwise the
// coerce-to type fills a single slot.
1733 llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1735 assert(NumIRArgs == st->getNumElements());
1736 for (
unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1737 ArgTypes[FirstIRArg + i] = st->getElementType(i);
1739 assert(NumIRArgs == 1);
1740 ArgTypes[FirstIRArg] = argType;
1746 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1748 *ArgTypesIter++ = EltTy;
1750 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1755 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1757 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1762 bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1763 assert(Erased &&
"Not in set?");
1765 return llvm::FunctionType::get(resultType, ArgTypes, FI.
isVariadic());
// NOTE(review): fragment adding prototype-derived function attributes —
// nounwind, then AArch64 SME streaming-mode and ZA/ZT0 state strings
// (enabled/compatible, preserves/in/out/inout); the conditions selecting
// each string are elided in this view.
1779 llvm::AttrBuilder &FuncAttrs,
1786 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1790 FuncAttrs.addAttribute(
"aarch64_pstate_sm_enabled");
1792 FuncAttrs.addAttribute(
"aarch64_pstate_sm_compatible");
1796 FuncAttrs.addAttribute(
"aarch64_preserves_za");
1798 FuncAttrs.addAttribute(
"aarch64_in_za");
1800 FuncAttrs.addAttribute(
"aarch64_out_za");
1802 FuncAttrs.addAttribute(
"aarch64_inout_za");
1806 FuncAttrs.addAttribute(
"aarch64_preserves_zt0");
1808 FuncAttrs.addAttribute(
"aarch64_in_zt0");
1810 FuncAttrs.addAttribute(
"aarch64_out_zt0");
1812 FuncAttrs.addAttribute(
"aarch64_inout_zt0");
// OpenMP [[omp::assume]] strings are split on commas and joined into the
// single llvm.assume-style key attribute.
1816 const Decl *Callee) {
1822 for (
const OMPAssumeAttr *AA : Callee->specific_attrs<OMPAssumeAttr>())
1823 AA->getAssumption().split(Attrs,
",");
1826 FuncAttrs.addAttribute(llvm::AssumptionAttrKey,
1827 llvm::join(Attrs.begin(), Attrs.end(),
","));
1836 if (
const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
1837 return ClassDecl->hasTrivialDestructor();
// NOTE(review): "strict return" decision fragment — MSan, non-C++, and
// extern "C" functions/variables get early answers (bodies elided);
// otherwise strict return holds if the option is set, the return cannot be
// dropped, or the return sanitizer is active.
1843 const Decl *TargetDecl) {
1849 if (
Module.getLangOpts().Sanitize.has(SanitizerKind::Memory))
1853 if (!
Module.getLangOpts().CPlusPlus)
1856 if (
const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(TargetDecl)) {
1857 if (FDecl->isExternC())
1859 }
else if (
const VarDecl *VDecl = dyn_cast<VarDecl>(TargetDecl)) {
1861 if (VDecl->isExternC())
1869 return Module.getCodeGenOpts().StrictReturn ||
1870 !
Module.MayDropFunctionReturn(
Module.getContext(), RetTy) ||
1871 Module.getLangOpts().Sanitize.has(SanitizerKind::Return);
// FP-accuracy string -> enum mapping (no default .Case fallback visible
// here; the companion function below asserts the string is one of the
// five known values before switching).
1875 return llvm::StringSwitch<llvm::fp::FPAccuracy>(FPAccuracyStr)
1876 .Case(
"high", llvm::fp::FPAccuracy::High)
1877 .Case(
"medium", llvm::fp::FPAccuracy::Medium)
1878 .Case(
"low", llvm::fp::FPAccuracy::Low)
1879 .Case(
"sycl", llvm::fp::FPAccuracy::SYCL)
1880 .Case(
"cuda", llvm::fp::FPAccuracy::CUDA);
1884 assert(FPAccuracyStr ==
"high" || FPAccuracyStr ==
"medium" ||
1885 FPAccuracyStr ==
"low" || FPAccuracyStr ==
"sycl" ||
1886 FPAccuracyStr ==
"cuda");
1887 return llvm::StringSwitch<int32_t>(FPAccuracyStr)
// NOTE(review): per-function FP-accuracy attributes — a function-map entry
// takes precedence; otherwise (only when no attrs were added) the default
// accuracy applies. Both paths attach "fpbuiltin-max-error" plus constant
// metadata. Interior lines are elided.
1895 void CodeGenModule::getDefaultFunctionFPAccuracyAttributes(
1896 StringRef Name, llvm::AttrBuilder &FuncAttrs, llvm::Metadata *&MD,
1897 unsigned ID,
const llvm::Type *FuncType) {
1908 if (FuncMapIt !=
getLangOpts().FPAccuracyFuncMap.end()) {
1909 StringRef FPAccuracyVal = llvm::fp::getAccuracyForFPBuiltin(
1911 assert(!FPAccuracyVal.empty() &&
"A valid accuracy value is expected");
1912 FuncAttrs.addAttribute(
"fpbuiltin-max-error", FPAccuracyVal);
1913 MD = llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
1917 if (FuncAttrs.attrs().size() == 0)
1919 StringRef FPAccuracyVal = llvm::fp::getAccuracyForFPBuiltin(
1921 assert(!FPAccuracyVal.empty() &&
"A valid accuracy value is expected");
1922 FuncAttrs.addAttribute(
"fpbuiltin-max-error", FPAccuracyVal);
1923 MD = llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
// Denormal-mode attributes: emit the base mode when non-default, and the
// f32 override only when it is valid and differs from the base mode.
1932 llvm::DenormalMode FP32DenormalMode,
1933 llvm::AttrBuilder &FuncAttrs) {
1934 if (FPDenormalMode != llvm::DenormalMode::getDefault())
1935 FuncAttrs.addAttribute(
"denormal-fp-math", FPDenormalMode.str());
1937 if (FP32DenormalMode != FPDenormalMode && FP32DenormalMode.isValid())
1938 FuncAttrs.addAttribute(
"denormal-fp-math-f32", FP32DenormalMode.str());
// NOTE(review): getTrivialDefaultFunctionAttributes-style fragment —
// translates CodeGenOptions/LangOptions into IR function attributes.
// Optnone handling, several condition lines, and 'break's between switch
// cases are elided in this view.
1946 llvm::AttrBuilder &FuncAttrs) {
1952 StringRef Name,
bool HasOptnone,
const CodeGenOptions &CodeGenOpts,
1954 llvm::AttrBuilder &FuncAttrs) {
// Size optimization levels: -Os adds optsize, -Oz additionally minsize.
1957 if (CodeGenOpts.OptimizeSize)
1958 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1959 if (CodeGenOpts.OptimizeSize == 2)
1960 FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1963 if (CodeGenOpts.DisableRedZone)
1964 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1965 if (CodeGenOpts.IndirectTlsSegRefs)
1966 FuncAttrs.addAttribute(
"indirect-tls-seg-refs");
1967 if (CodeGenOpts.NoImplicitFloat)
1968 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
// Call-site-only attributes (nobuiltin, trap handler, frame pointer).
1970 if (AttrOnCallSite) {
1975 FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1977 FuncAttrs.addAttribute(
"trap-func-name", CodeGenOpts.
TrapFuncName);
1979 switch (CodeGenOpts.getFramePointer()) {
1985 FuncAttrs.addAttribute(
"frame-pointer",
1987 CodeGenOpts.getFramePointer()));
1990 if (CodeGenOpts.LessPreciseFPMAD)
1991 FuncAttrs.addAttribute(
"less-precise-fpmad",
"true");
1993 if (CodeGenOpts.NullPointerIsValid)
1994 FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid);
1997 FuncAttrs.addAttribute(
"no-trapping-math",
"true");
// Fast-math-style string attributes derived from LangOptions; the
// "unsafe-fp-math" umbrella requires reassoc+recip+nsz+afn plus a fast
// FP-contract mode.
2001 if (LangOpts.NoHonorInfs)
2002 FuncAttrs.addAttribute(
"no-infs-fp-math",
"true");
2003 if (LangOpts.NoHonorNaNs)
2004 FuncAttrs.addAttribute(
"no-nans-fp-math",
"true");
2005 if (LangOpts.ApproxFunc)
2006 FuncAttrs.addAttribute(
"approx-func-fp-math",
"true");
2007 if (LangOpts.AllowFPReassoc && LangOpts.AllowRecip &&
2008 LangOpts.NoSignedZero && LangOpts.ApproxFunc &&
2009 (LangOpts.getDefaultFPContractMode() ==
2010 LangOptions::FPModeKind::FPM_Fast ||
2011 LangOpts.getDefaultFPContractMode() ==
2012 LangOptions::FPModeKind::FPM_FastHonorPragmas))
2013 FuncAttrs.addAttribute(
"unsafe-fp-math",
"true");
2014 if (CodeGenOpts.SoftFloat)
2015 FuncAttrs.addAttribute(
"use-soft-float",
"true");
2016 FuncAttrs.addAttribute(
"stack-protector-buffer-size",
2017 llvm::utostr(CodeGenOpts.SSPBufferSize));
2018 if (LangOpts.NoSignedZero)
2019 FuncAttrs.addAttribute(
"no-signed-zeros-fp-math",
"true");
2022 const std::vector<std::string> &Recips = CodeGenOpts.
Reciprocals;
2023 if (!Recips.empty())
2024 FuncAttrs.addAttribute(
"reciprocal-estimates",
2025 llvm::join(Recips,
","));
2029 FuncAttrs.addAttribute(
"prefer-vector-width",
2032 if (CodeGenOpts.StackRealignment)
2033 FuncAttrs.addAttribute(
"stackrealign");
2034 if (CodeGenOpts.Backchain)
2035 FuncAttrs.addAttribute(
"backchain");
2036 if (CodeGenOpts.EnableSegmentedStacks)
2037 FuncAttrs.addAttribute(
"split-stack");
2039 if (CodeGenOpts.SpeculativeLoadHardening)
2040 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
// zero-call-used-regs policy -> string attribute; Skip removes any
// inherited attribute instead of adding one. 'break' lines between cases
// are elided in this view.
2043 switch (CodeGenOpts.getZeroCallUsedRegs()) {
2044 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Skip:
2045 FuncAttrs.removeAttribute(
"zero-call-used-regs");
2047 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPRArg:
2048 FuncAttrs.addAttribute(
"zero-call-used-regs",
"used-gpr-arg");
2050 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPR:
2051 FuncAttrs.addAttribute(
"zero-call-used-regs",
"used-gpr");
2053 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedArg:
2054 FuncAttrs.addAttribute(
"zero-call-used-regs",
"used-arg");
2057 FuncAttrs.addAttribute(
"zero-call-used-regs",
"used");
2059 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPRArg:
2060 FuncAttrs.addAttribute(
"zero-call-used-regs",
"all-gpr-arg");
2062 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPR:
2063 FuncAttrs.addAttribute(
"zero-call-used-regs",
"all-gpr");
2065 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllArg:
2066 FuncAttrs.addAttribute(
"zero-call-used-regs",
"all-arg");
2069 FuncAttrs.addAttribute(
"zero-call-used-regs",
"all");
// Device-side languages (CUDA device, OpenCL, SYCL device): convergent on
// the path above (condition elided) and nounwind below.
2080 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
2085 if ((LangOpts.CUDA && LangOpts.CUDAIsDevice) || LangOpts.OpenCL ||
2086 LangOpts.SYCLIsDevice) {
2087 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
// Pass-through of user-specified key/value function attributes.
2091 StringRef Var,
Value;
2093 FuncAttrs.addAttribute(Var,
Value);
// NOTE(review): target-features merge fragment — features already present
// on the function F win over TargetOptions features (FFeatures are added
// to MergedNames first), duplicates are filtered by bare feature name
// (the '+'/'-' prefix is asserted then dropped for the key), and the final
// list is sorted and joined into "target-features".
2104 const llvm::Function &F,
2106 auto FFeatures = F.getFnAttribute(
"target-features");
2108 llvm::StringSet<> MergedNames;
2110 MergedFeatures.reserve(TargetOpts.
Features.size());
2112 auto AddUnmergedFeatures = [&](
auto &&FeatureRange) {
2113 for (StringRef Feature : FeatureRange) {
2114 if (Feature.empty())
2116 assert(Feature[0] ==
'+' || Feature[0] ==
'-');
2117 StringRef Name = Feature.drop_front(1);
2118 bool Merged = !MergedNames.insert(Name).second;
2120 MergedFeatures.push_back(Feature);
2124 if (FFeatures.isValid())
2125 AddUnmergedFeatures(llvm::split(FFeatures.getValueAsString(),
','));
2126 AddUnmergedFeatures(TargetOpts.
Features);
2128 if (!MergedFeatures.empty()) {
2129 llvm::sort(MergedFeatures);
2130 FuncAttr.addAttribute(
"target-features", llvm::join(MergedFeatures,
","));
// NOTE(review): merge of default definition attributes onto an existing
// function. Interposable functions skip the denormal merging (attributes
// applied early-return path); otherwise the function's existing
// denormal-fp-math[-f32] raw modes are merged with the defaults — default
// results remove the attribute, changed results re-add it.
2137 bool WillInternalize) {
2139 llvm::AttrBuilder FuncAttrs(F.getContext());
2142 if (!TargetOpts.
CPU.empty())
2143 FuncAttrs.addAttribute(
"target-cpu", TargetOpts.
CPU);
2144 if (!TargetOpts.
TuneCPU.empty())
2145 FuncAttrs.addAttribute(
"tune-cpu", TargetOpts.
TuneCPU);
2148 CodeGenOpts, LangOpts,
2151 if (!WillInternalize && F.isInterposable()) {
2156 F.addFnAttrs(FuncAttrs);
2160 llvm::AttributeMask AttrsToRemove;
2162 llvm::DenormalMode DenormModeToMerge = F.getDenormalModeRaw();
2163 llvm::DenormalMode DenormModeToMergeF32 = F.getDenormalModeF32Raw();
2164 llvm::DenormalMode Merged =
2168 if (DenormModeToMergeF32.isValid()) {
2173 if (Merged == llvm::DenormalMode::getDefault()) {
2174 AttrsToRemove.addAttribute(
"denormal-fp-math");
2175 }
else if (Merged != DenormModeToMerge) {
2177 FuncAttrs.addAttribute(
"denormal-fp-math",
2181 if (MergedF32 == llvm::DenormalMode::getDefault()) {
2182 AttrsToRemove.addAttribute(
"denormal-fp-math-f32");
2183 }
else if (MergedF32 != DenormModeToMergeF32) {
2185 FuncAttrs.addAttribute(
"denormal-fp-math-f32",
2189 F.removeFnAttrs(AttrsToRemove);
2194 F.addFnAttrs(FuncAttrs);
// NOTE(review): thin CodeGenModule wrappers delegating to the free
// functions above with the module's CodeGenOpts/LangOpts; argument tails
// are elided in this view.
2197 void CodeGenModule::getTrivialDefaultFunctionAttributes(
2198 StringRef Name,
bool HasOptnone,
bool AttrOnCallSite,
2199 llvm::AttrBuilder &FuncAttrs) {
2200 ::getTrivialDefaultFunctionAttributes(Name, HasOptnone,
getCodeGenOpts(),
2205 void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
2207 bool AttrOnCallSite,
2208 llvm::AttrBuilder &FuncAttrs) {
2209 getTrivialDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite,
2213 if (!AttrOnCallSite)
2218 llvm::AttrBuilder &attrs) {
2219 getDefaultFunctionAttributes(
"",
false,
2221 GetCPUAndFeaturesAttributes(
GlobalDecl(), attrs);
// NOTE(review): no-builtin handling — -fno-builtin adds the blanket
// "no-builtins"; a no_builtin("*") attribute does the same, otherwise each
// named builtin becomes an individual "no-builtin-<name>" attribute.
2226 const NoBuiltinAttr *NBA =
nullptr) {
2227 auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {
2229 AttributeName +=
"no-builtin-";
2230 AttributeName += BuiltinName;
2231 FuncAttrs.addAttribute(AttributeName);
2235 if (LangOpts.NoBuiltin) {
2237 FuncAttrs.addAttribute(
"no-builtins");
2251 if (llvm::is_contained(NBA->builtinNames(),
"*")) {
2252 FuncAttrs.addAttribute(
"no-builtins");
2257 llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
// NOTE(review): noundef-eligibility fragment — the memory type must have
// equal size and store size, and any coerce-to type must not be wider
// than the converted type; matrix types get their own check.
2261 const llvm::DataLayout &DL,
const ABIArgInfo &AI,
2262 bool CheckCoerce =
true) {
2263 llvm::Type *Ty = Types.ConvertTypeForMem(QTy);
2269 if (!DL.typeSizeEqualsStoreSize(Ty))
2276 if (llvm::TypeSize::isKnownGT(DL.getTypeSizeInBits(CoerceTy),
2277 DL.getTypeSizeInBits(Ty)))
2301 if (
const MatrixType *Matrix = dyn_cast<MatrixType>(QTy))
// MaybeUndef detection: only variadic (beyond-required) args or params
// explicitly marked [[clang::maybe_undef]] qualify.
2312 unsigned NumRequiredArgs,
unsigned ArgNo) {
2313 const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
2318 if (ArgNo >= NumRequiredArgs)
2322 if (ArgNo < FD->getNumParams()) {
2323 const ParmVarDecl *Param = FD->getParamDecl(ArgNo);
2324 if (Param && Param->
hasAttr<MaybeUndefAttr>())
// nofpclass compatibility: the IR type itself, or a struct whose elements
// all are (the all_of condition's lead-in is elided).
2341 if (llvm::AttributeFuncs::isNoFPClassCompatibleType(IRTy))
2344 if (llvm::StructType *ST = dyn_cast<llvm::StructType>(IRTy)) {
2346 llvm::all_of(ST->elements(), [](llvm::Type *Ty) {
2347 return llvm::AttributeFuncs::isNoFPClassCompatibleType(Ty);
// FP-class mask from fast-math lang options: no-INFs masks fcInf,
// no-NaNs masks fcNan.
2356 llvm::FPClassTest Mask = llvm::fcNone;
2357 if (LangOpts.NoHonorInfs)
2358 Mask |= llvm::fcInf;
2359 if (LangOpts.NoHonorNaNs)
2360 Mask |= llvm::fcNan;
// NOTE(review): memory-attribute adjustment fragment — a NoModRef memory
// effect is dropped and replaced by a freshly-built memory attribute
// (construction tail elided).
2366 llvm::AttributeList &Attrs) {
2367 if (Attrs.getMemoryEffects().getModRef() == llvm::ModRefInfo::NoModRef) {
2368 Attrs = Attrs.removeFnAttribute(
getLLVMContext(), llvm::Attribute::Memory);
2369 llvm::Attribute MemoryAttr = llvm::Attribute::getWithMemoryEffects(
// NOTE(review): ConstructAttributeList-style fragment — translates the
// target declaration's attributes into IR function/return attributes.
// Many branches and all of the surrounding control flow are elided; the
// visible mappings are one-to-one (e.g. ReturnsTwiceAttr -> returns_twice,
// ConstAttr -> memory(none)+nounwind+willreturn, PureAttr ->
// memory(read)+nounwind+willreturn, RestrictAttr -> noalias return).
2395 llvm::AttributeList &AttrList,
2397 bool AttrOnCallSite,
bool IsThunk) {
2405 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2407 FuncAttrs.addAttribute(
"cmse_nonsecure_call");
2419 bool HasOptnone =
false;
2421 const NoBuiltinAttr *NBA =
nullptr;
// Helper: widen the memory attribute to at least argmem read/write.
2425 auto AddPotentialArgAccess = [&]() {
2426 llvm::Attribute A = FuncAttrs.getAttribute(llvm::Attribute::Memory);
2428 FuncAttrs.addMemoryAttr(A.getMemoryEffects() |
2429 llvm::MemoryEffects::argMemOnly());
2436 if (TargetDecl->
hasAttr<ReturnsTwiceAttr>())
2437 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
2438 if (TargetDecl->
hasAttr<NoThrowAttr>())
2439 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2440 if (TargetDecl->
hasAttr<NoReturnAttr>())
2441 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2442 if (TargetDecl->
hasAttr<ColdAttr>())
2443 FuncAttrs.addAttribute(llvm::Attribute::Cold);
2444 if (TargetDecl->
hasAttr<HotAttr>())
2445 FuncAttrs.addAttribute(llvm::Attribute::Hot);
2446 if (TargetDecl->
hasAttr<NoDuplicateAttr>())
2447 FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
2448 if (TargetDecl->
hasAttr<ConvergentAttr>())
2449 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
2451 if (
const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
// Replaceable operator new/new[] at a call site returns noalias.
2454 if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {
2456 auto Kind = Fn->getDeclName().getCXXOverloadedOperator();
2458 (
Kind == OO_New ||
Kind == OO_Array_New))
2459 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
// Virtual call sites skip noreturn/nobuiltin derivation — presumably
// because the overrider may differ; confirm against the full source.
2462 const bool IsVirtualCall = MD && MD->
isVirtual();
2465 if (!(AttrOnCallSite && IsVirtualCall)) {
2466 if (Fn->isNoReturn())
2467 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2468 NBA = Fn->getAttr<NoBuiltinAttr>();
2472 if (isa<FunctionDecl>(TargetDecl) || isa<VarDecl>(TargetDecl)) {
2475 if (AttrOnCallSite && TargetDecl->
hasAttr<NoMergeAttr>())
2476 FuncAttrs.addAttribute(llvm::Attribute::NoMerge);
2480 if (TargetDecl->
hasAttr<ConstAttr>()) {
2481 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::none());
2482 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2485 FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2486 }
else if (TargetDecl->
hasAttr<PureAttr>()) {
2487 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::readOnly());
2488 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2490 FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2491 }
else if (TargetDecl->
hasAttr<NoAliasAttr>()) {
2492 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::inaccessibleOrArgMemOnly());
2493 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2495 if (TargetDecl->
hasAttr<RestrictAttr>())
2496 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2497 if (TargetDecl->
hasAttr<ReturnsNonNullAttr>() &&
2498 !CodeGenOpts.NullPointerIsValid)
2499 RetAttrs.addAttribute(llvm::Attribute::NonNull);
2500 if (TargetDecl->
hasAttr<AnyX86NoCallerSavedRegistersAttr>())
2501 FuncAttrs.addAttribute(
"no_caller_saved_registers");
2502 if (TargetDecl->
hasAttr<AnyX86NoCfCheckAttr>())
2503 FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
2504 if (TargetDecl->
hasAttr<LeafAttr>())
2505 FuncAttrs.addAttribute(llvm::Attribute::NoCallback);
2507 HasOptnone = TargetDecl->
hasAttr<OptimizeNoneAttr>();
hasAttr<OptimizeNoneAttr>();
2508 if (
auto *AllocSize = TargetDecl->
getAttr<AllocSizeAttr>()) {
2509 std::optional<unsigned> NumElemsParam;
2510 if (AllocSize->getNumElemsParam().isValid())
2511 NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
2512 FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
2516 if (TargetDecl->
hasAttr<OpenCLKernelAttr>()) {
2519 FuncAttrs.addAttribute(
"uniform-work-group-size",
"true");
2526 FuncAttrs.addAttribute(
2527 "uniform-work-group-size",
2528 llvm::toStringRef(
getLangOpts().OffloadUniformBlock));
2532 if (TargetDecl->
hasAttr<CUDAGlobalAttr>() &&
2534 FuncAttrs.addAttribute(
"uniform-work-group-size",
"true");
2536 if (TargetDecl->
hasAttr<ArmLocallyStreamingAttr>())
2537 FuncAttrs.addAttribute(
"aarch64_pstate_sm_body");
2549 getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
2554 if (TargetDecl->
hasAttr<NoSpeculativeLoadHardeningAttr>())
2555 FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
2556 if (TargetDecl->
hasAttr<SpeculativeLoadHardeningAttr>())
2557 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
2558 if (TargetDecl->
hasAttr<NoSplitStackAttr>())
2559 FuncAttrs.removeAttribute(
"split-stack");
2560 if (TargetDecl->
hasAttr<ZeroCallUsedRegsAttr>()) {
2563 TargetDecl->
getAttr<ZeroCallUsedRegsAttr>()->getZeroCallUsedRegs();
2564 FuncAttrs.removeAttribute(
"zero-call-used-regs");
2565 FuncAttrs.addAttribute(
2566 "zero-call-used-regs",
2567 ZeroCallUsedRegsAttr::ConvertZeroCallUsedRegsKindToStr(
Kind));
2574 if (CodeGenOpts.NoPLT) {
2575 if (
auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2576 if (!Fn->isDefined() && !AttrOnCallSite) {
2577 FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
2585 if (TargetDecl && CodeGenOpts.UniqueInternalLinkageNames) {
2586 if (
const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
2587 if (!FD->isExternallyVisible())
2588 FuncAttrs.addAttribute(
"sample-profile-suffix-elision-policy",
2595 if (!AttrOnCallSite) {
2596 if (TargetDecl && TargetDecl->
hasAttr<CmseNSEntryAttr>())
2597 FuncAttrs.addAttribute(
"cmse_nonsecure_entry");
2600 auto shouldDisableTailCalls = [&] {
2602 if (CodeGenOpts.DisableTailCalls)
2608 if (TargetDecl->
hasAttr<DisableTailCallsAttr>() ||
2609 TargetDecl->
hasAttr<AnyX86InterruptAttr>())
2612 if (CodeGenOpts.NoEscapingBlockTailCalls) {
2613 if (
const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
2614 if (!BD->doesNotEscape())
2620 if (shouldDisableTailCalls())
2621 FuncAttrs.addAttribute(
"disable-tail-calls",
"true");
2625 GetCPUAndFeaturesAttributes(CalleeInfo.
getCalleeDecl(), FuncAttrs);
2629 ClangToLLVMArgMapping IRFunctionArgs(
getContext(), FI);
2636 if (CodeGenOpts.EnableNoundefAttrs &&
2640 RetAttrs.addAttribute(llvm::Attribute::NoUndef);
2646 RetAttrs.addAttribute(llvm::Attribute::SExt);
2648 RetAttrs.addAttribute(llvm::Attribute::ZExt);
2652 RetAttrs.addAttribute(llvm::Attribute::InReg);
2664 AddPotentialArgAccess();
2673 llvm_unreachable(
"Invalid ABI kind for return argument");
2681 RetAttrs.addDereferenceableAttr(
2683 if (
getTypes().getTargetAddressSpace(PTy) == 0 &&
2684 !CodeGenOpts.NullPointerIsValid)
2685 RetAttrs.addAttribute(llvm::Attribute::NonNull);
2687 llvm::Align Alignment =
2689 RetAttrs.addAlignmentAttr(Alignment);
2694 bool hasUsedSRet =
false;
2698 if (IRFunctionArgs.hasSRetArg()) {
2700 SRETAttrs.addStructRetAttr(
getTypes().ConvertTypeForMem(RetTy));
2701 SRETAttrs.addAttribute(llvm::Attribute::Writable);
2702 SRETAttrs.addAttribute(llvm::Attribute::DeadOnUnwind);
2705 SRETAttrs.addAttribute(llvm::Attribute::InReg);
2707 ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
2712 if (IRFunctionArgs.hasInallocaArg()) {
2715 ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2724 auto IRArgs = IRFunctionArgs.getIRArgs(0);
2726 assert(IRArgs.second == 1 &&
"Expected only a single `this` pointer.");
2733 if (!CodeGenOpts.NullPointerIsValid &&
2735 Attrs.addAttribute(llvm::Attribute::NonNull);
2742 Attrs.addDereferenceableOrNullAttr(
2748 llvm::Align Alignment =
2752 Attrs.addAlignmentAttr(Alignment);
2754 ArgAttrs[IRArgs.first] = llvm::AttributeSet::get(
getLLVMContext(), Attrs);
2760 I != E; ++I, ++ArgNo) {
2766 if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
2768 ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
2769 llvm::AttributeSet::get(
2771 llvm::AttrBuilder(
getLLVMContext()).addAttribute(llvm::Attribute::InReg));
2776 if (CodeGenOpts.EnableNoundefAttrs &&
2778 Attrs.addAttribute(llvm::Attribute::NoUndef);
2787 Attrs.addAttribute(llvm::Attribute::SExt);
2789 Attrs.addAttribute(llvm::Attribute::ZExt);
2793 Attrs.addAttribute(llvm::Attribute::Nest);
2795 Attrs.addAttribute(llvm::Attribute::InReg);
2796 Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.
getDirectAlign()));
2803 Attrs.addAttribute(llvm::Attribute::InReg);
2806 Attrs.addByValAttr(
getTypes().ConvertTypeForMem(ParamType));
2809 if (CodeGenOpts.PassByValueIsNoAlias &&
Decl &&
2810 Decl->getArgPassingRestrictions() ==
2814 Attrs.addAttribute(llvm::Attribute::NoAlias);
2839 AddPotentialArgAccess();
2844 Attrs.addByRefAttr(
getTypes().ConvertTypeForMem(ParamType));
2855 AddPotentialArgAccess();
2862 Attrs.addDereferenceableAttr(
2864 if (
getTypes().getTargetAddressSpace(PTy) == 0 &&
2865 !CodeGenOpts.NullPointerIsValid)
2866 Attrs.addAttribute(llvm::Attribute::NonNull);
2868 llvm::Align Alignment =
2870 Attrs.addAlignmentAttr(Alignment);
2878 if (TargetDecl && TargetDecl->
hasAttr<OpenCLKernelAttr>() &&
2882 llvm::Align Alignment =
2884 Attrs.addAlignmentAttr(Alignment);
2896 Attrs.addStructRetAttr(
getTypes().ConvertTypeForMem(ParamType));
2901 Attrs.addAttribute(llvm::Attribute::NoAlias);
2905 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2907 Attrs.addDereferenceableAttr(info.Width.getQuantity());
2908 Attrs.addAlignmentAttr(info.Align.getAsAlign());
2914 Attrs.addAttribute(llvm::Attribute::SwiftError);
2918 Attrs.addAttribute(llvm::Attribute::SwiftSelf);
2922 Attrs.addAttribute(llvm::Attribute::SwiftAsync);
2927 Attrs.addAttribute(llvm::Attribute::NoCapture);
2929 if (Attrs.hasAttributes()) {
2930 unsigned FirstIRArg, NumIRArgs;
2931 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2932 for (
unsigned i = 0; i < NumIRArgs; i++)
2933 ArgAttrs[FirstIRArg + i] = ArgAttrs[FirstIRArg + i].addAttributes(
2939 AttrList = llvm::AttributeList::get(
2948 llvm::Value *value) {
2953 if (value->getType() == varType)
return value;
2955 assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2956 &&
"unexpected promotion type");
2958 if (isa<llvm::IntegerType>(varType))
2959 return CGF.
Builder.CreateTrunc(value, varType,
"arg.unpromote");
2961 return CGF.
Builder.CreateFPCast(value, varType,
"arg.unpromote");
2967 QualType ArgType,
unsigned ArgNo) {
2979 if (
auto ParmNNAttr = PVD->
getAttr<NonNullAttr>())
2986 if (NNAttr->isNonNull(ArgNo))
3016 if (FD->hasImplicitReturnZero()) {
3017 QualType RetTy = FD->getReturnType().getUnqualifiedType();
3019 llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
3028 assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs());
3033 if (IRFunctionArgs.hasInallocaArg())
3034 ArgStruct =
Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()),
3038 if (IRFunctionArgs.hasSRetArg()) {
3039 auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo());
3040 AI->setName(
"agg.result");
3041 AI->addAttr(llvm::Attribute::NoAlias);
3048 ArgVals.reserve(Args.size());
3054 assert(FI.
arg_size() == Args.size() &&
3055 "Mismatch between function signature & arguments.");
3058 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
3059 i != e; ++i, ++info_it, ++ArgNo) {
3064 isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
3072 unsigned FirstIRArg, NumIRArgs;
3073 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3075 if (Arg->
hasAttr<SYCLAccessorReadonlyAttr>())
3076 Fn->getArg(FirstIRArg)->addAttr(llvm::Attribute::ReadOnly);
3078 if (
const auto *AddIRAttr =
3079 Arg->
getAttr<SYCLAddIRAttributesKernelParameterAttr>()) {
3081 AddIRAttr->getFilteredAttributeNameValuePairs(
CGM.
getContext());
3083 llvm::AttrBuilder KernelParamAttrBuilder(Fn->getContext());
3084 for (
const auto &NameValuePair : NameValuePairs)
3085 KernelParamAttrBuilder.addAttribute(NameValuePair.first,
3086 NameValuePair.second);
3087 Fn->addParamAttrs(ArgNo, KernelParamAttrBuilder);
3092 assert(NumIRArgs == 0);
3105 assert(NumIRArgs == 1);
3128 llvm::ConstantInt::get(
IntPtrTy, Size.getQuantity()));
3129 ParamAddr = AlignedTemp;
3146 auto AI = Fn->getArg(FirstIRArg);
3154 assert(NumIRArgs == 1);
3156 if (
const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
3159 PVD->getFunctionScopeIndex()) &&
3161 AI->addAttr(llvm::Attribute::NonNull);
3163 QualType OTy = PVD->getOriginalType();
3164 if (
const auto *ArrTy =
3171 QualType ETy = ArrTy->getElementType();
3172 llvm::Align Alignment =
3174 AI->addAttrs(llvm::AttrBuilder(
getLLVMContext()).addAlignmentAttr(Alignment));
3175 uint64_t ArrSize = ArrTy->getZExtSize();
3179 Attrs.addDereferenceableAttr(
3180 getContext().getTypeSizeInChars(ETy).getQuantity() *
3182 AI->addAttrs(Attrs);
3183 }
else if (
getContext().getTargetInfo().getNullPointerValue(
3186 AI->addAttr(llvm::Attribute::NonNull);
3189 }
else if (
const auto *ArrTy =
3195 QualType ETy = ArrTy->getElementType();
3196 llvm::Align Alignment =
3198 AI->addAttrs(llvm::AttrBuilder(
getLLVMContext()).addAlignmentAttr(Alignment));
3199 if (!
getTypes().getTargetAddressSpace(ETy) &&
3201 AI->addAttr(llvm::Attribute::NonNull);
3206 const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
3209 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
3210 if (AVAttr && !
SanOpts.
has(SanitizerKind::Alignment)) {
3214 llvm::ConstantInt *AlignmentCI =
3217 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment);
3218 if (AI->getParamAlign().valueOrOne() < AlignmentInt) {
3219 AI->removeAttr(llvm::Attribute::AttrKind::Alignment);
3220 AI->addAttrs(llvm::AttrBuilder(
getLLVMContext()).addAlignmentAttr(
3221 llvm::Align(AlignmentInt)));
3232 AI->addAttr(llvm::Attribute::NoAlias);
3240 assert(NumIRArgs == 1);
3244 llvm::Value *
V = AI;
3252 V, pointeeTy,
getContext().getTypeAlignInChars(pointeeTy));
3275 if (
V->getType() != LTy)
3286 if (
auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(
ConvertType(Ty))) {
3287 llvm::Value *Coerced = Fn->getArg(FirstIRArg);
3288 if (
auto *VecTyFrom =
3289 dyn_cast<llvm::ScalableVectorType>(Coerced->getType())) {
3292 if (VecTyFrom->getElementType()->isIntegerTy(1) &&
3293 VecTyFrom->getElementCount().isKnownMultipleOf(8) &&
3294 VecTyTo->getElementType() ==
Builder.getInt8Ty()) {
3295 VecTyFrom = llvm::ScalableVectorType::get(
3296 VecTyTo->getElementType(),
3297 VecTyFrom->getElementCount().getKnownMinValue() / 8);
3298 Coerced =
Builder.CreateBitCast(Coerced, VecTyFrom);
3300 if (VecTyFrom->getElementType() == VecTyTo->getElementType()) {
3301 llvm::Value *Zero = llvm::Constant::getNullValue(
CGM.
Int64Ty);
3303 assert(NumIRArgs == 1);
3304 Coerced->setName(Arg->
getName() +
".coerce");
3306 VecTyTo, Coerced, Zero,
"cast.fixed")));
3312 llvm::StructType *STy =
3315 STy->getNumElements() > 1) {
3316 [[maybe_unused]] llvm::TypeSize StructSize =
3318 [[maybe_unused]] llvm::TypeSize PtrElementSize =
3320 if (STy->containsHomogeneousScalableVectorTypes()) {
3321 assert(StructSize == PtrElementSize &&
3322 "Only allow non-fractional movement of structure with"
3323 "homogeneous scalable vector type");
3339 STy->getNumElements() > 1) {
3341 llvm::TypeSize PtrElementSize =
3343 if (StructSize.isScalable()) {
3344 assert(STy->containsHomogeneousScalableVectorTypes() &&
3345 "ABI only supports structure with homogeneous scalable vector "
3347 assert(StructSize == PtrElementSize &&
3348 "Only allow non-fractional movement of structure with"
3349 "homogeneous scalable vector type");
3350 assert(STy->getNumElements() == NumIRArgs);
3352 llvm::Value *LoadedStructValue = llvm::PoisonValue::get(STy);
3353 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3354 auto *AI = Fn->getArg(FirstIRArg + i);
3355 AI->setName(Arg->
getName() +
".coerce" + Twine(i));
3357 Builder.CreateInsertValue(LoadedStructValue, AI, i);
3362 uint64_t SrcSize = StructSize.getFixedValue();
3363 uint64_t DstSize = PtrElementSize.getFixedValue();
3366 if (SrcSize <= DstSize) {
3373 assert(STy->getNumElements() == NumIRArgs);
3374 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3375 auto AI = Fn->getArg(FirstIRArg + i);
3376 AI->setName(Arg->
getName() +
".coerce" + Twine(i));
3381 if (SrcSize > DstSize) {
3387 assert(NumIRArgs == 1);
3388 auto AI = Fn->getArg(FirstIRArg);
3389 AI->setName(Arg->
getName() +
".coerce");
3413 unsigned argIndex = FirstIRArg;
3414 for (
unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
3415 llvm::Type *eltType = coercionType->getElementType(i);
3420 auto elt = Fn->getArg(argIndex++);
3423 assert(argIndex == FirstIRArg + NumIRArgs);
3435 auto FnArgIter = Fn->arg_begin() + FirstIRArg;
3436 ExpandTypeFromArgs(Ty, LV, FnArgIter);
3437 assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs);
3438 for (
unsigned i = 0, e = NumIRArgs; i != e; ++i) {
3439 auto AI = Fn->getArg(FirstIRArg + i);
3440 AI->setName(Arg->
getName() +
"." + Twine(i));
3446 assert(NumIRArgs == 0);
3458 if (
getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
3459 for (
int I = Args.size() - 1; I >= 0; --I)
3462 for (
unsigned I = 0, E = Args.size(); I != E; ++I)
3468 while (insn->use_empty()) {
3469 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
3470 if (!bitcast)
return;
3473 insn = cast<llvm::Instruction>(bitcast->getOperand(0));
3474 bitcast->eraseFromParent();
3480 llvm::Value *result) {
3482 llvm::BasicBlock *BB = CGF.
Builder.GetInsertBlock();
3483 if (BB->empty())
return nullptr;
3484 if (&BB->back() != result)
return nullptr;
3486 llvm::Type *resultType = result->getType();
3489 llvm::Instruction *generator = cast<llvm::Instruction>(result);
3495 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
3498 generator = cast<llvm::Instruction>(bitcast->getOperand(0));
3501 if (generator->getNextNode() != bitcast)
3504 InstsToKill.push_back(bitcast);
3511 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
3512 if (!call)
return nullptr;
3514 bool doRetainAutorelease;
3517 doRetainAutorelease =
true;
3518 }
else if (call->getCalledOperand() ==
3520 doRetainAutorelease =
false;
3528 llvm::Instruction *prev = call->getPrevNode();
3530 if (isa<llvm::BitCastInst>(prev)) {
3531 prev = prev->getPrevNode();
3534 assert(isa<llvm::CallInst>(prev));
3535 assert(cast<llvm::CallInst>(prev)->getCalledOperand() ==
3537 InstsToKill.push_back(prev);
3543 result = call->getArgOperand(0);
3544 InstsToKill.push_back(call);
3548 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
3549 if (!bitcast->hasOneUse())
break;
3550 InstsToKill.push_back(bitcast);
3551 result = bitcast->getOperand(0);
3555 for (
auto *I : InstsToKill)
3556 I->eraseFromParent();
3559 if (doRetainAutorelease)
3563 return CGF.
Builder.CreateBitCast(result, resultType);
3568 llvm::Value *result) {
3571 dyn_cast_or_null<ObjCMethodDecl>(CGF.
CurCodeDecl);
3572 if (!method)
return nullptr;
3574 if (!self->getType().isConstQualified())
return nullptr;
3578 llvm::CallInst *retainCall = dyn_cast<llvm::CallInst>(result);
3579 if (!retainCall || retainCall->getCalledOperand() !=
3584 llvm::Value *retainedValue = retainCall->getArgOperand(0);
3585 llvm::LoadInst *load =
3586 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
3587 if (!load || load->isAtomic() || load->isVolatile() ||
3594 llvm::Type *resultType = result->getType();
3596 assert(retainCall->use_empty());
3597 retainCall->eraseFromParent();
3600 return CGF.
Builder.CreateBitCast(load, resultType);
3607 llvm::Value *result) {
3630 auto GetStoreIfValid = [&CGF,
3631 ReturnValuePtr](llvm::User *
U) -> llvm::StoreInst * {
3632 auto *SI = dyn_cast<llvm::StoreInst>(
U);
3633 if (!SI || SI->getPointerOperand() != ReturnValuePtr ||
3639 assert(!SI->isAtomic() &&
3647 if (!ReturnValuePtr->hasOneUse()) {
3648 llvm::BasicBlock *IP = CGF.
Builder.GetInsertBlock();
3649 if (IP->empty())
return nullptr;
3653 for (llvm::Instruction &I : make_range(IP->rbegin(), IP->rend())) {
3654 if (isa<llvm::BitCastInst>(&I))
3656 if (
auto *II = dyn_cast<llvm::IntrinsicInst>(&I))
3657 if (II->getIntrinsicID() == llvm::Intrinsic::lifetime_end)
3660 return GetStoreIfValid(&I);
3665 llvm::StoreInst *store = GetStoreIfValid(ReturnValuePtr->user_back());
3666 if (!store)
return nullptr;
3670 llvm::BasicBlock *StoreBB = store->getParent();
3671 llvm::BasicBlock *IP = CGF.
Builder.GetInsertBlock();
3673 while (IP != StoreBB) {
3674 if (!SeenBBs.insert(IP).second || !(IP = IP->getSinglePredecessor()))
3690 int BitWidth,
int CharWidth) {
3691 assert(CharWidth <= 64);
3692 assert(
static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth);
3695 if (BitOffset >= CharWidth) {
3696 Pos += BitOffset / CharWidth;
3697 BitOffset = BitOffset % CharWidth;
3701 if (BitOffset + BitWidth >= CharWidth) {
3702 Bits[Pos++] |= (
Used << BitOffset) &
Used;
3703 BitWidth -= CharWidth - BitOffset;
3707 while (BitWidth >= CharWidth) {
3709 BitWidth -= CharWidth;
3713 Bits[Pos++] |= (
Used >> (CharWidth - BitWidth)) << BitOffset;
3721 int StorageSize,
int BitOffset,
int BitWidth,
3722 int CharWidth,
bool BigEndian) {
3725 setBitRange(TmpBits, BitOffset, BitWidth, CharWidth);
3728 std::reverse(TmpBits.begin(), TmpBits.end());
3731 Bits[StorageOffset++] |=
V;
3762 BFI.
Size, CharWidth,
3784 auto Src = TmpBits.begin();
3785 auto Dst = Bits.begin() +
Offset + I * Size;
3786 for (
int J = 0; J < Size; ++J)
3806 std::fill_n(Bits.begin() +
Offset, Size,
3811 int Pos,
int Size,
int CharWidth,
3816 for (
auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size;
P != E;
3818 Mask = (Mask << CharWidth) | *
P;
3820 auto P = Bits.begin() + Pos + Size,
End = Bits.begin() + Pos;
3822 Mask = (Mask << CharWidth) | *--
P;
3831 llvm::IntegerType *ITy,
3833 assert(Src->getType() == ITy);
3834 assert(ITy->getScalarSizeInBits() <= 64);
3837 int Size = DataLayout.getTypeStoreSize(ITy);
3845 return Builder.CreateAnd(Src, Mask,
"cmse.clear");
3851 llvm::ArrayType *ATy,
3854 int Size = DataLayout.getTypeStoreSize(ATy);
3861 ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;
3863 llvm::Value *R = llvm::PoisonValue::get(ATy);
3864 for (
int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {
3866 DataLayout.isBigEndian());
3867 MaskIndex += CharsPerElt;
3868 llvm::Value *T0 =
Builder.CreateExtractValue(Src, I);
3869 llvm::Value *T1 =
Builder.CreateAnd(T0, Mask,
"cmse.clear");
3870 R =
Builder.CreateInsertValue(R, T1, I);
3897 llvm::DebugLoc RetDbgLoc;
3898 llvm::Value *RV =
nullptr;
3908 llvm::Function::arg_iterator EI =
CurFn->arg_end();
3910 llvm::Value *ArgStruct = &*EI;
3914 cast<llvm::GetElementPtrInst>(SRet)->getResultElementType();
3920 auto AI =
CurFn->arg_begin();
3959 if (llvm::StoreInst *SI =
3965 RetDbgLoc = SI->getDebugLoc();
3967 RV = SI->getValueOperand();
3968 SI->eraseFromParent();
3991 if (
auto *FD = dyn_cast<FunctionDecl>(
CurCodeDecl))
3992 RT = FD->getReturnType();
3993 else if (
auto *MD = dyn_cast<ObjCMethodDecl>(
CurCodeDecl))
3994 RT = MD->getReturnType();
3998 llvm_unreachable(
"Unexpected function/method type");
4018 for (
unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4025 results.push_back(elt);
4029 if (results.size() == 1) {
4037 RV = llvm::PoisonValue::get(returnType);
4038 for (
unsigned i = 0, e = results.size(); i != e; ++i) {
4039 RV =
Builder.CreateInsertValue(RV, results[i], i);
4046 llvm_unreachable(
"Invalid ABI kind for return argument");
4049 llvm::Instruction *
Ret;
4055 auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType());
4066 Ret->setDebugLoc(std::move(RetDbgLoc));
4079 ReturnsNonNullAttr *RetNNAttr =
nullptr;
4080 if (
SanOpts.
has(SanitizerKind::ReturnsNonnullAttribute))
4083 if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
4091 assert(!requiresReturnValueNullabilityCheck() &&
4092 "Cannot check nullability and the nonnull attribute");
4093 AttrLoc = RetNNAttr->getLocation();
4094 CheckKind = SanitizerKind::ReturnsNonnullAttribute;
4095 Handler = SanitizerHandler::NonnullReturn;
4097 if (
auto *DD = dyn_cast<DeclaratorDecl>(
CurCodeDecl))
4098 if (
auto *TSI = DD->getTypeSourceInfo())
4100 AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
4101 CheckKind = SanitizerKind::NullabilityReturn;
4102 Handler = SanitizerHandler::NullabilityReturn;
4112 llvm::Value *CanNullCheck =
Builder.CreateIsNotNull(SLocPtr);
4113 if (requiresReturnValueNullabilityCheck())
4115 Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
4116 Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
4120 llvm::Value *Cond =
Builder.CreateIsNotNull(RV);
4122 llvm::Value *DynamicData[] = {SLocPtr};
4123 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
4143 llvm::Type *IRPtrTy = llvm::PointerType::getUnqual(CGF.
getLLVMContext());
4144 llvm::Value *Placeholder = llvm::PoisonValue::get(IRPtrTy);
4171 if (
type->isReferenceType()) {
4180 param->
hasAttr<NSConsumedAttr>() &&
4181 type->isObjCRetainableType()) {
4184 llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
4199 CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
4201 "cleanup for callee-destructed param not recorded");
4203 llvm::Instruction *isActive =
Builder.CreateUnreachable();
4209 return llvm::isa_and_nonnull<llvm::ConstantPointerNull>(addr);
4222 "shouldn't have writeback for provably null argument");
4224 llvm::BasicBlock *contBB =
nullptr;
4230 if (!provablyNonNull) {
4235 CGF.
Builder.CreateCondBr(isNull, contBB, writebackBB);
4244 "icr.writeback-cast");
4253 if (writeback.
ToUse) {
4278 if (!provablyNonNull)
4293 for (
const auto &I : llvm::reverse(Cleanups)) {
4295 I.IsActiveIP->eraseFromParent();
4301 if (uop->getOpcode() == UO_AddrOf)
4302 return uop->getSubExpr();
4332 llvm::PointerType *destType =
4334 llvm::Type *destElemType =
4357 llvm::ConstantPointerNull::get(cast<llvm::PointerType>(destElemType));
4361 llvm::BasicBlock *contBB =
nullptr;
4362 llvm::BasicBlock *originBB =
nullptr;
4365 llvm::Value *finalArgument;
4369 if (provablyNonNull) {
4374 finalArgument = CGF.
Builder.CreateSelect(
4375 isNull, llvm::ConstantPointerNull::get(destType),
4381 originBB = CGF.
Builder.GetInsertBlock();
4384 CGF.
Builder.CreateCondBr(isNull, contBB, copyBB);
4386 condEval.
begin(CGF);
4390 llvm::Value *valueToUse =
nullptr;
4398 src = CGF.
Builder.CreateBitCast(src, destElemType,
"icr.cast");
4415 if (shouldCopy && !provablyNonNull) {
4416 llvm::BasicBlock *copyBB = CGF.
Builder.GetInsertBlock();
4421 llvm::PHINode *phiToUse = CGF.
Builder.CreatePHI(valueToUse->getType(), 2,
4423 phiToUse->addIncoming(valueToUse, copyBB);
4424 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
4426 valueToUse = phiToUse;
4440 StackBase = CGF.
Builder.CreateStackSave(
"inalloca.save");
4446 CGF.
Builder.CreateStackRestore(StackBase);
4454 if (!AC.getDecl() || !(
SanOpts.
has(SanitizerKind::NonnullAttribute) ||
4459 auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) :
nullptr;
4460 unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
4463 const NonNullAttr *NNAttr =
nullptr;
4464 if (
SanOpts.
has(SanitizerKind::NonnullAttribute))
4467 bool CanCheckNullability =
false;
4468 if (
SanOpts.
has(SanitizerKind::NullabilityArg) && !NNAttr && PVD &&
4469 !PVD->getType()->isRecordType()) {
4470 auto Nullability = PVD->getType()->getNullability();
4473 PVD->getTypeSourceInfo();
4476 if (!NNAttr && !CanCheckNullability)
4483 AttrLoc = NNAttr->getLocation();
4484 CheckKind = SanitizerKind::NonnullAttribute;
4485 Handler = SanitizerHandler::NonnullArg;
4487 AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
4488 CheckKind = SanitizerKind::NullabilityArg;
4489 Handler = SanitizerHandler::NullabilityArg;
4494 llvm::Constant *StaticData[] = {
4496 llvm::ConstantInt::get(
Int32Ty, ArgNo + 1),
4498 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, std::nullopt);
4504 if (!AC.getDecl() || !(
SanOpts.
has(SanitizerKind::NonnullAttribute) ||
4524 return llvm::any_of(ArgTypes, [&](
QualType Ty) {
4535 return classDecl->getTypeParamListAsWritten();
4539 return catDecl->getTypeParamList();
4549 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
4553 assert((ParamsToSkip == 0 ||
Prototype.P) &&
4554 "Can't skip parameters if type info is not provided");
4564 bool IsVariadic =
false;
4571 ArgTypes.assign(MD->param_type_begin() + ParamsToSkip,
4572 MD->param_type_end());
4576 ExplicitCC = FPT->getExtInfo().getCC();
4577 ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip,
4578 FPT->param_type_end());
4586 assert(Arg != ArgRange.end() &&
"Running over edge of argument list!");
4593 getContext().getCanonicalType((*Arg)->getType()).getTypePtr()) &&
4594 "type mismatch in call argument!");
4600 assert((Arg == ArgRange.end() || IsVariadic) &&
4601 "Extra arguments in non-variadic function!");
4606 for (
auto *A : llvm::drop_begin(ArgRange, ArgTypes.size()))
4607 ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType());
4608 assert((
int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
4620 auto MaybeEmitImplicitObjectSize = [&](
unsigned I,
const Expr *Arg,
4622 if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
4624 auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
4631 assert(EmittedArg.getScalarVal() &&
"We emitted nothing for the arg?");
4632 llvm::Value *
V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(),
T,
4633 EmittedArg.getScalarVal(),
4639 std::swap(Args.back(), *(&Args.back() - 1));
4644 assert(
getTarget().getTriple().getArch() == llvm::Triple::x86 &&
4645 "inalloca only supported on x86");
4650 size_t CallArgsStart = Args.size();
4651 for (
unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
4652 unsigned Idx = LeftToRight ? I : E - I - 1;
4654 unsigned InitialArgSize = Args.size();
4657 assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
4658 getContext().hasSameUnqualifiedType((*Arg)->getType(),
4660 (isa<ObjCMethodDecl>(AC.getDecl()) &&
4662 "Argument and parameter types don't match");
4666 assert(InitialArgSize + 1 == Args.size() &&
4667 "The code below depends on only adding one arg per EmitCallArg");
4668 (void)InitialArgSize;
4671 if (!Args.back().hasLValue()) {
4672 RValue RVArg = Args.back().getKnownRValue();
4674 ParamsToSkip + Idx);
4678 MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
4685 std::reverse(Args.begin() + CallArgsStart, Args.end());
4693 : Addr(Addr), Ty(Ty) {}
4711 struct DisableDebugLocationUpdates {
4713 bool disabledDebugInfo;
4715 if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.
getDebugInfo()))
4718 ~DisableDebugLocationUpdates() {
4719 if (disabledDebugInfo)
4755 DisableDebugLocationUpdates Dis(*
this, E);
4757 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
4763 "reference binding to unmaterialized r-value!");
4775 if (
type->isRecordType() &&
4782 bool DestroyedInCallee =
true, NeedsCleanup =
true;
4783 if (
const auto *RD =
type->getAsCXXRecordDecl())
4784 DestroyedInCallee = RD->hasNonTrivialDestructor();
4786 NeedsCleanup =
type.isDestructedType();
4788 if (DestroyedInCallee)
4795 if (DestroyedInCallee && NeedsCleanup) {
4802 llvm::Instruction *IsActive =
4809 if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
4810 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue &&
4811 !
type->isArrayParameterType()) {
4821 QualType CodeGenFunction::getVarArgType(
const Expr *Arg) {
4825 if (!
getTarget().getTriple().isOSWindows())
4842 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
4845 Inst->setMetadata(
"clang.arc.no_objc_arc_exceptions",
4852 const llvm::Twine &
name) {
4860 const llvm::Twine &
name) {
4862 for (
auto arg : args)
4863 values.push_back(
arg.emitRawPointer(*
this));
4870 const llvm::Twine &
name) {
4872 call->setDoesNotThrow();
4879 const llvm::Twine &
name) {
4894 if (
auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts())) {
4895 if (CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) {
4896 auto IID = CalleeFn->getIntrinsicID();
4897 if (!llvm::IntrinsicInst::mayLowerToFunctionCall(IID))
4910 const llvm::Twine &
name) {
4911 llvm::CallInst *call =
Builder.CreateCall(
4927 llvm::InvokeInst *invoke =
4933 invoke->setDoesNotReturn();
4936 llvm::CallInst *call =
Builder.CreateCall(callee, args, BundleList);
4937 call->setDoesNotReturn();
4946 const Twine &
name) {
4954 const Twine &
name) {
4964 const Twine &Name) {
4969 llvm::CallBase *Inst;
4971 Inst =
Builder.CreateCall(Callee, Args, BundleList, Name);
4974 Inst =
Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
4982 AddObjCARCExceptionMetadata(Inst);
4987 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
4989 DeferredReplacements.push_back(
4990 std::make_pair(llvm::WeakTrackingVH(Old), New));
4997 [[nodiscard]] llvm::AttributeList
4998 maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx,
4999 const llvm::AttributeList &Attrs,
5000 llvm::Align NewAlign) {
5001 llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne();
5002 if (CurAlign >= NewAlign)
5004 llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign);
5005 return Attrs.removeRetAttribute(Ctx, llvm::Attribute::AttrKind::Alignment)
5006 .addRetAttribute(Ctx, AlignAttr);
5009 template <
typename AlignedAttrTy>
class AbstractAssumeAlignedAttrEmitter {
5014 const AlignedAttrTy *AA =
nullptr;
5016 llvm::Value *Alignment =
nullptr;
5017 llvm::ConstantInt *OffsetCI =
nullptr;
5023 AA = FuncDecl->
getAttr<AlignedAttrTy>();
5028 [[nodiscard]] llvm::AttributeList
5029 TryEmitAsCallSiteAttribute(
const llvm::AttributeList &Attrs) {
5030 if (!AA || OffsetCI || CGF.
SanOpts.
has(SanitizerKind::Alignment))
5032 const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment);
5037 if (!AlignmentCI->getValue().isPowerOf2())
5039 llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute(
5042 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment)));
5054 AA->getLocation(), Alignment, OffsetCI);
5060 class AssumeAlignedAttrEmitter final
5061 :
public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> {
5064 : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
5068 Alignment = cast<llvm::ConstantInt>(CGF.
EmitScalarExpr(AA->getAlignment()));
5071 if (OffsetCI->isNullValue())
5078 class AllocAlignAttrEmitter final
5079 :
public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> {
5083 : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
5087 Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()]
5096 if (
auto *VT = dyn_cast<llvm::VectorType>(Ty))
5097 return VT->getPrimitiveSizeInBits().getKnownMinValue();
5098 if (
auto *AT = dyn_cast<llvm::ArrayType>(Ty))
5101 unsigned MaxVectorWidth = 0;
5102 if (
auto *ST = dyn_cast<llvm::StructType>(Ty))
5103 for (
auto *I : ST->elements())
5105 return MaxVectorWidth;
5112 llvm::CallBase **callOrInvoke,
bool IsMustTail,
5116 assert(Callee.isOrdinary() || Callee.isVirtual());
5125 const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
5126 if (
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
5133 if (TargetDecl->
hasAttr<AlwaysInlineAttr>() &&
5134 (TargetDecl->
hasAttr<TargetAttr>() ||
5143 dyn_cast_or_null<FunctionDecl>(TargetDecl), CallArgs, RetTy);
5150 if (llvm::StructType *ArgStruct = CallInfo.
getArgStruct()) {
5153 llvm::AllocaInst *AI;
5155 IP = IP->getNextNode();
5156 AI =
new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
5162 AI->setAlignment(Align.getAsAlign());
5163 AI->setUsedWithInAlloca(
true);
5164 assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
5165 ArgMemory =
RawAddress(AI, ArgStruct, Align);
5168 ClangToLLVMArgMapping IRFunctionArgs(
CGM.
getContext(), CallInfo);
5175 llvm::Value *UnusedReturnSizePtr =
nullptr;
5182 llvm::TypeSize size =
5187 if (IRFunctionArgs.hasSRetArg()) {
5188 IRCallArgs[IRFunctionArgs.getSRetArgNo()] =
5206 assert(CallInfo.
arg_size() == CallArgs.size() &&
5207 "Mismatch between function signature & arguments.");
5210 for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
5211 I != E; ++I, ++info_it, ++ArgNo) {
5215 if (IRFunctionArgs.hasPaddingArg(ArgNo))
5216 IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
5219 unsigned FirstIRArg, NumIRArgs;
5220 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
5222 bool ArgHasMaybeUndefAttr =
5227 assert(NumIRArgs == 0);
5228 assert(
getTarget().getTriple().getArch() == llvm::Triple::x86);
5229 if (I->isAggregate()) {
5231 ? I->getKnownLValue().getAddress()
5232 : I->getKnownRValue().getAggregateAddress();
5233 llvm::Instruction *Placeholder =
5238 CGBuilderTy::InsertPoint IP =
Builder.saveIP();
5239 Builder.SetInsertPoint(Placeholder);
5252 deferPlaceholderReplacement(Placeholder, Addr.
getPointer());
5257 I->Ty,
getContext().getTypeAlignInChars(I->Ty),
5258 "indirect-arg-temp");
5259 I->copyInto(*
this, Addr);
5268 I->copyInto(*
this, Addr);
5275 assert(NumIRArgs == 1);
5276 if (I->isAggregate()) {
5286 ? I->getKnownLValue().getAddress()
5287 : I->getKnownRValue().getAggregateAddress();
5291 assert((FirstIRArg >= IRFuncTy->getNumParams() ||
5292 IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
5293 TD->getAllocaAddrSpace()) &&
5294 "indirect argument must be in alloca address space");
5296 bool NeedCopy =
false;
5302 }
else if (I->hasLValue()) {
5303 auto LV = I->getKnownLValue();
5309 if (!isByValOrRef ||
5314 if ((isByValOrRef &&
5322 else if ((isByValOrRef &&
5323 Addr.
getType()->getAddressSpace() != IRFuncTy->
5332 auto *
T = llvm::PointerType::get(
5337 if (ArgHasMaybeUndefAttr)
5338 Val =
Builder.CreateFreeze(Val);
5339 IRCallArgs[FirstIRArg] = Val;
5349 if (ArgHasMaybeUndefAttr)
5350 Val =
Builder.CreateFreeze(Val);
5351 IRCallArgs[FirstIRArg] = Val;
5354 llvm::TypeSize ByvalTempElementSize =
5356 llvm::Value *LifetimeSize =
5361 CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);
5364 I->copyInto(*
this, AI);
5369 assert(NumIRArgs == 0);
5377 assert(NumIRArgs == 1);
5379 if (!I->isAggregate())
5380 V = I->getKnownRValue().getScalarVal();
5383 I->hasLValue() ? I->getKnownLValue().getAddress()
5384 : I->getKnownRValue().getAggregateAddress());
5390 assert(!swiftErrorTemp.
isValid() &&
"multiple swifterror args");
5394 V, pointeeTy,
getContext().getTypeAlignInChars(pointeeTy));
5399 cast<llvm::AllocaInst>(
V)->setSwiftError(
true);
5407 V->getType()->isIntegerTy())
5412 if (FirstIRArg < IRFuncTy->getNumParams() &&
5413 V->getType() != IRFuncTy->getParamType(FirstIRArg)) {
5414 if (
V->getType()->getPointerAddressSpace() !=
5415 IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace())
5417 IRFuncTy->getParamType(FirstIRArg));
5419 V =
Builder.CreateBitCast(
V, IRFuncTy->getParamType(FirstIRArg));
5422 if (ArgHasMaybeUndefAttr)
5424 IRCallArgs[FirstIRArg] =
V;
5428 llvm::StructType *STy =
5432 [[maybe_unused]] llvm::TypeSize SrcTypeSize =
5434 [[maybe_unused]] llvm::TypeSize DstTypeSize =
5436 if (STy->containsHomogeneousScalableVectorTypes()) {
5437 assert(SrcTypeSize == DstTypeSize &&
5438 "Only allow non-fractional movement of structure with "
5439 "homogeneous scalable vector type");
5441 IRCallArgs[FirstIRArg] = I->getKnownRValue().getScalarVal();
5448 if (!I->isAggregate()) {
5450 I->copyInto(*
this, Src);
5452 Src = I->hasLValue() ? I->getKnownLValue().getAddress()
5453 : I->getKnownRValue().getAggregateAddress();
5463 llvm::TypeSize SrcTypeSize =
5466 if (SrcTypeSize.isScalable()) {
5467 assert(STy->containsHomogeneousScalableVectorTypes() &&
5468 "ABI only supports structure with homogeneous scalable vector "
5470 assert(SrcTypeSize == DstTypeSize &&
5471 "Only allow non-fractional movement of structure with "
5472 "homogeneous scalable vector type");
5473 assert(NumIRArgs == STy->getNumElements());
5475 llvm::Value *StoredStructValue =
5477 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
5478 llvm::Value *Extract =
Builder.CreateExtractValue(
5479 StoredStructValue, i, Src.
getName() +
".extract" + Twine(i));
5480 IRCallArgs[FirstIRArg + i] = Extract;
5484 uint64_t SrcSize = SrcTypeSize.getFixedValue();
5485 uint64_t DstSize = DstTypeSize.getFixedValue();
5491 if (SrcSize < DstSize) {
5500 assert(NumIRArgs == STy->getNumElements());
5501 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
5504 if (ArgHasMaybeUndefAttr)
5505 LI =
Builder.CreateFreeze(LI);
5506 IRCallArgs[FirstIRArg + i] = LI;
5511 assert(NumIRArgs == 1);
5519 auto *ATy = dyn_cast<llvm::ArrayType>(
Load->getType());
5520 if (ATy !=
nullptr && isa<RecordType>(I->Ty.getCanonicalType()))
5524 if (ArgHasMaybeUndefAttr)
5526 IRCallArgs[FirstIRArg] =
Load;
5536 llvm::Value *tempSize =
nullptr;
5539 if (I->isAggregate()) {
5540 addr = I->hasLValue() ? I->getKnownLValue().getAddress()
5541 : I->getKnownRValue().getAggregateAddress();
5544 RValue RV = I->getKnownRValue();
5556 nullptr, &AllocaAddr);
5564 unsigned IRArgPos = FirstIRArg;
5565 for (
unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
5566 llvm::Type *eltType = coercionType->getElementType(i);
5570 if (ArgHasMaybeUndefAttr)
5571 elt =
Builder.CreateFreeze(elt);
5572 IRCallArgs[IRArgPos++] = elt;
5574 assert(IRArgPos == FirstIRArg + NumIRArgs);
5584 unsigned IRArgPos = FirstIRArg;
5585 ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
5586 assert(IRArgPos == FirstIRArg + NumIRArgs);
5592 const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*
this);
5598 assert(IRFunctionArgs.hasInallocaArg());
5599 IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
5610 auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
5611 llvm::Value *Ptr) -> llvm::Function * {
5612 if (!CalleeFT->isVarArg())
5616 if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
5617 if (CE->getOpcode() == llvm::Instruction::BitCast)
5618 Ptr = CE->getOperand(0);
5621 llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
5625 llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
5629 if (OrigFT->isVarArg() ||
5630 OrigFT->getNumParams() != CalleeFT->getNumParams() ||
5631 OrigFT->getReturnType() != CalleeFT->getReturnType())
5634 for (
unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
5635 if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
5641 if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
5643 IRFuncTy = OrigFn->getFunctionType();
5658 assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
5659 for (
unsigned i = 0; i < IRCallArgs.size(); ++i) {
5661 if (IRFunctionArgs.hasInallocaArg() &&
5662 i == IRFunctionArgs.getInallocaArgNo())
5664 if (i < IRFuncTy->getNumParams())
5665 assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
5670 for (
unsigned i = 0; i < IRCallArgs.size(); ++i)
5671 LargestVectorWidth =
std::max(LargestVectorWidth,
5676 llvm::AttributeList Attrs;
5682 if (
CallingConv == llvm::CallingConv::X86_VectorCall &&
5683 getTarget().getTriple().isWindowsArm64EC()) {
5684 CGM.
Error(
Loc,
"__vectorcall calling convention is not currently "
5689 if (FD->hasAttr<StrictFPAttr>())
5691 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::StrictFP);
5696 if (FD->hasAttr<OptimizeNoneAttr>() &&
getLangOpts().FastMath)
5702 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::NoMerge);
5706 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::NoInline);
5711 Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::AlwaysInline);
5720 !(TargetDecl && TargetDecl->
hasAttr<NoInlineAttr>())) {
5722 Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::AlwaysInline);
5727 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::NoInline);
5734 CannotThrow =
false;
5743 CannotThrow = Attrs.hasFnAttr(llvm::Attribute::NoUnwind);
5745 if (
auto *FPtr = dyn_cast<llvm::Function>(CalleePtr))
5746 if (FPtr->hasFnAttribute(llvm::Attribute::NoUnwind))
5754 if (UnusedReturnSizePtr)
5756 UnusedReturnSizePtr);
5758 llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr :
getInvokeDest();
5764 !isa_and_nonnull<FunctionDecl>(TargetDecl))
5768 if (FD->hasAttr<StrictFPAttr>())
5770 Attrs = Attrs.addFnAttribute(
getLLVMContext(), llvm::Attribute::StrictFP);
5772 AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*
this, TargetDecl);
5773 Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
5775 AllocAlignAttrEmitter AllocAlignAttrEmitter(*
this, TargetDecl, CallArgs);
5776 Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
5783 const auto *FD = dyn_cast_if_present<FunctionDecl>(TargetDecl);
5784 if (FD && FD->getNameInfo().getName().isIdentifier()) {
5786 FD->getName(), FD->getBuiltinID());
5791 CI =
Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
5794 CI =
Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
5798 if (CI->getCalledFunction() && CI->getCalledFunction()->hasName() &&
5799 CI->getCalledFunction()->getName().starts_with(
"_Z4sqrt")) {
5808 if (
const auto *FD = dyn_cast_or_null<FunctionDecl>(
CurFuncDecl)) {
5809 if (
const auto *A = FD->getAttr<CFGuardAttr>()) {
5810 if (A->getGuard() == CFGuardAttr::GuardArg::nocf && !CI->getCalledFunction())
5816 CI->setAttributes(Attrs);
5821 if (!CI->getType()->isVoidTy())
5822 CI->setName(
"call");
5828 LargestVectorWidth =
5834 if (!CI->getCalledFunction())
5841 AddObjCARCExceptionMetadata(CI);
5844 if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
5845 if (TargetDecl && TargetDecl->
hasAttr<NotTailCalledAttr>())
5846 Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
5847 else if (IsMustTail)
5848 Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
5853 TargetDecl->
hasAttr<MSAllocatorAttr>())
5857 if (TargetDecl && TargetDecl->
hasAttr<ErrorAttr>()) {
5858 llvm::ConstantInt *Line =
5860 llvm::ConstantAsMetadata *MD = llvm::ConstantAsMetadata::get(Line);
5862 CI->setMetadata(
"srcloc", MDT);
5869 bool SyclSkipNoReturn =
false;
5870 if (
getLangOpts().SYCLIsDevice && CI->doesNotReturn()) {
5871 if (
auto *F = CI->getCalledFunction())
5872 F->removeFnAttr(llvm::Attribute::NoReturn);
5873 CI->removeFnAttr(llvm::Attribute::NoReturn);
5874 SyclSkipNoReturn =
true;
5880 if (!SyclSkipNoReturn && CI->doesNotReturn()) {
5881 if (UnusedReturnSizePtr)
5885 if (
SanOpts.
has(SanitizerKind::Unreachable)) {
5888 if (
auto *F = CI->getCalledFunction())
5889 F->removeFnAttr(llvm::Attribute::NoReturn);
5890 CI->removeFnAttr(llvm::Attribute::NoReturn);
5895 SanitizerKind::KernelAddress)) {
5897 llvm::IRBuilder<>::InsertPointGuard IPGuard(
Builder);
5899 auto *FnType = llvm::FunctionType::get(
CGM.
VoidTy,
false);
5900 llvm::FunctionCallee Fn =
5907 Builder.ClearInsertionPoint();
5927 if (CI->getType()->isVoidTy())
5931 Builder.ClearInsertionPoint();
5937 if (swiftErrorTemp.
isValid()) {
5960 bool requiresExtract = isa<llvm::StructType>(CI->getType());
5962 unsigned unpaddedIndex = 0;
5963 for (
unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
5964 llvm::Type *eltType = coercionType->getElementType(i);
5967 llvm::Value *elt = CI;
5968 if (requiresExtract)
5969 elt =
Builder.CreateExtractValue(elt, unpaddedIndex++);
5971 assert(unpaddedIndex == 0);
5980 if (UnusedReturnSizePtr)
5996 llvm::Value *Real =
Builder.CreateExtractValue(CI, 0);
5997 llvm::Value *Imag =
Builder.CreateExtractValue(CI, 1);
6006 DestIsVolatile =
false;
6014 llvm::Value *
V = CI;
6015 if (
V->getType() != RetIRTy)
6020 llvm_unreachable(
"bad evaluation kind");
6026 if (
auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(RetIRTy)) {
6027 llvm::Value *
V = CI;
6028 if (
auto *ScalableSrcTy =
6029 dyn_cast<llvm::ScalableVectorType>(
V->getType())) {
6030 if (FixedDstTy->getElementType() == ScalableSrcTy->getElementType()) {
6031 llvm::Value *Zero = llvm::Constant::getNullValue(
CGM.
Int64Ty);
6032 V =
Builder.CreateExtractVector(FixedDstTy,
V, Zero,
"cast.fixed");
6043 DestIsVolatile =
false;
6060 llvm_unreachable(
"Invalid ABI kind for return argument");
6063 llvm_unreachable(
"Unhandled ABIArgInfo::Kind");
6067 if (
Ret.isScalar() && TargetDecl) {
6068 AssumeAlignedAttrEmitter.EmitAsAnAssumption(
Loc, RetTy,
Ret);
6069 AllocAlignAttrEmitter.EmitAsAnAssumption(
Loc, RetTy,
Ret);
6075 LifetimeEnd.Emit(*
this, {});
static void appendParameterTypes(const CodeGenTypes &CGT, SmallVectorImpl< CanQualType > &prefix, SmallVectorImpl< FunctionProtoType::ExtParameterInfo > ¶mInfos, CanQual< FunctionProtoType > FPT)
Adds the formal parameters in FPT to the given prefix.
static bool isInAllocaArgument(CGCXXABI &ABI, QualType type)
static uint64_t buildMultiCharMask(const SmallVectorImpl< uint64_t > &Bits, int Pos, int Size, int CharWidth, bool BigEndian)
static const CGFunctionInfo & arrangeFreeFunctionLikeCall(CodeGenTypes &CGT, CodeGenModule &CGM, const CallArgList &args, const FunctionType *fnType, unsigned numExtraRequiredArgs, bool chainCall)
Arrange a call as unto a free function, except possibly with an additional number of formal parameter...
static CanQualType GetReturnType(QualType RetTy)
Returns the "extra-canonicalized" return type, which discards qualifiers on the return type.
static llvm::Value * emitAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Emit an ARC autorelease of the result of a function.
static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr, const ABIArgInfo &info)
static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty)
static void setBitRange(SmallVectorImpl< uint64_t > &Bits, int BitOffset, int BitWidth, int CharWidth)
static bool isProvablyNull(llvm::Value *addr)
static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, llvm::AttrBuilder &FuncAttrs, const FunctionProtoType *FPT)
static llvm::StoreInst * findDominatingStoreToReturnValue(CodeGenFunction &CGF)
Heuristically search for a dominating store to the return-value slot.
static const Expr * maybeGetUnaryAddrOfOperand(const Expr *E)
static llvm::SmallVector< FunctionProtoType::ExtParameterInfo, 16 > getExtParameterInfosForCall(const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
static void eraseUnusedBitCasts(llvm::Instruction *insn)
static llvm::Value * CreateCoercedLoad(Address Src, llvm::Type *Ty, CodeGenFunction &CGF)
CreateCoercedLoad - Create a load from.
static llvm::Value * emitArgumentDemotion(CodeGenFunction &CGF, const VarDecl *var, llvm::Value *value)
An argument came in as a promoted argument; demote it back to its declared type.
static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method)
static llvm::Value * tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF, llvm::Value *result)
Try to emit a fused autorelease of a return result.
static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs, const LangOptions &LangOpts, const NoBuiltinAttr *NBA=nullptr)
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, const ObjCIndirectCopyRestoreExpr *CRE)
Emit an argument that's being passed call-by-writeback.
static void overrideFunctionFeaturesWithTargetFeatures(llvm::AttrBuilder &FuncAttr, const llvm::Function &F, const TargetOptions &TargetOpts)
Merges target-features from \TargetOpts and \F, and sets the result in \FuncAttr.
static std::unique_ptr< TypeExpansion > getTypeExpansion(QualType Ty, const ASTContext &Context)
static llvm::fp::FPAccuracy convertFPAccuracy(StringRef FPAccuracyStr)
static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D, bool IsWindows)
static int getExpansionSize(QualType Ty, const ASTContext &Context)
static bool DetermineNoUndef(QualType QTy, CodeGenTypes &Types, const llvm::DataLayout &DL, const ABIArgInfo &AI, bool CheckCoerce=true)
static void addDenormalModeAttrs(llvm::DenormalMode FPDenormalMode, llvm::DenormalMode FP32DenormalMode, llvm::AttrBuilder &FuncAttrs)
Add denormal-fp-math and denormal-fp-math-f32 as appropriate for the requested denormal behavior,...
static void emitWritebacks(CodeGenFunction &CGF, const CallArgList &args)
static int32_t convertFPAccuracyToAspect(StringRef FPAccuracyStr)
static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, const CallArgList &CallArgs)
static bool isProvablyNonNull(Address Addr, CodeGenFunction &CGF)
static void CreateCoercedStore(llvm::Value *Src, Address Dst, bool DstIsVolatile, CodeGenFunction &CGF)
CreateCoercedStore - Create a store to.
static llvm::Value * tryRemoveRetainOfSelf(CodeGenFunction &CGF, llvm::Value *result)
If this is a +1 of the value of an immutable 'self', remove it.
static void addExtParameterInfosForCall(llvm::SmallVectorImpl< FunctionProtoType::ExtParameterInfo > ¶mInfos, const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs)
static bool canApplyNoFPClass(const ABIArgInfo &AI, QualType ParamType, bool IsReturn)
Test if it's legal to apply nofpclass for the given parameter type and it's lowered IR type.
static void getTrivialDefaultFunctionAttributes(StringRef Name, bool HasOptnone, const CodeGenOptions &CodeGenOpts, const LangOptions &LangOpts, bool AttrOnCallSite, llvm::AttrBuilder &FuncAttrs)
static CanQual< FunctionProtoType > GetFormalType(const CXXMethodDecl *MD)
Returns the canonical formal type of the given C++ method.
static llvm::FPClassTest getNoFPClassTestMask(const LangOptions &LangOpts)
Return the nofpclass mask that can be applied to floating-point parameters.
static void forConstantArrayExpansion(CodeGenFunction &CGF, ConstantArrayExpansion *CAE, Address BaseAddr, llvm::function_ref< void(Address)> Fn)
static bool IsArgumentMaybeUndef(const Decl *TargetDecl, unsigned NumRequiredArgs, unsigned ArgNo)
Check if the argument of a function has maybe_undef attribute.
static const NonNullAttr * getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, QualType ArgType, unsigned ArgNo)
Returns the attribute (either parameter attribute, or function attribute), which declares argument Ar...
static bool hasInAllocaArgs(CodeGenModule &CGM, CallingConv ExplicitCC, ArrayRef< QualType > ArgTypes)
static SmallVector< CanQualType, 16 > getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args)
static RawAddress CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty, CharUnits MinAlign, const Twine &Name="tmp")
Create a temporary allocation for the purposes of coercion.
static void setUsedBits(CodeGenModule &, QualType, int, SmallVectorImpl< uint64_t > &)
static SmallVector< CanQualType, 16 > getArgTypesForCall(ASTContext &ctx, const CallArgList &args)
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM, const FunctionDecl *FD)
Set calling convention for CUDA/HIP kernel.
static Address EnterStructPointerForCoercedAccess(Address SrcPtr, llvm::StructType *SrcSTy, uint64_t DstSize, CodeGenFunction &CGF)
EnterStructPointerForCoercedAccess - Given a struct pointer that we are accessing some number of byte...
static const CGFunctionInfo & arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod, SmallVectorImpl< CanQualType > &prefix, CanQual< FunctionProtoType > FTP)
Arrange the LLVM function layout for a value of the given function type, on top of any implicit param...
static void emitWriteback(CodeGenFunction &CGF, const CallArgList::Writeback &writeback)
Emit the actual writing-back of a writeback.
static bool HasStrictReturn(const CodeGenModule &Module, QualType RetTy, const Decl *TargetDecl)
static void addMergableDefaultFunctionAttributes(const CodeGenOptions &CodeGenOpts, llvm::AttrBuilder &FuncAttrs)
Add default attributes to a function, which have merge semantics under -mlink-builtin-bitcode and sho...
static void AddAttributesFromOMPAssumes(llvm::AttrBuilder &FuncAttrs, const Decl *Callee)
static unsigned getMaxVectorWidth(const llvm::Type *Ty)
static llvm::Value * CoerceIntOrPtrToIntOrPtr(llvm::Value *Val, llvm::Type *Ty, CodeGenFunction &CGF)
CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both are either integers or p...
CodeGenFunction::ComplexPairTy ComplexPairTy
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate....
llvm::MachO::Target Target
static QualType getParamType(Sema &SemaRef, ArrayRef< ResultCandidate > Candidates, unsigned N)
Get the type of the Nth parameter from a given set of overload candidates.
static bool isInstanceMethod(const Decl *D)
__DEVICE__ int max(int __a, int __b)
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
CanQualType getCanonicalParamType(QualType T) const
Return the canonical parameter type corresponding to the specific potentially non-canonical one.
QualType getTagDeclType(const TagDecl *Decl) const
Return the unique reference to the type for the specified TagDecl (struct/union/class/enum) decl.
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
CallingConv getDefaultCallingConvention(bool IsVariadic, bool IsCXXMethod, bool IsBuiltin=false) const
Retrieves the default calling convention for the current target.
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
const ConstantArrayType * getAsConstantArrayType(QualType T) const
QualType getBaseElementType(const ArrayType *VAT) const
Return the innermost element type of an array type.
QualType getObjCSelType() const
Retrieve the type that corresponds to the predefined Objective-C 'SEL' type.
CanQualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
TypeInfoChars getTypeInfoInChars(const Type *T) const
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
const TargetInfo & getTargetInfo() const
QualType getAddrSpaceQualType(QualType T, LangAS AddressSpace) const
Return the uniqued reference to the type for an address space qualified type with the specified type ...
uint64_t getConstantArrayElementCount(const ConstantArrayType *CA) const
Return number of constant array elements.
QualType getIntPtrType() const
Return a type compatible with "intptr_t" (C99 7.18.1.4), as defined by the target.
uint64_t getCharWidth() const
Return the size of the character type, in bits.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Attr - This represents one attribute.
const FunctionProtoType * getFunctionType() const
getFunctionType - Return the underlying function type for this block.
This class is used for builtin types like 'int'.
Represents a base class of a C++ class.
QualType getType() const
Retrieves the type of the base class.
Represents a C++ constructor within a class.
Represents a C++ destructor within a class.
Represents a static or instance method of a struct/union/class.
bool isImplicitObjectMemberFunction() const
[C++2b][dcl.fct]/p7 An implicit object member function is a non-static member function without an exp...
const CXXRecordDecl * getParent() const
Return the parent of this method declaration, which is the class in which this method is defined.
Qualifiers getMethodQualifiers() const
Represents a C++ struct/union/class.
CXXDestructorDecl * getDestructor() const
Returns the destructor decl for this class.
unsigned getNumVBases() const
Retrieves the number of virtual base classes of this class.
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
SourceLocation getBeginLoc() const LLVM_READONLY
static CanQual< Type > CreateUnsafe(QualType Other)
Builds a canonical type from a QualType.
CanProxy< U > castAs() const
CanQual< T > getUnqualifiedType() const
Retrieve the unqualified form of this type.
CanProxy< U > getAs() const
Retrieve a canonical type pointer with a different static type, upcasting or downcasting as needed.
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
CharUnits - This is an opaque type for sizes expressed in character units.
bool isZero() const
isZero - Test whether the quantity equals zero.
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
CodeGenOptions - Track various options which control how the code is optimized and passed to the back...
llvm::DenormalMode FPDenormalMode
The floating-point denormal mode to use.
static StringRef getFramePointerKindName(FramePointerKind Kind)
std::vector< std::string > Reciprocals
llvm::DenormalMode FP32DenormalMode
The floating-point denormal mode to use, for float.
std::string TrapFuncName
If not an empty string, trap intrinsics are lowered to calls to this function instead of to trap inst...
std::vector< std::string > DefaultFunctionAttrs
std::string PreferVectorWidth
The preferred width for auto-vectorization transforms.
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
unsigned getInAllocaFieldIndex() const
bool getIndirectByVal() const
bool getIndirectRealign() const
void setCoerceToType(llvm::Type *T)
bool getCanBeFlattened() const
unsigned getDirectOffset() const
static bool isPaddingForCoerceAndExpand(llvm::Type *eltType)
bool getInAllocaSRet() const
Return true if this field of an inalloca struct should be returned to implement a struct return calli...
llvm::Type * getUnpaddedCoerceAndExpandType() const
bool getPaddingInReg() const
unsigned getDirectAlign() const
unsigned getIndirectAddrSpace() const
ArrayRef< llvm::Type * > getCoerceAndExpandTypeSequence() const
llvm::Type * getPaddingType() const
@ Extend
Extend - Valid only for integer argument types.
@ Ignore
Ignore - Ignore the argument (treat as void).
@ IndirectAliased
IndirectAliased - Similar to Indirect, but the pointer may be to an object that is otherwise referenc...
@ Expand
Expand - Only valid for aggregate argument types.
@ InAlloca
InAlloca - Pass the argument directly using the LLVM inalloca attribute.
@ Indirect
Indirect - Pass the argument indirectly via a hidden pointer with the specified alignment (0 indicate...
@ CoerceAndExpand
CoerceAndExpand - Only valid for aggregate argument types.
@ Direct
Direct - Pass the argument directly using the normal converted LLVM type, or by coercing to another s...
bool isCoerceAndExpand() const
llvm::Type * getCoerceToType() const
unsigned getInAllocaIndirect() const
bool isIndirectAliased() const
bool isSRetAfterThis() const
bool canHaveCoerceToType() const
llvm::StructType * getCoerceAndExpandType() const
CharUnits getIndirectAlign() const
virtual CodeGen::Address EmitMSVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty) const
Emit the target dependent code to load a value of.
virtual CodeGen::Address EmitVAArg(CodeGen::CodeGenFunction &CGF, CodeGen::Address VAListAddr, QualType Ty) const =0
EmitVAArg - Emit the target dependent code to load a value of.
virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const =0
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
CharUnits getAlignment() const
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
llvm::PointerType * getType() const
Return the type of the pointer value.
llvm::Value * getBasePointer() const
llvm::StringRef getName() const
Return the IR name of the pointer value.
Address getAddress() const
void setExternallyDestructed(bool destructed=true)
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
const BlockExpr * BlockExpression
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Address CreateConstGEP2_32(Address Addr, unsigned Idx0, unsigned Idx1, const llvm::Twine &Name="")
llvm::LoadInst * CreateFlagLoad(llvm::Value *Addr, const llvm::Twine &Name="")
Emit a load from an i1 flag variable.
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
llvm::Value * CreateIsNull(Address Addr, const Twine &Name="")
Address CreateAddrSpaceCast(Address Addr, llvm::Type *Ty, llvm::Type *ElementTy, const llvm::Twine &Name="")
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
Implements C++ ABI-specific code generation functions.
virtual bool hasMostDerivedReturn(GlobalDecl GD) const
virtual bool HasThisReturn(GlobalDecl GD) const
Returns true if the given constructor or destructor is one of the kinds that the ABI says returns 'th...
virtual const CXXRecordDecl * getThisArgumentTypeForMethod(GlobalDecl GD)
Get the type of the implicit "this" parameter used by a method.
@ RAA_DirectInMemory
Pass it on the stack using its defined layout.
virtual CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD, Address This, llvm::Type *Ty, SourceLocation Loc)=0
Build a virtual function pointer in the ABI-specific way.
virtual RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const =0
Returns how an argument of the given record type should be passed.
virtual AddedStructorArgCounts buildStructorSignature(GlobalDecl GD, SmallVectorImpl< CanQualType > &ArgTys)=0
Build the signature of the given constructor or destructor variant by adding any required parameters.
Abstract information about a function or function prototype.
const GlobalDecl getCalleeDecl() const
const FunctionProtoType * getCalleeFunctionProtoType() const
All available information about a concrete callee.
CGCallee prepareConcreteCallee(CodeGenFunction &CGF) const
If this is a delayed callee computation of some sort, prepare a concrete callee.
Address getThisAddress() const
llvm::Value * getFunctionPointer() const
const CallExpr * getVirtualCallExpr() const
GlobalDecl getVirtualMethodDecl() const
llvm::FunctionType * getVirtualFunctionType() const
void addHeapAllocSiteMetadata(llvm::CallBase *CallSite, QualType AllocatedTy, SourceLocation Loc)
Add heapallocsite metadata for MSAllocator calls.
CGFunctionInfo - Class to encapsulate the information about a function definition.
bool usesInAlloca() const
Return true if this function uses inalloca arguments.
FunctionType::ExtInfo getExtInfo() const
bool isInstanceMethod() const
bool isReturnsRetained() const
In ARC, whether this function retains its return value.
void Profile(llvm::FoldingSetNodeID &ID)
const_arg_iterator arg_begin() const
CanQualType getReturnType() const
static CGFunctionInfo * create(unsigned llvmCC, bool instanceMethod, bool chainCall, bool delegateCall, const FunctionType::ExtInfo &extInfo, ArrayRef< ExtParameterInfo > paramInfos, CanQualType resultType, ArrayRef< CanQualType > argTypes, RequiredArgs required)
bool isCmseNSCall() const
bool isDelegateCall() const
MutableArrayRef< ArgInfo > arguments()
const_arg_iterator arg_end() const
unsigned getEffectiveCallingConvention() const
getEffectiveCallingConvention - Return the actual calling convention to use, which may depend on the ...
ExtParameterInfo getExtParameterInfo(unsigned argIndex) const
ABIArgInfo & getReturnInfo()
CharUnits getArgStructAlignment() const
unsigned arg_size() const
llvm::StructType * getArgStruct() const
Get the struct type used to represent all the arguments in memory.
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
RequiredArgs getRequiredArgs() const
unsigned getNumRequiredArgs() const
CGRecordLayout - This class handles struct and union layout info while lowering AST types to LLVM typ...
const CGBitFieldInfo & getBitFieldInfo(const FieldDecl *FD) const
Return the BitFieldInfo that corresponds to the field FD.
CallArgList - Type for representing both the value and type of arguments in a call.
void addUncopiedAggregate(LValue LV, QualType type)
ArrayRef< CallArgCleanup > getCleanupsToDeactivate() const
llvm::Instruction * getStackBase() const
void addArgCleanupDeactivation(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *IsActiveIP)
bool hasWritebacks() const
void add(RValue rvalue, QualType type)
bool isUsingInAlloca() const
Returns if we're using an inalloca struct to pass arguments in memory.
void allocateArgumentMemory(CodeGenFunction &CGF)
void freeArgumentMemory(CodeGenFunction &CGF) const
writeback_const_range writebacks() const
void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse)
An abstract representation of regular/ObjC call/message targets.
An object to manage conditionally-evaluated expressions.
void begin(CodeGenFunction &CGF)
void end(CodeGenFunction &CGF)
static ParamValue forIndirect(Address addr)
static ParamValue forDirect(llvm::Value *value)
RAII object to set/unset CodeGenFunction::IsSanitizerScope.
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
EHScopeStack::stable_iterator CurrentCleanupScopeDepth
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T)
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerMask >> Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
llvm::Value * EmitARCRetainAutoreleaseReturnValue(llvm::Value *value)
Do a fused retain/autorelease of the given object.
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
SanitizerSet SanOpts
Sanitizers enabled for this function.
void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc, SourceLocation EndLoc)
EmitFunctionEpilog - Emit the target specific LLVM code to return the given temporary.
void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl)
static bool hasScalarEvaluationKind(QualType T)
llvm::Type * ConvertType(QualType T)
bool isCleanupPadScope() const
Returns true while emitting a cleanuppad.
llvm::CallBase * EmitCallOrInvoke(llvm::FunctionCallee Callee, ArrayRef< llvm::Value * > Args, const Twine &Name="")
Emits a call or invoke instruction to the given function, depending on the current state of the EH st...
void EmitAggregateStore(llvm::Value *Val, Address Dest, bool DestIsVolatile)
Build all the stores needed to initialize an aggregate at Dest with the value Val.
void EmitNoreturnRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args)
Emits a call or invoke to the given noreturn runtime function.
llvm::CallBase * EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args, const Twine &name="")
Emits a call or invoke instruction to the given runtime function.
void callCStructDestructor(LValue Dst)
ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc)
EmitLoadOfComplex - Load a complex number from the specified l-value.
llvm::Value * EmitLifetimeStart(llvm::TypeSize Size, llvm::Value *Addr)
Emit a lifetime.begin marker if some criteria are satisfied.
llvm::Value * EmitARCAutoreleaseReturnValue(llvm::Value *value)
Autorelease the given object.
bool shouldUseFusedARCCalls()
bool CurFuncIsThunk
In C++, whether we are code generating a thunk.
Address GetAddressOfBaseClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue, SourceLocation Loc)
GetAddressOfBaseClass - This function will add the necessary delta to the load of 'this' and returns ...
bool isSEHTryScope() const
Returns true inside SEH __try blocks.
RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc)
Given the address of a temporary variable, produce an r-value of its type.
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
void SetSqrtFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void EmitReturnValueCheck(llvm::Value *RV)
Emit a test that checks if the return value RV is nonnull.
llvm::LLVMContext & getLLVMContext()
void EmitDelegateCallArg(CallArgList &args, const VarDecl *param, SourceLocation loc)
EmitDelegateCallArg - We are performing a delegate call; that is, the current function is delegating ...
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
llvm::BasicBlock * getUnreachableBlock()
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
pushDestroy - Push the standard destructor for the given type as at least a normal cleanup.
const CodeGen::CGBlockInfo * BlockInfo
void EmitKCFIOperandBundle(const CGCallee &Callee, SmallVectorImpl< llvm::OperandBundleDef > &Bundles)
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
void EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr)
llvm::Value * getAsNaturalPointerTo(Address Addr, QualType PointeeType)
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type, bool ForVirtualBase, bool Delegating, Address This, QualType ThisTy)
bool InNoMergeAttributedStmt
True if the current statement has nomerge attribute.
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise)
Release the given object.
bool currentFunctionUsesSEHTry() const
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **callOrInvoke, bool IsMustTail, SourceLocation Loc)
EmitCall - Generate a call of the given function, expecting the given result type,...
JumpDest ReturnBlock
ReturnBlock - Unified return block.
@ ForceLeftToRight
Language semantics require left-to-right evaluation.
@ ForceRightToLeft
Language semantics require right-to-left evaluation.
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum)
Create a check for a function parameter that may potentially be declared as non-null.
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
ASTContext & getContext() const
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *DominatingIP)
DeactivateCleanupBlock - Deactivates the given cleanup block.
void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType)
EmitCallArg - Emit a single call argument.
AggValueSlot CreateAggTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateAggTemp - Create a temporary memory object for the given aggregate type.
CallType * addControlledConvergenceToken(CallType *Input)
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
const TargetInfo & getTarget() const
RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment without...
bool InNoInlineAttributedStmt
True if the current statement has noinline attribute.
SmallVector< llvm::OperandBundleDef, 1 > getBundlesForFunclet(llvm::Value *Callee)
RValue EmitAnyExprToTemp(const Expr *E)
EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will always be accessible even if...
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
void EmitFunctionProlog(const CGFunctionInfo &FI, llvm::Function *Fn, const FunctionArgList &Args)
EmitFunctionProlog - Emit the target specific LLVM code to load the arguments for the given function.
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Address EmitVAListRef(const Expr *E)
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
void EmitParmDecl(const VarDecl &D, ParamValue Arg, unsigned ArgNo)
EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
CGDebugInfo * getDebugInfo()
llvm::CallInst * MaybeEmitFPBuiltinofFD(llvm::FunctionType *IRFuncTy, const SmallVectorImpl< llvm::Value * > &IRArgs, llvm::Value *FnPtr, StringRef Name, unsigned FDBuiltinID)
llvm::BasicBlock * getInvokeDest()
bool AutoreleaseResult
In ARC, whether we should autorelease the return value.
llvm::Value * EmitARCRetainNonBlock(llvm::Value *value)
Retain the given object, with normal retain semantics.
llvm::Type * ConvertTypeForMem(QualType T)
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
bool InAlwaysInlineAttributedStmt
True if the current statement has always_inline attribute.
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
const TargetCodeGenInfo & getTargetHooks() const
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc)
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
Address EmitMSVAListRef(const Expr *E)
Emit a "reference" to a __builtin_ms_va_list; this is always the value of the expression,...
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
static bool hasAggregateEvaluationKind(QualType T)
void EmitCallArgs(CallArgList &Args, PrototypeWrapper Prototype, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, AbstractCallee AC=AbstractCallee(), unsigned ParamsToSkip=0, EvaluationOrder Order=EvaluationOrder::Default)
EmitCallArgs - Emit call arguments for a function.
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
const CallExpr * MustTailCall
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
void EmitUnreachable(SourceLocation Loc)
Emit a reached-unreachable diagnostic if Loc is valid and runtime checking is enabled.
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
CodeGenTypes & getTypes() const
llvm::Instruction * CurrentFuncletPad
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
Address EmitVAArg(VAArgExpr *VE, Address &VAListAddr)
Generate code to get an argument from the passed in pointer and update it accordingly.
llvm::Value * EmitNonNullRValueCheck(RValue RV, QualType T)
Create a check that a scalar RValue is non-null.
void EmitARCIntrinsicUse(ArrayRef< llvm::Value * > values)
Given a number of pointers, inform the optimizer that they're being intrinsically used up until this ...
llvm::Value * EmitCMSEClearRecord(llvm::Value *V, llvm::IntegerType *ITy, QualType RTy)
void PopCleanupBlock(bool FallThroughIsBranchThrough=false, bool ForDeactivation=false)
PopCleanupBlock - Will pop the cleanup entry on the stack and process all branch fixups.
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
const LangOptions & getLangOpts() const
This class organizes the cross-function state that is used while generating LLVM code.
const TargetInfo & getTarget() const
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
CodeGenTypes & getTypes()
const llvm::DataLayout & getDataLayout() const
bool ReturnTypeUsesFPRet(QualType ResultType)
Return true iff the given type uses 'fpret' when used as a return type.
void ErrorUnsupported(const Stmt *S, const char *Type)
Print out an error that codegen doesn't support the specified stmt yet.
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
const LangOptions & getLangOpts() const
const llvm::Triple & getTriple() const
llvm::LLVMContext & getLLVMContext()
llvm::MDNode * getNoObjCARCExceptionsMetadata()
void Error(SourceLocation loc, StringRef error)
Emit a general error that something can't be done.
bool shouldEmitConvergenceTokens() const
bool ReturnTypeUsesFP2Ret(QualType ResultType)
Return true iff the given type uses 'fp2ret' when used as a return type.
ObjCEntrypoints & getObjCEntrypoints() const
CGCXXABI & getCXXABI() const
bool ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI)
Return true iff the given type uses an argument slot when 'sret' is used as a return type.
bool ReturnTypeHasInReg(const CGFunctionInfo &FI)
Return true iff the given type has inreg set.
void AdjustMemoryAttribute(StringRef Name, CGCalleeInfo CalleeInfo, llvm::AttributeList &Attrs)
Adjust Memory attribute to ensure that the BE gets the right attribute.
void ConstructAttributeList(StringRef Name, const CGFunctionInfo &Info, CGCalleeInfo CalleeInfo, llvm::AttributeList &Attrs, unsigned &CallingConv, bool AttrOnCallSite, bool IsThunk)
Get the LLVM attributes and calling convention to use for a particular function type.
bool ReturnTypeUsesSRet(const CGFunctionInfo &FI)
Return true iff the given type uses 'sret' when used as a return type.
const TargetCodeGenInfo & getTargetCodeGenInfo()
ASTContext & getContext() const
void addDefaultFunctionDefinitionAttributes(llvm::AttrBuilder &attrs)
Like the overload taking a Function &, but intended specifically for frontends that want to build on ...
CharUnits getNaturalPointeeTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
const CodeGenOptions & getCodeGenOpts() const
CharUnits getMinimumObjectSize(QualType Ty)
Returns the minimum object size for an object of the given type.
bool MayDropFunctionReturn(const ASTContext &Context, QualType ReturnType) const
Whether this function's return type has no side effects, and thus may be trivially discarded if it is...
void valueProfile(CGBuilderTy &Builder, uint32_t ValueKind, llvm::Instruction *ValueSite, llvm::Value *ValuePtr)
This class organizes the cross-module state that is used while lowering AST types to LLVM types.
const CGFunctionInfo & arrangeCXXMethodType(const CXXRecordDecl *RD, const FunctionProtoType *FTP, const CXXMethodDecl *MD)
Arrange the argument and result information for a call to an unknown C++ non-static member function o...
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
const CGFunctionInfo & arrangeCXXMethodDeclaration(const CXXMethodDecl *MD)
C++ methods have some special rules and also have implicit parameters.
const CGFunctionInfo & arrangeLLVMFunctionInfo(CanQualType returnType, FnInfoOpts opts, ArrayRef< CanQualType > argTypes, FunctionType::ExtInfo info, ArrayRef< FunctionProtoType::ExtParameterInfo > paramInfos, RequiredArgs args)
"Arrange" the LLVM information for a call or type with the given signature.
const CGFunctionInfo & arrangeFreeFunctionType(CanQual< FunctionProtoType > Ty)
Arrange the argument and result information for a value of the given freestanding function type.
CanQualType DeriveThisType(const CXXRecordDecl *RD, const CXXMethodDecl *MD)
Derives the 'this' type for codegen purposes, i.e.
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for the given function info.
bool inheritingCtorHasParams(const InheritedConstructor &Inherited, CXXCtorType Type)
Determine if a C++ inheriting constructor should have parameters matching those of its inherited cons...
bool isFuncTypeConvertible(const FunctionType *FT)
isFuncTypeConvertible - Utility to check whether a function type can be converted to an LLVM type (i....
const CGFunctionInfo & arrangeBlockFunctionCall(const CallArgList &args, const FunctionType *type)
A block function is essentially a free function with an extra implicit argument.
const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args)
A builtin function is a freestanding function using the default C conventions.
const CGFunctionInfo & arrangeUnprototypedObjCMessageSend(QualType returnType, const CallArgList &args)
const ABIInfo & getABIInfo() const
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
unsigned getTargetAddressSpace(QualType T) const
void getExpandedTypes(QualType Ty, SmallVectorImpl< llvm::Type * >::iterator &TI)
getExpandedTypes - Expand the type
const CGFunctionInfo & arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD)
Objective-C methods are C functions with some implicit parameters.
const CGFunctionInfo & arrangeGlobalDeclaration(GlobalDecl GD)
const CGFunctionInfo & arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD)
Arrange a thunk that takes 'this' as the first parameter followed by varargs.
const CGFunctionInfo & arrangeCXXMethodCall(const CallArgList &args, const FunctionProtoType *type, RequiredArgs required, unsigned numPrefixArgs)
Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo & arrangeFreeFunctionCall(const CallArgList &Args, const FunctionType *Ty, bool ChainCall)
Figure out the rules for calling a function with the given formal type using the given arguments.
ASTContext & getContext() const
const CGFunctionInfo & arrangeBuiltinFunctionCall(QualType resultType, const CallArgList &args)
const CGFunctionInfo & arrangeFunctionDeclaration(const FunctionDecl *FD)
Free functions are functions that are compatible with an ordinary C function pointer type.
const CGFunctionInfo & arrangeBlockFunctionDeclaration(const FunctionProtoType *type, const FunctionArgList &args)
Block invocation functions are C functions with an implicit parameter.
unsigned ClangCallConvToLLVMCallConv(CallingConv CC)
Convert clang calling convention to LLVM calling convention.
llvm::Type * GetFunctionTypeForVTable(GlobalDecl GD)
GetFunctionTypeForVTable - Get the LLVM function type for use in a vtable, given a CXXMethodDecl.
const CGFunctionInfo & arrangeCXXConstructorCall(const CallArgList &Args, const CXXConstructorDecl *D, CXXCtorType CtorKind, unsigned ExtraPrefixArgs, unsigned ExtraSuffixArgs, bool PassProtoArgs=true)
Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo & arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD, QualType receiverType)
Arrange the argument and result information for the function type through which to perform a send to ...
const CGFunctionInfo & arrangeCXXStructorDeclaration(GlobalDecl GD)
llvm::LLVMContext & getLLVMContext()
const CGFunctionInfo & arrangeMSCtorClosure(const CXXConstructorDecl *CD, CXXCtorType CT)
const CGFunctionInfo & arrangeCall(const CGFunctionInfo &declFI, const CallArgList &args)
Given a function info for a declaration, return the function info for a call with the given arguments...
const CGFunctionInfo & arrangeNullaryFunction()
A nullary function is a freestanding function of type 'void ()'.
A cleanup scope which generates the cleanup blocks lazily.
EHScopeStack::Cleanup * getCleanup()
Information for lazily generating a cleanup.
virtual bool isRedundantBeforeReturn()
A saved depth on the scope stack.
stable_iterator stable_begin() const
Create a stable reference to the top of the EH stack.
iterator end() const
Returns an iterator pointing to the outermost EH scope.
iterator find(stable_iterator save) const
Turn a stable reference to a scope depth into an unstable pointer to the EH stack.
FunctionArgList - Type for representing both the decl and type of parameters to a function.
LValue - This represents an lvalue reference.
bool isVolatileQualified() const
LangAS getAddressSpace() const
CharUnits getAlignment() const
static LValue MakeAddr(Address Addr, QualType type, ASTContext &Context, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Address getAddress() const
ARCPreciseLifetime_t isARCPreciseLifetime() const
Qualifiers::ObjCLifetime getObjCLifetime() const
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
static RValue get(llvm::Value *V)
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
bool isVolatileQualified() const
An abstract representation of an aligned address.
CharUnits getAlignment() const
Return the alignment of this pointer.
llvm::Value * getPointer() const
llvm::Type * getElementType() const
Return the type of the values stored in this address.
static RawAddress invalid()
A class for recording the number of arguments that a function signature requires.
bool allowsOptionalArgs() const
unsigned getNumRequiredArgs() const
static RequiredArgs forPrototypePlus(const FunctionProtoType *prototype, unsigned additional)
Compute the arguments required by the given formal prototype, given that there may be some additional...
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
virtual bool doesReturnSlotInterfereWithArgs() const
doesReturnSlotInterfereWithArgs - Return true if the target uses an argument slot for an 'sret' type.
virtual void setCUDAKernelCallingConvention(const FunctionType *&FT) const
Address performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, Address Addr, LangAS SrcAddr, LangAS DestAddr, llvm::Type *DestTy, bool IsNonNull=false) const
virtual void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller, const FunctionDecl *Callee, const CallArgList &Args, QualType ReturnType) const
Any further codegen related checks that need to be done on a function call in a target specific manne...
virtual unsigned getOpenCLKernelCallingConv() const
Get LLVM calling convention for OpenCL kernel.
virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args, const FunctionNoProtoType *fnType) const
Determine whether a call to an unprototyped functions under the given calling convention should use t...
Complex values, per C99 6.2.5p11.
Represents the canonical version of C arrays with a specified constant size.
bool constructsVirtualBase() const
Returns true if the constructed base class is a virtual base class subobject of this declaration's cl...
DeclContext - This is used only as base class of specific decl types that can act as declaration cont...
Decl - This represents one declaration (or definition), e.g.
llvm::iterator_range< specific_attr_iterator< T > > specific_attrs() const
DeclContext * getDeclContext()
SourceLocation getBeginLoc() const LLVM_READONLY
This represents one expression.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
@ NPC_ValueDependentIsNotNull
Specifies that a value-dependent expression should be considered to never be a null pointer constant.
ExprObjectKind getObjectKind() const
getObjectKind - The object kind that this expression produces.
NullPointerConstantKind isNullPointerConstant(ASTContext &Ctx, NullPointerConstantValueDependence NPC) const
isNullPointerConstant - C99 6.3.2.3p3 - Test if this reduces down to a Null pointer constant.
Represents a member of a struct/union/class.
bool isBitField() const
Determines whether this field is a bitfield.
bool isZeroLengthBitField(const ASTContext &Ctx) const
Is this a zero-length bit-field? Such bit-fields aren't really bit-fields at all and instead act as a...
bool isUnnamedBitField() const
Determines whether this is an unnamed bitfield.
Represents a function declaration or definition.
bool isTrivial() const
Whether this function is "trivial" in some specialized C++ senses.
Represents a K&R-style 'int foo()' function, which has no information available about its arguments.
Represents a prototype with parameter type info, e.g.
ExceptionSpecificationType getExceptionSpecType() const
Get the kind of exception specification on this function.
unsigned getNumParams() const
unsigned getAArch64SMEAttributes() const
Return a bitmask describing the SME attributes on the function type, see AArch64SMETypeAttributes for...
bool isVariadic() const
Whether this function prototype is variadic.
ArrayRef< ExtParameterInfo > getExtParameterInfos() const
bool isNothrow(bool ResultIfDependent=false) const
Determine whether this function type has a non-throwing exception specification.
bool hasExtParameterInfos() const
Is there any interesting extra information for any of the parameters of this function type?
Wrapper for source info for functions.
A class which abstracts out some details necessary for making a call.
ExtInfo withCallingConv(CallingConv cc) const
CallingConv getCC() const
ExtInfo withProducesResult(bool producesResult) const
bool getCmseNSCall() const
bool getNoCfCheck() const
unsigned getRegParm() const
bool getNoCallerSavedRegs() const
bool getHasRegParm() const
bool getProducesResult() const
Interesting information about a specific parameter that can't simply be reflected in parameter's type...
ParameterABI getABI() const
Return the ABI treatment of this parameter.
ExtParameterInfo withIsNoEscape(bool NoEscape) const
FunctionType - C99 6.7.5.3 - Function Declarators.
ExtInfo getExtInfo() const
static ArmStateValue getArmZT0State(unsigned AttrBits)
static ArmStateValue getArmZAState(unsigned AttrBits)
QualType getReturnType() const
@ SME_PStateSMEnabledMask
@ SME_PStateSMCompatibleMask
GlobalDecl - represents a global declaration.
CXXCtorType getCtorType() const
const Decl * getDecl() const
Description of a constructor that was inherited from a base class.
ConstructorUsingShadowDecl * getShadowDecl() const
@ FPE_Ignore
Assume that floating-point exceptions are masked.
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
std::vector< std::string > NoBuiltinFuncs
A list of all -fno-builtin-* function names (e.g., memset).
FPExceptionModeKind getDefaultExceptionMode() const
bool isNoBuiltinFunc(StringRef Name) const
Is this a libc/libm function that is no longer recognized as a builtin because a -fno-builtin-* optio...
bool assumeFunctionsAreConvergent() const
FPAccuracyFuncMapTy FPAccuracyFuncMap
Represents a matrix type, as defined in the Matrix Types clang extensions.
Describes a module or submodule.
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
ObjCCategoryDecl - Represents a category declaration.
ObjCIndirectCopyRestoreExpr - Represents the passing of a function argument by indirect copy-restore ...
bool shouldCopy() const
shouldCopy - True if we should do the 'copy' part of the copy-restore.
Represents an ObjC class declaration.
ObjCMethodDecl - Represents an instance or class method declaration.
ImplicitParamDecl * getSelfDecl() const
bool isDirectMethod() const
True if the method is tagged as objc_direct.
QualType getReturnType() const
ArrayRef< ParmVarDecl * > parameters() const
Represents a parameter to a function.
PointerType - C99 6.7.5.1 - Pointer Declarators.
QualType getPointeeType() const
A (possibly-)qualified type.
bool isRestrictQualified() const
Determine whether this type is restrict-qualified.
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
LangAS getAddressSpace() const
Return the address space of this type.
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
QualType getCanonicalType() const
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
LangAS getAddressSpace() const
Represents a struct/union/class.
bool hasFlexibleArrayMember() const
field_iterator field_end() const
field_range fields() const
RecordDecl * getDefinition() const
Returns the RecordDecl that actually defines this struct/union/class.
bool isParamDestroyedInCallee() const
field_iterator field_begin() const
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
RecordDecl * getDecl() const
Base for LValueReferenceType and RValueReferenceType.
Encodes a location in the source.
UIntTy getRawEncoding() const
When a SourceLocation itself cannot be used, this returns an (opaque) 32-bit integer encoding for it.
bool areArgsDestroyedLeftToRightInCallee() const
Are arguments to a call destroyed left to right in the callee? This is a fundamental language change,...
bool isMicrosoft() const
Is this ABI an MSVC-compatible ABI?
bool useObjCFPRetForRealType(FloatModeKind T) const
Check whether the given real type should use the "fpret" flavor of Objective-C message passing on thi...
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
TargetCXXABI getCXXABI() const
Get the C++ ABI currently in use.
bool useObjCFP2RetForComplexLongDouble() const
Check whether _Complex long double should use the "fp2ret" flavor of Objective-C message passing on t...
Options for controlling the target.
std::vector< std::string > Features
The list of target specific features to enable or disable – this should be a list of strings starting...
std::string TuneCPU
If given, the name of the target CPU to tune code for.
std::string CPU
If given, the name of the target CPU to generate code for.
The base class of the type hierarchy.
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
bool isBlockPointerType() const
bool isIncompleteArrayType() const
bool isConstantSizeType() const
Return true if this is not a variable sized type, according to the rules of C99 6....
bool isPointerType() const
CanQualType getCanonicalTypeUnqualified() const
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
const T * castAs() const
Member-template castAs<specific type>.
bool isReferenceType() const
bool isScalarType() const
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
bool isBitIntType() const
QualType getCanonicalTypeInternal() const
bool isMemberPointerType() const
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
bool isObjectType() const
Determine whether this type is an object type.
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types,...
bool hasFloatingRepresentation() const
Determine whether this type has a floating-point representation of some sort, e.g....
bool isAnyPointerType() const
const T * getAs() const
Member-template getAs<specific type>'.
bool isNullPtrType() const
bool isObjCRetainableType() const
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Represents a call to the builtin function __builtin_va_arg.
bool isMicrosoftABI() const
Returns whether this is really a Win64 ABI va_arg expression.
const Expr * getSubExpr() const
Represents a variable declaration or definition.
QualType::DestructionKind needsDestruction(const ASTContext &Ctx) const
Would the destruction of this variable have any effect, and if so, what kind?
Represents a GCC generic vector type.
Defines the clang::TargetInfo interface.
void computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
Compute the ABI information of a swiftcall function.
@ fp_intrinsic_accuracy_high
@ fp_intrinsic_accuracy_medium
@ fp_intrinsic_accuracy_low
@ fp_intrinsic_accuracy_cuda
@ fp_intrinsic_accuracy_sycl
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI)
@ NormalCleanup
Denotes a cleanup that should run when a scope is exited using normal control flow (falling off the e...
void mergeDefaultFunctionDefinitionAttributes(llvm::Function &F, const CodeGenOptions &CodeGenOpts, const LangOptions &LangOpts, const TargetOptions &TargetOpts, bool WillInternalize)
Adds attributes to F according to our CodeGenOpts and LangOpts, as though we had emitted it ourselves...
bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays, bool AsIfNoUniqueAddr=false)
isEmptyRecord - Return true iff a structure contains only empty fields.
constexpr XRayInstrMask All
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
constexpr Variable var(Literal L)
Returns the variable of L.
bool Ret(InterpState &S, CodePtr &PC, APValue &Result)
bool This(InterpState &S, CodePtr OpPC)
bool Load(InterpState &S, CodePtr OpPC)
The JSON file list parser is used to communicate input to InstallAPI.
@ Vector
'vector' clause, allowed on 'loop', Combined, and 'routine' directives.
CXXCtorType
C++ constructor types.
@ Ctor_DefaultClosure
Default closure variant of a ctor.
@ Ctor_CopyingClosure
Copying closure variant of a ctor.
@ Ctor_Complete
Complete object ctor.
bool isUnresolvedExceptionSpec(ExceptionSpecificationType ESpecType)
@ NonNull
Values of this type can never be null.
@ OK_Ordinary
An ordinary object is located at an address in memory.
@ SwiftAsyncContext
This parameter (which must have pointer type) uses the special Swift asynchronous context-pointer ABI...
@ SwiftErrorResult
This parameter (which must have pointer-to-pointer type) uses the special Swift error-result ABI trea...
@ Ordinary
This parameter uses ordinary ABI rules for its type.
@ SwiftIndirectResult
This parameter (which must have pointer type) is a Swift indirect result parameter.
@ SwiftContext
This parameter (which must have pointer type) uses the special Swift context-pointer ABI treatment.
@ Dtor_Complete
Complete object dtor.
@ CanPassInRegs
The argument of this type can be passed directly in registers.
const FunctionProtoType * T
CallingConv
CallingConv - Specifies the calling convention that a function uses.
__DEVICE__ _Tp arg(const std::complex< _Tp > &__c)
Structure with information about how a bitfield should be accessed.
CharUnits StorageOffset
The offset of the bitfield storage from the start of the struct.
unsigned Offset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
unsigned Size
The total size of the bit-field, in bits.
unsigned StorageSize
The storage size in bits which should be used when accessing this bitfield.
Similar to AddedStructorArgs, but only notes the number of additional arguments.
llvm::Value * ToUse
A value to "use" after the writeback, or null.
LValue Source
The original argument.
Address Temporary
The temporary alloca.
LValue getKnownLValue() const
RValue getKnownRValue() const
void copyInto(CodeGenFunction &CGF, Address A) const
RValue getRValue(CodeGenFunction &CGF) const
llvm::BasicBlock * getBlock() const
llvm::IntegerType * Int64Ty
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
llvm::CallingConv::ID getRuntimeCC() const
llvm::IntegerType * SizeTy
llvm::IntegerType * Int32Ty
llvm::IntegerType * IntPtrTy
llvm::PointerType * Int8PtrTy
CharUnits getPointerAlign() const
LangAS getASTAllocaAddressSpace() const
bool isMSVCXXPersonality() const
static const EHPersonality & get(CodeGenModule &CGM, const FunctionDecl *FD)
llvm::Function * objc_retainAutoreleasedReturnValue
id objc_retainAutoreleasedReturnValue(id);
llvm::Function * objc_retain
id objc_retain(id);
llvm::InlineAsm * retainAutoreleasedReturnValueMarker
A void(void) inline asm to use to mark that the return value of a call will be immediately retain.
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
bool hasOneOf(SanitizerMask K) const
Check if one or more sanitizers are enabled.
Iterator for iterating over Stmt * arrays that contain only T *.