clang  19.0.0git
AArch64.cpp
Go to the documentation of this file.
1 //===- AArch64.cpp --------------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
#include "ABIInfoImpl.h"
#include "TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/Basic/DiagnosticFrontend.h"
#include "llvm/TargetParser/AArch64TargetParser.h"
14 
15 using namespace clang;
16 using namespace clang::CodeGen;
17 
18 //===----------------------------------------------------------------------===//
19 // AArch64 ABI Implementation
20 //===----------------------------------------------------------------------===//
21 
22 namespace {
23 
24 class AArch64ABIInfo : public ABIInfo {
26 
27 public:
28  AArch64ABIInfo(CodeGenTypes &CGT, AArch64ABIKind Kind)
29  : ABIInfo(CGT), Kind(Kind) {}
30 
31  bool isSoftFloat() const { return Kind == AArch64ABIKind::AAPCSSoft; }
32 
33 private:
34  AArch64ABIKind getABIKind() const { return Kind; }
35  bool isDarwinPCS() const { return Kind == AArch64ABIKind::DarwinPCS; }
36 
37  ABIArgInfo classifyReturnType(QualType RetTy, bool IsVariadic) const;
38  ABIArgInfo classifyArgumentType(QualType RetTy, bool IsVariadic,
39  unsigned CallingConvention) const;
40  ABIArgInfo coerceIllegalVector(QualType Ty) const;
41  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
42  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
43  uint64_t Members) const override;
44  bool isZeroLengthBitfieldPermittedInHomogeneousAggregate() const override;
45 
46  bool isIllegalVectorType(QualType Ty) const;
47 
48  void computeInfo(CGFunctionInfo &FI) const override {
49  if (!::classifyReturnType(getCXXABI(), FI, *this))
50  FI.getReturnInfo() =
52 
53  for (auto &it : FI.arguments())
54  it.info = classifyArgumentType(it.type, FI.isVariadic(),
56  }
57 
58  Address EmitDarwinVAArg(Address VAListAddr, QualType Ty,
59  CodeGenFunction &CGF) const;
60 
61  Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty, CodeGenFunction &CGF,
62  AArch64ABIKind Kind) const;
63 
64  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
65  QualType Ty) const override {
66  llvm::Type *BaseTy = CGF.ConvertType(Ty);
67  if (isa<llvm::ScalableVectorType>(BaseTy))
68  llvm::report_fatal_error("Passing SVE types to variadic functions is "
69  "currently not supported");
70 
71  return Kind == AArch64ABIKind::Win64 ? EmitMSVAArg(CGF, VAListAddr, Ty)
72  : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
73  : EmitAAPCSVAArg(VAListAddr, Ty, CGF, Kind);
74  }
75 
76  Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
77  QualType Ty) const override;
78 
79  bool allowBFloatArgsAndRet() const override {
80  return getTarget().hasBFloat16Type();
81  }
82 
84  void appendAttributeMangling(TargetClonesAttr *Attr, unsigned Index,
85  raw_ostream &Out) const override;
86  void appendAttributeMangling(StringRef AttrStr,
87  raw_ostream &Out) const override;
88 };
89 
90 class AArch64SwiftABIInfo : public SwiftABIInfo {
91 public:
92  explicit AArch64SwiftABIInfo(CodeGenTypes &CGT)
93  : SwiftABIInfo(CGT, /*SwiftErrorInRegister=*/true) {}
94 
95  bool isLegalVectorType(CharUnits VectorSize, llvm::Type *EltTy,
96  unsigned NumElts) const override;
97 };
98 
99 class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
100 public:
101  AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIKind Kind)
102  : TargetCodeGenInfo(std::make_unique<AArch64ABIInfo>(CGT, Kind)) {
103  SwiftInfo = std::make_unique<AArch64SwiftABIInfo>(CGT);
104  }
105 
106  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
107  return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
108  }
109 
110  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
111  return 31;
112  }
113 
114  bool doesReturnSlotInterfereWithArgs() const override { return false; }
115 
116  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
117  CodeGen::CodeGenModule &CGM) const override {
118  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
119  if (!FD)
120  return;
121 
122  const auto *TA = FD->getAttr<TargetAttr>();
123  if (TA == nullptr)
124  return;
125 
127  CGM.getTarget().parseTargetAttr(TA->getFeaturesStr());
128  if (Attr.BranchProtection.empty())
129  return;
130 
132  StringRef Error;
133  (void)CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
134  Attr.CPU, BPI, Error);
135  assert(Error.empty());
136 
137  auto *Fn = cast<llvm::Function>(GV);
138  Fn->addFnAttr("sign-return-address", BPI.getSignReturnAddrStr());
139 
141  Fn->addFnAttr("sign-return-address-key",
143  ? "a_key"
144  : "b_key");
145  }
146 
147  Fn->addFnAttr("branch-target-enforcement",
148  BPI.BranchTargetEnforcement ? "true" : "false");
149  Fn->addFnAttr("branch-protection-pauth-lr",
150  BPI.BranchProtectionPAuthLR ? "true" : "false");
151  Fn->addFnAttr("guarded-control-stack",
152  BPI.GuardedControlStack ? "true" : "false");
153  }
154 
155  bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF,
156  llvm::Type *Ty) const override {
157  if (CGF.getTarget().hasFeature("ls64")) {
158  auto *ST = dyn_cast<llvm::StructType>(Ty);
159  if (ST && ST->getNumElements() == 1) {
160  auto *AT = dyn_cast<llvm::ArrayType>(ST->getElementType(0));
161  if (AT && AT->getNumElements() == 8 &&
162  AT->getElementType()->isIntegerTy(64))
163  return true;
164  }
165  }
167  }
168 
169  void checkFunctionABI(CodeGenModule &CGM,
170  const FunctionDecl *Decl) const override;
171 
172  void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
173  const FunctionDecl *Caller,
174  const FunctionDecl *Callee, const CallArgList &Args,
175  QualType ReturnType) const override;
176 
177 private:
178  // Diagnose calls between functions with incompatible Streaming SVE
179  // attributes.
180  void checkFunctionCallABIStreaming(CodeGenModule &CGM, SourceLocation CallLoc,
181  const FunctionDecl *Caller,
182  const FunctionDecl *Callee) const;
183  // Diagnose calls which must pass arguments in floating-point registers when
184  // the selected target does not have floating-point registers.
185  void checkFunctionCallABISoftFloat(CodeGenModule &CGM, SourceLocation CallLoc,
186  const FunctionDecl *Caller,
187  const FunctionDecl *Callee,
188  const CallArgList &Args,
189  QualType ReturnType) const;
190 };
191 
192 class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo {
193 public:
194  WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIKind K)
195  : AArch64TargetCodeGenInfo(CGT, K) {}
196 
197  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
198  CodeGen::CodeGenModule &CGM) const override;
199 
200  void getDependentLibraryOption(llvm::StringRef Lib,
201  llvm::SmallString<24> &Opt) const override {
202  Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
203  }
204 
205  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
206  llvm::SmallString<32> &Opt) const override {
207  Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
208  }
209 };
210 
211 void WindowsAArch64TargetCodeGenInfo::setTargetAttributes(
212  const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
213  AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
214  if (GV->isDeclaration())
215  return;
216  addStackProbeTargetAttributes(D, GV, CGM);
217 }
218 }
219 
220 ABIArgInfo AArch64ABIInfo::coerceIllegalVector(QualType Ty) const {
221  assert(Ty->isVectorType() && "expected vector type!");
222 
223  const auto *VT = Ty->castAs<VectorType>();
224  if (VT->getVectorKind() == VectorKind::SveFixedLengthPredicate) {
225  assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");
226  assert(VT->getElementType()->castAs<BuiltinType>()->getKind() ==
227  BuiltinType::UChar &&
228  "unexpected builtin type for SVE predicate!");
229  return ABIArgInfo::getDirect(llvm::ScalableVectorType::get(
230  llvm::Type::getInt1Ty(getVMContext()), 16));
231  }
232 
233  if (VT->getVectorKind() == VectorKind::SveFixedLengthData) {
234  assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");
235 
236  const auto *BT = VT->getElementType()->castAs<BuiltinType>();
237  llvm::ScalableVectorType *ResType = nullptr;
238  switch (BT->getKind()) {
239  default:
240  llvm_unreachable("unexpected builtin type for SVE vector!");
241  case BuiltinType::SChar:
242  case BuiltinType::UChar:
243  ResType = llvm::ScalableVectorType::get(
244  llvm::Type::getInt8Ty(getVMContext()), 16);
245  break;
246  case BuiltinType::Short:
247  case BuiltinType::UShort:
248  ResType = llvm::ScalableVectorType::get(
249  llvm::Type::getInt16Ty(getVMContext()), 8);
250  break;
251  case BuiltinType::Int:
252  case BuiltinType::UInt:
253  ResType = llvm::ScalableVectorType::get(
254  llvm::Type::getInt32Ty(getVMContext()), 4);
255  break;
256  case BuiltinType::Long:
257  case BuiltinType::ULong:
258  ResType = llvm::ScalableVectorType::get(
259  llvm::Type::getInt64Ty(getVMContext()), 2);
260  break;
261  case BuiltinType::Half:
262  ResType = llvm::ScalableVectorType::get(
263  llvm::Type::getHalfTy(getVMContext()), 8);
264  break;
265  case BuiltinType::Float:
266  ResType = llvm::ScalableVectorType::get(
267  llvm::Type::getFloatTy(getVMContext()), 4);
268  break;
269  case BuiltinType::Double:
270  ResType = llvm::ScalableVectorType::get(
271  llvm::Type::getDoubleTy(getVMContext()), 2);
272  break;
273  case BuiltinType::BFloat16:
274  ResType = llvm::ScalableVectorType::get(
275  llvm::Type::getBFloatTy(getVMContext()), 8);
276  break;
277  }
278  return ABIArgInfo::getDirect(ResType);
279  }
280 
281  uint64_t Size = getContext().getTypeSize(Ty);
282  // Android promotes <2 x i8> to i16, not i32
283  if ((isAndroid() || isOHOSFamily()) && (Size <= 16)) {
284  llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
285  return ABIArgInfo::getDirect(ResType);
286  }
287  if (Size <= 32) {
288  llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
289  return ABIArgInfo::getDirect(ResType);
290  }
291  if (Size == 64) {
292  auto *ResType =
293  llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
294  return ABIArgInfo::getDirect(ResType);
295  }
296  if (Size == 128) {
297  auto *ResType =
298  llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
299  return ABIArgInfo::getDirect(ResType);
300  }
301  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
302 }
303 
306  unsigned CallingConvention) const {
308 
309  // Handle illegal vector types here.
310  if (isIllegalVectorType(Ty))
311  return coerceIllegalVector(Ty);
312 
313  if (!isAggregateTypeForABI(Ty)) {
314  // Treat an enum type as its underlying type.
315  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
316  Ty = EnumTy->getDecl()->getIntegerType();
317 
318  if (const auto *EIT = Ty->getAs<BitIntType>())
319  if (EIT->getNumBits() > 128)
320  return getNaturalAlignIndirect(Ty, false);
321 
322  return (isPromotableIntegerTypeForABI(Ty) && isDarwinPCS()
325  }
326 
327  // Structures with either a non-trivial destructor or a non-trivial
328  // copy constructor are always indirect.
329  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
330  return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
332  }
333 
334  // Empty records are always ignored on Darwin, but actually passed in C++ mode
335  // elsewhere for GNU compatibility.
336  uint64_t Size = getContext().getTypeSize(Ty);
337  bool IsEmpty = isEmptyRecord(getContext(), Ty, true);
338  if (IsEmpty || Size == 0) {
339  if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
340  return ABIArgInfo::getIgnore();
341 
342  // GNU C mode. The only argument that gets ignored is an empty one with size
343  // 0.
344  if (IsEmpty && Size == 0)
345  return ABIArgInfo::getIgnore();
346  return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
347  }
348 
349  // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
350  const Type *Base = nullptr;
351  uint64_t Members = 0;
352  bool IsWin64 = Kind == AArch64ABIKind::Win64 ||
353  CallingConvention == llvm::CallingConv::Win64;
354  bool IsWinVariadic = IsWin64 && IsVariadic;
355  // In variadic functions on Windows, all composite types are treated alike,
356  // no special handling of HFAs/HVAs.
357  if (!IsWinVariadic && isHomogeneousAggregate(Ty, Base, Members)) {
359  return ABIArgInfo::getDirect(
360  llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));
361 
362  // For HFAs/HVAs, cap the argument alignment to 16, otherwise
363  // set it to 8 according to the AAPCS64 document.
364  unsigned Align =
365  getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
366  Align = (Align >= 16) ? 16 : 8;
367  return ABIArgInfo::getDirect(
368  llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members), 0,
369  nullptr, true, Align);
370  }
371 
372  // Aggregates <= 16 bytes are passed directly in registers or on the stack.
373  if (Size <= 128) {
374  // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of
375  // same size and alignment.
376  if (getTarget().isRenderScriptTarget()) {
377  return coerceToIntArray(Ty, getContext(), getVMContext());
378  }
379  unsigned Alignment;
380  if (Kind == AArch64ABIKind::AAPCS) {
381  Alignment = getContext().getTypeUnadjustedAlign(Ty);
382  Alignment = Alignment < 128 ? 64 : 128;
383  } else {
384  Alignment =
385  std::max(getContext().getTypeAlign(Ty),
386  (unsigned)getTarget().getPointerWidth(LangAS::Default));
387  }
388  Size = llvm::alignTo(Size, Alignment);
389 
390  // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
391  // For aggregates with 16-byte alignment, we use i128.
392  llvm::Type *BaseTy = llvm::Type::getIntNTy(getVMContext(), Alignment);
393  return ABIArgInfo::getDirect(
394  Size == Alignment ? BaseTy
395  : llvm::ArrayType::get(BaseTy, Size / Alignment));
396  }
397 
398  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
399 }
400 
402  bool IsVariadic) const {
403  if (RetTy->isVoidType())
404  return ABIArgInfo::getIgnore();
405 
406  if (const auto *VT = RetTy->getAs<VectorType>()) {
407  if (VT->getVectorKind() == VectorKind::SveFixedLengthData ||
408  VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
409  return coerceIllegalVector(RetTy);
410  }
411 
412  // Large vector types should be returned via memory.
413  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
414  return getNaturalAlignIndirect(RetTy);
415 
416  if (!isAggregateTypeForABI(RetTy)) {
417  // Treat an enum type as its underlying type.
418  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
419  RetTy = EnumTy->getDecl()->getIntegerType();
420 
421  if (const auto *EIT = RetTy->getAs<BitIntType>())
422  if (EIT->getNumBits() > 128)
423  return getNaturalAlignIndirect(RetTy);
424 
425  return (isPromotableIntegerTypeForABI(RetTy) && isDarwinPCS()
426  ? ABIArgInfo::getExtend(RetTy)
428  }
429 
430  uint64_t Size = getContext().getTypeSize(RetTy);
431  if (isEmptyRecord(getContext(), RetTy, true) || Size == 0)
432  return ABIArgInfo::getIgnore();
433 
434  const Type *Base = nullptr;
435  uint64_t Members = 0;
436  if (isHomogeneousAggregate(RetTy, Base, Members) &&
437  !(getTarget().getTriple().getArch() == llvm::Triple::aarch64_32 &&
438  IsVariadic))
439  // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
440  return ABIArgInfo::getDirect();
441 
442  // Aggregates <= 16 bytes are returned directly in registers or on the stack.
443  if (Size <= 128) {
444  // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of
445  // same size and alignment.
446  if (getTarget().isRenderScriptTarget()) {
447  return coerceToIntArray(RetTy, getContext(), getVMContext());
448  }
449 
450  if (Size <= 64 && getDataLayout().isLittleEndian()) {
451  // Composite types are returned in lower bits of a 64-bit register for LE,
452  // and in higher bits for BE. However, integer types are always returned
453  // in lower bits for both LE and BE, and they are not rounded up to
454  // 64-bits. We can skip rounding up of composite types for LE, but not for
455  // BE, otherwise composite types will be indistinguishable from integer
456  // types.
457  return ABIArgInfo::getDirect(
458  llvm::IntegerType::get(getVMContext(), Size));
459  }
460 
461  unsigned Alignment = getContext().getTypeAlign(RetTy);
462  Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes
463 
464  // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
465  // For aggregates with 16-byte alignment, we use i128.
466  if (Alignment < 128 && Size == 128) {
467  llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
468  return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
469  }
470  return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
471  }
472 
473  return getNaturalAlignIndirect(RetTy);
474 }
475 
476 /// isIllegalVectorType - check whether the vector type is legal for AArch64.
477 bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
478  if (const VectorType *VT = Ty->getAs<VectorType>()) {
479  // Check whether VT is a fixed-length SVE vector. These types are
480  // represented as scalable vectors in function args/return and must be
481  // coerced from fixed vectors.
482  if (VT->getVectorKind() == VectorKind::SveFixedLengthData ||
483  VT->getVectorKind() == VectorKind::SveFixedLengthPredicate)
484  return true;
485 
486  // Check whether VT is legal.
487  unsigned NumElements = VT->getNumElements();
488  uint64_t Size = getContext().getTypeSize(VT);
489  // NumElements should be power of 2.
490  if (!llvm::isPowerOf2_32(NumElements))
491  return true;
492 
493  // arm64_32 has to be compatible with the ARM logic here, which allows huge
494  // vectors for some reason.
495  llvm::Triple Triple = getTarget().getTriple();
496  if (Triple.getArch() == llvm::Triple::aarch64_32 &&
497  Triple.isOSBinFormatMachO())
498  return Size <= 32;
499 
500  return Size != 64 && (Size != 128 || NumElements == 1);
501  }
502  return false;
503 }
504 
506  llvm::Type *EltTy,
507  unsigned NumElts) const {
508  if (!llvm::isPowerOf2_32(NumElts))
509  return false;
510  if (VectorSize.getQuantity() != 8 &&
511  (VectorSize.getQuantity() != 16 || NumElts == 1))
512  return false;
513  return true;
514 }
515 
516 bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
517  // For the soft-float ABI variant, no types are considered to be homogeneous
518  // aggregates.
520  return false;
521 
522  // Homogeneous aggregates for AAPCS64 must have base types of a floating
523  // point type or a short-vector type. This is the same as the 32-bit ABI,
524  // but with the difference that any floating-point type is allowed,
525  // including __fp16.
526  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
527  if (BT->isFloatingPoint())
528  return true;
529  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
530  unsigned VecSize = getContext().getTypeSize(VT);
531  if (VecSize == 64 || VecSize == 128)
532  return true;
533  }
534  return false;
535 }
536 
537 bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
538  uint64_t Members) const {
539  return Members <= 4;
540 }
541 
542 bool AArch64ABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate()
543  const {
544  // AAPCS64 says that the rule for whether something is a homogeneous
545  // aggregate is applied to the output of the data layout decision. So
546  // anything that doesn't affect the data layout also does not affect
547  // homogeneity. In particular, zero-length bitfields don't stop a struct
548  // being homogeneous.
549  return true;
550 }
551 
552 Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
553  CodeGenFunction &CGF,
554  AArch64ABIKind Kind) const {
555  ABIArgInfo AI = classifyArgumentType(Ty, /*IsVariadic=*/true,
557  // Empty records are ignored for parameter passing purposes.
558  if (AI.isIgnore()) {
559  uint64_t PointerSize = getTarget().getPointerWidth(LangAS::Default) / 8;
560  CharUnits SlotSize = CharUnits::fromQuantity(PointerSize);
561  VAListAddr = VAListAddr.withElementType(CGF.Int8PtrTy);
562  auto *Load = CGF.Builder.CreateLoad(VAListAddr);
563  return Address(Load, CGF.ConvertTypeForMem(Ty), SlotSize);
564  }
565 
566  bool IsIndirect = AI.isIndirect();
567 
568  llvm::Type *BaseTy = CGF.ConvertType(Ty);
569  if (IsIndirect)
570  BaseTy = llvm::PointerType::getUnqual(BaseTy);
571  else if (AI.getCoerceToType())
572  BaseTy = AI.getCoerceToType();
573 
574  unsigned NumRegs = 1;
575  if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
576  BaseTy = ArrTy->getElementType();
577  NumRegs = ArrTy->getNumElements();
578  }
579  bool IsFPR = Kind != AArch64ABIKind::AAPCSSoft &&
580  (BaseTy->isFloatingPointTy() || BaseTy->isVectorTy());
581 
582  // The AArch64 va_list type and handling is specified in the Procedure Call
583  // Standard, section B.4:
584  //
585  // struct {
586  // void *__stack;
587  // void *__gr_top;
588  // void *__vr_top;
589  // int __gr_offs;
590  // int __vr_offs;
591  // };
592 
593  llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
594  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
595  llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
596  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
597 
598  CharUnits TySize = getContext().getTypeSizeInChars(Ty);
599  CharUnits TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty);
600 
601  Address reg_offs_p = Address::invalid();
602  llvm::Value *reg_offs = nullptr;
603  int reg_top_index;
604  int RegSize = IsIndirect ? 8 : TySize.getQuantity();
605  if (!IsFPR) {
606  // 3 is the field number of __gr_offs
607  reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
608  reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
609  reg_top_index = 1; // field number for __gr_top
610  RegSize = llvm::alignTo(RegSize, 8);
611  } else {
612  // 4 is the field number of __vr_offs.
613  reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
614  reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
615  reg_top_index = 2; // field number for __vr_top
616  RegSize = 16 * NumRegs;
617  }
618 
619  //=======================================
620  // Find out where argument was passed
621  //=======================================
622 
623  // If reg_offs >= 0 we're already using the stack for this type of
624  // argument. We don't want to keep updating reg_offs (in case it overflows,
625  // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
626  // whatever they get).
627  llvm::Value *UsingStack = nullptr;
628  UsingStack = CGF.Builder.CreateICmpSGE(
629  reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));
630 
631  CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
632 
633  // Otherwise, at least some kind of argument could go in these registers, the
634  // question is whether this particular type is too big.
635  CGF.EmitBlock(MaybeRegBlock);
636 
637  // Integer arguments may need to correct register alignment (for example a
638  // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
639  // align __gr_offs to calculate the potential address.
640  if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
641  int Align = TyAlign.getQuantity();
642 
643  reg_offs = CGF.Builder.CreateAdd(
644  reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
645  "align_regoffs");
646  reg_offs = CGF.Builder.CreateAnd(
647  reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
648  "aligned_regoffs");
649  }
650 
651  // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list.
652  // The fact that this is done unconditionally reflects the fact that
653  // allocating an argument to the stack also uses up all the remaining
654  // registers of the appropriate kind.
655  llvm::Value *NewOffset = nullptr;
656  NewOffset = CGF.Builder.CreateAdd(
657  reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
658  CGF.Builder.CreateStore(NewOffset, reg_offs_p);
659 
660  // Now we're in a position to decide whether this argument really was in
661  // registers or not.
662  llvm::Value *InRegs = nullptr;
663  InRegs = CGF.Builder.CreateICmpSLE(
664  NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");
665 
666  CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
667 
668  //=======================================
669  // Argument was in registers
670  //=======================================
671 
672  // Now we emit the code for if the argument was originally passed in
673  // registers. First start the appropriate block:
674  CGF.EmitBlock(InRegBlock);
675 
676  llvm::Value *reg_top = nullptr;
677  Address reg_top_p =
678  CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
679  reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
680  Address BaseAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, reg_top, reg_offs),
681  CGF.Int8Ty, CharUnits::fromQuantity(IsFPR ? 16 : 8));
682  Address RegAddr = Address::invalid();
683  llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty), *ElementTy = MemTy;
684 
685  if (IsIndirect) {
686  // If it's been passed indirectly (actually a struct), whatever we find from
687  // stored registers or on the stack will actually be a struct **.
688  MemTy = llvm::PointerType::getUnqual(MemTy);
689  }
690 
691  const Type *Base = nullptr;
692  uint64_t NumMembers = 0;
693  bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
694  if (IsHFA && NumMembers > 1) {
695  // Homogeneous aggregates passed in registers will have their elements split
696  // and stored 16-bytes apart regardless of size (they're notionally in qN,
697  // qN+1, ...). We reload and store into a temporary local variable
698  // contiguously.
699  assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
700  auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
701  llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
702  llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
703  Address Tmp = CGF.CreateTempAlloca(HFATy,
704  std::max(TyAlign, BaseTyInfo.Align));
705 
706  // On big-endian platforms, the value will be right-aligned in its slot.
707  int Offset = 0;
708  if (CGF.CGM.getDataLayout().isBigEndian() &&
709  BaseTyInfo.Width.getQuantity() < 16)
710  Offset = 16 - BaseTyInfo.Width.getQuantity();
711 
712  for (unsigned i = 0; i < NumMembers; ++i) {
713  CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset);
714  Address LoadAddr =
715  CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset);
716  LoadAddr = LoadAddr.withElementType(BaseTy);
717 
718  Address StoreAddr = CGF.Builder.CreateConstArrayGEP(Tmp, i);
719 
720  llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
721  CGF.Builder.CreateStore(Elem, StoreAddr);
722  }
723 
724  RegAddr = Tmp.withElementType(MemTy);
725  } else {
726  // Otherwise the object is contiguous in memory.
727 
728  // It might be right-aligned in its slot.
729  CharUnits SlotSize = BaseAddr.getAlignment();
730  if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
731  (IsHFA || !isAggregateTypeForABI(Ty)) &&
732  TySize < SlotSize) {
733  CharUnits Offset = SlotSize - TySize;
734  BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset);
735  }
736 
737  RegAddr = BaseAddr.withElementType(MemTy);
738  }
739 
740  CGF.EmitBranch(ContBlock);
741 
742  //=======================================
743  // Argument was on the stack
744  //=======================================
745  CGF.EmitBlock(OnStackBlock);
746 
747  Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
748  llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack");
749 
750  // Again, stack arguments may need realignment. In this case both integer and
751  // floating-point ones might be affected.
752  if (!IsIndirect && TyAlign.getQuantity() > 8) {
753  OnStackPtr = emitRoundPointerUpToAlignment(CGF, OnStackPtr, TyAlign);
754  }
755  Address OnStackAddr = Address(OnStackPtr, CGF.Int8Ty,
756  std::max(CharUnits::fromQuantity(8), TyAlign));
757 
758  // All stack slots are multiples of 8 bytes.
759  CharUnits StackSlotSize = CharUnits::fromQuantity(8);
760  CharUnits StackSize;
761  if (IsIndirect)
762  StackSize = StackSlotSize;
763  else
764  StackSize = TySize.alignTo(StackSlotSize);
765 
766  llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
767  llvm::Value *NewStack = CGF.Builder.CreateInBoundsGEP(
768  CGF.Int8Ty, OnStackPtr, StackSizeC, "new_stack");
769 
770  // Write the new value of __stack for the next call to va_arg
771  CGF.Builder.CreateStore(NewStack, stack_p);
772 
773  if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
774  TySize < StackSlotSize) {
775  CharUnits Offset = StackSlotSize - TySize;
776  OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset);
777  }
778 
779  OnStackAddr = OnStackAddr.withElementType(MemTy);
780 
781  CGF.EmitBranch(ContBlock);
782 
783  //=======================================
784  // Tidy up
785  //=======================================
786  CGF.EmitBlock(ContBlock);
787 
788  Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, OnStackAddr,
789  OnStackBlock, "vaargs.addr");
790 
791  if (IsIndirect)
792  return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"), ElementTy,
793  TyAlign);
794 
795  return ResAddr;
796 }
797 
798 Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
799  CodeGenFunction &CGF) const {
800  // The backend's lowering doesn't support va_arg for aggregates or
801  // illegal vector types. Lower VAArg here for these cases and use
802  // the LLVM va_arg instruction for everything else.
803  if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
804  return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
805 
806  uint64_t PointerSize = getTarget().getPointerWidth(LangAS::Default) / 8;
807  CharUnits SlotSize = CharUnits::fromQuantity(PointerSize);
808 
809  // Empty records are ignored for parameter passing purposes.
810  if (isEmptyRecord(getContext(), Ty, true))
811  return Address(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"),
812  CGF.ConvertTypeForMem(Ty), SlotSize);
813 
814  // The size of the actual thing passed, which might end up just
815  // being a pointer for indirect types.
816  auto TyInfo = getContext().getTypeInfoInChars(Ty);
817 
818  // Arguments bigger than 16 bytes which aren't homogeneous
819  // aggregates should be passed indirectly.
820  bool IsIndirect = false;
821  if (TyInfo.Width.getQuantity() > 16) {
822  const Type *Base = nullptr;
823  uint64_t Members = 0;
824  IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
825  }
826 
827  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
828  TyInfo, SlotSize, /*AllowHigherAlign*/ true);
829 }
830 
831 Address AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
832  QualType Ty) const {
833  bool IsIndirect = false;
834 
835  // Composites larger than 16 bytes are passed by reference.
836  if (isAggregateTypeForABI(Ty) && getContext().getTypeSize(Ty) > 128)
837  IsIndirect = true;
838 
839  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
840  CGF.getContext().getTypeInfoInChars(Ty),
842  /*allowHigherAlign*/ false);
843 }
844 
845 static bool isStreamingCompatible(const FunctionDecl *F) {
846  if (const auto *T = F->getType()->getAs<FunctionProtoType>())
847  return T->getAArch64SMEAttributes() &
849  return false;
850 }
851 
852 // Report an error if an argument or return value of type Ty would need to be
853 // passed in a floating-point register.
855  const StringRef ABIName,
856  const AArch64ABIInfo &ABIInfo,
857  const QualType &Ty, const NamedDecl *D) {
858  const Type *HABase = nullptr;
859  uint64_t HAMembers = 0;
860  if (Ty->isFloatingType() || Ty->isVectorType() ||
861  ABIInfo.isHomogeneousAggregate(Ty, HABase, HAMembers)) {
862  Diags.Report(D->getLocation(), diag::err_target_unsupported_type_for_abi)
863  << D->getDeclName() << Ty << ABIName;
864  }
865 }
866 
867 // If we are using a hard-float ABI, but do not have floating point registers,
868 // then report an error for any function arguments or returns which would be
869 // passed in floating-pint registers.
870 void AArch64TargetCodeGenInfo::checkFunctionABI(
871  CodeGenModule &CGM, const FunctionDecl *FuncDecl) const {
872  const AArch64ABIInfo &ABIInfo = getABIInfo<AArch64ABIInfo>();
873  const TargetInfo &TI = ABIInfo.getContext().getTargetInfo();
874 
875  if (!TI.hasFeature("fp") && !ABIInfo.isSoftFloat()) {
877  FuncDecl->getReturnType(), FuncDecl);
878  for (ParmVarDecl *PVD : FuncDecl->parameters()) {
879  diagnoseIfNeedsFPReg(CGM.getDiags(), TI.getABI(), ABIInfo, PVD->getType(),
880  PVD);
881  }
882  }
883 }
884 
885 void AArch64TargetCodeGenInfo::checkFunctionCallABIStreaming(
886  CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
887  const FunctionDecl *Callee) const {
888  if (!Caller || !Callee || !Callee->hasAttr<AlwaysInlineAttr>())
889  return;
890 
891  bool CallerIsStreaming =
892  IsArmStreamingFunction(Caller, /*IncludeLocallyStreaming=*/true);
893  bool CalleeIsStreaming =
894  IsArmStreamingFunction(Callee, /*IncludeLocallyStreaming=*/true);
895  bool CallerIsStreamingCompatible = isStreamingCompatible(Caller);
896  bool CalleeIsStreamingCompatible = isStreamingCompatible(Callee);
897 
898  if (!CalleeIsStreamingCompatible &&
899  (CallerIsStreaming != CalleeIsStreaming || CallerIsStreamingCompatible))
900  CGM.getDiags().Report(CallLoc,
901  diag::err_function_always_inline_attribute_mismatch)
902  << Caller->getDeclName() << Callee->getDeclName() << "streaming";
903  if (auto *NewAttr = Callee->getAttr<ArmNewAttr>())
904  if (NewAttr->isNewZA())
905  CGM.getDiags().Report(CallLoc, diag::err_function_always_inline_new_za)
906  << Callee->getDeclName();
907 }
908 
909 // If the target does not have floating-point registers, but we are using a
910 // hard-float ABI, there is no way to pass floating-point, vector or HFA values
911 // to functions, so we report an error.
912 void AArch64TargetCodeGenInfo::checkFunctionCallABISoftFloat(
913  CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
914  const FunctionDecl *Callee, const CallArgList &Args,
915  QualType ReturnType) const {
916  const AArch64ABIInfo &ABIInfo = getABIInfo<AArch64ABIInfo>();
917  const TargetInfo &TI = ABIInfo.getContext().getTargetInfo();
918 
919  if (!Caller || TI.hasFeature("fp") || ABIInfo.isSoftFloat())
920  return;
921 
922  diagnoseIfNeedsFPReg(CGM.getDiags(), TI.getABI(), ABIInfo, ReturnType,
923  Caller);
924 
925  for (const CallArg &Arg : Args)
926  diagnoseIfNeedsFPReg(CGM.getDiags(), TI.getABI(), ABIInfo, Arg.getType(),
927  Caller);
928 }
929 
// Run all AArch64-specific per-call-site ABI checks: the SME streaming-mode
// always-inline check, followed by the soft-float (no FP registers) check.
void AArch64TargetCodeGenInfo::checkFunctionCallABI(CodeGenModule &CGM,
                                                    SourceLocation CallLoc,
                                                    const FunctionDecl *Caller,
                                                    const FunctionDecl *Callee,
                                                    const CallArgList &Args,
                                                    QualType ReturnType) const {
  checkFunctionCallABIStreaming(CGM, CallLoc, Caller, Callee);
  checkFunctionCallABISoftFloat(CGM, CallLoc, Caller, Callee, Args, ReturnType);
}
939 
// Mangle the Index'th variant of a target_clones attribute by delegating to
// the StringRef overload with that variant's feature string.
void AArch64ABIInfo::appendAttributeMangling(TargetClonesAttr *Attr,
                                             unsigned Index,
                                             raw_ostream &Out) const {
  appendAttributeMangling(Attr->getFeatureStr(Index), Out);
}
945 
946 void AArch64ABIInfo::appendAttributeMangling(StringRef AttrStr,
947  raw_ostream &Out) const {
948  if (AttrStr == "default") {
949  Out << ".default";
950  return;
951  }
952 
953  Out << "._";
954  SmallVector<StringRef, 8> Features;
955  AttrStr.split(Features, "+");
956  for (auto &Feat : Features)
957  Feat = Feat.trim();
958 
959  llvm::sort(Features, [](const StringRef LHS, const StringRef RHS) {
960  return LHS.compare(RHS) < 0;
961  });
962 
963  llvm::SmallDenseSet<StringRef, 8> UniqueFeats;
964  for (auto &Feat : Features)
965  if (auto Ext = llvm::AArch64::parseArchExtension(Feat))
966  if (UniqueFeats.insert(Ext->Name).second)
967  Out << 'M' << Ext->Name;
968 }
969 
970 std::unique_ptr<TargetCodeGenInfo>
973  return std::make_unique<AArch64TargetCodeGenInfo>(CGM.getTypes(), Kind);
974 }
975 
976 std::unique_ptr<TargetCodeGenInfo>
978  AArch64ABIKind K) {
979  return std::make_unique<WindowsAArch64TargetCodeGenInfo>(CGM.getTypes(), K);
980 }
static bool isStreamingCompatible(const FunctionDecl *F)
Definition: AArch64.cpp:845
static void diagnoseIfNeedsFPReg(DiagnosticsEngine &Diags, const StringRef ABIName, const AArch64ABIInfo &ABIInfo, const QualType &Ty, const NamedDecl *D)
Definition: AArch64.cpp:854
unsigned Offset
Definition: Format.cpp:2978
__DEVICE__ int max(int __a, int __b)
TypeInfoChars getTypeInfoInChars(const Type *T) const
const TargetInfo & getTargetInfo() const
Definition: ASTContext.h:760
Attr - This represents one attribute.
Definition: Attr.h:46
A fixed int type of a specified bitwidth.
Definition: Type.h:7254
This class is used for builtin types like 'int'.
Definition: Type.h:2989
Kind getKind() const
Definition: Type.h:3035
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:185
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
CharUnits alignTo(const CharUnits &Align) const
alignTo - Returns the next integer (mod 2**64) that is greater than or equal to this quantity and is ...
Definition: CharUnits.h:201
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
static ABIArgInfo getIgnore()
static ABIArgInfo getDirect(llvm::Type *T=nullptr, unsigned Offset=0, llvm::Type *Padding=nullptr, bool CanBeFlattened=true, unsigned Align=0)
static ABIArgInfo getExtend(QualType Ty, llvm::Type *T=nullptr)
llvm::Type * getCoerceToType() const
ABIInfo - Target specific hooks for defining how a type should be passed or returned from functions.
Definition: ABIInfo.h:45
bool isHomogeneousAggregate(QualType Ty, const Type *&Base, uint64_t &Members) const
isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous aggregate.
Definition: ABIInfo.cpp:61
ASTContext & getContext() const
Definition: ABIInfo.cpp:20
virtual void appendAttributeMangling(TargetAttr *Attr, raw_ostream &Out) const
Definition: ABIInfo.cpp:187
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition: Address.h:111
static Address invalid()
Definition: Address.h:153
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition: Address.h:241
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
Definition: CGBuilder.h:305
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:136
Address CreateConstArrayGEP(Address Addr, uint64_t Index, const llvm::Twine &Name="")
Given addr = [n x T]* ...
Definition: CGBuilder.h:241
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
Definition: CGBuilder.h:219
llvm::ConstantInt * getSize(CharUnits N)
Definition: CGBuilder.h:99
Address CreateInBoundsGEP(Address Addr, ArrayRef< llvm::Value * > IdxList, llvm::Type *ElementType, CharUnits Align, const Twine &Name="")
Definition: CGBuilder.h:345
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:108
RecordArgABI
Specify how one should pass an argument of a record type.
Definition: CGCXXABI.h:150
@ RAA_DirectInMemory
Pass it on the stack using its defined layout.
Definition: CGCXXABI.h:158
CGFunctionInfo - Class to encapsulate the information about a function definition.
unsigned getCallingConvention() const
getCallingConvention - Return the user specified calling convention, which has been translated into a...
CanQualType getReturnType() const
MutableArrayRef< ArgInfo > arguments()
CallArgList - Type for representing both the value and type of arguments in a call.
Definition: CGCall.h:257
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Type * ConvertType(QualType T)
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
Definition: CGExpr.cpp:116
const TargetInfo & getTarget() const
llvm::Type * ConvertTypeForMem(QualType T)
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
Definition: CGStmt.cpp:598
const CGFunctionInfo * CurFnInfo
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition: CGStmt.cpp:578
This class organizes the cross-function state that is used while generating LLVM code.
const TargetInfo & getTarget() const
const llvm::DataLayout & getDataLayout() const
DiagnosticsEngine & getDiags() const
This class organizes the cross-module state that is used while lowering AST types to LLVM types.
Definition: CodeGenTypes.h:54
Target specific hooks for defining how a type should be passed or returned from functions with one of...
Definition: ABIInfo.h:128
TargetCodeGenInfo - This class organizes various target-specific codegeneration issues,...
Definition: TargetInfo.h:46
virtual bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF, llvm::Type *Ty) const
Target hook to decide whether an inline asm operand can be passed by value.
Definition: TargetInfo.h:179
Decl - This represents one declaration (or definition), e.g.
Definition: DeclBase.h:86
SourceLocation getLocation() const
Definition: DeclBase.h:445
T * getAttr() const
Definition: DeclBase.h:579
Concrete class used by the front-end to report problems and issues.
Definition: Diagnostic.h:193
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
Definition: Diagnostic.h:1553
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of enums.
Definition: Type.h:5587
Represents a function declaration or definition.
Definition: Decl.h:1972
QualType getReturnType() const
Definition: Decl.h:2757
ArrayRef< ParmVarDecl * > parameters() const
Definition: Decl.h:2686
Represents a prototype with parameter type info, e.g.
Definition: Type.h:4668
unsigned getAArch64SMEAttributes() const
Return a bitmask describing the SME attributes on the function type, see AArch64SMETypeAttributes for...
Definition: Type.h:5106
@ SME_PStateSMCompatibleMask
Definition: Type.h:4530
@ AKey
Return address signing uses APIA key.
This represents a decl that may have a name.
Definition: Decl.h:249
DeclarationName getDeclName() const
Get the actual, stored name of the declaration, which may be a special name.
Definition: Decl.h:315
Represents a parameter to a function.
Definition: Decl.h:1762
A (possibly-)qualified type.
Definition: Type.h:940
Encodes a location in the source.
Exposes information about the current target.
Definition: TargetInfo.h:218
virtual StringRef getABI() const
Get the ABI currently in use.
Definition: TargetInfo.h:1324
virtual ParsedTargetAttr parseTargetAttr(StringRef Str) const
Definition: TargetInfo.cpp:582
virtual bool hasFeature(StringRef Feature) const
Determine whether the given target has the given feature.
Definition: TargetInfo.h:1472
virtual bool validateBranchProtection(StringRef Spec, StringRef Arch, BranchProtectionInfo &BPI, StringRef &Err) const
Determine if this TargetInfo supports the given branch protection specification.
Definition: TargetInfo.h:1448
The base class of the type hierarchy.
Definition: Type.h:1813
bool isVoidType() const
Definition: Type.h:7939
const T * castAs() const
Member-template castAs<specific type>.
Definition: Type.h:8227
bool isVectorType() const
Definition: Type.h:7730
bool isFloatingType() const
Definition: Type.cpp:2248
const T * getAs() const
Member-template getAs<specific type>'.
Definition: Type.h:8160
QualType getType() const
Definition: Decl.h:718
Represents a GCC generic vector type.
Definition: Type.h:3981
ABIArgInfo classifyReturnType(CodeGenModule &CGM, CanQualType type)
Classify the rules for how to return a particular type.
ABIArgInfo classifyArgumentType(CodeGenModule &CGM, CanQualType type)
Classify the rules for how to pass a particular type.
bool isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize, llvm::VectorType *vectorTy)
Is the given vector type "legal" for Swift's perspective on the current platform?
CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT, CGCXXABI &CXXABI)
bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI, const ABIInfo &Info)
Address EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr, QualType Ty, const ABIArgInfo &AI)
Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr, QualType ValueTy, bool IsIndirect, TypeInfoChars ValueInfo, CharUnits SlotSizeAndAlign, bool AllowHigherAlign, bool ForceRightAdjust=false)
Emit va_arg for a platform using the common void* representation, where arguments are simply emitted ...
Address emitMergePHI(CodeGenFunction &CGF, Address Addr1, llvm::BasicBlock *Block1, Address Addr2, llvm::BasicBlock *Block2, const llvm::Twine &Name="")
ABIArgInfo coerceToIntArray(QualType Ty, ASTContext &Context, llvm::LLVMContext &LLVMContext)
Definition: ABIInfoImpl.cpp:79
llvm::Value * emitRoundPointerUpToAlignment(CodeGenFunction &CGF, llvm::Value *Ptr, CharUnits Align)
bool isAggregateTypeForABI(QualType T)
std::unique_ptr< TargetCodeGenInfo > createAArch64TargetCodeGenInfo(CodeGenModule &CGM, AArch64ABIKind Kind)
Definition: AArch64.cpp:971
QualType useFirstFieldIfTransparentUnion(QualType Ty)
Pass transparent unions as if they were the type of the first element.
std::unique_ptr< TargetCodeGenInfo > createWindowsAArch64TargetCodeGenInfo(CodeGenModule &CGM, AArch64ABIKind K)
Definition: AArch64.cpp:977
bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays, bool AsIfNoUniqueAddr=false)
isEmptyRecord - Return true iff a structure contains only empty fields.
bool Load(InterpState &S, CodePtr OpPC)
Definition: Interp.h:1396
The JSON file list parser is used to communicate input to InstallAPI.
@ CPlusPlus
Definition: LangStandard.h:55
const FunctionProtoType * T
@ SveFixedLengthData
is AArch64 SVE fixed-length data vector
@ SveFixedLengthPredicate
is AArch64 SVE fixed-length predicate vector
bool IsArmStreamingFunction(const FunctionDecl *FD, bool IncludeLocallyStreaming)
Returns whether the given FunctionDecl has an __arm[_locally]_streaming attribute.
Definition: Decl.cpp:5764
unsigned long uint64_t
Definition: Format.h:5433
#define true
Definition: stdbool.h:25
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
Contains information gathered from parsing the contents of TargetAttr.
Definition: TargetInfo.h:57
LangOptions::SignReturnAddressScopeKind SignReturnAddr
Definition: TargetInfo.h:1409
LangOptions::SignReturnAddressKeyKind SignKey
Definition: TargetInfo.h:1410
const char * getSignReturnAddrStr() const
Definition: TargetInfo.h:1417