CGAtomic.cpp
1 //===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the code for emitting atomic operations.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "CGCall.h"
14 #include "CGRecordLayout.h"
15 #include "CodeGenFunction.h"
16 #include "CodeGenModule.h"
17 #include "TargetInfo.h"
18 #include "clang/AST/ASTContext.h"
19 #include "clang/CodeGen/CGFunctionInfo.h"
20 #include "clang/Frontend/FrontendDiagnostic.h"
21 #include "llvm/ADT/DenseMap.h"
22 #include "llvm/IR/DataLayout.h"
23 #include "llvm/IR/Intrinsics.h"
24 #include "llvm/IR/Operator.h"
25 
26 using namespace clang;
27 using namespace CodeGen;
28 
29 namespace {
30  class AtomicInfo {
31  CodeGenFunction &CGF;
32  QualType AtomicTy;
33  QualType ValueTy;
34  uint64_t AtomicSizeInBits;
35  uint64_t ValueSizeInBits;
36  CharUnits AtomicAlign;
37  CharUnits ValueAlign;
38  TypeEvaluationKind EvaluationKind;
39  bool UseLibcall;
40  LValue LVal;
41  CGBitFieldInfo BFI;
42  public:
43  AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
44  : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
45  EvaluationKind(TEK_Scalar), UseLibcall(true) {
46  assert(!lvalue.isGlobalReg());
47  ASTContext &C = CGF.getContext();
48  if (lvalue.isSimple()) {
49  AtomicTy = lvalue.getType();
50  if (auto *ATy = AtomicTy->getAs<AtomicType>())
51  ValueTy = ATy->getValueType();
52  else
53  ValueTy = AtomicTy;
54  EvaluationKind = CGF.getEvaluationKind(ValueTy);
55 
56  uint64_t ValueAlignInBits;
57  uint64_t AtomicAlignInBits;
58  TypeInfo ValueTI = C.getTypeInfo(ValueTy);
59  ValueSizeInBits = ValueTI.Width;
60  ValueAlignInBits = ValueTI.Align;
61 
62  TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
63  AtomicSizeInBits = AtomicTI.Width;
64  AtomicAlignInBits = AtomicTI.Align;
65 
66  assert(ValueSizeInBits <= AtomicSizeInBits);
67  assert(ValueAlignInBits <= AtomicAlignInBits);
68 
69  AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
70  ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
71  if (lvalue.getAlignment().isZero())
72  lvalue.setAlignment(AtomicAlign);
73 
74  LVal = lvalue;
75  } else if (lvalue.isBitField()) {
76  ValueTy = lvalue.getType();
77  ValueSizeInBits = C.getTypeSize(ValueTy);
78  auto &OrigBFI = lvalue.getBitFieldInfo();
79  auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
80  AtomicSizeInBits = C.toBits(
81  C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
82  .alignTo(lvalue.getAlignment()));
83  llvm::Value *BitFieldPtr = lvalue.getRawBitFieldPointer(CGF);
84  auto OffsetInChars =
85  (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
86  lvalue.getAlignment();
87  llvm::Value *StoragePtr = CGF.Builder.CreateConstGEP1_64(
88  CGF.Int8Ty, BitFieldPtr, OffsetInChars.getQuantity());
89  StoragePtr = CGF.Builder.CreateAddrSpaceCast(
90  StoragePtr, CGF.UnqualPtrTy, "atomic_bitfield_base");
91  BFI = OrigBFI;
92  BFI.Offset = Offset;
93  BFI.StorageSize = AtomicSizeInBits;
94  BFI.StorageOffset += OffsetInChars;
95  llvm::Type *StorageTy = CGF.Builder.getIntNTy(AtomicSizeInBits);
96  LVal = LValue::MakeBitfield(
97  Address(StoragePtr, StorageTy, lvalue.getAlignment()), BFI,
98  lvalue.getType(), lvalue.getBaseInfo(), lvalue.getTBAAInfo());
99 
100  AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
101  if (AtomicTy.isNull()) {
102  llvm::APInt Size(
103  /*numBits=*/32,
104  C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
105  AtomicTy = C.getConstantArrayType(C.CharTy, Size, nullptr,
106  ArraySizeModifier::Normal,
107  /*IndexTypeQuals=*/0);
108  }
109  AtomicAlign = ValueAlign = lvalue.getAlignment();
110  } else if (lvalue.isVectorElt()) {
111  ValueTy = lvalue.getType()->castAs<VectorType>()->getElementType();
112  ValueSizeInBits = C.getTypeSize(ValueTy);
113  AtomicTy = lvalue.getType();
114  AtomicSizeInBits = C.getTypeSize(AtomicTy);
115  AtomicAlign = ValueAlign = lvalue.getAlignment();
116  LVal = lvalue;
117  } else {
118  assert(lvalue.isExtVectorElt());
119  ValueTy = lvalue.getType();
120  ValueSizeInBits = C.getTypeSize(ValueTy);
121  AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
122  lvalue.getType(), cast<llvm::FixedVectorType>(
123  CGF.ConvertType(lvalue.getType()))
124  ->getNumElements());
125  AtomicSizeInBits = C.getTypeSize(AtomicTy);
126  AtomicAlign = ValueAlign = lvalue.getAlignment();
127  LVal = lvalue;
128  }
129  UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
130  AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
131  }
132 
133  QualType getAtomicType() const { return AtomicTy; }
134  QualType getValueType() const { return ValueTy; }
135  CharUnits getAtomicAlignment() const { return AtomicAlign; }
136  uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
137  uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
138  TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
139  bool shouldUseLibcall() const { return UseLibcall; }
140  const LValue &getAtomicLValue() const { return LVal; }
141  llvm::Value *getAtomicPointer() const {
142  if (LVal.isSimple())
143  return LVal.emitRawPointer(CGF);
144  else if (LVal.isBitField())
145  return LVal.getRawBitFieldPointer(CGF);
146  else if (LVal.isVectorElt())
147  return LVal.getRawVectorPointer(CGF);
148  assert(LVal.isExtVectorElt());
149  return LVal.getRawExtVectorPointer(CGF);
150  }
151  Address getAtomicAddress() const {
152  llvm::Type *ElTy;
153  if (LVal.isSimple())
154  ElTy = LVal.getAddress().getElementType();
155  else if (LVal.isBitField())
156  ElTy = LVal.getBitFieldAddress().getElementType();
157  else if (LVal.isVectorElt())
158  ElTy = LVal.getVectorAddress().getElementType();
159  else
160  ElTy = LVal.getExtVectorAddress().getElementType();
161  return Address(getAtomicPointer(), ElTy, getAtomicAlignment());
162  }
163 
164  Address getAtomicAddressAsAtomicIntPointer() const {
165  return castToAtomicIntPointer(getAtomicAddress());
166  }
167 
168  /// Is the atomic size larger than the underlying value type?
169  ///
170  /// Note that the absence of padding does not mean that atomic
171  /// objects are completely interchangeable with non-atomic
172  /// objects: we might have promoted the alignment of a type
173  /// without making it bigger.
174  bool hasPadding() const {
175  return (ValueSizeInBits != AtomicSizeInBits);
176  }
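 // Illustrative example (editor's note, not part of the original source): on a
 // typical 64-bit target an _Atomic of a 3-byte struct is usually widened to a
 // 4-byte atomic object, so ValueSizeInBits (24) != AtomicSizeInBits (32) and
 // hasPadding() is true; _Atomic(long long) on 32-bit x86 merely has its
 // alignment raised to 8, so both sizes stay equal and there is no padding,
 // even though the type is still not interchangeable with a plain long long.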
177 
178  bool emitMemSetZeroIfNecessary() const;
179 
180  llvm::Value *getAtomicSizeValue() const {
181  CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
182  return CGF.CGM.getSize(size);
183  }
184 
185  /// Cast the given pointer to an integer pointer suitable for atomic
186  /// operations.
187  Address castToAtomicIntPointer(Address Addr) const;
188 
189  /// If Addr is compatible with the iN that will be used for an atomic
190  /// operation, bitcast it. Otherwise, create a temporary that is suitable
191  /// and copy the value across.
192  Address convertToAtomicIntPointer(Address Addr) const;
193 
194  /// Turn an atomic-layout object into an r-value.
195  RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot,
196  SourceLocation loc, bool AsValue) const;
197 
198  llvm::Value *getScalarRValValueOrNull(RValue RVal) const;
199 
200  /// Converts an rvalue to integer value if needed.
201  llvm::Value *convertRValueToInt(RValue RVal, bool CmpXchg = false) const;
202 
203  RValue ConvertToValueOrAtomic(llvm::Value *IntVal, AggValueSlot ResultSlot,
204  SourceLocation Loc, bool AsValue,
205  bool CmpXchg = false) const;
206 
207  /// Copy an atomic r-value into atomic-layout memory.
208  void emitCopyIntoMemory(RValue rvalue) const;
209 
210  /// Project an l-value down to the value field.
211  LValue projectValue() const {
212  assert(LVal.isSimple());
213  Address addr = getAtomicAddress();
214  if (hasPadding())
215  addr = CGF.Builder.CreateStructGEP(addr, 0);
216 
217  return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
218  LVal.getBaseInfo(), LVal.getTBAAInfo());
219  }
220 
221  /// Emits atomic load.
222  /// \returns Loaded value.
223  RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
224  bool AsValue, llvm::AtomicOrdering AO,
225  bool IsVolatile);
226 
227  /// Emits atomic compare-and-exchange sequence.
228  /// \param Expected Expected value.
229  /// \param Desired Desired value.
230  /// \param Success Atomic ordering for success operation.
231  /// \param Failure Atomic ordering for failed operation.
232  /// \param IsWeak true if atomic operation is weak, false otherwise.
233  /// \returns Pair of values: previous value from storage (value type) and
234  /// boolean flag (i1 type) with true if success and false otherwise.
235  std::pair<RValue, llvm::Value *>
236  EmitAtomicCompareExchange(RValue Expected, RValue Desired,
237  llvm::AtomicOrdering Success =
238  llvm::AtomicOrdering::SequentiallyConsistent,
239  llvm::AtomicOrdering Failure =
240  llvm::AtomicOrdering::SequentiallyConsistent,
241  bool IsWeak = false);
242 
243  /// Emits atomic update.
244  /// \param AO Atomic ordering.
245  /// \param UpdateOp Update operation for the current lvalue.
246  void EmitAtomicUpdate(llvm::AtomicOrdering AO,
247  const llvm::function_ref<RValue(RValue)> &UpdateOp,
248  bool IsVolatile);
249  /// Emits atomic update.
250  /// \param AO Atomic ordering.
251  void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
252  bool IsVolatile);
253 
254  /// Materialize an atomic r-value in atomic-layout memory.
255  Address materializeRValue(RValue rvalue) const;
256 
257  /// Creates temp alloca for intermediate operations on atomic value.
258  Address CreateTempAlloca() const;
259  private:
260  bool requiresMemSetZero(llvm::Type *type) const;
261 
262 
263  /// Emits atomic load as a libcall.
264  void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
265  llvm::AtomicOrdering AO, bool IsVolatile);
266  /// Emits atomic load as LLVM instruction.
267  llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile,
268  bool CmpXchg = false);
269  /// Emits atomic compare-and-exchange op as a libcall.
270  llvm::Value *EmitAtomicCompareExchangeLibcall(
271  llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
272  llvm::AtomicOrdering Success =
273  llvm::AtomicOrdering::SequentiallyConsistent,
274  llvm::AtomicOrdering Failure =
275  llvm::AtomicOrdering::SequentiallyConsistent);
276  /// Emits atomic compare-and-exchange op as LLVM instruction.
277  std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
278  llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
279  llvm::AtomicOrdering Success =
280  llvm::AtomicOrdering::SequentiallyConsistent,
281  llvm::AtomicOrdering Failure =
282  llvm::AtomicOrdering::SequentiallyConsistent,
283  bool IsWeak = false);
284  /// Emit atomic update as libcalls.
285  void
286  EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
287  const llvm::function_ref<RValue(RValue)> &UpdateOp,
288  bool IsVolatile);
289  /// Emit atomic update as LLVM instructions.
290  void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
291  const llvm::function_ref<RValue(RValue)> &UpdateOp,
292  bool IsVolatile);
293  /// Emit atomic update as libcalls.
294  void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
295  bool IsVolatile);
296  /// Emit atomic update as LLVM instructions.
297  void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
298  bool IsVolatile);
299  };
300 }
301 
302 Address AtomicInfo::CreateTempAlloca() const {
303  Address TempAlloca = CGF.CreateMemTemp(
304  (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
305  : AtomicTy,
306  getAtomicAlignment(),
307  "atomic-temp");
308  // Cast to pointer to value type for bitfields.
309  if (LVal.isBitField())
310  return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
311  TempAlloca, getAtomicAddress().getType(),
312  getAtomicAddress().getElementType());
313  return TempAlloca;
314 }
315 
316 static RValue emitAtomicLibcall(CodeGenFunction &CGF,
317  StringRef fnName,
318  QualType resultType,
319  CallArgList &args) {
320  const CGFunctionInfo &fnInfo =
321  CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
322  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
323  llvm::AttrBuilder fnAttrB(CGF.getLLVMContext());
324  fnAttrB.addAttribute(llvm::Attribute::NoUnwind);
325  fnAttrB.addAttribute(llvm::Attribute::WillReturn);
326  llvm::AttributeList fnAttrs = llvm::AttributeList::get(
327  CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, fnAttrB);
328 
329  llvm::FunctionCallee fn =
330  CGF.CGM.CreateRuntimeFunction(fnTy, fnName, fnAttrs);
331  auto callee = CGCallee::forDirect(fn);
332  return CGF.EmitCall(fnInfo, callee, ReturnValueSlot(), args);
333 }
334 
335 /// Does a store of the given IR type modify the full expected width?
336 static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
337  uint64_t expectedSize) {
338  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
339 }
340 
341 /// Does the atomic type require memsetting to zero before initialization?
342 ///
343 /// The IR type is provided as a way of making certain queries faster.
344 bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
345  // If the atomic type has size padding, we definitely need a memset.
346  if (hasPadding()) return true;
347 
348  // Otherwise, do some simple heuristics to try to avoid it:
349  switch (getEvaluationKind()) {
350  // For scalars and complexes, check whether the store size of the
351  // type uses the full size.
352  case TEK_Scalar:
353  return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
354  case TEK_Complex:
355  return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
356  AtomicSizeInBits / 2);
357 
358  // Padding in structs has an undefined bit pattern. User beware.
359  case TEK_Aggregate:
360  return false;
361  }
362  llvm_unreachable("bad evaluation kind");
363 }
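// Illustrative example (editor's note): _Atomic(bool) lowers to an i8 whose
// store size covers all 8 atomic bits, so no memset is needed, whereas an
// x86-64 long double stores only 80 of its 128 atomic bits, so its padding
// must be zero-initialized first.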
364 
365 bool AtomicInfo::emitMemSetZeroIfNecessary() const {
366  assert(LVal.isSimple());
367  Address addr = LVal.getAddress();
368  if (!requiresMemSetZero(addr.getElementType()))
369  return false;
370 
371  CGF.Builder.CreateMemSet(
372  addr.emitRawPointer(CGF), llvm::ConstantInt::get(CGF.Int8Ty, 0),
373  CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
374  LVal.getAlignment().getAsAlign());
375  return true;
376 }
377 
378 static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
379  Address Dest, Address Ptr,
380  Address Val1, Address Val2,
381  uint64_t Size,
382  llvm::AtomicOrdering SuccessOrder,
383  llvm::AtomicOrdering FailureOrder,
384  llvm::SyncScope::ID Scope) {
385  // Note that cmpxchg doesn't support weak cmpxchg, at least at the moment.
386  llvm::Value *Expected = CGF.Builder.CreateLoad(Val1);
387  llvm::Value *Desired = CGF.Builder.CreateLoad(Val2);
388 
389  llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
390  Ptr, Expected, Desired, SuccessOrder, FailureOrder, Scope);
391  Pair->setVolatile(E->isVolatile());
392  Pair->setWeak(IsWeak);
393 
394  // Cmp holds the result of the compare-exchange operation: true on success,
395  // false on failure.
396  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
397  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);
398 
399  // This basic block is used to hold the store instruction if the operation
400  // failed.
401  llvm::BasicBlock *StoreExpectedBB =
402  CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);
403 
404  // This basic block is the exit point of the operation, we should end up
405  // here regardless of whether or not the operation succeeded.
406  llvm::BasicBlock *ContinueBB =
407  CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
408 
409  // Update Expected if Expected isn't equal to Old, otherwise branch to the
410  // exit point.
411  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);
412 
413  CGF.Builder.SetInsertPoint(StoreExpectedBB);
414  // Update the memory at Expected with Old's value.
415  CGF.Builder.CreateStore(Old, Val1);
416  // Finally, branch to the exit point.
417  CGF.Builder.CreateBr(ContinueBB);
418 
419  CGF.Builder.SetInsertPoint(ContinueBB);
420  // Update the memory at Dest with Cmp's value.
421  CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
422 }
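// Rough shape of the IR emitted above (editor's sketch; exact types and
// orderings depend on the call site):
//   %pair = cmpxchg [weak] [volatile] ptr %ptr, iN %expected, iN %desired
//           <success order> <failure order>
//   %old  = extractvalue { iN, i1 } %pair, 0
//   %ok   = extractvalue { iN, i1 } %pair, 1
//   br i1 %ok, label %cmpxchg.continue, label %cmpxchg.store_expected
// where the failure block stores %old back to Val1 and the continue block
// stores %ok to Dest.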
423 
424 /// Given an ordering required on success, emit all possible cmpxchg
425 /// instructions to cope with the provided (but possibly only dynamically known)
426 /// FailureOrder.
427 static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
428  bool IsWeak, Address Dest, Address Ptr,
429  Address Val1, Address Val2,
430  llvm::Value *FailureOrderVal,
431  uint64_t Size,
432  llvm::AtomicOrdering SuccessOrder,
433  llvm::SyncScope::ID Scope) {
434  llvm::AtomicOrdering FailureOrder;
435  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
436  auto FOS = FO->getSExtValue();
437  if (!llvm::isValidAtomicOrderingCABI(FOS))
438  FailureOrder = llvm::AtomicOrdering::Monotonic;
439  else
440  switch ((llvm::AtomicOrderingCABI)FOS) {
441  case llvm::AtomicOrderingCABI::relaxed:
442  // 31.7.2.18: "The failure argument shall not be memory_order_release
443  // nor memory_order_acq_rel". Fall back to monotonic.
444  case llvm::AtomicOrderingCABI::release:
445  case llvm::AtomicOrderingCABI::acq_rel:
446  FailureOrder = llvm::AtomicOrdering::Monotonic;
447  break;
448  case llvm::AtomicOrderingCABI::consume:
449  case llvm::AtomicOrderingCABI::acquire:
450  FailureOrder = llvm::AtomicOrdering::Acquire;
451  break;
452  case llvm::AtomicOrderingCABI::seq_cst:
453  FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
454  break;
455  }
456  // Prior to c++17, "the failure argument shall be no stronger than the
457  // success argument". This condition has been lifted and the only
458  // precondition is 31.7.2.18. Effectively treat this as a DR and skip
459  // language version checks.
460  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
461  FailureOrder, Scope);
462  return;
463  }
464 
465  // Create all the relevant BB's
466  auto *MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
467  auto *AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
468  auto *SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);
469  auto *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);
470 
471  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
472  // doesn't matter unless someone is crazy enough to use something that
473  // doesn't fold to a constant for the ordering.
474  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
475  // Implemented as acquire, since it's the closest in LLVM.
476  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
477  AcquireBB);
478  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
479  AcquireBB);
480  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
481  SeqCstBB);
482 
483  // Emit all the different atomics
484  CGF.Builder.SetInsertPoint(MonotonicBB);
485  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
486  Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
487  CGF.Builder.CreateBr(ContBB);
488 
489  CGF.Builder.SetInsertPoint(AcquireBB);
490  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
491  llvm::AtomicOrdering::Acquire, Scope);
492  CGF.Builder.CreateBr(ContBB);
493 
494  CGF.Builder.SetInsertPoint(SeqCstBB);
495  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
496  llvm::AtomicOrdering::SequentiallyConsistent, Scope);
497  CGF.Builder.CreateBr(ContBB);
498 
499  CGF.Builder.SetInsertPoint(ContBB);
500 }
501 
502 /// Duplicate the atomic min/max operation in conventional IR for the builtin
503 /// variants that return the new rather than the original value.
504 static llvm::Value *EmitPostAtomicMinMax(CGBuilderTy &Builder,
505  AtomicExpr::AtomicOp Op,
506  bool IsSigned,
507  llvm::Value *OldVal,
508  llvm::Value *RHS) {
509  llvm::CmpInst::Predicate Pred;
510  switch (Op) {
511  default:
512  llvm_unreachable("Unexpected min/max operation");
513  case AtomicExpr::AO__atomic_max_fetch:
514  case AtomicExpr::AO__scoped_atomic_max_fetch:
515  Pred = IsSigned ? llvm::CmpInst::ICMP_SGT : llvm::CmpInst::ICMP_UGT;
516  break;
517  case AtomicExpr::AO__atomic_min_fetch:
518  case AtomicExpr::AO__scoped_atomic_min_fetch:
519  Pred = IsSigned ? llvm::CmpInst::ICMP_SLT : llvm::CmpInst::ICMP_ULT;
520  break;
521  }
522  llvm::Value *Cmp = Builder.CreateICmp(Pred, OldVal, RHS, "tst");
523  return Builder.CreateSelect(Cmp, OldVal, RHS, "newval");
524 }
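// Illustrative example (editor's note): for __atomic_max_fetch on a signed
// int, the atomicrmw yields the *old* value, so the value actually written is
// recomputed here as
//   %tst    = icmp sgt i32 %old, %rhs
//   %newval = select i1 %tst, i32 %old, i32 %rhs   ; max(old, rhs)
// which is what the caller then stores to Dest.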
525 
526 static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
527  Address Ptr, Address Val1, Address Val2,
528  llvm::Value *IsWeak, llvm::Value *FailureOrder,
529  uint64_t Size, llvm::AtomicOrdering Order,
530  llvm::SyncScope::ID Scope) {
531  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
532  bool PostOpMinMax = false;
533  unsigned PostOp = 0;
534 
535  switch (E->getOp()) {
536  case AtomicExpr::AO__c11_atomic_init:
537  case AtomicExpr::AO__opencl_atomic_init:
538  llvm_unreachable("Already handled!");
539 
540  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
541  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
542  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
543  emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
544  FailureOrder, Size, Order, Scope);
545  return;
546  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
547  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
548  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
549  emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
550  FailureOrder, Size, Order, Scope);
551  return;
552  case AtomicExpr::AO__atomic_compare_exchange:
553  case AtomicExpr::AO__atomic_compare_exchange_n:
554  case AtomicExpr::AO__scoped_atomic_compare_exchange:
555  case AtomicExpr::AO__scoped_atomic_compare_exchange_n: {
556  if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
557  emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
558  Val1, Val2, FailureOrder, Size, Order, Scope);
559  } else {
560  // Create all the relevant BB's
561  llvm::BasicBlock *StrongBB =
562  CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
563  llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmxchg.weak", CGF.CurFn);
564  llvm::BasicBlock *ContBB =
565  CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
566 
567  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
568  SI->addCase(CGF.Builder.getInt1(false), StrongBB);
569 
570  CGF.Builder.SetInsertPoint(StrongBB);
571  emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
572  FailureOrder, Size, Order, Scope);
573  CGF.Builder.CreateBr(ContBB);
574 
575  CGF.Builder.SetInsertPoint(WeakBB);
576  emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
577  FailureOrder, Size, Order, Scope);
578  CGF.Builder.CreateBr(ContBB);
579 
580  CGF.Builder.SetInsertPoint(ContBB);
581  }
582  return;
583  }
584  case AtomicExpr::AO__c11_atomic_load:
585  case AtomicExpr::AO__opencl_atomic_load:
586  case AtomicExpr::AO__hip_atomic_load:
587  case AtomicExpr::AO__atomic_load_n:
588  case AtomicExpr::AO__atomic_load:
589  case AtomicExpr::AO__scoped_atomic_load_n:
590  case AtomicExpr::AO__scoped_atomic_load: {
591  llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
592  Load->setAtomic(Order, Scope);
593  Load->setVolatile(E->isVolatile());
594  CGF.Builder.CreateStore(Load, Dest);
595  return;
596  }
597 
598  case AtomicExpr::AO__c11_atomic_store:
599  case AtomicExpr::AO__opencl_atomic_store:
600  case AtomicExpr::AO__hip_atomic_store:
601  case AtomicExpr::AO__atomic_store:
602  case AtomicExpr::AO__atomic_store_n:
603  case AtomicExpr::AO__scoped_atomic_store:
604  case AtomicExpr::AO__scoped_atomic_store_n: {
605  llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
606  llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
607  Store->setAtomic(Order, Scope);
608  Store->setVolatile(E->isVolatile());
609  return;
610  }
611 
612  case AtomicExpr::AO__c11_atomic_exchange:
613  case AtomicExpr::AO__hip_atomic_exchange:
614  case AtomicExpr::AO__opencl_atomic_exchange:
615  case AtomicExpr::AO__atomic_exchange_n:
616  case AtomicExpr::AO__atomic_exchange:
617  case AtomicExpr::AO__scoped_atomic_exchange_n:
618  case AtomicExpr::AO__scoped_atomic_exchange:
619  Op = llvm::AtomicRMWInst::Xchg;
620  break;
621 
622  case AtomicExpr::AO__atomic_add_fetch:
623  case AtomicExpr::AO__scoped_atomic_add_fetch:
624  PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FAdd
625  : llvm::Instruction::Add;
626  [[fallthrough]];
627  case AtomicExpr::AO__c11_atomic_fetch_add:
628  case AtomicExpr::AO__hip_atomic_fetch_add:
629  case AtomicExpr::AO__opencl_atomic_fetch_add:
630  case AtomicExpr::AO__atomic_fetch_add:
631  case AtomicExpr::AO__scoped_atomic_fetch_add:
632  Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FAdd
633  : llvm::AtomicRMWInst::Add;
634  break;
635 
636  case AtomicExpr::AO__atomic_sub_fetch:
637  case AtomicExpr::AO__scoped_atomic_sub_fetch:
638  PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FSub
639  : llvm::Instruction::Sub;
640  [[fallthrough]];
641  case AtomicExpr::AO__c11_atomic_fetch_sub:
642  case AtomicExpr::AO__hip_atomic_fetch_sub:
643  case AtomicExpr::AO__opencl_atomic_fetch_sub:
644  case AtomicExpr::AO__atomic_fetch_sub:
645  case AtomicExpr::AO__scoped_atomic_fetch_sub:
646  Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FSub
647  : llvm::AtomicRMWInst::Sub;
648  break;
649 
650  case AtomicExpr::AO__atomic_min_fetch:
651  case AtomicExpr::AO__scoped_atomic_min_fetch:
652  PostOpMinMax = true;
653  [[fallthrough]];
654  case AtomicExpr::AO__c11_atomic_fetch_min:
655  case AtomicExpr::AO__hip_atomic_fetch_min:
656  case AtomicExpr::AO__opencl_atomic_fetch_min:
657  case AtomicExpr::AO__atomic_fetch_min:
658  case AtomicExpr::AO__scoped_atomic_fetch_min:
659  Op = E->getValueType()->isFloatingType()
660  ? llvm::AtomicRMWInst::FMin
661  : (E->getValueType()->isSignedIntegerType()
662  ? llvm::AtomicRMWInst::Min
663  : llvm::AtomicRMWInst::UMin);
664  break;
665 
666  case AtomicExpr::AO__atomic_max_fetch:
667  case AtomicExpr::AO__scoped_atomic_max_fetch:
668  PostOpMinMax = true;
669  [[fallthrough]];
670  case AtomicExpr::AO__c11_atomic_fetch_max:
671  case AtomicExpr::AO__hip_atomic_fetch_max:
672  case AtomicExpr::AO__opencl_atomic_fetch_max:
673  case AtomicExpr::AO__atomic_fetch_max:
674  case AtomicExpr::AO__scoped_atomic_fetch_max:
675  Op = E->getValueType()->isFloatingType()
676  ? llvm::AtomicRMWInst::FMax
677  : (E->getValueType()->isSignedIntegerType()
678  ? llvm::AtomicRMWInst::Max
679  : llvm::AtomicRMWInst::UMax);
680  break;
681 
682  case AtomicExpr::AO__atomic_and_fetch:
683  case AtomicExpr::AO__scoped_atomic_and_fetch:
684  PostOp = llvm::Instruction::And;
685  [[fallthrough]];
686  case AtomicExpr::AO__c11_atomic_fetch_and:
687  case AtomicExpr::AO__hip_atomic_fetch_and:
688  case AtomicExpr::AO__opencl_atomic_fetch_and:
689  case AtomicExpr::AO__atomic_fetch_and:
690  case AtomicExpr::AO__scoped_atomic_fetch_and:
691  Op = llvm::AtomicRMWInst::And;
692  break;
693 
694  case AtomicExpr::AO__atomic_or_fetch:
695  case AtomicExpr::AO__scoped_atomic_or_fetch:
696  PostOp = llvm::Instruction::Or;
697  [[fallthrough]];
698  case AtomicExpr::AO__c11_atomic_fetch_or:
699  case AtomicExpr::AO__hip_atomic_fetch_or:
700  case AtomicExpr::AO__opencl_atomic_fetch_or:
701  case AtomicExpr::AO__atomic_fetch_or:
702  case AtomicExpr::AO__scoped_atomic_fetch_or:
703  Op = llvm::AtomicRMWInst::Or;
704  break;
705 
706  case AtomicExpr::AO__atomic_xor_fetch:
707  case AtomicExpr::AO__scoped_atomic_xor_fetch:
708  PostOp = llvm::Instruction::Xor;
709  [[fallthrough]];
710  case AtomicExpr::AO__c11_atomic_fetch_xor:
711  case AtomicExpr::AO__hip_atomic_fetch_xor:
712  case AtomicExpr::AO__opencl_atomic_fetch_xor:
713  case AtomicExpr::AO__atomic_fetch_xor:
714  case AtomicExpr::AO__scoped_atomic_fetch_xor:
715  Op = llvm::AtomicRMWInst::Xor;
716  break;
717 
718  case AtomicExpr::AO__atomic_nand_fetch:
719  case AtomicExpr::AO__scoped_atomic_nand_fetch:
720  PostOp = llvm::Instruction::And; // the NOT is special cased below
721  [[fallthrough]];
722  case AtomicExpr::AO__c11_atomic_fetch_nand:
723  case AtomicExpr::AO__atomic_fetch_nand:
724  case AtomicExpr::AO__scoped_atomic_fetch_nand:
725  Op = llvm::AtomicRMWInst::Nand;
726  break;
727  }
728 
729  llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
730  llvm::AtomicRMWInst *RMWI =
731  CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order, Scope);
732  RMWI->setVolatile(E->isVolatile());
733 
734  // For __atomic_*_fetch operations, perform the operation again to
735  // determine the value which was written.
736  llvm::Value *Result = RMWI;
737  if (PostOpMinMax)
738  Result = EmitPostAtomicMinMax(CGF.Builder, E->getOp(),
739  E->getValueType()->isSignedIntegerType(),
740  RMWI, LoadVal1);
741  else if (PostOp)
742  Result = CGF.Builder.CreateBinOp((llvm::Instruction::BinaryOps)PostOp, RMWI,
743  LoadVal1);
744  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch ||
745  E->getOp() == AtomicExpr::AO__scoped_atomic_nand_fetch)
746  Result = CGF.Builder.CreateNot(Result);
747  CGF.Builder.CreateStore(Result, Dest);
748 }
749 
750 // This function emits any expression (scalar, complex, or aggregate)
751 // into a temporary alloca.
752 static Address
753 EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
754  Address DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
755  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
756  /*Init*/ true);
757  return DeclPtr;
758 }
759 
760 static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
761  Address Ptr, Address Val1, Address Val2,
762  llvm::Value *IsWeak, llvm::Value *FailureOrder,
763  uint64_t Size, llvm::AtomicOrdering Order,
764  llvm::Value *Scope) {
765  auto ScopeModel = Expr->getScopeModel();
766 
767  // LLVM atomic instructions always have a sync scope. If the clang atomic
768  // expression has no scope operand, use the default LLVM sync scope.
769  if (!ScopeModel) {
770  EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
771  Order, CGF.CGM.getLLVMContext().getOrInsertSyncScopeID(""));
772  return;
773  }
774 
775  // Handle constant scope.
776  if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) {
777  auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
778  CGF.CGM.getLangOpts(), ScopeModel->map(SC->getZExtValue()),
779  Order, CGF.CGM.getLLVMContext());
780  EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
781  Order, SCID);
782  return;
783  }
784 
785  // Handle non-constant scope.
786  auto &Builder = CGF.Builder;
787  auto Scopes = ScopeModel->getRuntimeValues();
788  llvm::DenseMap<unsigned, llvm::BasicBlock *> BB;
789  for (auto S : Scopes)
790  BB[S] = CGF.createBasicBlock(getAsString(ScopeModel->map(S)), CGF.CurFn);
791 
792  llvm::BasicBlock *ContBB =
793  CGF.createBasicBlock("atomic.scope.continue", CGF.CurFn);
794 
795  auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
796  // If an unsupported sync scope is encountered at run time, fall back to the
797  // scope model's fallback sync scope value.
798  auto FallBack = ScopeModel->getFallBackValue();
799  llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]);
800  for (auto S : Scopes) {
801  auto *B = BB[S];
802  if (S != FallBack)
803  SI->addCase(Builder.getInt32(S), B);
804 
805  Builder.SetInsertPoint(B);
806  EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
807  Order,
808  CGF.getTargetHooks().getLLVMSyncScopeID(CGF.CGM.getLangOpts(),
809  ScopeModel->map(S),
810  Order,
811  CGF.getLLVMContext()));
812  Builder.CreateBr(ContBB);
813  }
814 
815  Builder.SetInsertPoint(ContBB);
816 }
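// Editor's note on the function above: when the scope operand is not a
// compile-time constant (possible for, e.g., OpenCL memory_scope values such
// as work_group, device, or all_svm_devices), a switch over the scope model's
// runtime values is emitted, and each arm re-emits the same operation with the
// corresponding target sync scope, defaulting to the model's fallback scope
// for unknown values.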
817 
818 RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
819  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
820  QualType MemTy = AtomicTy;
821  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
822  MemTy = AT->getValueType();
823  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;
824 
825  Address Val1 = Address::invalid();
826  Address Val2 = Address::invalid();
827  Address Dest = Address::invalid();
828  Address Ptr = EmitPointerWithAlignment(E->getPtr());
829 
830  if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
831  E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
832  LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
833  EmitAtomicInit(E->getVal1(), lvalue);
834  return RValue::get(nullptr);
835  }
836 
837  auto TInfo = getContext().getTypeInfoInChars(AtomicTy);
838  uint64_t Size = TInfo.Width.getQuantity();
839  unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();
840 
841  CharUnits MaxInlineWidth =
842  getContext().toCharUnitsFromBits(MaxInlineWidthInBits);
843  DiagnosticsEngine &Diags = CGM.getDiags();
844  bool Misaligned = (Ptr.getAlignment() % TInfo.Width) != 0;
845  bool Oversized = getContext().toBits(TInfo.Width) > MaxInlineWidthInBits;
846  if (Misaligned) {
847  Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned)
848  << (int)TInfo.Width.getQuantity()
849  << (int)Ptr.getAlignment().getQuantity();
850  }
851  if (Oversized) {
852  Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_oversized)
853  << (int)TInfo.Width.getQuantity() << (int)MaxInlineWidth.getQuantity();
854  }
855 
856  llvm::Value *Order = EmitScalarExpr(E->getOrder());
857  llvm::Value *Scope =
858  E->getScopeModel() ? EmitScalarExpr(E->getScope()) : nullptr;
859  bool ShouldCastToIntPtrTy = true;
860 
861  switch (E->getOp()) {
862  case AtomicExpr::AO__c11_atomic_init:
863  case AtomicExpr::AO__opencl_atomic_init:
864  llvm_unreachable("Already handled above with EmitAtomicInit!");
865 
866  case AtomicExpr::AO__atomic_load_n:
867  case AtomicExpr::AO__scoped_atomic_load_n:
868  case AtomicExpr::AO__c11_atomic_load:
869  case AtomicExpr::AO__opencl_atomic_load:
870  case AtomicExpr::AO__hip_atomic_load:
871  break;
872 
873  case AtomicExpr::AO__atomic_load:
874  case AtomicExpr::AO__scoped_atomic_load:
875  Dest = EmitPointerWithAlignment(E->getVal1());
876  break;
877 
878  case AtomicExpr::AO__atomic_store:
879  case AtomicExpr::AO__scoped_atomic_store:
880  Val1 = EmitPointerWithAlignment(E->getVal1());
881  break;
882 
883  case AtomicExpr::AO__atomic_exchange:
884  case AtomicExpr::AO__scoped_atomic_exchange:
885  Val1 = EmitPointerWithAlignment(E->getVal1());
886  Dest = EmitPointerWithAlignment(E->getVal2());
887  break;
888 
889  case AtomicExpr::AO__atomic_compare_exchange:
890  case AtomicExpr::AO__atomic_compare_exchange_n:
891  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
892  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
893  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
894  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
895  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
896  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
897  case AtomicExpr::AO__scoped_atomic_compare_exchange:
898  case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
899  Val1 = EmitPointerWithAlignment(E->getVal1());
900  if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
901  E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
902  Val2 = EmitPointerWithAlignment(E->getVal2());
903  else
904  Val2 = EmitValToTemp(*this, E->getVal2());
905  OrderFail = EmitScalarExpr(E->getOrderFail());
906  if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
907  E->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
908  E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange_n ||
909  E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
910  IsWeak = EmitScalarExpr(E->getWeak());
911  break;
912 
913  case AtomicExpr::AO__c11_atomic_fetch_add:
914  case AtomicExpr::AO__c11_atomic_fetch_sub:
915  case AtomicExpr::AO__hip_atomic_fetch_add:
916  case AtomicExpr::AO__hip_atomic_fetch_sub:
917  case AtomicExpr::AO__opencl_atomic_fetch_add:
918  case AtomicExpr::AO__opencl_atomic_fetch_sub:
919  if (MemTy->isPointerType()) {
920  // For pointer arithmetic, we're required to do a bit of math:
921  // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
922  // ... but only for the C11 builtins. The GNU builtins expect the
923  // user to multiply by sizeof(T).
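 // Illustrative example (editor's note): for int *p, the C11 form
 // __c11_atomic_fetch_add(&ap, 1, ...) must advance the pointer by
 // sizeof(int) bytes, while the GNU __atomic_fetch_add(&p, 1, ...) adds
 // exactly the byte count it is given; hence the multiply by PointeeIncAmt
 // below applies only to the C11/OpenCL/HIP builtins.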
924  QualType Val1Ty = E->getVal1()->getType();
925  llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
926  CharUnits PointeeIncAmt =
927  getContext().getTypeSizeInChars(MemTy->getPointeeType());
928  Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
929  auto Temp = CreateMemTemp(Val1Ty, ".atomictmp");
930  Val1 = Temp;
931  EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
932  break;
933  }
934  [[fallthrough]];
935  case AtomicExpr::AO__atomic_fetch_add:
936  case AtomicExpr::AO__atomic_fetch_max:
937  case AtomicExpr::AO__atomic_fetch_min:
938  case AtomicExpr::AO__atomic_fetch_sub:
939  case AtomicExpr::AO__atomic_add_fetch:
940  case AtomicExpr::AO__atomic_max_fetch:
941  case AtomicExpr::AO__atomic_min_fetch:
942  case AtomicExpr::AO__atomic_sub_fetch:
943  case AtomicExpr::AO__c11_atomic_fetch_max:
944  case AtomicExpr::AO__c11_atomic_fetch_min:
945  case AtomicExpr::AO__opencl_atomic_fetch_max:
946  case AtomicExpr::AO__opencl_atomic_fetch_min:
947  case AtomicExpr::AO__hip_atomic_fetch_max:
948  case AtomicExpr::AO__hip_atomic_fetch_min:
949  case AtomicExpr::AO__scoped_atomic_fetch_add:
950  case AtomicExpr::AO__scoped_atomic_fetch_max:
951  case AtomicExpr::AO__scoped_atomic_fetch_min:
952  case AtomicExpr::AO__scoped_atomic_fetch_sub:
953  case AtomicExpr::AO__scoped_atomic_add_fetch:
954  case AtomicExpr::AO__scoped_atomic_max_fetch:
955  case AtomicExpr::AO__scoped_atomic_min_fetch:
956  case AtomicExpr::AO__scoped_atomic_sub_fetch:
957  ShouldCastToIntPtrTy = !MemTy->isFloatingType();
958  [[fallthrough]];
959 
960  case AtomicExpr::AO__atomic_fetch_and:
961  case AtomicExpr::AO__atomic_fetch_nand:
962  case AtomicExpr::AO__atomic_fetch_or:
963  case AtomicExpr::AO__atomic_fetch_xor:
964  case AtomicExpr::AO__atomic_and_fetch:
965  case AtomicExpr::AO__atomic_nand_fetch:
966  case AtomicExpr::AO__atomic_or_fetch:
967  case AtomicExpr::AO__atomic_xor_fetch:
968  case AtomicExpr::AO__atomic_store_n:
969  case AtomicExpr::AO__atomic_exchange_n:
970  case AtomicExpr::AO__c11_atomic_fetch_and:
971  case AtomicExpr::AO__c11_atomic_fetch_nand:
972  case AtomicExpr::AO__c11_atomic_fetch_or:
973  case AtomicExpr::AO__c11_atomic_fetch_xor:
974  case AtomicExpr::AO__c11_atomic_store:
975  case AtomicExpr::AO__c11_atomic_exchange:
976  case AtomicExpr::AO__hip_atomic_fetch_and:
977  case AtomicExpr::AO__hip_atomic_fetch_or:
978  case AtomicExpr::AO__hip_atomic_fetch_xor:
979  case AtomicExpr::AO__hip_atomic_store:
980  case AtomicExpr::AO__hip_atomic_exchange:
981  case AtomicExpr::AO__opencl_atomic_fetch_and:
982  case AtomicExpr::AO__opencl_atomic_fetch_or:
983  case AtomicExpr::AO__opencl_atomic_fetch_xor:
984  case AtomicExpr::AO__opencl_atomic_store:
985  case AtomicExpr::AO__opencl_atomic_exchange:
986  case AtomicExpr::AO__scoped_atomic_fetch_and:
987  case AtomicExpr::AO__scoped_atomic_fetch_nand:
988  case AtomicExpr::AO__scoped_atomic_fetch_or:
989  case AtomicExpr::AO__scoped_atomic_fetch_xor:
990  case AtomicExpr::AO__scoped_atomic_and_fetch:
991  case AtomicExpr::AO__scoped_atomic_nand_fetch:
992  case AtomicExpr::AO__scoped_atomic_or_fetch:
993  case AtomicExpr::AO__scoped_atomic_xor_fetch:
994  case AtomicExpr::AO__scoped_atomic_store_n:
995  case AtomicExpr::AO__scoped_atomic_exchange_n:
996  Val1 = EmitValToTemp(*this, E->getVal1());
997  break;
998  }
999 
1000  QualType RValTy = E->getType().getUnqualifiedType();
1001 
1002  // The inlined atomics only function on iN types, where N is a power of 2. We
1003  // need to make sure (via temporaries if necessary) that all incoming values
1004  // are compatible.
1005  LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
1006  AtomicInfo Atomics(*this, AtomicVal);
1007 
1008  if (ShouldCastToIntPtrTy) {
1009  Ptr = Atomics.castToAtomicIntPointer(Ptr);
1010  if (Val1.isValid())
1011  Val1 = Atomics.convertToAtomicIntPointer(Val1);
1012  if (Val2.isValid())
1013  Val2 = Atomics.convertToAtomicIntPointer(Val2);
1014  }
1015  if (Dest.isValid()) {
1016  if (ShouldCastToIntPtrTy)
1017  Dest = Atomics.castToAtomicIntPointer(Dest);
1018  } else if (E->isCmpXChg())
1019  Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
1020  else if (!RValTy->isVoidType()) {
1021  Dest = Atomics.CreateTempAlloca();
1022  if (ShouldCastToIntPtrTy)
1023  Dest = Atomics.castToAtomicIntPointer(Dest);
1024  }
1025 
1026  bool PowerOf2Size = (Size & (Size - 1)) == 0;
1027  bool UseLibcall = !PowerOf2Size || (Size > 16);
1028 
1029  // For atomics larger than 16 bytes, emit a libcall from the frontend. This
1030  // avoids the overhead of dealing with excessively-large value types in IR.
1031  // Non-power-of-2 values also lower to libcall here, as they are not currently
1032  // permitted in IR instructions (although that constraint could be relaxed in
1033  // the future). For other cases where a libcall is required on a given
1034  // platform, we let the backend handle it (this includes handling for all of
1035  // the size-optimized libcall variants, which are only valid up to 16 bytes.)
1036  //
1037  // See: https://llvm.org/docs/Atomics.html#libcalls-atomic
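 // Illustrative example (editor's note): a 24-byte _Atomic struct has a
 // non-power-of-2 size and therefore takes the libcall path below
 // (__atomic_load / __atomic_store / __atomic_compare_exchange / ...),
 // while an 8-byte atomic is emitted inline as LLVM instructions and any
 // further lowering is left to the backend.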
1038  if (UseLibcall) {
1039  CallArgList Args;
1040  // For non-optimized library calls, the size is the first parameter.
1041  Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
1042  getContext().getSizeType());
1043 
1044  // The atomic address is the second parameter.
1045  // The OpenCL atomic library functions only accept pointer arguments to
1046  // generic address space.
1047  auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) {
1048  if (!E->isOpenCL())
1049  return V;
1050  auto AS = PT->castAs<PointerType>()->getPointeeType().getAddressSpace();
1051  if (AS == LangAS::opencl_generic)
1052  return V;
1053  auto DestAS = getContext().getTargetAddressSpace(LangAS::opencl_generic);
1054  auto *DestType = llvm::PointerType::get(getLLVMContext(), DestAS);
1055 
1056  return getTargetHooks().performAddrSpaceCast(
1057  *this, V, AS, LangAS::opencl_generic, DestType, false);
1058  };
1059 
1060  Args.add(RValue::get(CastToGenericAddrSpace(Ptr.emitRawPointer(*this),
1061  E->getPtr()->getType())),
1062  getContext().VoidPtrTy);
1063 
1064  // The next 1-3 parameters are op-dependent.
1065  std::string LibCallName;
1066  QualType RetTy;
1067  bool HaveRetTy = false;
1068  switch (E->getOp()) {
1069  case AtomicExpr::AO__c11_atomic_init:
1070  case AtomicExpr::AO__opencl_atomic_init:
1071  llvm_unreachable("Already handled!");
1072 
1073  // There is only one libcall for compare and exchange, because there is no
1074  // optimisation benefit possible from a libcall version of a weak compare
1075  // and exchange.
1076  // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
1077  // void *desired, int success, int failure)
1078  case AtomicExpr::AO__atomic_compare_exchange:
1079  case AtomicExpr::AO__atomic_compare_exchange_n:
1080  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
1081  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
1082  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
1083  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
1084  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
1085  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
1086  case AtomicExpr::AO__scoped_atomic_compare_exchange:
1087  case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
1088  LibCallName = "__atomic_compare_exchange";
1089  RetTy = getContext().BoolTy;
1090  HaveRetTy = true;
1091  Args.add(RValue::get(CastToGenericAddrSpace(Val1.emitRawPointer(*this),
1092  E->getVal1()->getType())),
1093  getContext().VoidPtrTy);
1094  Args.add(RValue::get(CastToGenericAddrSpace(Val2.emitRawPointer(*this),
1095  E->getVal2()->getType())),
1096  getContext().VoidPtrTy);
1097  Args.add(RValue::get(Order), getContext().IntTy);
1098  Order = OrderFail;
1099  break;
1100  // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
1101  // int order)
1102  case AtomicExpr::AO__atomic_exchange:
1103  case AtomicExpr::AO__atomic_exchange_n:
1104  case AtomicExpr::AO__c11_atomic_exchange:
1105  case AtomicExpr::AO__hip_atomic_exchange:
1106  case AtomicExpr::AO__opencl_atomic_exchange:
1107  case AtomicExpr::AO__scoped_atomic_exchange:
1108  case AtomicExpr::AO__scoped_atomic_exchange_n:
1109  LibCallName = "__atomic_exchange";
1110  Args.add(RValue::get(CastToGenericAddrSpace(Val1.emitRawPointer(*this),
1111  E->getVal1()->getType())),
1112  getContext().VoidPtrTy);
1113  break;
1114  // void __atomic_store(size_t size, void *mem, void *val, int order)
1115  case AtomicExpr::AO__atomic_store:
1116  case AtomicExpr::AO__atomic_store_n:
1117  case AtomicExpr::AO__c11_atomic_store:
1118  case AtomicExpr::AO__hip_atomic_store:
1119  case AtomicExpr::AO__opencl_atomic_store:
1120  case AtomicExpr::AO__scoped_atomic_store:
1121  case AtomicExpr::AO__scoped_atomic_store_n:
1122  LibCallName = "__atomic_store";
1123  RetTy = getContext().VoidTy;
1124  HaveRetTy = true;
1125  Args.add(RValue::get(CastToGenericAddrSpace(Val1.emitRawPointer(*this),
1126  E->getVal1()->getType())),
1127  getContext().VoidPtrTy);
1128  break;
1129  // void __atomic_load(size_t size, void *mem, void *return, int order)
1130  case AtomicExpr::AO__atomic_load:
1131  case AtomicExpr::AO__atomic_load_n:
1132  case AtomicExpr::AO__c11_atomic_load:
1133  case AtomicExpr::AO__hip_atomic_load:
1134  case AtomicExpr::AO__opencl_atomic_load:
1135  case AtomicExpr::AO__scoped_atomic_load:
1136  case AtomicExpr::AO__scoped_atomic_load_n:
1137  LibCallName = "__atomic_load";
1138  break;
1139  case AtomicExpr::AO__atomic_add_fetch:
1140  case AtomicExpr::AO__scoped_atomic_add_fetch:
1141  case AtomicExpr::AO__atomic_fetch_add:
1142  case AtomicExpr::AO__c11_atomic_fetch_add:
1143  case AtomicExpr::AO__hip_atomic_fetch_add:
1144  case AtomicExpr::AO__opencl_atomic_fetch_add:
1145  case AtomicExpr::AO__scoped_atomic_fetch_add:
1146  case AtomicExpr::AO__atomic_and_fetch:
1147  case AtomicExpr::AO__scoped_atomic_and_fetch:
1148  case AtomicExpr::AO__atomic_fetch_and:
1149  case AtomicExpr::AO__c11_atomic_fetch_and:
1150  case AtomicExpr::AO__hip_atomic_fetch_and:
1151  case AtomicExpr::AO__opencl_atomic_fetch_and:
1152  case AtomicExpr::AO__scoped_atomic_fetch_and:
1153  case AtomicExpr::AO__atomic_or_fetch:
1154  case AtomicExpr::AO__scoped_atomic_or_fetch:
1155  case AtomicExpr::AO__atomic_fetch_or:
1156  case AtomicExpr::AO__c11_atomic_fetch_or:
1157  case AtomicExpr::AO__hip_atomic_fetch_or:
1158  case AtomicExpr::AO__opencl_atomic_fetch_or:
1159  case AtomicExpr::AO__scoped_atomic_fetch_or:
1160  case AtomicExpr::AO__atomic_sub_fetch:
1161  case AtomicExpr::AO__scoped_atomic_sub_fetch:
1162  case AtomicExpr::AO__atomic_fetch_sub:
1163  case AtomicExpr::AO__c11_atomic_fetch_sub:
1164  case AtomicExpr::AO__hip_atomic_fetch_sub:
1165  case AtomicExpr::AO__opencl_atomic_fetch_sub:
1166  case AtomicExpr::AO__scoped_atomic_fetch_sub:
1167  case AtomicExpr::AO__atomic_xor_fetch:
1168  case AtomicExpr::AO__scoped_atomic_xor_fetch:
1169  case AtomicExpr::AO__atomic_fetch_xor:
1170  case AtomicExpr::AO__c11_atomic_fetch_xor:
1171  case AtomicExpr::AO__hip_atomic_fetch_xor:
1172  case AtomicExpr::AO__opencl_atomic_fetch_xor:
1173  case AtomicExpr::AO__scoped_atomic_fetch_xor:
1174  case AtomicExpr::AO__atomic_nand_fetch:
1175  case AtomicExpr::AO__atomic_fetch_nand:
1176  case AtomicExpr::AO__c11_atomic_fetch_nand:
1177  case AtomicExpr::AO__scoped_atomic_fetch_nand:
1178  case AtomicExpr::AO__scoped_atomic_nand_fetch:
1179  case AtomicExpr::AO__atomic_min_fetch:
1180  case AtomicExpr::AO__atomic_fetch_min:
1181  case AtomicExpr::AO__c11_atomic_fetch_min:
1182  case AtomicExpr::AO__hip_atomic_fetch_min:
1183  case AtomicExpr::AO__opencl_atomic_fetch_min:
1184  case AtomicExpr::AO__scoped_atomic_fetch_min:
1185  case AtomicExpr::AO__scoped_atomic_min_fetch:
1186  case AtomicExpr::AO__atomic_max_fetch:
1187  case AtomicExpr::AO__atomic_fetch_max:
1188  case AtomicExpr::AO__c11_atomic_fetch_max:
1189  case AtomicExpr::AO__hip_atomic_fetch_max:
1190  case AtomicExpr::AO__opencl_atomic_fetch_max:
1191  case AtomicExpr::AO__scoped_atomic_fetch_max:
1192  case AtomicExpr::AO__scoped_atomic_max_fetch:
1193  llvm_unreachable("Integral atomic operations always become atomicrmw!");
1194  }
1195 
1196  if (E->isOpenCL()) {
1197  LibCallName =
1198  std::string("__opencl") + StringRef(LibCallName).drop_front(1).str();
1199  }
1200  // By default, assume we return a value of the atomic type.
1201  if (!HaveRetTy) {
1202  // Value is returned through parameter before the order.
1203  RetTy = getContext().VoidTy;
1204  Args.add(RValue::get(
1205  CastToGenericAddrSpace(Dest.emitRawPointer(*this), RetTy)),
1206  getContext().VoidPtrTy);
1207  }
1208  // Order is always the last parameter.
1209  Args.add(RValue::get(Order),
1210  getContext().IntTy);
1211  if (E->isOpenCL())
1212  Args.add(RValue::get(Scope), getContext().IntTy);
1213 
1214  RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
1215  // The value is returned directly from the libcall.
1216  if (E->isCmpXChg())
1217  return Res;
1218 
1219  if (RValTy->isVoidType())
1220  return RValue::get(nullptr);
1221 
1222  return convertTempToRValue(Dest.withElementType(ConvertTypeForMem(RValTy)),
1223  RValTy, E->getExprLoc());
1224  }
1225 
1226  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
1227  E->getOp() == AtomicExpr::AO__opencl_atomic_store ||
1228  E->getOp() == AtomicExpr::AO__hip_atomic_store ||
1229  E->getOp() == AtomicExpr::AO__atomic_store ||
1230  E->getOp() == AtomicExpr::AO__atomic_store_n ||
1231  E->getOp() == AtomicExpr::AO__scoped_atomic_store ||
1232  E->getOp() == AtomicExpr::AO__scoped_atomic_store_n;
1233  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
1234  E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
1235  E->getOp() == AtomicExpr::AO__hip_atomic_load ||
1236  E->getOp() == AtomicExpr::AO__atomic_load ||
1237  E->getOp() == AtomicExpr::AO__atomic_load_n ||
1238  E->getOp() == AtomicExpr::AO__scoped_atomic_load ||
1239  E->getOp() == AtomicExpr::AO__scoped_atomic_load_n;
1240 
1241  if (isa<llvm::ConstantInt>(Order)) {
1242  auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
1243  // We should not ever get to a case where the ordering isn't a valid C ABI
1244  // value, but it's hard to enforce that in general.
1245  if (llvm::isValidAtomicOrderingCABI(ord))
1246  switch ((llvm::AtomicOrderingCABI)ord) {
1247  case llvm::AtomicOrderingCABI::relaxed:
1248  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1249  llvm::AtomicOrdering::Monotonic, Scope);
1250  break;
1251  case llvm::AtomicOrderingCABI::consume:
1252  case llvm::AtomicOrderingCABI::acquire:
1253  if (IsStore)
1254  break; // Avoid crashing on code with undefined behavior
1255  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1256  llvm::AtomicOrdering::Acquire, Scope);
1257  break;
1258  case llvm::AtomicOrderingCABI::release:
1259  if (IsLoad)
1260  break; // Avoid crashing on code with undefined behavior
1261  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1262  llvm::AtomicOrdering::Release, Scope);
1263  break;
1264  case llvm::AtomicOrderingCABI::acq_rel:
1265  if (IsLoad || IsStore)
1266  break; // Avoid crashing on code with undefined behavior
1267  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1268  llvm::AtomicOrdering::AcquireRelease, Scope);
1269  break;
1270  case llvm::AtomicOrderingCABI::seq_cst:
1271  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1272  llvm::AtomicOrdering::SequentiallyConsistent, Scope);
1273  break;
1274  }
1275  if (RValTy->isVoidType())
1276  return RValue::get(nullptr);
1277 
1278  return convertTempToRValue(Dest.withElementType(ConvertTypeForMem(RValTy)),
1279  RValTy, E->getExprLoc());
1280  }
1281 
1282  // Long case, when Order isn't obviously constant.
1283 
1284  // Create all the relevant BB's
1285  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
1286  *ReleaseBB = nullptr, *AcqRelBB = nullptr,
1287  *SeqCstBB = nullptr;
1288  MonotonicBB = createBasicBlock("monotonic", CurFn);
1289  if (!IsStore)
1290  AcquireBB = createBasicBlock("acquire", CurFn);
1291  if (!IsLoad)
1292  ReleaseBB = createBasicBlock("release", CurFn);
1293  if (!IsLoad && !IsStore)
1294  AcqRelBB = createBasicBlock("acqrel", CurFn);
1295  SeqCstBB = createBasicBlock("seqcst", CurFn);
1296  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
1297 
1298  // Create the switch for the split
1299  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
1300  // doesn't matter unless someone is crazy enough to use something that
1301  // doesn't fold to a constant for the ordering.
1302  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
1303  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);
1304 
1305  // Emit all the different atomics
1306  Builder.SetInsertPoint(MonotonicBB);
1307  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1308  llvm::AtomicOrdering::Monotonic, Scope);
1309  Builder.CreateBr(ContBB);
1310  if (!IsStore) {
1311  Builder.SetInsertPoint(AcquireBB);
1312  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1313  llvm::AtomicOrdering::Acquire, Scope);
1314  Builder.CreateBr(ContBB);
1315  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
1316  AcquireBB);
1317  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
1318  AcquireBB);
1319  }
1320  if (!IsLoad) {
1321  Builder.SetInsertPoint(ReleaseBB);
1322  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1323  llvm::AtomicOrdering::Release, Scope);
1324  Builder.CreateBr(ContBB);
1325  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
1326  ReleaseBB);
1327  }
1328  if (!IsLoad && !IsStore) {
1329  Builder.SetInsertPoint(AcqRelBB);
1330  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1331  llvm::AtomicOrdering::AcquireRelease, Scope);
1332  Builder.CreateBr(ContBB);
1333  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
1334  AcqRelBB);
1335  }
1336  Builder.SetInsertPoint(SeqCstBB);
1337  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1338  llvm::AtomicOrdering::SequentiallyConsistent, Scope);
1339  Builder.CreateBr(ContBB);
1340  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
1341  SeqCstBB);
1342 
1343  // Cleanup and return
1344  Builder.SetInsertPoint(ContBB);
1345  if (RValTy->isVoidType())
1346  return RValue::get(nullptr);
1347 
1348  assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
1349  return convertTempToRValue(Dest.withElementType(ConvertTypeForMem(RValTy)),
1350  RValTy, E->getExprLoc());
1351 }
1352 
1353 Address AtomicInfo::castToAtomicIntPointer(Address addr) const {
1354  llvm::IntegerType *ty =
1355  llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
1356  return addr.withElementType(ty);
1357 }
1358 
1359 Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
1360  llvm::Type *Ty = Addr.getElementType();
1361  uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
1362  if (SourceSizeInBits != AtomicSizeInBits) {
1363  Address Tmp = CreateTempAlloca();
1364  CGF.Builder.CreateMemCpy(Tmp, Addr,
1365  std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
1366  Addr = Tmp;
1367  }
1368 
1369  return castToAtomicIntPointer(Addr);
1370 }
1371 
1372 RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
1373  AggValueSlot resultSlot,
1374  SourceLocation loc,
1375  bool asValue) const {
1376  if (LVal.isSimple()) {
1377  if (EvaluationKind == TEK_Aggregate)
1378  return resultSlot.asRValue();
1379 
1380  // Drill into the padding structure if we have one.
1381  if (hasPadding())
1382  addr = CGF.Builder.CreateStructGEP(addr, 0);
1383 
1384  // Otherwise, just convert the temporary to an r-value using the
1385  // normal conversion routine.
1386  return CGF.convertTempToRValue(addr, getValueType(), loc);
1387  }
1388  if (!asValue)
1389  // Get RValue from temp memory as atomic for non-simple lvalues
1390  return RValue::get(CGF.Builder.CreateLoad(addr));
1391  if (LVal.isBitField())
1392  return CGF.EmitLoadOfBitfieldLValue(
1393  LValue::MakeBitfield(addr, LVal.getBitFieldInfo(), LVal.getType(),
1394  LVal.getBaseInfo(), TBAAAccessInfo()), loc);
1395  if (LVal.isVectorElt())
1396  return CGF.EmitLoadOfLValue(
1397  LValue::MakeVectorElt(addr, LVal.getVectorIdx(), LVal.getType(),
1398  LVal.getBaseInfo(), TBAAAccessInfo()), loc);
1399  assert(LVal.isExtVectorElt());
1400  return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
1401  addr, LVal.getExtVectorElts(), LVal.getType(),
1402  LVal.getBaseInfo(), TBAAAccessInfo()));
1403 }
1404 
1405 /// Return true if \param ValTy is a type that should be cast to an integer
1406 /// around the atomic memory operation. If \param CmpXchg is true, the cast is
1407 /// also made for floating point types, since the cmpxchg instruction cannot
1408 /// have floating point operands. TODO: Allow compare-and-exchange and FP - see
1409 /// comment in AtomicExpandPass.cpp.
1410 static bool shouldCastToInt(llvm::Type *ValTy, bool CmpXchg) {
1411  if (ValTy->isFloatingPointTy())
1412  return ValTy->isX86_FP80Ty() || CmpXchg;
1413  return !ValTy->isIntegerTy() && !ValTy->isPointerTy();
1414 }
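// Illustrative sketch, not part of CGAtomic.cpp: how shouldCastToInt
// classifies a few common IR types (assuming a typical x86-64 data layout).
//   i32        -> false             (already an integer)
//   ptr        -> false             (pointers are handled natively)
//   double     -> value of CmpXchg  (cast only for cmpxchg, which takes no FP)
//   x86_fp80   -> true              (padded 80-bit FP is always cast)
//   <4 x i32>  -> true              (neither integer nor pointer)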
1415 
1416 RValue AtomicInfo::ConvertToValueOrAtomic(llvm::Value *Val,
1417  AggValueSlot ResultSlot,
1418  SourceLocation Loc, bool AsValue,
1419  bool CmpXchg) const {
1420  // Try to avoid going through a temporary in some easy cases.
1421  assert((Val->getType()->isIntegerTy() || Val->getType()->isPointerTy() ||
1422  Val->getType()->isIEEELikeFPTy()) &&
1423  "Expected integer, pointer or floating point value when converting "
1424  "result.");
1425  if (getEvaluationKind() == TEK_Scalar &&
1426  (((!LVal.isBitField() ||
1427  LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
1428  !hasPadding()) ||
1429  !AsValue)) {
1430  auto *ValTy = AsValue
1431  ? CGF.ConvertTypeForMem(ValueTy)
1432  : getAtomicAddress().getElementType();
1433  if (!shouldCastToInt(ValTy, CmpXchg)) {
1434  assert((!ValTy->isIntegerTy() || Val->getType() == ValTy) &&
1435  "Different integer types.");
1436  return RValue::get(CGF.EmitFromMemory(Val, ValueTy));
1437  }
1438  if (llvm::CastInst::isBitCastable(Val->getType(), ValTy))
1439  return RValue::get(CGF.Builder.CreateBitCast(Val, ValTy));
1440  }
1441 
1442  // Create a temporary. This needs to be big enough to hold the
1443  // atomic integer.
1444  Address Temp = Address::invalid();
1445  bool TempIsVolatile = false;
1446  if (AsValue && getEvaluationKind() == TEK_Aggregate) {
1447  assert(!ResultSlot.isIgnored());
1448  Temp = ResultSlot.getAddress();
1449  TempIsVolatile = ResultSlot.isVolatile();
1450  } else {
1451  Temp = CreateTempAlloca();
1452  }
1453 
1454  // Slam the integer into the temporary.
1455  Address CastTemp = castToAtomicIntPointer(Temp);
1456  CGF.Builder.CreateStore(Val, CastTemp)->setVolatile(TempIsVolatile);
1457 
1458  return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
1459 }
1460 
1461 void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
1462  llvm::AtomicOrdering AO, bool) {
1463  // void __atomic_load(size_t size, void *mem, void *return, int order);
1464  CallArgList Args;
1465  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1466  Args.add(RValue::get(getAtomicPointer()), CGF.getContext().VoidPtrTy);
1467  Args.add(RValue::get(AddForLoaded), CGF.getContext().VoidPtrTy);
1468  Args.add(
1469  RValue::get(llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(AO))),
1470  CGF.getContext().IntTy);
1471  emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
1472 }
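// Illustrative sketch, not from this file: the source-level shape of the
// libcall emitted above for an atomic too large to load natively. The struct
// name and size below are hypothetical.
//
//   struct Big { char bytes[64]; };
//   _Atomic struct Big g;
//   struct Big tmp;
//   // a seq_cst load of g lowers roughly to:
//   //   __atomic_load(sizeof(g), &g, &tmp, __ATOMIC_SEQ_CST);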
1473 
1474 llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
1475  bool IsVolatile, bool CmpXchg) {
1476  // Okay, we're doing this natively.
1477  Address Addr = getAtomicAddress();
1478  if (shouldCastToInt(Addr.getElementType(), CmpXchg))
1479  Addr = castToAtomicIntPointer(Addr);
1480  llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
1481  Load->setAtomic(AO);
1482 
1483  // Other decoration.
1484  if (IsVolatile)
1485  Load->setVolatile(true);
1486  CGF.CGM.DecorateInstructionWithTBAA(Load, LVal.getTBAAInfo());
1487  return Load;
1488 }
1489 
1490 /// An LValue is a candidate for having its loads and stores be made atomic if
1491 /// we are operating under /volatile:ms *and* the LValue itself is volatile and
1492 /// such an operation can be performed without a libcall.
1493 bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
1494  if (!CGM.getLangOpts().MSVolatile) return false;
1495  AtomicInfo AI(*this, LV);
1496  bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
1497  // An atomic is inline if we don't need to use a libcall.
1498  bool AtomicIsInline = !AI.shouldUseLibcall();
1499  // MSVC doesn't seem to do this for types wider than a pointer.
1500  if (getContext().getTypeSize(LV.getType()) >
1501  getContext().getTypeSize(getContext().getIntPtrType()))
1502  return false;
1503  return IsVolatile && AtomicIsInline;
1504 }
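// Illustrative sketch, not from this file (assumes /volatile:ms and a type no
// wider than a pointer). Under that flag, qualifying volatile accesses are
// given acquire/release semantics:
//
//   volatile long ready;   // hypothetical flag
//   long r = ready;        // emitted as an atomic acquire load
//   ready = 1;             // emitted as an atomic release store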
1505 
1506 RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
1507                                        AggValueSlot Slot) {
1508  llvm::AtomicOrdering AO;
1509  bool IsVolatile = LV.isVolatileQualified();
1510  if (LV.getType()->isAtomicType()) {
1511  AO = llvm::AtomicOrdering::SequentiallyConsistent;
1512  } else {
1513  AO = llvm::AtomicOrdering::Acquire;
1514  IsVolatile = true;
1515  }
1516  return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
1517 }
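// Illustrative sketch, not from this file: the ordering selected by the
// overload above.
//   _Atomic int a;  int x = a;   // atomic type      -> seq_cst load
//   volatile int v; int y = v;   // MS-volatile path -> acquire load, volatile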
1518 
1519 RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
1520  bool AsValue, llvm::AtomicOrdering AO,
1521  bool IsVolatile) {
1522  // Check whether we should use a library call.
1523  if (shouldUseLibcall()) {
1524  Address TempAddr = Address::invalid();
1525  if (LVal.isSimple() && !ResultSlot.isIgnored()) {
1526  assert(getEvaluationKind() == TEK_Aggregate);
1527  TempAddr = ResultSlot.getAddress();
1528  } else
1529  TempAddr = CreateTempAlloca();
1530 
1531  EmitAtomicLoadLibcall(TempAddr.emitRawPointer(CGF), AO, IsVolatile);
1532 
1533  // Okay, turn that back into the original value or whole atomic (for
1534  // non-simple lvalues) type.
1535  return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
1536  }
1537 
1538  // Okay, we're doing this natively.
1539  auto *Load = EmitAtomicLoadOp(AO, IsVolatile);
1540 
1541  // If we're ignoring an aggregate return, don't do anything.
1542  if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
1543  return RValue::getAggregate(Address::invalid(), false);
1544 
1545  // Okay, turn that back into the original value or atomic (for non-simple
1546  // lvalues) type.
1547  return ConvertToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
1548 }
1549 
1550 /// Emit a load from an l-value of atomic type. Note that the r-value
1551 /// we produce is an r-value of the atomic *value* type.
1552 RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
1553                                        llvm::AtomicOrdering AO, bool IsVolatile,
1554  AggValueSlot resultSlot) {
1555  AtomicInfo Atomics(*this, src);
1556  return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
1557  IsVolatile);
1558 }
1559 
1560 /// Copy an r-value into memory as part of storing to an atomic type.
1561 /// This needs to create a bit-pattern suitable for atomic operations.
1562 void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
1563  assert(LVal.isSimple());
1564  // If we have an r-value, the rvalue should be of the atomic type,
1565  // which means that the caller is responsible for having zeroed
1566  // any padding. Just do an aggregate copy of that type.
1567  if (rvalue.isAggregate()) {
1568  LValue Dest = CGF.MakeAddrLValue(getAtomicAddress(), getAtomicType());
1569  LValue Src = CGF.MakeAddrLValue(rvalue.getAggregateAddress(),
1570  getAtomicType());
1571  bool IsVolatile = rvalue.isVolatileQualified() ||
1572  LVal.isVolatileQualified();
1573  CGF.EmitAggregateCopy(Dest, Src, getAtomicType(),
1574  AggValueSlot::DoesNotOverlap, IsVolatile);
1575  return;
1576  }
1577 
1578  // Okay, otherwise we're copying stuff.
1579 
1580  // Zero out the buffer if necessary.
1581  emitMemSetZeroIfNecessary();
1582 
1583  // Drill past the padding if present.
1584  LValue TempLVal = projectValue();
1585 
1586  // Okay, store the rvalue in.
1587  if (rvalue.isScalar()) {
1588  CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
1589  } else {
1590  CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
1591  }
1592 }
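// Illustrative sketch, not from this file (assumes a value type whose size is
// smaller than its padded atomic slot). Zeroing the buffer first keeps the
// whole bit-pattern deterministic, so cmpxchg-based updates compare equal on
// the padding bytes:
//
//   struct S { char c[3]; };   // 3 value bytes
//   _Atomic struct S x;        // typically padded to a 4-byte atomic slot
//   // storing to x: zero all 4 bytes, then copy in the 3 value bytes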
1593 
1594 
1595 /// Materialize an r-value into memory for the purposes of storing it
1596 /// to an atomic type.
1597 Address AtomicInfo::materializeRValue(RValue rvalue) const {
1598  // Aggregate r-values are already in memory, and EmitAtomicStore
1599  // requires them to be values of the atomic type.
1600  if (rvalue.isAggregate())
1601  return rvalue.getAggregateAddress();
1602 
1603  // Otherwise, make a temporary and materialize into it.
1604  LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
1605  AtomicInfo Atomics(CGF, TempLV);
1606  Atomics.emitCopyIntoMemory(rvalue);
1607  return TempLV.getAddress();
1608 }
1609 
1610 llvm::Value *AtomicInfo::getScalarRValValueOrNull(RValue RVal) const {
1611  if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple()))
1612  return RVal.getScalarVal();
1613  return nullptr;
1614 }
1615 
1616 llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal, bool CmpXchg) const {
1617  // If we've got a scalar value of the right size, try to avoid going
1618  // through memory. Floats get cast if needed by AtomicExpandPass.
1619  if (llvm::Value *Value = getScalarRValValueOrNull(RVal)) {
1620  if (!shouldCastToInt(Value->getType(), CmpXchg))
1621  return CGF.EmitToMemory(Value, ValueTy);
1622  else {
1623  llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
1624  CGF.getLLVMContext(),
1625  LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
1626  if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
1627  return CGF.Builder.CreateBitCast(Value, InputIntTy);
1628  }
1629  }
1630  // Otherwise, we need to go through memory.
1631  // Put the r-value in memory.
1632  Address Addr = materializeRValue(RVal);
1633 
1634  // Cast the temporary to the atomic int type and pull a value out.
1635  Addr = castToAtomicIntPointer(Addr);
1636  return CGF.Builder.CreateLoad(Addr);
1637 }
1638 
1639 std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
1640  llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
1641  llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
1642  // Do the atomic compare-and-exchange.
1643  Address Addr = getAtomicAddressAsAtomicIntPointer();
1644  auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr, ExpectedVal, DesiredVal,
1645  Success, Failure);
1646  // Other decoration.
1647  Inst->setVolatile(LVal.isVolatileQualified());
1648  Inst->setWeak(IsWeak);
1649 
1650  // Okay, turn that back into the original value type.
1651  auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
1652  auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
1653  return std::make_pair(PreviousVal, SuccessFailureVal);
1654 }
1655 
1656 llvm::Value *
1657 AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
1658  llvm::Value *DesiredAddr,
1659  llvm::AtomicOrdering Success,
1660  llvm::AtomicOrdering Failure) {
1661  // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
1662  // void *desired, int success, int failure);
1663  CallArgList Args;
1664  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1665  Args.add(RValue::get(getAtomicPointer()), CGF.getContext().VoidPtrTy);
1666  Args.add(RValue::get(ExpectedAddr), CGF.getContext().VoidPtrTy);
1667  Args.add(RValue::get(DesiredAddr), CGF.getContext().VoidPtrTy);
1668  Args.add(RValue::get(
1669  llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Success))),
1670  CGF.getContext().IntTy);
1671  Args.add(RValue::get(
1672  llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Failure))),
1673  CGF.getContext().IntTy);
1674  auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
1675  CGF.getContext().BoolTy, Args);
1676 
1677  return SuccessFailureRVal.getScalarVal();
1678 }
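// Illustrative sketch, not from this file: source-level shape of the libcall
// emitted above (names and sizes hypothetical).
//
//   struct Wide { char b[32]; };
//   _Atomic struct Wide obj;
//   struct Wide expected, desired;
//   // lowers roughly to:
//   //   bool ok = __atomic_compare_exchange(sizeof(obj), &obj, &expected,
//   //                                       &desired, __ATOMIC_SEQ_CST,
//   //                                       __ATOMIC_SEQ_CST);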
1679 
1680 std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
1681  RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
1682  llvm::AtomicOrdering Failure, bool IsWeak) {
1683  // Check whether we should use a library call.
1684  if (shouldUseLibcall()) {
1685  // Produce a source address.
1686  Address ExpectedAddr = materializeRValue(Expected);
1687  llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);
1688  llvm::Value *DesiredPtr = materializeRValue(Desired).emitRawPointer(CGF);
1689  auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr,
1690  Success, Failure);
1691  return std::make_pair(
1692  convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
1693  SourceLocation(), /*AsValue=*/false),
1694  Res);
1695  }
1696 
1697  // If we've got a scalar value of the right size, try to avoid going
1698  // through memory.
1699  auto *ExpectedVal = convertRValueToInt(Expected, /*CmpXchg=*/true);
1700  auto *DesiredVal = convertRValueToInt(Desired, /*CmpXchg=*/true);
1701  auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
1702  Failure, IsWeak);
1703  return std::make_pair(
1704  ConvertToValueOrAtomic(Res.first, AggValueSlot::ignored(),
1705  SourceLocation(), /*AsValue=*/false,
1706  /*CmpXchg=*/true),
1707  Res.second);
1708 }
1709 
1710 static void
1711 EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
1712  const llvm::function_ref<RValue(RValue)> &UpdateOp,
1713  Address DesiredAddr) {
1714  RValue UpRVal;
1715  LValue AtomicLVal = Atomics.getAtomicLValue();
1716  LValue DesiredLVal;
1717  if (AtomicLVal.isSimple()) {
1718  UpRVal = OldRVal;
1719  DesiredLVal = CGF.MakeAddrLValue(DesiredAddr, AtomicLVal.getType());
1720  } else {
1721  // Build new lvalue for temp address.
1722  Address Ptr = Atomics.materializeRValue(OldRVal);
1723  LValue UpdateLVal;
1724  if (AtomicLVal.isBitField()) {
1725  UpdateLVal =
1726  LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
1727  AtomicLVal.getType(),
1728  AtomicLVal.getBaseInfo(),
1729  AtomicLVal.getTBAAInfo());
1730  DesiredLVal =
1731  LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1732  AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1733  AtomicLVal.getTBAAInfo());
1734  } else if (AtomicLVal.isVectorElt()) {
1735  UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
1736  AtomicLVal.getType(),
1737  AtomicLVal.getBaseInfo(),
1738  AtomicLVal.getTBAAInfo());
1739  DesiredLVal = LValue::MakeVectorElt(
1740  DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
1741  AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1742  } else {
1743  assert(AtomicLVal.isExtVectorElt());
1744  UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
1745  AtomicLVal.getType(),
1746  AtomicLVal.getBaseInfo(),
1747  AtomicLVal.getTBAAInfo());
1748  DesiredLVal = LValue::MakeExtVectorElt(
1749  DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1750  AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1751  }
1752  UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation());
1753  }
1754  // Store new value in the corresponding memory area.
1755  RValue NewRVal = UpdateOp(UpRVal);
1756  if (NewRVal.isScalar()) {
1757  CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
1758  } else {
1759  assert(NewRVal.isComplex());
1760  CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal,
1761  /*isInit=*/false);
1762  }
1763 }
1764 
1765 void AtomicInfo::EmitAtomicUpdateLibcall(
1766  llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1767  bool IsVolatile) {
1768  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1769 
1770  Address ExpectedAddr = CreateTempAlloca();
1771 
1772  EmitAtomicLoadLibcall(ExpectedAddr.emitRawPointer(CGF), AO, IsVolatile);
1773  auto *ContBB = CGF.createBasicBlock("atomic_cont");
1774  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1775  CGF.EmitBlock(ContBB);
1776  Address DesiredAddr = CreateTempAlloca();
1777  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1778  requiresMemSetZero(getAtomicAddress().getElementType())) {
1779  auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
1780  CGF.Builder.CreateStore(OldVal, DesiredAddr);
1781  }
1782  auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
1783                                            AggValueSlot::ignored(),
1784                                            SourceLocation(), /*AsValue=*/false);
1785  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
1786  llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);
1787  llvm::Value *DesiredPtr = DesiredAddr.emitRawPointer(CGF);
1788  auto *Res =
1789  EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr, AO, Failure);
1790  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1791  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1792 }
1793 
1794 void AtomicInfo::EmitAtomicUpdateOp(
1795  llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1796  bool IsVolatile) {
1797  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1798 
1799  // Do the atomic load.
1800  auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile, /*CmpXchg=*/true);
1801  // For non-simple lvalues, perform a compare-and-swap loop.
1802  auto *ContBB = CGF.createBasicBlock("atomic_cont");
1803  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1804  auto *CurBB = CGF.Builder.GetInsertBlock();
1805  CGF.EmitBlock(ContBB);
1806  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
1807  /*NumReservedValues=*/2);
1808  PHI->addIncoming(OldVal, CurBB);
1809  Address NewAtomicAddr = CreateTempAlloca();
1810  Address NewAtomicIntAddr =
1811  shouldCastToInt(NewAtomicAddr.getElementType(), /*CmpXchg=*/true)
1812  ? castToAtomicIntPointer(NewAtomicAddr)
1813  : NewAtomicAddr;
1814 
1815  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1816  requiresMemSetZero(getAtomicAddress().getElementType())) {
1817  CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
1818  }
1819  auto OldRVal = ConvertToValueOrAtomic(PHI, AggValueSlot::ignored(),
1820  SourceLocation(), /*AsValue=*/false,
1821  /*CmpXchg=*/true);
1822  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
1823  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
1824  // Try to write new value using cmpxchg operation.
1825  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1826  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
1827  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1828  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1829 }
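// Illustrative sketch, not from this file: the cmpxchg loop constructed above
// corresponds roughly to this source-level pattern, where UpdateOp is applied
// to the previously loaded value until the exchange succeeds:
//
//   T old = atomic_load(&obj);
//   T desired;
//   do {
//     desired = UpdateOp(old);
//   } while (!atomic_compare_exchange_strong(&obj, &old, desired));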
1830 
1831 static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
1832  RValue UpdateRVal, Address DesiredAddr) {
1833  LValue AtomicLVal = Atomics.getAtomicLValue();
1834  LValue DesiredLVal;
1835  // Build new lvalue for temp address.
1836  if (AtomicLVal.isBitField()) {
1837  DesiredLVal =
1838  LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1839  AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1840  AtomicLVal.getTBAAInfo());
1841  } else if (AtomicLVal.isVectorElt()) {
1842  DesiredLVal =
1843  LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
1844  AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1845  AtomicLVal.getTBAAInfo());
1846  } else {
1847  assert(AtomicLVal.isExtVectorElt());
1848  DesiredLVal = LValue::MakeExtVectorElt(
1849  DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1850  AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1851  }
1852  // Store new value in the corresponding memory area.
1853  assert(UpdateRVal.isScalar());
1854  CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal);
1855 }
1856 
1857 void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
1858  RValue UpdateRVal, bool IsVolatile) {
1859  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1860 
1861  Address ExpectedAddr = CreateTempAlloca();
1862 
1863  EmitAtomicLoadLibcall(ExpectedAddr.emitRawPointer(CGF), AO, IsVolatile);
1864  auto *ContBB = CGF.createBasicBlock("atomic_cont");
1865  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1866  CGF.EmitBlock(ContBB);
1867  Address DesiredAddr = CreateTempAlloca();
1868  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1869  requiresMemSetZero(getAtomicAddress().getElementType())) {
1870  auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
1871  CGF.Builder.CreateStore(OldVal, DesiredAddr);
1872  }
1873  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
1874  llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);
1875  llvm::Value *DesiredPtr = DesiredAddr.emitRawPointer(CGF);
1876  auto *Res =
1877  EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr, AO, Failure);
1878  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1879  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1880 }
1881 
1882 void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
1883  bool IsVolatile) {
1884  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1885 
1886  // Do the atomic load.
1887  auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile, /*CmpXchg=*/true);
1888  // For non-simple lvalues, perform a compare-and-swap loop.
1889  auto *ContBB = CGF.createBasicBlock("atomic_cont");
1890  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1891  auto *CurBB = CGF.Builder.GetInsertBlock();
1892  CGF.EmitBlock(ContBB);
1893  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
1894  /*NumReservedValues=*/2);
1895  PHI->addIncoming(OldVal, CurBB);
1896  Address NewAtomicAddr = CreateTempAlloca();
1897  Address NewAtomicIntAddr = castToAtomicIntPointer(NewAtomicAddr);
1898  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1899  requiresMemSetZero(getAtomicAddress().getElementType())) {
1900  CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
1901  }
1902  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
1903  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
1904  // Try to write new value using cmpxchg operation.
1905  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1906  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
1907  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1908  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1909 }
1910 
1911 void AtomicInfo::EmitAtomicUpdate(
1912  llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1913  bool IsVolatile) {
1914  if (shouldUseLibcall()) {
1915  EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
1916  } else {
1917  EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
1918  }
1919 }
1920 
1921 void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
1922  bool IsVolatile) {
1923  if (shouldUseLibcall()) {
1924  EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
1925  } else {
1926  EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
1927  }
1928 }
1929 
1930 void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
1931                                       bool isInit) {
1932  bool IsVolatile = lvalue.isVolatileQualified();
1933  llvm::AtomicOrdering AO;
1934  if (lvalue.getType()->isAtomicType()) {
1935  AO = llvm::AtomicOrdering::SequentiallyConsistent;
1936  } else {
1937  AO = llvm::AtomicOrdering::Release;
1938  IsVolatile = true;
1939  }
1940  return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
1941 }
1942 
1943 /// Emit a store to an l-value of atomic type.
1944 ///
1945 /// Note that the r-value is expected to be an r-value *of the atomic
1946 /// type*; this means that for aggregate r-values, it should include
1947 /// storage for any padding that was necessary.
1948 void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
1949                                       llvm::AtomicOrdering AO, bool IsVolatile,
1950  bool isInit) {
1951  // If this is an aggregate r-value, it should agree in type except
1952  // maybe for address-space qualification.
1953  assert(!rvalue.isAggregate() ||
1954  rvalue.getAggregateAddress().getElementType() ==
1955  dest.getAddress().getElementType());
1956 
1957  AtomicInfo atomics(*this, dest);
1958  LValue LVal = atomics.getAtomicLValue();
1959 
1960  // If this is an initialization, just put the value there normally.
1961  if (LVal.isSimple()) {
1962  if (isInit) {
1963  atomics.emitCopyIntoMemory(rvalue);
1964  return;
1965  }
1966 
1967  // Check whether we should use a library call.
1968  if (atomics.shouldUseLibcall()) {
1969  // Produce a source address.
1970  Address srcAddr = atomics.materializeRValue(rvalue);
1971 
1972  // void __atomic_store(size_t size, void *mem, void *val, int order)
1973  CallArgList args;
1974  args.add(RValue::get(atomics.getAtomicSizeValue()),
1975  getContext().getSizeType());
1976  args.add(RValue::get(atomics.getAtomicPointer()), getContext().VoidPtrTy);
1977  args.add(RValue::get(srcAddr.emitRawPointer(*this)),
1978  getContext().VoidPtrTy);
1979  args.add(
1980  RValue::get(llvm::ConstantInt::get(IntTy, (int)llvm::toCABI(AO))),
1981  getContext().IntTy);
1982  emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
1983  return;
1984  }
1985 
1986  // Okay, we're doing this natively.
1987  llvm::Value *ValToStore = atomics.convertRValueToInt(rvalue);
1988 
1989  // Do the atomic store.
1990  Address Addr = atomics.getAtomicAddress();
1991  if (llvm::Value *Value = atomics.getScalarRValValueOrNull(rvalue))
1992  if (shouldCastToInt(Value->getType(), /*CmpXchg=*/false)) {
1993  Addr = atomics.castToAtomicIntPointer(Addr);
1994  ValToStore = Builder.CreateIntCast(ValToStore, Addr.getElementType(),
1995  /*isSigned=*/false);
1996  }
1997  llvm::StoreInst *store = Builder.CreateStore(ValToStore, Addr);
1998 
1999  if (AO == llvm::AtomicOrdering::Acquire)
2000  AO = llvm::AtomicOrdering::Monotonic;
2001  else if (AO == llvm::AtomicOrdering::AcquireRelease)
2002  AO = llvm::AtomicOrdering::Release;
2003  // Initializations don't need to be atomic.
2004  if (!isInit)
2005  store->setAtomic(AO);
2006 
2007  // Other decoration.
2008  if (IsVolatile)
2009  store->setVolatile(true);
2010  CGM.DecorateInstructionWithTBAA(store, dest.getTBAAInfo());
2011  return;
2012  }
2013 
2014  // Emit simple atomic update operation.
2015  atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
2016 }
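// Illustrative sketch, not from this file: the ordering clamp above reflects
// that a store may not use acquire or acq_rel ordering, so those are lowered
// to monotonic and release respectively. For over-sized atomics the store
// instead goes through the generic libcall, roughly:
//
//   struct Big { char bytes[64]; };   // hypothetical non-lock-free type
//   _Atomic struct Big g;
//   struct Big v;
//   g = v;  // lowers (via a temporary copy) to
//           //   __atomic_store(sizeof(g), &g, &tmp, __ATOMIC_SEQ_CST);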
2017 
2018 /// Emit a compare-and-exchange op for atomic type.
2019 ///
2020 std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
2021  LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
2022  llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
2023  AggValueSlot Slot) {
2024  // If this is an aggregate r-value, it should agree in type except
2025  // maybe for address-space qualification.
2026  assert(!Expected.isAggregate() ||
2027  Expected.getAggregateAddress().getElementType() ==
2028  Obj.getAddress().getElementType());
2029  assert(!Desired.isAggregate() ||
2030  Desired.getAggregateAddress().getElementType() ==
2031  Obj.getAddress().getElementType());
2032  AtomicInfo Atomics(*this, Obj);
2033 
2034  return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
2035  IsWeak);
2036 }
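// Illustrative sketch, not from this file: the operation emitted here has the
// same semantics as a C11 strong compare-exchange, e.g.
//
//   _Atomic int a;
//   int expected = 0;
//   bool ok = __c11_atomic_compare_exchange_strong(
//       &a, &expected, 1, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);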
2037 
2038 void CodeGenFunction::EmitAtomicUpdate(
2039     LValue LVal, llvm::AtomicOrdering AO,
2040  const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
2041  AtomicInfo Atomics(*this, LVal);
2042  Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
2043 }
2044 
2045 void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
2046  AtomicInfo atomics(*this, dest);
2047 
2048  switch (atomics.getEvaluationKind()) {
2049  case TEK_Scalar: {
2050  llvm::Value *value = EmitScalarExpr(init);
2051  atomics.emitCopyIntoMemory(RValue::get(value));
2052  return;
2053  }
2054 
2055  case TEK_Complex: {
2056  ComplexPairTy value = EmitComplexExpr(init);
2057  atomics.emitCopyIntoMemory(RValue::getComplex(value));
2058  return;
2059  }
2060 
2061  case TEK_Aggregate: {
2062  // Fix up the destination if the initializer isn't an expression
2063  // of atomic type.
2064  bool Zeroed = false;
2065  if (!init->getType()->isAtomicType()) {
2066  Zeroed = atomics.emitMemSetZeroIfNecessary();
2067  dest = atomics.projectValue();
2068  }
2069 
2070  // Evaluate the expression directly into the destination.
2071  AggValueSlot slot = AggValueSlot::forLValue(
2072      dest, AggValueSlot::IsNotDestructed,
2073      AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
2074      AggValueSlot::DoesNotOverlap,
2075      Zeroed ? AggValueSlot::IsZeroed : AggValueSlot::IsNotZeroed);
2076 
2077  EmitAggExpr(init, slot);
2078  return;
2079  }
2080  }
2081  llvm_unreachable("bad evaluation kind");
2082 }
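// Illustrative sketch, not from this file: initializations handled by
// EmitAtomicInit need no atomic instructions; the object is written directly,
// with padding zeroed when the value is narrower than the atomic slot, e.g.
//
//   _Atomic int counter = 42;   // plain (non-atomic) store of 42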