//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Stmt nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/SaveAndRestore.h"
#include <optional>

using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                              Statement Emission
//===----------------------------------------------------------------------===//

namespace llvm {
extern cl::opt<bool> EnableSingleByteCoverage;
} // namespace llvm

void CodeGenFunction::EmitStopPoint(const Stmt *S) {
  if (CGDebugInfo *DI = getDebugInfo()) {
    SourceLocation Loc;
    Loc = S->getBeginLoc();
    DI->EmitLocation(Builder, Loc);

    LastStopPoint = Loc;
  }
}

void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
  assert(S && "Null statement?");
  PGO.setCurrentStmt(S);

  // These statements have their own debug info handling.
  if (EmitSimpleStmt(S, Attrs))
    return;

  // Check if we are generating unreachable code.
  if (!HaveInsertPoint()) {
    // If so, and the statement doesn't contain a label, then we do not need to
    // generate actual code. This is safe because (1) the current point is
    // unreachable, so we don't need to execute the code, and (2) we've already
    // handled the statements which update internal data structures (like the
    // local variable map) which could be used by subsequent statements.
    if (!ContainsLabel(S)) {
      // Verify that any decl statements were handled as simple, they may be in
      // scope of subsequent reachable statements.
      assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
      return;
    }

    // Otherwise, make a new block to hold the code.
    EnsureInsertPoint();
  }

  // Generate a stoppoint if we are emitting debug info.
  EmitStopPoint(S);

  // Ignore all OpenMP directives except for simd if OpenMP with Simd is
  // enabled.
  if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
    if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
      EmitSimpleOMPExecutableDirective(*D);
      return;
    }
  }

  switch (S->getStmtClass()) {
  case Stmt::NoStmtClass:
  case Stmt::CXXCatchStmtClass:
  case Stmt::SEHExceptStmtClass:
  case Stmt::SEHFinallyStmtClass:
  case Stmt::MSDependentExistsStmtClass:
    llvm_unreachable("invalid statement class to emit generically");
  case Stmt::NullStmtClass:
  case Stmt::CompoundStmtClass:
  case Stmt::DeclStmtClass:
  case Stmt::LabelStmtClass:
  case Stmt::AttributedStmtClass:
  case Stmt::GotoStmtClass:
  case Stmt::BreakStmtClass:
  case Stmt::ContinueStmtClass:
  case Stmt::DefaultStmtClass:
  case Stmt::CaseStmtClass:
  case Stmt::SEHLeaveStmtClass:
    llvm_unreachable("should have emitted these statements as simple");

#define STMT(Type, Base)
#define ABSTRACT_STMT(Op)
#define EXPR(Type, Base) \
  case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
  {
    // Remember the block we came in on.
    llvm::BasicBlock *incoming = Builder.GetInsertBlock();
    assert(incoming && "expression emission must have an insertion point");

    EmitIgnoredExpr(cast<Expr>(S));

    llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
    assert(outgoing && "expression emission cleared block!");

    // The expression emitters assume (reasonably!) that the insertion
    // point is always set.  To maintain that, the call-emission code
    // for noreturn functions has to enter a new block with no
    // predecessors.  We want to kill that block and mark the current
    // insertion point unreachable in the common case of a call like
    // "exit();".  Since expression emission doesn't otherwise create
    // blocks with no predecessors, we can just test for that.
    // However, we must be careful not to do this to our incoming
    // block, because *statement* emission does sometimes create
    // reachable blocks which will have no predecessors until later in
    // the function.  This occurs with, e.g., labels that are not
    // reachable by fallthrough.
    if (incoming != outgoing && outgoing->use_empty()) {
      outgoing->eraseFromParent();
      Builder.ClearInsertionPoint();
    }
    break;
  }
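  // Editorial note (not part of the upstream source): for a statement such
  // as "exit(0);", the call emitter enters a fresh block after the noreturn
  // call; that block has no predecessors, so the check above erases it and
  // clears the insertion point, marking the code that follows unreachable.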

  case Stmt::IndirectGotoStmtClass:
    EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;

  case Stmt::IfStmtClass:    EmitIfStmt(cast<IfStmt>(*S));              break;
  case Stmt::WhileStmtClass: EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
  case Stmt::DoStmtClass:    EmitDoStmt(cast<DoStmt>(*S), Attrs);       break;
  case Stmt::ForStmtClass:   EmitForStmt(cast<ForStmt>(*S), Attrs);     break;

  case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S));     break;

  case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S));     break;
  case Stmt::GCCAsmStmtClass: // Intentional fall-through.
  case Stmt::MSAsmStmtClass:  EmitAsmStmt(cast<AsmStmt>(*S));           break;
  case Stmt::CoroutineBodyStmtClass:
    EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
    break;
  case Stmt::CoreturnStmtClass:
    EmitCoreturnStmt(cast<CoreturnStmt>(*S));
    break;
  case Stmt::CapturedStmtClass: {
    const CapturedStmt *CS = cast<CapturedStmt>(S);
    EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
    }
    break;
  case Stmt::ObjCAtTryStmtClass:
    EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
    break;
  case Stmt::ObjCAtCatchStmtClass:
    llvm_unreachable(
        "@catch statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtFinallyStmtClass:
    llvm_unreachable(
        "@finally statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtThrowStmtClass:
    EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
    break;
  case Stmt::ObjCAtSynchronizedStmtClass:
    EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
    break;
  case Stmt::ObjCForCollectionStmtClass:
    EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
    break;
  case Stmt::ObjCAutoreleasePoolStmtClass:
    EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
    break;

  case Stmt::CXXTryStmtClass:
    EmitCXXTryStmt(cast<CXXTryStmt>(*S));
    break;
  case Stmt::CXXForRangeStmtClass:
    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
    break;
  case Stmt::SEHTryStmtClass:
    EmitSEHTryStmt(cast<SEHTryStmt>(*S));
    break;
  case Stmt::OMPMetaDirectiveClass:
    EmitOMPMetaDirective(cast<OMPMetaDirective>(*S));
    break;
  case Stmt::OMPCanonicalLoopClass:
    EmitOMPCanonicalLoop(cast<OMPCanonicalLoop>(S));
    break;
  case Stmt::OMPParallelDirectiveClass:
    EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
    break;
  case Stmt::OMPSimdDirectiveClass:
    EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
    break;
  case Stmt::OMPTileDirectiveClass:
    EmitOMPTileDirective(cast<OMPTileDirective>(*S));
    break;
  case Stmt::OMPUnrollDirectiveClass:
    EmitOMPUnrollDirective(cast<OMPUnrollDirective>(*S));
    break;
  case Stmt::OMPForDirectiveClass:
    EmitOMPForDirective(cast<OMPForDirective>(*S));
    break;
  case Stmt::OMPForSimdDirectiveClass:
    EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
    break;
  case Stmt::OMPSectionsDirectiveClass:
    EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
    break;
  case Stmt::OMPSectionDirectiveClass:
    EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
    break;
  case Stmt::OMPSingleDirectiveClass:
    EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
    break;
  case Stmt::OMPMasterDirectiveClass:
    EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
    break;
  case Stmt::OMPCriticalDirectiveClass:
    EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
    break;
  case Stmt::OMPParallelForDirectiveClass:
    EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
    break;
  case Stmt::OMPParallelForSimdDirectiveClass:
    EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterDirectiveClass:
    EmitOMPParallelMasterDirective(cast<OMPParallelMasterDirective>(*S));
    break;
  case Stmt::OMPParallelSectionsDirectiveClass:
    EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
    break;
  case Stmt::OMPTaskDirectiveClass:
    EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
    break;
  case Stmt::OMPTaskyieldDirectiveClass:
    EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
    break;
  case Stmt::OMPErrorDirectiveClass:
    EmitOMPErrorDirective(cast<OMPErrorDirective>(*S));
    break;
  case Stmt::OMPBarrierDirectiveClass:
    EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
    break;
  case Stmt::OMPTaskwaitDirectiveClass:
    EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
    break;
  case Stmt::OMPTaskgroupDirectiveClass:
    EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
    break;
  case Stmt::OMPFlushDirectiveClass:
    EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
    break;
  case Stmt::OMPDepobjDirectiveClass:
    EmitOMPDepobjDirective(cast<OMPDepobjDirective>(*S));
    break;
  case Stmt::OMPScanDirectiveClass:
    EmitOMPScanDirective(cast<OMPScanDirective>(*S));
    break;
  case Stmt::OMPOrderedDirectiveClass:
    EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
    break;
  case Stmt::OMPAtomicDirectiveClass:
    EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
    break;
  case Stmt::OMPTargetDirectiveClass:
    EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
    break;
  case Stmt::OMPTeamsDirectiveClass:
    EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
    break;
  case Stmt::OMPCancellationPointDirectiveClass:
    EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
    break;
  case Stmt::OMPCancelDirectiveClass:
    EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
    break;
  case Stmt::OMPTargetDataDirectiveClass:
    EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
    break;
  case Stmt::OMPTargetEnterDataDirectiveClass:
    EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
    break;
  case Stmt::OMPTargetExitDataDirectiveClass:
    EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
    break;
  case Stmt::OMPTargetParallelDirectiveClass:
    EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForDirectiveClass:
    EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
    break;
  case Stmt::OMPTaskLoopDirectiveClass:
    EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
    break;
  case Stmt::OMPTaskLoopSimdDirectiveClass:
    EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopDirectiveClass:
    EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPMaskedTaskLoopDirectiveClass:
    llvm_unreachable("masked taskloop directive not supported yet.");
    break;
  case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
    EmitOMPMasterTaskLoopSimdDirective(
        cast<OMPMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
    llvm_unreachable("masked taskloop simd directive not supported yet.");
    break;
  case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
    EmitOMPParallelMasterTaskLoopDirective(
        cast<OMPParallelMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
    llvm_unreachable("parallel masked taskloop directive not supported yet.");
    break;
  case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
    EmitOMPParallelMasterTaskLoopSimdDirective(
        cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
    llvm_unreachable(
        "parallel masked taskloop simd directive not supported yet.");
    break;
  case Stmt::OMPDistributeDirectiveClass:
    EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetUpdateDirectiveClass:
    EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForDirectiveClass:
    EmitOMPDistributeParallelForDirective(
        cast<OMPDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForSimdDirectiveClass:
    EmitOMPDistributeParallelForSimdDirective(
        cast<OMPDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeSimdDirectiveClass:
    EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForSimdDirectiveClass:
    EmitOMPTargetParallelForSimdDirective(
        cast<OMPTargetParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetSimdDirectiveClass:
    EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeDirectiveClass:
    EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeSimdDirectiveClass:
    EmitOMPTeamsDistributeSimdDirective(
        cast<OMPTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTeamsDistributeParallelForSimdDirective(
        cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
    EmitOMPTeamsDistributeParallelForDirective(
        cast<OMPTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDirectiveClass:
    EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeDirectiveClass:
    EmitOMPTargetTeamsDistributeDirective(
        cast<OMPTargetTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForDirective(
        cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForSimdDirective(
        cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeSimdDirective(
        cast<OMPTargetTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPInteropDirectiveClass:
    EmitOMPInteropDirective(cast<OMPInteropDirective>(*S));
    break;
  case Stmt::OMPDispatchDirectiveClass:
    CGM.ErrorUnsupported(S, "OpenMP dispatch directive");
    break;
  case Stmt::OMPScopeDirectiveClass:
    llvm_unreachable("scope not supported with FE outlining");
  case Stmt::OMPMaskedDirectiveClass:
    EmitOMPMaskedDirective(cast<OMPMaskedDirective>(*S));
    break;
  case Stmt::OMPGenericLoopDirectiveClass:
    EmitOMPGenericLoopDirective(cast<OMPGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTeamsGenericLoopDirectiveClass:
    EmitOMPTeamsGenericLoopDirective(cast<OMPTeamsGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
    EmitOMPTargetTeamsGenericLoopDirective(
        cast<OMPTargetTeamsGenericLoopDirective>(*S));
    break;
  case Stmt::OMPParallelGenericLoopDirectiveClass:
    EmitOMPParallelGenericLoopDirective(
        cast<OMPParallelGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
    EmitOMPTargetParallelGenericLoopDirective(
        cast<OMPTargetParallelGenericLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedDirectiveClass:
    EmitOMPParallelMaskedDirective(cast<OMPParallelMaskedDirective>(*S));
    break;
  case Stmt::OpenACCComputeConstructClass:
    EmitOpenACCComputeConstruct(cast<OpenACCComputeConstruct>(*S));
    break;
  }
}

bool CodeGenFunction::EmitSimpleStmt(const Stmt *S,
                                     ArrayRef<const Attr *> Attrs) {
  switch (S->getStmtClass()) {
  default:
    return false;
  case Stmt::NullStmtClass:
    break;
  case Stmt::CompoundStmtClass:
    EmitCompoundStmt(cast<CompoundStmt>(*S));
    break;
  case Stmt::DeclStmtClass:
    EmitDeclStmt(cast<DeclStmt>(*S));
    break;
  case Stmt::LabelStmtClass:
    EmitLabelStmt(cast<LabelStmt>(*S));
    break;
  case Stmt::AttributedStmtClass:
    EmitAttributedStmt(cast<AttributedStmt>(*S));
    break;
  case Stmt::GotoStmtClass:
    EmitGotoStmt(cast<GotoStmt>(*S));
    break;
  case Stmt::BreakStmtClass:
    EmitBreakStmt(cast<BreakStmt>(*S));
    break;
  case Stmt::ContinueStmtClass:
    EmitContinueStmt(cast<ContinueStmt>(*S));
    break;
  case Stmt::DefaultStmtClass:
    EmitDefaultStmt(cast<DefaultStmt>(*S), Attrs);
    break;
  case Stmt::CaseStmtClass:
    EmitCaseStmt(cast<CaseStmt>(*S), Attrs);
    break;
  case Stmt::SEHLeaveStmtClass:
    EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S));
    break;
  }
  return true;
}
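
// Editorial note (not part of the upstream source): the "simple" statements
// above are emitted even when the current point is unreachable, because
// labels, declarations, and jumps affect blocks and scope bookkeeping that
// later code may rely on, e.g. "if (0) { lbl: f(); }" still needs a block
// for "lbl:" since it is reachable via goto.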

/// EmitCompoundStmt - Emit a compound statement {..} node.  If GetLast is true,
/// this captures the expression result of the last sub-statement and returns it
/// (for use by the statement expression extension).
Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
                                          AggValueSlot AggSlot) {
  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(), S.getLBracLoc(),
                                "LLVM IR generation of compound statement ('{}')");

  // Keep track of the current cleanup stack depth, including debug scopes.
  LexicalScope Scope(*this, S.getSourceRange());

  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
}

Address
CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
                                              bool GetLast,
                                              AggValueSlot AggSlot) {

  const Stmt *ExprResult = S.getStmtExprResult();
  assert((!GetLast || (GetLast && ExprResult)) &&
         "If GetLast is true then the CompoundStmt must have a StmtExprResult");

  Address RetAlloca = Address::invalid();

  for (auto *CurStmt : S.body()) {
    if (GetLast && ExprResult == CurStmt) {
      // We have to special case labels here.  They are statements, but when put
      // at the end of a statement expression, they yield the value of their
      // subexpression.  Handle this by walking through all labels we encounter,
      // emitting them before we evaluate the subexpr.
      // Similar issues arise for attributed statements.
      while (!isa<Expr>(ExprResult)) {
        if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
          EmitLabel(LS->getDecl());
          ExprResult = LS->getSubStmt();
        } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
          // FIXME: Update this if we ever have attributes that affect the
          // semantics of an expression.
          ExprResult = AS->getSubStmt();
        } else {
          llvm_unreachable("unknown value statement");
        }
      }

      EnsureInsertPoint();

      const Expr *E = cast<Expr>(ExprResult);
      QualType ExprTy = E->getType();
      if (hasAggregateEvaluationKind(ExprTy)) {
        EmitAggExpr(E, AggSlot);
      } else {
        // We can't return an RValue here because there might be cleanups at
        // the end of the StmtExpr.  Because of that, we have to emit the result
        // here into a temporary alloca.
        RetAlloca = CreateMemTemp(ExprTy);
        EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
                         /*IsInit*/ false);
      }
    } else {
      EmitStmt(CurStmt);
    }
  }

  return RetAlloca;
}
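
// Editorial illustration (not part of the upstream source), using the GNU
// statement-expression extension the code above supports:
//   int v = ({ work(); done: compute(); });   // hypothetical snippet
// The label "done" is emitted first, and compute() is then evaluated as the
// value of the whole ({...}) expression.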

void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
  llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());

  // If there is a cleanup stack, then it isn't worth trying to
  // simplify this block (we would need to remove it from the scope map
  // and cleanup entry).
  if (!EHStack.empty())
    return;

  // Can only simplify direct branches.
  if (!BI || !BI->isUnconditional())
    return;

  // Can only simplify empty blocks.
  if (BI->getIterator() != BB->begin())
    return;

  BB->replaceAllUsesWith(BI->getSuccessor(0));
  BI->eraseFromParent();
  BB->eraseFromParent();
}
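
// Editorial illustration (not part of the upstream source): this rewrites
//   while.cond:              ; an empty forwarding block
//     br label %while.end
// by replacing all uses of %while.cond with %while.end and deleting the
// block, assuming no cleanups are on the stack.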

void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Fall out of the current block (if necessary).
  EmitBranch(BB);

  if (IsFinished && BB->use_empty()) {
    delete BB;
    return;
  }

  // Place the block after the current block, if possible, or else at
  // the end of the function.
  if (CurBB && CurBB->getParent())
    CurFn->insert(std::next(CurBB->getIterator()), BB);
  else
    CurFn->insert(CurFn->end(), BB);
  Builder.SetInsertPoint(BB);
}

void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
  // Emit a branch from the current block to the target one if this
  // was a real block.  If this was just a fall-through block after a
  // terminator, don't emit it.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (!CurBB || CurBB->getTerminator()) {
    // If there is no insert point or the previous block is already
    // terminated, don't touch it.
  } else {
    // Otherwise, create a fall-through branch.
    Builder.CreateBr(Target);
  }

  Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
  bool inserted = false;
  for (llvm::User *u : block->users()) {
    if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
      CurFn->insert(std::next(insn->getParent()->getIterator()), block);
      inserted = true;
      break;
    }
  }

  if (!inserted)
    CurFn->insert(CurFn->end(), block);

  Builder.SetInsertPoint(block);
}

CodeGenFunction::JumpDest
CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
  JumpDest &Dest = LabelMap[D];
  if (Dest.isValid()) return Dest;

  // Create, but don't insert, the new block.
  Dest = JumpDest(createBasicBlock(D->getName()),
                  EHScopeStack::stable_iterator::invalid(),
                  NextCleanupDestIndex++);
  return Dest;
}

void CodeGenFunction::EmitLabel(const LabelDecl *D) {
  // Add this label to the current lexical scope if we're within any
  // normal cleanups.  Jumps "in" to this label --- when permitted by
  // the language --- may need to be routed around such cleanups.
  if (EHStack.hasNormalCleanups() && CurLexicalScope)
    CurLexicalScope->addLabel(D);

  JumpDest &Dest = LabelMap[D];

  // If we didn't need a forward reference to this label, just go
  // ahead and create a destination at the current scope.
  if (!Dest.isValid()) {
    Dest = getJumpDestInCurrentScope(D->getName());

  // Otherwise, we need to give this label a target depth and remove
  // it from the branch-fixups list.
  } else {
    assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
    Dest.setScopeDepth(EHStack.stable_begin());
    ResolveBranchFixups(Dest.getBlock());
  }

  EmitBlock(Dest.getBlock());

  // Emit debug info for labels.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
      DI->setLocation(D->getLocation());
      DI->EmitLabel(D, Builder);
    }
  }

  incrementProfileCounter(D->getStmt());
}

/// Change the cleanup scope of the labels in this lexical scope to
/// match the scope of the enclosing context.
void CodeGenFunction::LexicalScope::rescopeLabels() {
  assert(!Labels.empty());
  EHScopeStack::stable_iterator innermostScope
    = CGF.EHStack.getInnermostNormalCleanup();

  // Change the scope depth of all the labels.
  for (SmallVectorImpl<const LabelDecl*>::const_iterator
         i = Labels.begin(), e = Labels.end(); i != e; ++i) {
    assert(CGF.LabelMap.count(*i));
    JumpDest &dest = CGF.LabelMap.find(*i)->second;
    assert(dest.getScopeDepth().isValid());
    assert(innermostScope.encloses(dest.getScopeDepth()));
    dest.setScopeDepth(innermostScope);
  }

  // Reparent the labels if the new scope also has cleanups.
  if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
    ParentScope->Labels.append(Labels.begin(), Labels.end());
  }
}


void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
  EmitLabel(S.getDecl());

  // IsEHa - emit eha.scope.begin if it's a side entry of a scope
  if (getLangOpts().EHAsynch && S.isSideEntry())
    EmitSehCppScopeBegin();

  EmitStmt(S.getSubStmt());
}

void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
  bool nomerge = false;
  bool noinline = false;
  bool alwaysinline = false;
  const CallExpr *musttail = nullptr;

  for (const auto *A : S.getAttrs()) {
    switch (A->getKind()) {
    default:
      break;
    case attr::NoMerge:
      nomerge = true;
      break;
    case attr::NoInline:
      noinline = true;
      break;
    case attr::AlwaysInline:
      alwaysinline = true;
      break;
    case attr::MustTail: {
      const Stmt *Sub = S.getSubStmt();
      const ReturnStmt *R = cast<ReturnStmt>(Sub);
      musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens());
    } break;
    case attr::CXXAssume: {
      const Expr *Assumption = cast<CXXAssumeAttr>(A)->getAssumption();
      if (getLangOpts().CXXAssumptions &&
          !Assumption->HasSideEffects(getContext())) {
        llvm::Value *AssumptionVal = EvaluateExprAsBool(Assumption);
        Builder.CreateAssumption(AssumptionVal);
      }
    } break;
    }
  }
  SaveAndRestore save_nomerge(InNoMergeAttributedStmt, nomerge);
  SaveAndRestore save_noinline(InNoInlineAttributedStmt, noinline);
  SaveAndRestore save_alwaysinline(InAlwaysInlineAttributedStmt, alwaysinline);
  SaveAndRestore save_musttail(MustTailCall, musttail);
  EmitStmt(S.getSubStmt(), S.getAttrs());
}
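
// Editorial illustration (not part of the upstream source) of statement
// attributes the loop above recognizes:
//   [[clang::musttail]] return f(n);   // attr::MustTail
//   [[clang::nomerge]] g();            // attr::NoMerge
//   [[assume(n > 0)]];                 // attr::CXXAssume (C++23)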

void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
}


void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
  if (const LabelDecl *Target = S.getConstantTarget()) {
    EmitBranchThroughCleanup(getJumpDestForLabel(Target));
    return;
  }

  // Ensure that we have an i8* for our PHI node.
  llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
                                         Int8PtrTy, "addr");
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Get the basic block for the indirect goto.
  llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();

  // The first instruction in the block has to be the PHI for the switch dest,
  // add an entry for this branch.
  cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);

  EmitBranch(IndGotoBB);
}
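
// Editorial illustration (not part of the upstream source), using the GNU
// labels-as-values extension this lowers:
//   void *tgt = cond ? &&a : &&b;
//   goto *tgt;         // each such goto adds a PHI edge into the shared
//   a: ...; b: ...;    // indirect-goto dispatch block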

void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
  // The else branch of a consteval if statement is always the only branch that
  // can be runtime evaluated.
  if (S.isConsteval()) {
    const Stmt *Executed = S.isNegatedConsteval() ? S.getThen() : S.getElse();
    if (Executed) {
      RunCleanupsScope ExecutedScope(*this);
      EmitStmt(Executed);
    }
    return;
  }

  // C99 6.8.4.1: The first substatement is executed if the expression compares
  // unequal to 0.  The condition must be a scalar type.
  LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());

  if (S.getInit())
    EmitStmt(S.getInit());

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
  bool CondConstant;
  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
                                   S.isConstexpr())) {
    // Figure out which block (then or else) is executed.
    const Stmt *Executed = S.getThen();
    const Stmt *Skipped = S.getElse();
    if (!CondConstant)  // Condition false?
      std::swap(Executed, Skipped);

    // If the skipped block has no labels in it, just emit the executed block.
    // This avoids emitting dead code and simplifies the CFG substantially.
    if (S.isConstexpr() || !ContainsLabel(Skipped)) {
      if (CondConstant)
        incrementProfileCounter(&S);
      if (Executed) {
        RunCleanupsScope ExecutedScope(*this);
        EmitStmt(Executed);
      }
      return;
    }
  }

  // Otherwise, the condition did not fold, or we couldn't elide it.  Just emit
  // the conditional branch.
  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  llvm::BasicBlock *ElseBlock = ContBlock;
  if (S.getElse())
    ElseBlock = createBasicBlock("if.else");

  // Prefer the PGO based weights over the likelihood attribute.
  // When the build isn't optimized the metadata isn't used, so don't generate
  // it.
  // Also, differentiate between disabled PGO and a never executed branch with
  // PGO. Assuming PGO is in use:
  // - we want to ignore the [[likely]] attribute if the branch is never
  // executed,
  // - assuming the profile is poor, preserving the attribute may still be
  // beneficial.
  // As an approximation, preserve the attribute only if both the branch and the
  // parent context were not executed.
  Stmt::Likelihood LH = Stmt::LH_None;
  uint64_t ThenCount = getProfileCount(S.getThen());
  if (!ThenCount && !getCurrentProfileCount() &&
      CGM.getCodeGenOpts().OptimizationLevel)
    LH = Stmt::getLikelihood(S.getThen(), S.getElse());

  // When measuring MC/DC, always fully evaluate the condition up front using
  // EvaluateExprAsBool() so that the test vector bitmap can be updated prior to
  // executing the body of the if.then or if.else. This is useful for when
  // there is a 'return' within the body, but this is particularly beneficial
  // when one if-stmt is nested within another if-stmt so that all of the MC/DC
  // updates are kept linear and consistent.
  if (!CGM.getCodeGenOpts().MCDCCoverage)
    EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, ThenCount, LH);
  else {
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    Builder.CreateCondBr(BoolCondVal, ThenBlock, ElseBlock);
  }

  // Emit the 'then' code.
  EmitBlock(ThenBlock);
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getThen());
  else
    incrementProfileCounter(&S);
  {
    RunCleanupsScope ThenScope(*this);
    EmitStmt(S.getThen());
  }
  EmitBranch(ContBlock);

  // Emit the 'else' code if present.
  if (const Stmt *Else = S.getElse()) {
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBlock(ElseBlock);
    }
    // When single byte coverage mode is enabled, add a counter to else block.
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(Else);
    {
      RunCleanupsScope ElseScope(*this);
      EmitStmt(Else);
    }
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBranch(ContBlock);
    }
  }

  // Emit the continuation block for code after the if.
  EmitBlock(ContBlock, true);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);
}

bool CodeGenFunction::checkIfLoopMustProgress(const Expr *ControllingExpression,
                                              bool HasEmptyBody) {
  if (CGM.getCodeGenOpts().getFiniteLoops() ==
      CodeGenOptions::FiniteLoopsKind::Never)
    return false;

  // Now apply rules for plain C (see 6.8.5.6 in C11).
  // Loops with constant conditions do not have to make progress in any C
  // version.
  // As an extension, we consider loops whose constant expression
  // can be constant-folded.
  Expr::EvalResult Result;
  bool CondIsConstInt =
      !ControllingExpression ||
      (ControllingExpression->EvaluateAsInt(Result, getContext()) &&
       Result.Val.isInt());

  bool CondIsTrue = CondIsConstInt && (!ControllingExpression ||
                                       Result.Val.getInt().getBoolValue());

  // Loops with non-constant conditions must make progress in C11 and later.
  if (getLangOpts().C11 && !CondIsConstInt)
    return true;

  // [C++26][intro.progress] (DR)
  // The implementation may assume that any thread will eventually do one of the
  // following:
  // [...]
  // - continue execution of a trivial infinite loop ([stmt.iter.general]).
  if (CGM.getCodeGenOpts().getFiniteLoops() ==
          CodeGenOptions::FiniteLoopsKind::Always ||
      getLangOpts().CPlusPlus11) {
    if (HasEmptyBody && CondIsTrue) {
      CurFn->removeFnAttr(llvm::Attribute::MustProgress);
      return false;
    }
    return true;
  }
  return false;
}
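
// Editorial illustration (not part of the upstream source):
//   while (keep_going) { }  // non-constant condition: must progress in C11+
//   while (1) { }           // constant-true: may spin forever in C, and in
//                           // C++26 when the loop is trivially empty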

// [C++26][stmt.iter.general] (DR)
// A trivially empty iteration statement is an iteration statement matching one
// of the following forms:
//  - while ( expression ) ;
//  - while ( expression ) { }
//  - do ; while ( expression ) ;
//  - do { } while ( expression ) ;
//  - for ( init-statement expression(opt); ) ;
//  - for ( init-statement expression(opt); ) { }
template <typename LoopStmt> static bool hasEmptyLoopBody(const LoopStmt &S) {
  if constexpr (std::is_same_v<LoopStmt, ForStmt>) {
    if (S.getInc())
      return false;
  }
  const Stmt *Body = S.getBody();
  if (!Body || isa<NullStmt>(Body))
    return true;
  if (const CompoundStmt *Compound = dyn_cast<CompoundStmt>(Body))
    return Compound->body_empty();
  return false;
}
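
// Editorial illustration (not part of the upstream source):
//   for (;;);        // trivially empty: no increment, null body
//   for (;; ++i);    // not trivially empty: has an increment
//   while (f()) { }  // matches the form, but the condition is non-constant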

void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
                                    ArrayRef<const Attr *> WhileAttrs) {
  // Emit the header for the loop, which will also become
  // the continue target.
  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
  EmitBlock(LoopHeader.getBlock());

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(emitConvergenceLoopToken(
        LoopHeader.getBlock(), ConvergenceTokenStack.back()));

  // Create an exit block for when the condition fails, which will
  // also become the break target.
  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));

  // C++ [stmt.while]p2:
  //   When the condition of a while statement is a declaration, the
  //   scope of the variable that is declared extends from its point
  //   of declaration (3.3.2) to the end of the while statement.
  //   [...]
  //   The object created in a condition is destroyed and created
  //   with each iteration of the loop.
  RunCleanupsScope ConditionScope(*this);

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // Evaluate the conditional in the while header.  C99 6.8.5.1: The
  // evaluation of the controlling expression takes place before each
  // execution of the loop body.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  // while(1) is common, avoid extra exit blocks.  Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool EmitBoolCondBranch = !C || !C->isOne();
  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
                 WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));

  // When single byte coverage mode is enabled, add a counter to loop condition.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getCond());

  // As long as the condition is true, go to the loop body.
  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
  if (EmitBoolCondBranch) {
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    if (ConditionScope.requiresCleanups())
      ExitBlock = createBasicBlock("while.exit");
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));
    Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }
  } else if (const Attr *A = Stmt::getLikelihoodAttr(S.getBody())) {
    CGM.getDiags().Report(A->getLocation(),
                          diag::warn_attribute_has_no_effect_on_infinite_loop)
        << A << A->getRange();
    CGM.getDiags().Report(
        S.getWhileLoc(),
        diag::note_attribute_has_no_effect_on_infinite_loop_here)
        << SourceRange(S.getWhileLoc(), S.getRParenLoc());
  }

  // Emit the loop body.  We have to emit this in a cleanup scope
  // because it might be a singleton DeclStmt.
  {
    RunCleanupsScope BodyScope(*this);
    EmitBlock(LoopBody);
    // When single byte coverage mode is enabled, add a counter to the body.
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(S.getBody());
    else
      incrementProfileCounter(&S);
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  // Immediately force cleanup.
  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  // Branch to the loop header again.
  EmitBranch(LoopHeader.getBlock());

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock(), true);

  // The LoopHeader typically is just a branch if we skipped emitting
  // a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopHeader.getBlock());

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();
}

void CodeGenFunction::EmitDoStmt(const DoStmt &S,
                                 ArrayRef<const Attr *> DoAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");

  uint64_t ParentCount = getCurrentProfileCount();

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));

  // Emit the body of the loop.
  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");

  if (llvm::EnableSingleByteCoverage)
    EmitBlockWithFallThrough(LoopBody, S.getBody());
  else
    EmitBlockWithFallThrough(LoopBody, &S);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(
        emitConvergenceLoopToken(LoopBody, ConvergenceTokenStack.back()));

  {
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  EmitBlock(LoopCond.getBlock());
  // When single byte coverage mode is enabled, add a counter to loop condition.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getCond());

  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
  // after each execution of the loop body."

  // Evaluate the conditional in the while header.
  // C99 6.8.5p2/p4: The first substatement is executed if the expression
  // compares unequal to 0.  The condition must be a scalar type.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  BreakContinueStack.pop_back();

  // "do {} while (0)" is common in macros, avoid extra blocks.  Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool EmitBoolCondBranch = !C || !C->isZero();

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));

  // As long as the condition is true, iterate the loop.
  if (EmitBoolCondBranch) {
    uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
    Builder.CreateCondBr(
        BoolCondVal, LoopBody, LoopExit.getBlock(),
        createProfileWeightsForLoop(S.getCond(), BackedgeCount));
  }

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock());

  // The DoCond block typically is just a branch if we skipped
  // emitting a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopCond.getBlock());

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();
}

void CodeGenFunction::EmitForStmt(const ForStmt &S,
                                  ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first part before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  JumpDest CondDest = getJumpDestInCurrentScope("for.cond");
  llvm::BasicBlock *CondBlock = CondDest.getBlock();
  EmitBlock(CondBlock);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(
        emitConvergenceLoopToken(CondBlock, ConvergenceTokenStack.back()));

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));

  // Create a cleanup scope for the condition variable cleanups.
  LexicalScope ConditionScope(*this, S.getSourceRange());

  // If the for loop doesn't have an increment we can just use the condition as
  // the continue block.  Otherwise, if there is no condition variable, we can
  // form the continue block now.  If there is a condition variable, we can't
  // form the continue block until after we've emitted the condition, because
  // the condition is in scope in the increment, but Sema's jump diagnostics
  // ensure that there are no continues from the condition variable that jump
  // to the loop increment.
  JumpDest Continue;
  if (!S.getInc())
    Continue = CondDest;
  else if (!S.getConditionVariable())
    Continue = getJumpDestInCurrentScope("for.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  if (S.getCond()) {
    // If the for statement has a condition scope, emit the local variable
    // declaration.
    if (S.getConditionVariable()) {
      EmitDecl(*S.getConditionVariable());

      // We have entered the condition variable's scope, so we're now able to
      // jump to the continue block.
      Continue = S.getInc() ? getJumpDestInCurrentScope("for.inc") : CondDest;
      BreakContinueStack.back().ContinueBlock = Continue;
    }

    // When single byte coverage mode is enabled, add a counter to loop
    // condition.
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(S.getCond());

    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    // If there are any cleanups between here and the loop-exit scope,
    // create a block to stage a loop exit along.
    if (ForScope.requiresCleanups())
      ExitBlock = createBasicBlock("for.cond.cleanup");

    // As long as the condition is true, iterate the loop.
    llvm::BasicBlock *ForBody = createBasicBlock("for.body");

    // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0.  The condition must be a scalar type.
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));

    Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }

    EmitBlock(ForBody);
  } else {
    // Treat it as a non-zero constant.  Don't even create a new block for the
    // body, just fall into it.
  }

  // When single byte coverage mode is enabled, add a counter to the body.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getBody());
  else
    incrementProfileCounter(&S);
  {
    // Create a separate cleanup scope for the body, in case it is not
    // a compound statement.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  // If there is an increment, emit it next.
  if (S.getInc()) {
    EmitBlock(Continue.getBlock());
    EmitStmt(S.getInc());
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(S.getInc());
  }

  BreakContinueStack.pop_back();

  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();
}

void
CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
                                     ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first pieces before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());
  EmitStmt(S.getRangeStmt());
  EmitStmt(S.getBeginStmt());
  EmitStmt(S.getEndStmt());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  EmitBlock(CondBlock);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(
        emitConvergenceLoopToken(CondBlock, ConvergenceTokenStack.back()));

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (ForScope.requiresCleanups())
    ExitBlock = createBasicBlock("for.cond.cleanup");

  // The loop body, consisting of the specified body and the loop variable.
  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // The body is executed if the expression, contextually converted
  // to bool, is true.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
  llvm::MDNode *Weights =
      createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
  if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
    BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
        BoolCondVal, Stmt::getLikelihood(S.getBody()));
  Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(ForBody);
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getBody());
  else
    incrementProfileCounter(&S);

  // Create a block for the increment. In case of a 'continue', we jump there.
  JumpDest Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  {
    // Create a separate cleanup scope for the loop variable and body.
    LexicalScope BodyScope(*this, S.getSourceRange());
    EmitStmt(S.getLoopVarStmt());
    EmitStmt(S.getBody());
  }

  EmitStopPoint(&S);
  // If there is an increment, emit it next.
  EmitBlock(Continue.getBlock());
  EmitStmt(S.getInc());

  BreakContinueStack.pop_back();

  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();
}

void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
  if (RV.isScalar()) {
    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
  } else if (RV.isAggregate()) {
    LValue Dest = MakeAddrLValue(ReturnValue, Ty);
    LValue Src = MakeAddrLValue(RV.getAggregateAddress(), Ty);
    EmitAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue());
  } else {
    EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
                       /*init*/ true);
  }
  EmitBranchThroughCleanup(ReturnBlock);
}

namespace {
// RAII struct used to save and restore a return statement's result expression.
struct SaveRetExprRAII {
  SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
      : OldRetExpr(CGF.RetExpr), CGF(CGF) {
    CGF.RetExpr = RetExpr;
  }
  ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
  const Expr *OldRetExpr;
  CodeGenFunction &CGF;
};
} // namespace

/// Determine if the given call uses the swiftasync calling convention.
static bool isSwiftAsyncCallee(const CallExpr *CE) {
  auto calleeQualType = CE->getCallee()->getType();
  const FunctionType *calleeType = nullptr;
  if (calleeQualType->isFunctionPointerType() ||
      calleeQualType->isFunctionReferenceType() ||
      calleeQualType->isBlockPointerType() ||
      calleeQualType->isMemberFunctionPointerType()) {
    calleeType = calleeQualType->getPointeeType()->castAs<FunctionType>();
  } else if (auto *ty = dyn_cast<FunctionType>(calleeQualType)) {
    calleeType = ty;
  } else if (auto CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
    if (auto methodDecl = CMCE->getMethodDecl()) {
      // getMethodDecl() doesn't handle member pointers at the moment.
      calleeType = methodDecl->getType()->castAs<FunctionType>();
    } else {
      return false;
    }
  } else {
    return false;
  }
  return calleeType->getCallConv() == CallingConv::CC_SwiftAsync;
}
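
// Editorial note (not part of the upstream source): in a swiftasynccall
// function, a return of the form "return other_async_fn(...);" where the
// callee is also swiftasynccall is marked as a musttail call below, so the
// callee reuses the caller's async frame instead of growing the stack.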

/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
/// if the function returns void, or may be missing one if the function returns
/// non-void.  Fun stuff :).
void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
  if (requiresReturnValueCheck()) {
    llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
    auto *SLocPtr =
        new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
                                 llvm::GlobalVariable::PrivateLinkage, SLoc);
    SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
    assert(ReturnLocation.isValid() && "No valid return location");
    Builder.CreateStore(SLocPtr, ReturnLocation);
  }

  // Returning from an outlined SEH helper is UB, and we already warn on it.
  if (IsOutlinedSEHHelper) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
  }

  // Emit the result value, even if unused, to evaluate the side effects.
  const Expr *RV = S.getRetValue();

  // Record the result expression of the return statement. The recorded
  // expression is used to determine whether a block capture's lifetime should
  // end at the end of the full expression as opposed to the end of the scope
  // enclosing the block expression.
  //
  // This permits a small, easily-implemented exception to our over-conservative
  // rules about not jumping to statements following block literals with
  // non-trivial cleanups.
  SaveRetExprRAII SaveRetExpr(RV, *this);

  RunCleanupsScope cleanupScope(*this);
  if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
    RV = EWC->getSubExpr();

  // If we're in a swiftasynccall function, and the return expression is a
  // call to a swiftasynccall function, mark the call as the musttail call.
  std::optional<llvm::SaveAndRestore<const CallExpr *>> SaveMustTail;
  if (RV && CurFnInfo &&
      CurFnInfo->getASTCallingConvention() == CallingConv::CC_SwiftAsync) {
    if (auto CE = dyn_cast<CallExpr>(RV)) {
      if (isSwiftAsyncCallee(CE)) {
        SaveMustTail.emplace(MustTailCall, CE);
      }
    }
  }

  // FIXME: Clean this up by using an LValue for ReturnTemp,
  // EmitStoreThroughLValue, and EmitAnyExpr.
  // Check if the NRVO candidate was not globalized in OpenMP mode.
  if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
      S.getNRVOCandidate()->isNRVOVariable() &&
      (!getLangOpts().OpenMP ||
       !CGM.getOpenMPRuntime()
            .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
            .isValid())) {
    // Apply the named return value optimization for this return statement,
    // which means doing nothing: the appropriate result has already been
    // constructed into the NRVO variable.

    // If there is an NRVO flag for this variable, set it to 1 to indicate
    // that the cleanup code should not destroy the variable.
    if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
      Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
  } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
    // Make sure not to return anything, but evaluate the expression
    // for side effects.
    if (RV) {
      EmitAnyExpr(RV);
    }
  } else if (!RV) {
    // Do nothing (return value is left uninitialized)
  } else if (FnRetTy->isReferenceType()) {
    // If this function returns a reference, take the address of the expression
    // rather than the value.
    RValue Result = EmitReferenceBindingToExpr(RV);
    Builder.CreateStore(Result.getScalarVal(), ReturnValue);
  } else {
    switch (getEvaluationKind(RV->getType())) {
    case TEK_Scalar:
      Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
      break;
    case TEK_Complex:
      EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
                                /*isInit*/ true);
      break;
    case TEK_Aggregate:
      EmitAggExpr(RV, AggValueSlot::forAddr(
                          ReturnValue, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased,
                          getOverlapForReturnValue()));
      break;
    }
  }

  ++NumReturnExprs;
  if (!RV || RV->isEvaluatable(getContext()))
    ++NumSimpleReturnExprs;

  cleanupScope.ForceCleanup();
  EmitBranchThroughCleanup(ReturnBlock);
}

void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
  // As long as debug info is modeled with instructions, we have to ensure we
  // have a place to insert here and write the stop point here.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  for (const auto *I : S.decls())
    EmitDecl(*I);
}

void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
  assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
}

void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
  assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
}
1595 
1596 /// EmitCaseStmtRange - If case statement range is not too big then
1597 /// add multiple cases to switch instruction, one for each value within
1598 /// the range. If range is too big then emit "if" condition check.
1600  ArrayRef<const Attr *> Attrs) {
1601  assert(S.getRHS() && "Expected RHS value in CaseStmt");
1602 
1603  llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
1604  llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());
1605 
1606  // Emit the code for this case. We do this first to make sure it is
1607  // properly chained from our predecessor before generating the
1608  // switch machinery to enter this block.
1609  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1610  EmitBlockWithFallThrough(CaseDest, &S);
1611  EmitStmt(S.getSubStmt());
1612 
1613  // If range is empty, do nothing.
1614  if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
1615  return;
1616 
1618  llvm::APInt Range = RHS - LHS;
1619  // FIXME: parameters such as this should not be hardcoded.
1620  if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
1621  // Range is small enough to add multiple switch instruction cases.
1622  uint64_t Total = getProfileCount(&S);
1623  unsigned NCases = Range.getZExtValue() + 1;
1624  // We only have one region counter for the entire set of cases here, so we
1625  // need to divide the weights evenly between the generated cases, ensuring
1626  // that the total weight is preserved. E.g., a weight of 5 over three cases
1627  // will be distributed as weights of 2, 2, and 1.
1628  uint64_t Weight = Total / NCases, Rem = Total % NCases;
1629  for (unsigned I = 0; I != NCases; ++I) {
1630  if (SwitchWeights)
1631  SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
1632  else if (SwitchLikelihood)
1633  SwitchLikelihood->push_back(LH);
1634 
1635  if (Rem)
1636  Rem--;
1637  SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
1638  ++LHS;
1639  }
1640  return;
1641  }
1642 
1643  // The range is too big. Emit "if" condition into a new block,
1644  // making sure to save and restore the current insertion point.
1645  llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();
1646 
1647  // Push this test onto the chain of range checks (which terminates
1648  // in the default basic block). The switch's default will be changed
1649  // to the top of this chain after switch emission is complete.
1650  llvm::BasicBlock *FalseDest = CaseRangeBlock;
1651  CaseRangeBlock = createBasicBlock("sw.caserange");
1652 
1653  CurFn->insert(CurFn->end(), CaseRangeBlock);
1654  Builder.SetInsertPoint(CaseRangeBlock);
1655 
1656  // Emit range check.
1657  llvm::Value *Diff =
1658  Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
1659  llvm::Value *Cond =
1660  Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");
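// A sketch of the emitted IR (illustrative, assuming a 32-bit condition):
// for "case 10 ... 200:" the sw.caserange block contains
//   %diff = sub i32 %cond, 10
//   %inbounds = icmp ule i32 %diff, 190
//   br i1 %inbounds, label %sw.bb, label %<previous-range-or-default>
// i.e. one unsigned subtract-and-compare instead of 191 individual cases.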
1661 
1662  llvm::MDNode *Weights = nullptr;
1663  if (SwitchWeights) {
1664  uint64_t ThisCount = getProfileCount(&S);
1665  uint64_t DefaultCount = (*SwitchWeights)[0];
1666  Weights = createProfileWeights(ThisCount, DefaultCount);
1667 
1668  // Since we're chaining the switch default through each large case range, we
1669  // need to update the weight for the default, i.e., the first case, to include
1670  // this case.
1671  (*SwitchWeights)[0] += ThisCount;
1672  } else if (SwitchLikelihood)
1673  Cond = emitCondLikelihoodViaExpectIntrinsic(Cond, LH);
1674 
1675  Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);
1676 
1677  // Restore the appropriate insertion point.
1678  if (RestoreBB)
1679  Builder.SetInsertPoint(RestoreBB);
1680  else
1681  Builder.ClearInsertionPoint();
1682 }
1683 
1684 void CodeGenFunction::EmitCaseStmt(const CaseStmt &S,
1685  ArrayRef<const Attr *> Attrs) {
1686  // If there is no enclosing switch instance that we're aware of, then this
1687  // case statement and its block can be elided. This situation only happens
1688  // when we've constant-folded the switch, are emitting the constant case,
1689  // and part of the constant case includes another case statement. For
1690  // instance: switch (4) { case 4: do { case 5: } while (1); }
1691  if (!SwitchInsn) {
1692  EmitStmt(S.getSubStmt());
1693  return;
1694  }
1695 
1696  // Handle case ranges.
1697  if (S.getRHS()) {
1698  EmitCaseStmtRange(S, Attrs);
1699  return;
1700  }
1701 
1702  llvm::ConstantInt *CaseVal =
1703  Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
1704 
1705  // Emit debuginfo for the case value if it is an enum value.
1706  const ConstantExpr *CE;
1707  if (auto ICE = dyn_cast<ImplicitCastExpr>(S.getLHS()))
1708  CE = dyn_cast<ConstantExpr>(ICE->getSubExpr());
1709  else
1710  CE = dyn_cast<ConstantExpr>(S.getLHS());
1711  if (CE) {
1712  if (auto DE = dyn_cast<DeclRefExpr>(CE->getSubExpr()))
1713  if (CGDebugInfo *Dbg = getDebugInfo())
1714  if (CGM.getCodeGenOpts().hasReducedDebugInfo())
1715  Dbg->EmitGlobalVariable(DE->getDecl(),
1716  APValue(llvm::APSInt(CaseVal->getValue())));
1717  }
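// For illustration: in "switch (c) { case kRed: ... }", kRed being a
// hypothetical enumerator, the block above emits global debug info binding
// kRed to the case's constant value so debuggers can show it symbolically.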
1718 
1719  if (SwitchLikelihood)
1720  SwitchLikelihood->push_back(Stmt::getLikelihood(Attrs));
1721 
1722  // If the body of the case is just a 'break', try to not emit an empty block.
1723  // If we're profiling or we're not optimizing, leave the block in for better
1724  // debug and coverage analysis.
1725  if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
1726  CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1727  isa<BreakStmt>(S.getSubStmt())) {
1728  JumpDest Block = BreakContinueStack.back().BreakBlock;
1729 
1730  // Only do this optimization if there are no cleanups that need emitting.
1731  if (isObviouslyBranchWithoutCleanups(Block)) {
1732  if (SwitchWeights)
1733  SwitchWeights->push_back(getProfileCount(&S));
1734  SwitchInsn->addCase(CaseVal, Block.getBlock());
1735 
1736  // If there was a fallthrough into this case, make sure to redirect it to
1737  // the end of the switch as well.
1738  if (Builder.GetInsertBlock()) {
1739  Builder.CreateBr(Block.getBlock());
1740  Builder.ClearInsertionPoint();
1741  }
1742  return;
1743  }
1744  }
1745 
1746  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1747  EmitBlockWithFallThrough(CaseDest, &S);
1748  if (SwitchWeights)
1749  SwitchWeights->push_back(getProfileCount(&S));
1750  SwitchInsn->addCase(CaseVal, CaseDest);
1751 
1752  // Recursively emitting the statement is acceptable, but is not wonderful for
1753  // code where we have many case statements nested together, i.e.:
1754  // case 1:
1755  // case 2:
1756  // case 3: etc.
1757  // Handling this recursively will create a new block for each case statement
1758  // that falls through to the next case which is IR intensive. It also causes
1759  // deep recursion which can run into stack depth limitations. Handle
1760  // sequential non-range case statements specially.
1761  //
1762  // TODO When the next case has a likelihood attribute the code returns to the
1763  // recursive algorithm. Maybe improve this case if it becomes common practice
1764  // to use a lot of attributes.
1765  const CaseStmt *CurCase = &S;
1766  const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
1767 
1768  // Otherwise, iteratively add consecutive cases to this switch stmt.
1769  while (NextCase && NextCase->getRHS() == nullptr) {
1770  CurCase = NextCase;
1771  llvm::ConstantInt *CaseVal =
1772  Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
1773 
1774  if (SwitchWeights)
1775  SwitchWeights->push_back(getProfileCount(NextCase));
1776  if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
1777  CaseDest = createBasicBlock("sw.bb");
1778  EmitBlockWithFallThrough(CaseDest, CurCase);
1779  }
1780  // Since this loop is only executed when the CaseStmt has no attributes
1781  // use a hard-coded value.
1782  if (SwitchLikelihood)
1783  SwitchLikelihood->push_back(Stmt::LH_None);
1784 
1785  SwitchInsn->addCase(CaseVal, CaseDest);
1786  NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
1787  }
1788 
1789  // Generate a stop point for debug info if the case statement is
1790  // followed by a default statement. A fallthrough case before a
1791  // default case gets its own branch target.
1792  if (CurCase->getSubStmt()->getStmtClass() == Stmt::DefaultStmtClass)
1793  EmitStopPoint(CurCase);
1794 
1795  // Normal default recursion for non-cases.
1796  EmitStmt(CurCase->getSubStmt());
1797 }
1798 
1799 void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S,
1800  ArrayRef<const Attr *> Attrs) {
1801  // If there is no enclosing switch instance that we're aware of, then this
1802  // default statement can be elided. This situation only happens when we've
1803  // constant-folded the switch.
1804  if (!SwitchInsn) {
1805  EmitStmt(S.getSubStmt());
1806  return;
1807  }
1808 
1809  llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
1810  assert(DefaultBlock->empty() &&
1811  "EmitDefaultStmt: Default block already defined?");
1812 
1813  if (SwitchLikelihood)
1814  SwitchLikelihood->front() = Stmt::getLikelihood(Attrs);
1815 
1816  EmitBlockWithFallThrough(DefaultBlock, &S);
1817 
1818  EmitStmt(S.getSubStmt());
1819 }
1820 
1821 /// CollectStatementsForCase - Given the body of a 'switch' statement and a
1822 /// constant value that is being switched on, see if we can dead code eliminate
1823 /// the body of the switch to a simple series of statements to emit. Basically,
1824 /// on a switch (5) we want to find these statements:
1825 /// case 5:
1826 /// printf(...); <--
1827 /// ++i; <--
1828 /// break;
1829 ///
1830 /// and add them to the ResultStmts vector. If it is unsafe to do this
1831 /// transformation (for example, one of the elided statements contains a label
1832 /// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
1833 /// should include statements after it (e.g. the printf() line is a substmt of
1834 /// the case) then return CSFC_FallThrough. If we handled it and found a break
1835 /// statement, then return CSFC_Success.
1836 ///
1837 /// If Case is non-null, then we are looking for the specified case, checking
1838 /// that nothing we jump over contains labels. If Case is null, then we found
1839 /// the case and are looking for the break.
1840 ///
1841 /// If the recursive walk actually finds our Case, then we set FoundCase to
1842 /// true.
1843 ///
1844 enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
1845 static CSFC_Result CollectStatementsForCase(const Stmt *S,
1846  const SwitchCase *Case,
1847  bool &FoundCase,
1848  SmallVectorImpl<const Stmt*> &ResultStmts) {
1849  // If this is a null statement, just succeed.
1850  if (!S)
1851  return Case ? CSFC_Success : CSFC_FallThrough;
1852 
1853  // If this is the switchcase (case 4: or default) that we're looking for, then
1854  // we're in business. Just add the substatement.
1855  if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
1856  if (S == Case) {
1857  FoundCase = true;
1858  return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
1859  ResultStmts);
1860  }
1861 
1862  // Otherwise, this is some other case or default statement, just ignore it.
1863  return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
1864  ResultStmts);
1865  }
1866 
1867  // If we are in the live part of the code and we found our break statement,
1868  // return a success!
1869  if (!Case && isa<BreakStmt>(S))
1870  return CSFC_Success;
1871 
1872  // If this is a compound statement, then it might contain the SwitchCase,
1873  // the break, or neither.
1874  if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
1875  // Handle this as two cases: we might be looking for the SwitchCase (if so
1876  // the skipped statements must be skippable) or we might already have it.
1877  CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
1878  bool StartedInLiveCode = FoundCase;
1879  unsigned StartSize = ResultStmts.size();
1880 
1881  // If we've not found the case yet, scan through looking for it.
1882  if (Case) {
1883  // Keep track of whether we see a skipped declaration. The code could be
1884  // using the declaration even if it is skipped, so we can't optimize out
1885  // the decl if the kept statements might refer to it.
1886  bool HadSkippedDecl = false;
1887 
1888  // If we're looking for the case, just see if we can skip each of the
1889  // substatements.
1890  for (; Case && I != E; ++I) {
1891  HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);
1892 
1893  switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
1894  case CSFC_Failure: return CSFC_Failure;
1895  case CSFC_Success:
1896  // A successful result means that either 1) the statement doesn't
1897  // have the case and is skippable, or 2) it does contain the case value
1898  // and also contains the break to exit the switch. In the latter case,
1899  // we just verify the rest of the statements are elidable.
1900  if (FoundCase) {
1901  // If we found the case and skipped declarations, we can't do the
1902  // optimization.
1903  if (HadSkippedDecl)
1904  return CSFC_Failure;
1905 
1906  for (++I; I != E; ++I)
1907  if (CodeGenFunction::ContainsLabel(*I, true))
1908  return CSFC_Failure;
1909  return CSFC_Success;
1910  }
1911  break;
1912  case CSFC_FallThrough:
1913  // If we have a fallthrough condition, then we must have found the
1914  // case and started to include statements. Consider the rest of the
1915  // statements in the compound statement as candidates for inclusion.
1916  assert(FoundCase && "Didn't find case but returned fallthrough?");
1917  // We recursively found Case, so we're not looking for it anymore.
1918  Case = nullptr;
1919 
1920  // If we found the case and skipped declarations, we can't do the
1921  // optimization.
1922  if (HadSkippedDecl)
1923  return CSFC_Failure;
1924  break;
1925  }
1926  }
1927 
1928  if (!FoundCase)
1929  return CSFC_Success;
1930 
1931  assert(!HadSkippedDecl && "fallthrough after skipping decl");
1932  }
1933 
1934  // If we have statements in our range, then we know that the statements are
1935  // live and need to be added to the set of statements we're tracking.
1936  bool AnyDecls = false;
1937  for (; I != E; ++I) {
1938  AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I);
1939 
1940  switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
1941  case CSFC_Failure: return CSFC_Failure;
1942  case CSFC_FallThrough:
1943  // A fallthrough result means that the statement was simple and just
1944  // included in ResultStmts; keep adding statements afterwards.
1945  break;
1946  case CSFC_Success:
1947  // A successful result means that we found the break statement and
1948  // stopped statement inclusion. We just ensure that any leftover stmts
1949  // are skippable and return success ourselves.
1950  for (++I; I != E; ++I)
1951  if (CodeGenFunction::ContainsLabel(*I, true))
1952  return CSFC_Failure;
1953  return CSFC_Success;
1954  }
1955  }
1956 
1957  // If we're about to fall out of a scope without hitting a 'break;', we
1958  // can't perform the optimization if there were any decls in that scope
1959  // (we'd lose their end-of-lifetime).
1960  if (AnyDecls) {
1961  // If the entire compound statement was live, there's one more thing we
1962  // can try before giving up: emit the whole thing as a single statement.
1963  // We can do that unless the statement contains a 'break;'.
1964  // FIXME: Such a break must be at the end of a construct within this one.
1965  // We could emit this by just ignoring the BreakStmts entirely.
1966  if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
1967  ResultStmts.resize(StartSize);
1968  ResultStmts.push_back(S);
1969  } else {
1970  return CSFC_Failure;
1971  }
1972  }
1973 
1974  return CSFC_FallThrough;
1975  }
1976 
1977  // Okay, this is some other statement that we don't handle explicitly, like a
1978  // for statement or increment etc. If we are skipping over this statement,
1979  // just verify it doesn't have labels, which would make it invalid to elide.
1980  if (Case) {
1981  if (CodeGenFunction::ContainsLabel(S, true))
1982  return CSFC_Failure;
1983  return CSFC_Success;
1984  }
1985 
1986  // Otherwise, we want to include this statement. Everything is cool with that
1987  // so long as it doesn't contain a break out of the switch we're in.
1988  if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
1989 
1990  // Otherwise, everything is great. Include the statement and tell the caller
1991  // that we fall through and include the next statement as well.
1992  ResultStmts.push_back(S);
1993  return CSFC_FallThrough;
1994 }
1995 
1996 /// FindCaseStatementsForValue - Find the case statement being jumped to and
1997 /// then invoke CollectStatementsForCase to find the list of statements to emit
1998 /// for a switch on constant. See the comment above CollectStatementsForCase
1999 /// for more details.
2000 static bool FindCaseStatementsForValue(const SwitchStmt &S,
2001  const llvm::APSInt &ConstantCondValue,
2002  SmallVectorImpl<const Stmt*> &ResultStmts,
2003  ASTContext &C,
2004  const SwitchCase *&ResultCase) {
2005  // First step, find the switch case that is being branched to. We can do this
2006  // efficiently by scanning the SwitchCase list.
2007  const SwitchCase *Case = S.getSwitchCaseList();
2008  const DefaultStmt *DefaultCase = nullptr;
2009 
2010  for (; Case; Case = Case->getNextSwitchCase()) {
2011  // It's either a default or case. Just remember the default statement in
2012  // case we're not jumping to any numbered cases.
2013  if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
2014  DefaultCase = DS;
2015  continue;
2016  }
2017 
2018  // Check to see if this case is the one we're looking for.
2019  const CaseStmt *CS = cast<CaseStmt>(Case);
2020  // Don't handle case ranges yet.
2021  if (CS->getRHS()) return false;
2022 
2023  // If we found our case, remember it as 'case'.
2024  if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
2025  break;
2026  }
2027 
2028  // If we didn't find a matching case, we use a default if it exists, or we
2029  // elide the whole switch body!
2030  if (!Case) {
2031  // It is safe to elide the body of the switch if it doesn't contain labels
2032  // etc. If it is safe, return successfully with an empty ResultStmts list.
2033  if (!DefaultCase)
2034  return !CodeGenFunction::ContainsLabel(&S);
2035  Case = DefaultCase;
2036  }
2037 
2038  // Ok, we know which case is being jumped to, try to collect all the
2039  // statements that follow it. This can fail for a variety of reasons. Also,
2040  // check to see that the recursive walk actually found our case statement.
2041  // Insane cases like this can fail to find it in the recursive walk since we
2042  // don't handle every stmt kind:
2043  // switch (4) {
2044  // while (1) {
2045  // case 4: ...
2046  bool FoundCase = false;
2047  ResultCase = Case;
2048  return CollectStatementsForCase(S.getBody(), Case, FoundCase,
2049  ResultStmts) != CSFC_Failure &&
2050  FoundCase;
2051 }
2052 
2053 static std::optional<SmallVector<uint64_t, 16>>
2054 getLikelihoodWeights(ArrayRef<Stmt::Likelihood> Likelihoods) {
2055  // Are there enough branches to weight them?
2056  if (Likelihoods.size() <= 1)
2057  return std::nullopt;
2058 
2059  uint64_t NumUnlikely = 0;
2060  uint64_t NumNone = 0;
2061  uint64_t NumLikely = 0;
2062  for (const auto LH : Likelihoods) {
2063  switch (LH) {
2064  case Stmt::LH_Unlikely:
2065  ++NumUnlikely;
2066  break;
2067  case Stmt::LH_None:
2068  ++NumNone;
2069  break;
2070  case Stmt::LH_Likely:
2071  ++NumLikely;
2072  break;
2073  }
2074  }
2075 
2076  // Is there a likelihood attribute used?
2077  if (NumUnlikely == 0 && NumLikely == 0)
2078  return std::nullopt;
2079 
2080  // When multiple cases share the same code they can be combined during
2081  // optimization. In that case the weights of the branch will be the sum of
2082  // the individual weights. Make sure the combined sum of all neutral cases
2083  // doesn't exceed the value of a single likely attribute.
2084  // The additions both avoid divisions by 0 and make sure the weights of None
2085  // don't exceed the weight of Likely.
2086  const uint64_t Likely = INT32_MAX / (NumLikely + 2);
2087  const uint64_t None = Likely / (NumNone + 1);
2088  const uint64_t Unlikely = 0;
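// Worked example (illustrative): with one [[likely]] case and two
// unannotated cases, NumLikely == 1 and NumNone == 2, so
// Likely == INT32_MAX / 3 and None == Likely / 3; even if the optimizer
// merges both neutral cases, their combined weight (2 * None) stays below
// a single Likely weight.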
2089 
2090  SmallVector<uint64_t, 16> Result;
2091  Result.reserve(Likelihoods.size());
2092  for (const auto LH : Likelihoods) {
2093  switch (LH) {
2094  case Stmt::LH_Unlikely:
2095  Result.push_back(Unlikely);
2096  break;
2097  case Stmt::LH_None:
2098  Result.push_back(None);
2099  break;
2100  case Stmt::LH_Likely:
2101  Result.push_back(Likely);
2102  break;
2103  }
2104  }
2105 
2106  return Result;
2107 }
2108 
2109 void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
2110  // Handle nested switch statements.
2111  llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
2112  SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
2113  SmallVector<Stmt::Likelihood, 16> *SavedSwitchLikelihood = SwitchLikelihood;
2114  llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
2115 
2116  // See if we can constant fold the condition of the switch and therefore only
2117  // emit the live case statement (if any) of the switch.
2118  llvm::APSInt ConstantCondValue;
2119  if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
2120  SmallVector<const Stmt*, 4> CaseStmts;
2121  const SwitchCase *Case = nullptr;
2122  if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
2123  getContext(), Case)) {
2124  if (Case)
2125  incrementProfileCounter(Case);
2126  RunCleanupsScope ExecutedScope(*this);
2127 
2128  if (S.getInit())
2129  EmitStmt(S.getInit());
2130 
2131  // Emit the condition variable if needed inside the entire cleanup scope
2132  // used by this special case for constant folded switches.
2133  if (S.getConditionVariable())
2134  EmitDecl(*S.getConditionVariable());
2135 
2136  // At this point, we are no longer "within" a switch instance, so
2137  // we can temporarily enforce this to ensure that any embedded case
2138  // statements are not emitted.
2139  SwitchInsn = nullptr;
2140 
2141  // Okay, we can dead code eliminate everything except this case. Emit the
2142  // specified series of statements and we're good.
2143  for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
2144  EmitStmt(CaseStmts[i]);
2145  incrementProfileCounter(&S);
2146 
2147  // Now we want to restore the saved switch instance so that nested
2148  // switches continue to function properly
2149  SwitchInsn = SavedSwitchInsn;
2150 
2151  return;
2152  }
2153  }
2154 
2155  JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
2156 
2157  RunCleanupsScope ConditionScope(*this);
2158 
2159  if (S.getInit())
2160  EmitStmt(S.getInit());
2161 
2162  if (S.getConditionVariable())
2163  EmitDecl(*S.getConditionVariable());
2164  llvm::Value *CondV = EmitScalarExpr(S.getCond());
2165 
2166  // Create basic block to hold stuff that comes after switch
2167  // statement. We also need to create a default block now so that
2168  // explicit case ranges tests can have a place to jump to on
2169  // failure.
2170  llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
2171  SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
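// At this point the switch has only its default destination; as a sketch:
//   switch i32 %cond, label %sw.default [ ]
// Case destinations are added later, when EmitCaseStmt runs for each case
// in the body and calls SwitchInsn->addCase().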
2172  if (PGO.haveRegionCounts()) {
2173  // Walk the SwitchCase list to find how many there are.
2174  uint64_t DefaultCount = 0;
2175  unsigned NumCases = 0;
2176  for (const SwitchCase *Case = S.getSwitchCaseList();
2177  Case;
2178  Case = Case->getNextSwitchCase()) {
2179  if (isa<DefaultStmt>(Case))
2180  DefaultCount = getProfileCount(Case);
2181  NumCases += 1;
2182  }
2183  SwitchWeights = new SmallVector<uint64_t, 16>();
2184  SwitchWeights->reserve(NumCases);
2185  // The default needs to be first. We store the edge count, so we already
2186  // know the right weight.
2187  SwitchWeights->push_back(DefaultCount);
2188  } else if (CGM.getCodeGenOpts().OptimizationLevel) {
2189  SwitchLikelihood = new SmallVector<Stmt::Likelihood, 16>();
2190  // Initialize the default case.
2191  SwitchLikelihood->push_back(Stmt::LH_None);
2192  }
2193 
2194  CaseRangeBlock = DefaultBlock;
2195 
2196  // Clear the insertion point to indicate we are in unreachable code.
2197  Builder.ClearInsertionPoint();
2198 
2199  // All break statements jump to SwitchExit. If BreakContinueStack is non-empty
2200  // then reuse last ContinueBlock.
2201  JumpDest OuterContinue;
2202  if (!BreakContinueStack.empty())
2203  OuterContinue = BreakContinueStack.back().ContinueBlock;
2204 
2205  BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
2206 
2207  // Emit switch body.
2208  EmitStmt(S.getBody());
2209 
2210  BreakContinueStack.pop_back();
2211 
2212  // Update the default block in case explicit case range tests have
2213  // been chained on top.
2214  SwitchInsn->setDefaultDest(CaseRangeBlock);
2215 
2216  // If a default was never emitted:
2217  if (!DefaultBlock->getParent()) {
2218  // If we have cleanups, emit the default block so that there's a
2219  // place to jump through the cleanups from.
2220  if (ConditionScope.requiresCleanups()) {
2221  EmitBlock(DefaultBlock);
2222 
2223  // Otherwise, just forward the default block to the switch end.
2224  } else {
2225  DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
2226  delete DefaultBlock;
2227  }
2228  }
2229 
2230  ConditionScope.ForceCleanup();
2231 
2232  // Emit continuation.
2233  EmitBlock(SwitchExit.getBlock(), true);
2234  incrementProfileCounter(&S);
2235 
2236  // If the switch has a condition wrapped by __builtin_unpredictable,
2237  // create metadata that specifies that the switch is unpredictable.
2238  // Don't bother if not optimizing because that metadata would not be used.
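// For illustration: "switch (__builtin_unpredictable(x)) { ... }" yields a
// switch instruction carrying !unpredictable metadata.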
2239  auto *Call = dyn_cast<CallExpr>(S.getCond());
2240  if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
2241  auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
2242  if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
2243  llvm::MDBuilder MDHelper(getLLVMContext());
2244  SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
2245  MDHelper.createUnpredictable());
2246  }
2247  }
2248 
2249  if (SwitchWeights) {
2250  assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
2251  "switch weights do not match switch cases");
2252  // If there's only one jump destination there's no sense weighting it.
2253  if (SwitchWeights->size() > 1)
2254  SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2255  createProfileWeights(*SwitchWeights));
2256  delete SwitchWeights;
2257  } else if (SwitchLikelihood) {
2258  assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() &&
2259  "switch likelihoods do not match switch cases");
2260  std::optional<SmallVector<uint64_t, 16>> LHW =
2261  getLikelihoodWeights(*SwitchLikelihood);
2262  if (LHW) {
2263  llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2264  SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2265  createProfileWeights(*LHW));
2266  }
2267  delete SwitchLikelihood;
2268  }
2269  SwitchInsn = SavedSwitchInsn;
2270  SwitchWeights = SavedSwitchWeights;
2271  SwitchLikelihood = SavedSwitchLikelihood;
2272  CaseRangeBlock = SavedCRBlock;
2273 }
2274 
2275 static std::string
2276 SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
2277  SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons = nullptr) {
2278  std::string Result;
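// For illustration: 'g' is rewritten to "imr", and a multi-alternative
// constraint such as "r,m" becomes "r|m" in LLVM's constraint syntax.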
2279 
2280  while (*Constraint) {
2281  switch (*Constraint) {
2282  default:
2283  Result += Target.convertConstraint(Constraint);
2284  break;
2285  // Ignore these
2286  case '*':
2287  case '?':
2288  case '!':
2289  case '=': // Will see this and the following in mult-alt constraints.
2290  case '+':
2291  break;
2292  case '#': // Ignore the rest of the constraint alternative.
2293  while (Constraint[1] && Constraint[1] != ',')
2294  Constraint++;
2295  break;
2296  case '&':
2297  case '%':
2298  Result += *Constraint;
2299  while (Constraint[1] && Constraint[1] == *Constraint)
2300  Constraint++;
2301  break;
2302  case ',':
2303  Result += "|";
2304  break;
2305  case 'g':
2306  Result += "imr";
2307  break;
2308  case '[': {
2309  assert(OutCons &&
2310  "Must pass output names to constraints with a symbolic name");
2311  unsigned Index;
2312  bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
2313  assert(result && "Could not resolve symbolic name"); (void)result;
2314  Result += llvm::utostr(Index);
2315  break;
2316  }
2317  }
2318 
2319  Constraint++;
2320  }
2321 
2322  return Result;
2323 }
2324 
2325 /// AddVariableConstraints - Look at AsmExpr and if it is a variable declared
2326 /// as using a particular register add that as a constraint that will be used
2327 /// in this asm stmt.
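/// For illustration: given "register int X asm("eax");" used as an output,
/// a plain register constraint is narrowed to "{eax}" ("&{eax}" if the
/// output is early-clobber); for outputs the caller prepends '='.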
2328 static std::string
2329 AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
2330  const TargetInfo &Target, CodeGenModule &CGM,
2331  const AsmStmt &Stmt, const bool EarlyClobber,
2332  std::string *GCCReg = nullptr) {
2333  const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
2334  if (!AsmDeclRef)
2335  return Constraint;
2336  const ValueDecl &Value = *AsmDeclRef->getDecl();
2337  const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
2338  if (!Variable)
2339  return Constraint;
2340  if (Variable->getStorageClass() != SC_Register)
2341  return Constraint;
2342  AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
2343  if (!Attr)
2344  return Constraint;
2345  StringRef Register = Attr->getLabel();
2346  assert(Target.isValidGCCRegisterName(Register));
2347  // We're using validateOutputConstraint here because we only care if
2348  // this is a register constraint.
2349  TargetInfo::ConstraintInfo Info(Constraint, "");
2350  if (Target.validateOutputConstraint(Info) &&
2351  !Info.allowsRegister()) {
2352  CGM.ErrorUnsupported(&Stmt, "__asm__");
2353  return Constraint;
2354  }
2355  // Canonicalize the register here before returning it.
2356  Register = Target.getNormalizedGCCRegisterName(Register);
2357  if (GCCReg != nullptr)
2358  *GCCReg = Register.str();
2359  return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
2360 }
2361 
2362 std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
2363  const TargetInfo::ConstraintInfo &Info, LValue InputValue,
2364  QualType InputType, std::string &ConstraintStr, SourceLocation Loc) {
2365  if (Info.allowsRegister() || !Info.allowsMemory()) {
2366  if (CodeGenFunction::hasScalarEvaluationKind(InputType))
2367  return {EmitLoadOfLValue(InputValue, Loc).getScalarVal(), nullptr};
2368 
2369  llvm::Type *Ty = ConvertType(InputType);
2370  uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
2371  if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
2372  getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
2373  Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2374 
2375  return {Builder.CreateLoad(InputValue.getAddress().withElementType(Ty)),
2376  nullptr};
2377  }
2378  }
2379 
2380  Address Addr = InputValue.getAddress();
2381  ConstraintStr += '*';
2382  return {InputValue.getPointer(*this), Addr.getElementType()};
2383 }
2384 
2385 std::pair<llvm::Value *, llvm::Type *>
2386 CodeGenFunction::EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
2387  const Expr *InputExpr,
2388  std::string &ConstraintStr) {
2389  // If this can't be a register or memory, i.e., has to be a constant
2390  // (immediate or symbolic), try to emit it as such.
2391  if (!Info.allowsRegister() && !Info.allowsMemory()) {
2392  if (Info.requiresImmediateConstant()) {
2393  Expr::EvalResult EVResult;
2394  InputExpr->EvaluateAsRValue(EVResult, getContext(), true);
2395 
2396  llvm::APSInt IntResult;
2397  if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
2398  getContext()))
2399  return {llvm::ConstantInt::get(getLLVMContext(), IntResult), nullptr};
2400  }
2401 
2402  Expr::EvalResult Result;
2403  if (InputExpr->EvaluateAsInt(Result, getContext()))
2404  return {llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt()),
2405  nullptr};
2406  }
2407 
2408  if (Info.allowsRegister() || !Info.allowsMemory())
2409  if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
2410  return {EmitScalarExpr(InputExpr), nullptr};
2411  if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
2412  return {EmitScalarExpr(InputExpr), nullptr};
2413  InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
2414  LValue Dest = EmitLValue(InputExpr);
2415  return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
2416  InputExpr->getExprLoc());
2417 }
2418 
2419 /// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
2420 /// asm call instruction. The !srcloc MDNode contains a list of constant
2421 /// integers which are the source locations of the start of each line in the
2422 /// asm.
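/// For illustration: a two-line asm string "movl %1, %0\n\taddl $1, %0"
/// yields a !srcloc node with two integers: the raw encodings of the
/// location of the string's first byte and of the byte after the '\n'.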
2423 static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
2424  CodeGenFunction &CGF) {
2425  SmallVector<llvm::Metadata *, 8> Locs;
2426  // Add the location of the first line to the MDNode.
2427  Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
2428  CGF.Int64Ty, Str->getBeginLoc().getRawEncoding())));
2429  StringRef StrVal = Str->getString();
2430  if (!StrVal.empty()) {
2431  const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
2432  const LangOptions &LangOpts = CGF.CGM.getLangOpts();
2433  unsigned StartToken = 0;
2434  unsigned ByteOffset = 0;
2435 
2436  // Add the location of the start of each subsequent line of the asm to the
2437  // MDNode.
2438  for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
2439  if (StrVal[i] != '\n') continue;
2440  SourceLocation LineLoc = Str->getLocationOfByte(
2441  i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
2442  Locs.push_back(llvm::ConstantAsMetadata::get(
2443  llvm::ConstantInt::get(CGF.Int64Ty, LineLoc.getRawEncoding())));
2444  }
2445  }
2446 
2447  return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
2448 }
2449 
2450 static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
2451  bool HasUnwindClobber, bool ReadOnly,
2452  bool ReadNone, bool NoMerge, const AsmStmt &S,
2453  const std::vector<llvm::Type *> &ResultRegTypes,
2454  const std::vector<llvm::Type *> &ArgElemTypes,
2455  CodeGenFunction &CGF,
2456  std::vector<llvm::Value *> &RegResults) {
2457  if (!HasUnwindClobber)
2458  Result.addFnAttr(llvm::Attribute::NoUnwind);
2459 
2460  if (NoMerge)
2461  Result.addFnAttr(llvm::Attribute::NoMerge);
2462  // Attach readnone and readonly attributes.
2463  if (!HasSideEffect) {
2464  if (ReadNone)
2465  Result.setDoesNotAccessMemory();
2466  else if (ReadOnly)
2467  Result.setOnlyReadsMemory();
2468  }
2469 
2470  // Add elementtype attribute for indirect constraints.
2471  for (auto Pair : llvm::enumerate(ArgElemTypes)) {
2472  if (Pair.value()) {
2473  auto Attr = llvm::Attribute::get(
2474  CGF.getLLVMContext(), llvm::Attribute::ElementType, Pair.value());
2475  Result.addParamAttr(Pair.index(), Attr);
2476  }
2477  }
2478 
2479  // Slap the source location of the inline asm into a !srcloc metadata on the
2480  // call.
2481  if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
2482  Result.setMetadata("srcloc",
2483  getAsmSrcLocInfo(gccAsmStmt->getAsmString(), CGF));
2484  else {
2485  // At least put the line number on MS inline asm blobs.
2486  llvm::Constant *Loc =
2487  llvm::ConstantInt::get(CGF.Int64Ty, S.getAsmLoc().getRawEncoding());
2488  Result.setMetadata("srcloc",
2489  llvm::MDNode::get(CGF.getLLVMContext(),
2490  llvm::ConstantAsMetadata::get(Loc)));
2491  }
2492 
2493  if (CGF.getLangOpts().assumeFunctionsAreConvergent())
2494  // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
2495  // convergent (meaning, they may call an intrinsically convergent op, such
2496  // as bar.sync, and so can't have certain optimizations applied around
2497  // them).
2498  Result.addFnAttr(llvm::Attribute::Convergent);
2499  // Extract all of the register value results from the asm.
2500  if (ResultRegTypes.size() == 1) {
2501  RegResults.push_back(&Result);
2502  } else {
2503  for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
2504  llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
2505  RegResults.push_back(Tmp);
2506  }
2507  }
2508 }
2509 
2510 static void
2511 EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S,
2512  const llvm::ArrayRef<llvm::Value *> RegResults,
2513  const llvm::ArrayRef<llvm::Type *> ResultRegTypes,
2514  const llvm::ArrayRef<llvm::Type *> ResultTruncRegTypes,
2515  const llvm::ArrayRef<LValue> ResultRegDests,
2516  const llvm::ArrayRef<QualType> ResultRegQualTys,
2517  const llvm::BitVector &ResultTypeRequiresCast,
2518  const llvm::BitVector &ResultRegIsFlagReg) {
2519  CGBuilderTy &Builder = CGF.Builder;
2520  CodeGenModule &CGM = CGF.CGM;
2521  llvm::LLVMContext &CTX = CGF.getLLVMContext();
2522 
2523  assert(RegResults.size() == ResultRegTypes.size());
2524  assert(RegResults.size() == ResultTruncRegTypes.size());
2525  assert(RegResults.size() == ResultRegDests.size());
2526  // ResultRegDests can be also populated by addReturnRegisterOutputs() above,
2527  // in which case its size may grow.
2528  assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
2529  assert(ResultRegIsFlagReg.size() <= ResultRegDests.size());
2530 
2531  for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2532  llvm::Value *Tmp = RegResults[i];
2533  llvm::Type *TruncTy = ResultTruncRegTypes[i];
2534 
2535  if ((i < ResultRegIsFlagReg.size()) && ResultRegIsFlagReg[i]) {
2536  // Target must guarantee the Value `Tmp` here is lowered to a boolean
2537  // value.
2538  llvm::Constant *Two = llvm::ConstantInt::get(Tmp->getType(), 2);
2539  llvm::Value *IsBooleanValue =
2540  Builder.CreateCmp(llvm::CmpInst::ICMP_ULT, Tmp, Two);
2541  llvm::Function *FnAssume = CGM.getIntrinsic(llvm::Intrinsic::assume);
2542  Builder.CreateCall(FnAssume, IsBooleanValue);
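// Net effect, as a sketch: %ok = icmp ult i8 %flag, 2 followed by
// call void @llvm.assume(i1 %ok), telling the optimizer the "@cc" flag
// output is already 0 or 1.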
2543  }
2544 
2545  // If the result type of the LLVM IR asm doesn't match the result type of
2546  // the expression, do the conversion.
2547  if (ResultRegTypes[i] != TruncTy) {
2548 
2549  // Truncate the integer result to the right size, note that TruncTy can be
2550  // a pointer.
2551  if (TruncTy->isFloatingPointTy())
2552  Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
2553  else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2554  uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
2555  Tmp = Builder.CreateTrunc(
2556  Tmp, llvm::IntegerType::get(CTX, (unsigned)ResSize));
2557  Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
2558  } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2559  uint64_t TmpSize =
2560  CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
2561  Tmp = Builder.CreatePtrToInt(
2562  Tmp, llvm::IntegerType::get(CTX, (unsigned)TmpSize));
2563  Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2564  } else if (Tmp->getType()->isIntegerTy() && TruncTy->isIntegerTy()) {
2565  Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
2566  } else if (Tmp->getType()->isVectorTy() || TruncTy->isVectorTy()) {
2567  Tmp = Builder.CreateBitCast(Tmp, TruncTy);
2568  }
2569  }
2570 
2571  LValue Dest = ResultRegDests[i];
2572  // ResultTypeRequiresCast elements correspond to the first
2573  // ResultTypeRequiresCast.size() elements of RegResults.
2574  if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
2575  unsigned Size = CGF.getContext().getTypeSize(ResultRegQualTys[i]);
2576  Address A = Dest.getAddress().withElementType(ResultRegTypes[i]);
2577  if (CGF.getTargetHooks().isScalarizableAsmOperand(CGF, TruncTy)) {
2578  Builder.CreateStore(Tmp, A);
2579  continue;
2580  }
2581 
2582  QualType Ty =
2583  CGF.getContext().getIntTypeForBitwidth(Size, /*Signed=*/false);
2584  if (Ty.isNull()) {
2585  const Expr *OutExpr = S.getOutputExpr(i);
2586  CGM.getDiags().Report(OutExpr->getExprLoc(),
2587  diag::err_store_value_to_reg);
2588  return;
2589  }
2590  Dest = CGF.MakeAddrLValue(A, Ty);
2591  }
2592  CGF.EmitStoreThroughLValue(RValue::get(Tmp), Dest);
2593  }
2594 }
2595 
2596 static void EmitHipStdParUnsupportedAsm(CodeGenFunction *CGF,
2597  const AsmStmt &S) {
2598  constexpr auto Name = "__ASM__hipstdpar_unsupported";
2599 
2600  StringRef Asm;
2601  if (auto GCCAsm = dyn_cast<GCCAsmStmt>(&S))
2602  Asm = GCCAsm->getAsmString()->getString();
2603 
2604  auto &Ctx = CGF->CGM.getLLVMContext();
2605 
2606  auto StrTy = llvm::ConstantDataArray::getString(Ctx, Asm);
2607  auto FnTy = llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx),
2608  {StrTy->getType()}, false);
2609  auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, FnTy);
2610 
2611  CGF->Builder.CreateCall(UBF, {StrTy});
2612 }
2613 
2614 void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
2615  // Pop all cleanup blocks at the end of the asm statement.
2616  CodeGenFunction::RunCleanupsScope Cleanups(*this);
2617 
2618  // Assemble the final asm string.
2619  std::string AsmString = S.generateAsmString(getContext());
2620 
2621  // Get all the output and input constraints together.
2622  SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
2623  SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
2624 
2625  bool IsHipStdPar = getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice;
2626  bool IsValidTargetAsm = true;
2627  for (unsigned i = 0, e = S.getNumOutputs(); i != e && IsValidTargetAsm; i++) {
2628  StringRef Name;
2629  if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2630  Name = GAS->getOutputName(i);
2631  TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
2632  bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
2633  if (IsHipStdPar && !IsValid)
2634  IsValidTargetAsm = false;
2635  else
2636  assert(IsValid && "Failed to parse output constraint");
2637  OutputConstraintInfos.push_back(Info);
2638  }
2639 
2640  for (unsigned i = 0, e = S.getNumInputs(); i != e && IsValidTargetAsm; i++) {
2641  StringRef Name;
2642  if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2643  Name = GAS->getInputName(i);
2644  TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
2645  bool IsValid =
2646  getTarget().validateInputConstraint(OutputConstraintInfos, Info);
2647  if (IsHipStdPar && !IsValid)
2648  IsValidTargetAsm = false;
2649  else
2650  assert(IsValid && "Failed to parse input constraint");
2651  InputConstraintInfos.push_back(Info);
2652  }
2653 
2654  if (!IsValidTargetAsm)
2655  return EmitHipStdParUnsupportedAsm(this, S);
2656 
2657  std::string Constraints;
2658 
2659  std::vector<LValue> ResultRegDests;
2660  std::vector<QualType> ResultRegQualTys;
2661  std::vector<llvm::Type *> ResultRegTypes;
2662  std::vector<llvm::Type *> ResultTruncRegTypes;
2663  std::vector<llvm::Type *> ArgTypes;
2664  std::vector<llvm::Type *> ArgElemTypes;
2665  std::vector<llvm::Value*> Args;
2666  llvm::BitVector ResultTypeRequiresCast;
2667  llvm::BitVector ResultRegIsFlagReg;
2668 
2669  // Keep track of inout constraints.
2670  std::string InOutConstraints;
2671  std::vector<llvm::Value*> InOutArgs;
2672  std::vector<llvm::Type*> InOutArgTypes;
2673  std::vector<llvm::Type*> InOutArgElemTypes;
2674 
2675  // Keep track of out constraints for tied input operand.
2676  std::vector<std::string> OutputConstraints;
2677 
2678  // Keep track of defined physregs.
2679  llvm::SmallSet<std::string, 8> PhysRegOutputs;
2680 
2681  // An inline asm can be marked readonly if it meets the following conditions:
2682  // - it doesn't have any side effects
2683  // - it doesn't clobber memory
2684  // - it doesn't return a value by-reference
2685  // It can be marked readnone if it doesn't have any input memory constraints
2686  // in addition to meeting the conditions listed above.
2687  bool ReadOnly = true, ReadNone = true;
2688 
2689  for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2690  TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
2691 
2692  // Simplify the output constraint.
2693  std::string OutputConstraint(S.getOutputConstraint(i));
2694  OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
2695  getTarget(), &OutputConstraintInfos);
2696 
2697  const Expr *OutExpr = S.getOutputExpr(i);
2698  OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
2699 
2700  std::string GCCReg;
2701  OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
2702  getTarget(), CGM, S,
2703  Info.earlyClobber(),
2704  &GCCReg);
2705  // Give an error on multiple outputs to same physreg.
2706  if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second)
2707  CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg);
2708 
2709  OutputConstraints.push_back(OutputConstraint);
2710  LValue Dest = EmitLValue(OutExpr);
2711  if (!Constraints.empty())
2712  Constraints += ',';
2713 
2714  // If this is a register output, then make the inline asm return it
2715  // by-value. If this is a memory result, return the value by-reference.
2716  QualType QTy = OutExpr->getType();
2717  const bool IsScalarOrAggregate = hasScalarEvaluationKind(QTy) ||
2718  hasAggregateEvaluationKind(QTy);
2719  if (!Info.allowsMemory() && IsScalarOrAggregate) {
2720 
2721  Constraints += "=" + OutputConstraint;
2722  ResultRegQualTys.push_back(QTy);
2723  ResultRegDests.push_back(Dest);
2724 
2725  bool IsFlagReg = llvm::StringRef(OutputConstraint).starts_with("{@cc");
2726  ResultRegIsFlagReg.push_back(IsFlagReg);
2727 
2728  llvm::Type *Ty = ConvertTypeForMem(QTy);
2729  const bool RequiresCast = Info.allowsRegister() &&
2730  (getTargetHooks().isScalarizableAsmOperand(*this, Ty) ||
2731  Ty->isAggregateType());
2732 
2733  ResultTruncRegTypes.push_back(Ty);
2734  ResultTypeRequiresCast.push_back(RequiresCast);
2735 
2736  if (RequiresCast) {
2737  unsigned Size = getContext().getTypeSize(QTy);
2738  Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2739  }
2740  ResultRegTypes.push_back(Ty);
2741  // If this output is tied to an input, and if the input is larger, then
2742  // we need to set the actual result type of the inline asm node to be the
2743  // same as the input type.
2744  if (Info.hasMatchingInput()) {
2745  unsigned InputNo;
2746  for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
2747  TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
2748  if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
2749  break;
2750  }
2751  assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
2752 
2753  QualType InputTy = S.getInputExpr(InputNo)->getType();
2754  QualType OutputType = OutExpr->getType();
2755 
2756  uint64_t InputSize = getContext().getTypeSize(InputTy);
2757  if (getContext().getTypeSize(OutputType) < InputSize) {
2758  // Form the asm to return the value as a larger integer or fp type.
2759  ResultRegTypes.back() = ConvertType(InputTy);
2760  }
2761  }
2762  if (llvm::Type* AdjTy =
2763  getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2764  ResultRegTypes.back()))
2765  ResultRegTypes.back() = AdjTy;
2766  else {
2767  CGM.getDiags().Report(S.getAsmLoc(),
2768  diag::err_asm_invalid_type_in_input)
2769  << OutExpr->getType() << OutputConstraint;
2770  }
2771 
2772  // Update largest vector width for any vector types.
2773  if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
2774  LargestVectorWidth =
2775  std::max((uint64_t)LargestVectorWidth,
2776  VT->getPrimitiveSizeInBits().getKnownMinValue());
2777  } else {
2778  Address DestAddr = Dest.getAddress();
2779  // Matrix types in memory are represented by arrays, but accessed through
2780  // vector pointers, with the alignment specified on the access operation.
2781  // For inline assembly, update pointer arguments to use vector pointers.
2782  // Otherwise there will be a mismatch if the matrix is also an
2783  // input-argument which is represented as vector.
2784  if (isa<MatrixType>(OutExpr->getType().getCanonicalType()))
2785  DestAddr = DestAddr.withElementType(ConvertType(OutExpr->getType()));
2786 
2787  ArgTypes.push_back(DestAddr.getType());
2788  ArgElemTypes.push_back(DestAddr.getElementType());
2789  Args.push_back(DestAddr.emitRawPointer(*this));
2790  Constraints += "=*";
2791  Constraints += OutputConstraint;
2792  ReadOnly = ReadNone = false;
2793  }
2794 
2795  if (Info.isReadWrite()) {
2796  InOutConstraints += ',';
2797 
2798  const Expr *InputExpr = S.getOutputExpr(i);
2799  llvm::Value *Arg;
2800  llvm::Type *ArgElemType;
2801  std::tie(Arg, ArgElemType) = EmitAsmInputLValue(
2802  Info, Dest, InputExpr->getType(), InOutConstraints,
2803  InputExpr->getExprLoc());
2804 
2805  if (llvm::Type* AdjTy =
2806  getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2807  Arg->getType()))
2808  Arg = Builder.CreateBitCast(Arg, AdjTy);
2809 
2810  // Update largest vector width for any vector types.
2811  if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2812  LargestVectorWidth =
2813  std::max((uint64_t)LargestVectorWidth,
2814  VT->getPrimitiveSizeInBits().getKnownMinValue());
2815  // Only tie earlyclobber physregs.
2816  if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
2817  InOutConstraints += llvm::utostr(i);
2818  else
2819  InOutConstraints += OutputConstraint;
2820 
2821  InOutArgTypes.push_back(Arg->getType());
2822  InOutArgElemTypes.push_back(ArgElemType);
2823  InOutArgs.push_back(Arg);
2824  }
2825  }
2826 
2827  // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
2828  // to the return value slot. Only do this when returning in registers.
2829  if (isa<MSAsmStmt>(&S)) {
2830  const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
2831  if (RetAI.isDirect() || RetAI.isExtend()) {
2832  // Make a fake lvalue for the return value slot.
2833  LValue ReturnSlot = MakeAddrLValue(ReturnValue, FnRetTy);
2834  CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
2835  *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
2836  ResultRegDests, AsmString, S.getNumOutputs());
2837  SawAsmBlock = true;
2838  }
2839  }
2840 
2841  for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
2842  const Expr *InputExpr = S.getInputExpr(i);
2843 
2844  TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
2845 
2846  if (Info.allowsMemory())
2847  ReadNone = false;
2848 
2849  if (!Constraints.empty())
2850  Constraints += ',';
2851 
2852  // Simplify the input constraint.
2853  std::string InputConstraint(S.getInputConstraint(i));
2854  InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
2855  &OutputConstraintInfos);
2856 
2857  InputConstraint = AddVariableConstraints(
2858  InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
2859  getTarget(), CGM, S, false /* No EarlyClobber */);
2860 
2861  std::string ReplaceConstraint (InputConstraint);
2862  llvm::Value *Arg;
2863  llvm::Type *ArgElemType;
2864  std::tie(Arg, ArgElemType) = EmitAsmInput(Info, InputExpr, Constraints);
2865 
2866  // If this input argument is tied to a larger output result, extend the
2867  // input to be the same size as the output. The LLVM backend wants to see
2868  // the input and output of a matching constraint be the same size. Note
2869  // that GCC does not define what the top bits are here. We use zext because
2870  // that is usually cheaper, but LLVM IR should really get an anyext someday.
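// For illustration: asm("..." : "=r"(long_out) : "0"(int_in)) on a 64-bit
// target zero-extends the i32 input to i64 so both tied operands use the
// same type.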
2871  if (Info.hasTiedOperand()) {
2872  unsigned Output = Info.getTiedOperand();
2873  QualType OutputType = S.getOutputExpr(Output)->getType();
2874  QualType InputTy = InputExpr->getType();
2875 
2876  if (getContext().getTypeSize(OutputType) >
2877  getContext().getTypeSize(InputTy)) {
2878  // Use ptrtoint as appropriate so that we can do our extension.
2879  if (isa<llvm::PointerType>(Arg->getType()))
2880  Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
2881  llvm::Type *OutputTy = ConvertType(OutputType);
2882  if (isa<llvm::IntegerType>(OutputTy))
2883  Arg = Builder.CreateZExt(Arg, OutputTy);
2884  else if (isa<llvm::PointerType>(OutputTy))
2885  Arg = Builder.CreateZExt(Arg, IntPtrTy);
2886  else if (OutputTy->isFloatingPointTy())
2887  Arg = Builder.CreateFPExt(Arg, OutputTy);
2888  }
2889  // Deal with the tied operands' constraint code in adjustInlineAsmType.
2890  ReplaceConstraint = OutputConstraints[Output];
2891  }
2892  if (llvm::Type* AdjTy =
2893  getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
2894  Arg->getType()))
2895  Arg = Builder.CreateBitCast(Arg, AdjTy);
2896  else
2897  CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
2898  << InputExpr->getType() << InputConstraint;
2899 
2900  // Update largest vector width for any vector types.
2901  if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2902  LargestVectorWidth =
2903  std::max((uint64_t)LargestVectorWidth,
2904  VT->getPrimitiveSizeInBits().getKnownMinValue());
2905 
2906  ArgTypes.push_back(Arg->getType());
2907  ArgElemTypes.push_back(ArgElemType);
2908  Args.push_back(Arg);
2909  Constraints += InputConstraint;
2910  }
2911 
2912  // Append the "input" part of inout constraints.
2913  for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
2914  ArgTypes.push_back(InOutArgTypes[i]);
2915  ArgElemTypes.push_back(InOutArgElemTypes[i]);
2916  Args.push_back(InOutArgs[i]);
2917  }
2918  Constraints += InOutConstraints;
2919 
2920  // Labels
2921  SmallVector<llvm::BasicBlock *, 16> Transfer;
2922  llvm::BasicBlock *Fallthrough = nullptr;
2923  bool IsGCCAsmGoto = false;
2924  if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
2925  IsGCCAsmGoto = GS->isAsmGoto();
2926  if (IsGCCAsmGoto) {
2927  for (const auto *E : GS->labels()) {
2928  JumpDest Dest = getJumpDestForLabel(E->getLabel());
2929  Transfer.push_back(Dest.getBlock());
2930  if (!Constraints.empty())
2931  Constraints += ',';
2932  Constraints += "!i";
2933  }
2934  Fallthrough = createBasicBlock("asm.fallthrough");
2935  }
2936  }
2937 
2938  bool HasUnwindClobber = false;
2939 
2940  // Clobbers
2941  for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
2942  StringRef Clobber = S.getClobber(i);
2943 
2944  if (Clobber == "memory")
2945  ReadOnly = ReadNone = false;
2946  else if (Clobber == "unwind") {
2947  HasUnwindClobber = true;
2948  continue;
2949  } else if (Clobber != "cc") {
2950  Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
2951  if (CGM.getCodeGenOpts().StackClashProtector &&
2952  getTarget().isSPRegName(Clobber)) {
2953  CGM.getDiags().Report(S.getAsmLoc(),
2954  diag::warn_stack_clash_protection_inline_asm);
2955  }
2956  }
2957 
2958  if (isa<MSAsmStmt>(&S)) {
2959  if (Clobber == "eax" || Clobber == "edx") {
2960  if (Constraints.find("=&A") != std::string::npos)
2961  continue;
2962  std::string::size_type position1 =
2963  Constraints.find("={" + Clobber.str() + "}");
2964  if (position1 != std::string::npos) {
2965  Constraints.insert(position1 + 1, "&");
2966  continue;
2967  }
2968  std::string::size_type position2 = Constraints.find("=A");
2969  if (position2 != std::string::npos) {
2970  Constraints.insert(position2 + 1, "&");
2971  continue;
2972  }
2973  }
2974  }
2975  if (!Constraints.empty())
2976  Constraints += ',';
2977 
2978  Constraints += "~{";
2979  Constraints += Clobber;
2980  Constraints += '}';
2981  }
2982 
2983  assert(!(HasUnwindClobber && IsGCCAsmGoto) &&
2984  "unwind clobber can't be used with asm goto");
2985 
2986  // Add machine specific clobbers
2987  std::string_view MachineClobbers = getTarget().getClobbers();
2988  if (!MachineClobbers.empty()) {
2989  if (!Constraints.empty())
2990  Constraints += ',';
2991  Constraints += MachineClobbers;
2992  }
2993 
2994  llvm::Type *ResultType;
2995  if (ResultRegTypes.empty())
2996  ResultType = VoidTy;
2997  else if (ResultRegTypes.size() == 1)
2998  ResultType = ResultRegTypes[0];
2999  else
3000  ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
3001 
3002  llvm::FunctionType *FTy =
3003  llvm::FunctionType::get(ResultType, ArgTypes, false);
3004 
3005  bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
3006 
3007  llvm::InlineAsm::AsmDialect GnuAsmDialect =
3008  CGM.getCodeGenOpts().getInlineAsmDialect() == CodeGenOptions::IAD_ATT
3009  ? llvm::InlineAsm::AD_ATT
3010  : llvm::InlineAsm::AD_Intel;
3011  llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
3012  llvm::InlineAsm::AD_Intel : GnuAsmDialect;
3013 
3014  llvm::InlineAsm *IA = llvm::InlineAsm::get(
3015  FTy, AsmString, Constraints, HasSideEffect,
3016  /* IsAlignStack */ false, AsmDialect, HasUnwindClobber);
3017  std::vector<llvm::Value*> RegResults;
3018  llvm::CallBrInst *CBR;
3019  llvm::DenseMap<llvm::BasicBlock *, SmallVector<llvm::Value *, 4>>
3020  CBRRegResults;
3021  if (IsGCCAsmGoto) {
3022  CBR = Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
3023  EmitBlock(Fallthrough);
3024  UpdateAsmCallInst(*CBR, HasSideEffect, false, ReadOnly, ReadNone,
3025  InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
3026  *this, RegResults);
3027  // Because we are emitting code top to bottom, we don't have enough
3028  // information at this point to know precisely whether we have a critical
3029  // edge. If we have outputs, split all indirect destinations.
3030  if (!RegResults.empty()) {
3031  unsigned i = 0;
3032  for (llvm::BasicBlock *Dest : CBR->getIndirectDests()) {
3033  llvm::Twine SynthName = Dest->getName() + ".split";
3034  llvm::BasicBlock *SynthBB = createBasicBlock(SynthName);
3035  llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
3036  Builder.SetInsertPoint(SynthBB);
3037 
3038  if (ResultRegTypes.size() == 1) {
3039  CBRRegResults[SynthBB].push_back(CBR);
3040  } else {
3041  for (unsigned j = 0, e = ResultRegTypes.size(); j != e; ++j) {
3042  llvm::Value *Tmp = Builder.CreateExtractValue(CBR, j, "asmresult");
3043  CBRRegResults[SynthBB].push_back(Tmp);
3044  }
3045  }
3046 
3047  EmitBranch(Dest);
3048  EmitBlock(SynthBB);
3049  CBR->setIndirectDest(i++, SynthBB);
3050  }
3051  }
3052  } else if (HasUnwindClobber) {
3053  llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
3054  UpdateAsmCallInst(*Result, HasSideEffect, true, ReadOnly, ReadNone,
3055  InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
3056  *this, RegResults);
3057  } else {
3058  llvm::CallInst *Result =
3059  Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
3060  UpdateAsmCallInst(*Result, HasSideEffect, false, ReadOnly, ReadNone,
3061  InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
3062  *this, RegResults);
3063  }
3064 
3065  EmitAsmStores(*this, S, RegResults, ResultRegTypes, ResultTruncRegTypes,
3066  ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast,
3067  ResultRegIsFlagReg);
3068 
3069  // If this is an asm goto with outputs, repeat EmitAsmStores, but with a
3070  // different insertion point; one for each indirect destination and with
3071  // CBRRegResults rather than RegResults.
3072  if (IsGCCAsmGoto && !CBRRegResults.empty()) {
3073  for (llvm::BasicBlock *Succ : CBR->getIndirectDests()) {
3074  llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
3075  Builder.SetInsertPoint(Succ, --(Succ->end()));
3076  EmitAsmStores(*this, S, CBRRegResults[Succ], ResultRegTypes,
3077  ResultTruncRegTypes, ResultRegDests, ResultRegQualTys,
3078  ResultTypeRequiresCast, ResultRegIsFlagReg);
3079  }
3080  }
3081 }
3082 
3083 LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
3084  const RecordDecl *RD = S.getCapturedRecordDecl();
3085  QualType RecordTy = getContext().getRecordType(RD);
3086 
3087  // Initialize the captured struct.
3088  LValue SlotLV =
3089  MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
3090 
3091  RecordDecl::field_iterator CurField = RD->field_begin();
3092  for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
3093  E = S.capture_init_end();
3094  I != E; ++I, ++CurField) {
3095  LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
3096  if (CurField->hasCapturedVLAType()) {
3097  EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
3098  } else {
3099  EmitInitializerForField(*CurField, LV, *I);
3100  }
3101  }
3102 
3103  return SlotLV;
3104 }
3105 
3106 /// Generate an outlined function for the body of a CapturedStmt, store any
3107 /// captured variables into the captured struct, and call the outlined function.
3108 llvm::Function *
3109 CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
3110  LValue CapStruct = InitCapturedStruct(S);
3111 
3112  // Emit the CapturedDecl
3113  CodeGenFunction CGF(CGM, true);
3114  CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
3115  llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
3116  delete CGF.CapturedStmtInfo;
3117 
3118  // Emit call to the helper function.
3119  EmitCallOrInvoke(F, CapStruct.getPointer(*this));
3120 
3121  return F;
3122 }
3123 
3124 Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
3125  LValue CapStruct = InitCapturedStruct(S);
3126  return CapStruct.getAddress();
3127 }
3128 
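As a hedged, source-level analogue of the helpers above (all names here are hypothetical; the real record type comes from Sema and the real helper from GenerateCapturedStmtFunction):

  struct AggCaptured {        // shape of the "agg.captured" record
    int &x;                   // one field per captured variable
    int &n;
  };

  static void helperFn(AggCaptured &ctx) {  // outlined CapturedDecl body
    ctx.x += ctx.n;
  }

  int run(int n) {
    int x = 0;
    AggCaptured agg = {x, n};  // InitCapturedStruct: fill in each field
    helperFn(agg);             // EmitCapturedStmt: call helper on the struct
    return x;
  }
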
3129 /// Creates the outlined function for a CapturedStmt.
3130 llvm::Function *
3131 CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
3132  assert(CapturedStmtInfo &&
3133  "CapturedStmtInfo should be set when generating the captured function");
3134  const CapturedDecl *CD = S.getCapturedDecl();
3135  const RecordDecl *RD = S.getCapturedRecordDecl();
3136  SourceLocation Loc = S.getBeginLoc();
3137  assert(CD->hasBody() && "missing CapturedDecl body");
3138 
3139  // Build the argument list.
3140  ASTContext &Ctx = CGM.getContext();
3141  FunctionArgList Args;
3142  Args.append(CD->param_begin(), CD->param_end());
3143 
3144  // Create the function declaration.
3145  const CGFunctionInfo &FuncInfo =
3146  CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
3147  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
3148 
3149  llvm::Function *F =
3150  llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
3151  CapturedStmtInfo->getHelperName(), &CGM.getModule());
3152  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
3153  if (CD->isNothrow())
3154  F->addFnAttr(llvm::Attribute::NoUnwind);
3155 
3156  // Generate the function.
3157  StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
3158  CD->getBody()->getBeginLoc());
3159  // Set the context parameter in CapturedStmtInfo.
3160  Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
3161  CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
3162 
3163  // Initialize variable-length arrays.
3164  LValue Base = MakeNaturalAlignRawAddrLValue(CapturedStmtInfo->getContextValue(),
3165  Ctx.getTagDeclType(RD));
3166  for (auto *FD : RD->fields()) {
3167  if (FD->hasCapturedVLAType()) {
3168  auto *ExprArg =
3169  EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc())
3170  .getScalarVal();
3171  auto VAT = FD->getCapturedVLAType();
3172  VLASizeMap[VAT->getSizeExpr()] = ExprArg;
3173  }
3174  }
3175 
3176  // If 'this' is captured, load it into CXXThisValue.
3177  if (CapturedStmtInfo->isCXXThisExprCaptured()) {
3178  FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
3179  LValue ThisLValue = EmitLValueForField(Base, FD);
3180  CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
3181  }
3182 
3183  PGO.assignRegionCounters(GlobalDecl(CD), F);
3184  CapturedStmtInfo->EmitBody(*this, CD->getBody());
3185  FinishFunction(CD->getBodyRBrace());
3186 
3187  return F;
3188 }
3189 
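The VLA re-seeding above matters whenever the captured body mentions a variably modified type. A hedged illustration (an OpenMP construct is one way to create a CapturedStmt; this assumes -fopenmp, and VLAs in C++ are a Clang/GCC extension):

  void useVLA(int n) {
    int a[n];               // variably modified: the size of 'a' is captured
    #pragma omp parallel    // the parallel body becomes a CapturedStmt
    {
      // sizeof(a) inside the outlined helper needs the captured bound,
      // which re-seeding VLASizeMap from the capture field provides.
      int bytes = (int)sizeof(a);
      (void)bytes;
    }
  }
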
3190 namespace {
3191 // Returns the first convergence entry/loop/anchor instruction found in |BB|,
3192 // or nullptr if there is none.
3193 llvm::IntrinsicInst *getConvergenceToken(llvm::BasicBlock *BB) {
3194  for (auto &I : *BB) {
3195  auto *II = dyn_cast<llvm::IntrinsicInst>(&I);
3196  if (II && llvm::isConvergenceControlIntrinsic(II->getIntrinsicID()))
3197  return II;
3198  }
3199  return nullptr;
3200 }
3201 
3202 } // namespace
3203 
3204 llvm::CallBase *
3205 CodeGenFunction::addConvergenceControlToken(llvm::CallBase *Input,
3206  llvm::Value *ParentToken) {
3207  llvm::Value *bundleArgs[] = {ParentToken};
3208  llvm::OperandBundleDef OB("convergencectrl", bundleArgs);
3209  auto Output = llvm::CallBase::addOperandBundle(
3210  Input, llvm::LLVMContext::OB_convergencectrl, OB, Input);
3211  Input->replaceAllUsesWith(Output);
3212  Input->eraseFromParent();
3213  return Output;
3214 }
3215 
3216 llvm::IntrinsicInst *
3217 CodeGenFunction::emitConvergenceLoopToken(llvm::BasicBlock *BB,
3218  llvm::Value *ParentToken) {
3219  CGBuilderTy::InsertPoint IP = Builder.saveIP();
3220  if (BB->empty())
3221  Builder.SetInsertPoint(BB);
3222  else
3223  Builder.SetInsertPoint(BB->getFirstInsertionPt());
3224 
3225  llvm::CallBase *CB = Builder.CreateIntrinsic(
3226  llvm::Intrinsic::experimental_convergence_loop, {}, {});
3227  Builder.restoreIP(IP);
3228 
3229  llvm::CallBase *I = addConvergenceControlToken(CB, ParentToken);
3230  return cast<llvm::IntrinsicInst>(I);
3231 }
3232 
3233 llvm::IntrinsicInst *
3234 CodeGenFunction::getOrEmitConvergenceEntryToken(llvm::Function *F) {
3235  llvm::BasicBlock *BB = &F->getEntryBlock();
3236  llvm::IntrinsicInst *Token = getConvergenceToken(BB);
3237  if (Token)
3238  return Token;
3239 
3240  // Adding a convergence token requires the function to be marked as
3241  // convergent.
3242  F->setConvergent();
3243 
3244  CGBuilderTy::InsertPoint IP = Builder.saveIP();
3245  Builder.SetInsertPoint(&BB->front());
3246  llvm::CallBase *I = Builder.CreateIntrinsic(
3247  llvm::Intrinsic::experimental_convergence_entry, {}, {});
3248  assert(isa<llvm::IntrinsicInst>(I));
3249  Builder.restoreIP(IP);
3250 
3251  return cast<llvm::IntrinsicInst>(I);
3252 }
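
Taken together, these helpers maintain a chain of convergence tokens: one entry token in the entry block, a loop token per loop header tied to its parent, and a "convergencectrl" bundle on convergent calls. A minimal standalone sketch (assuming an LLVM development setup; the free function name is hypothetical) restating the loop-token step with a plain IRBuilder:

  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/InstrTypes.h"
  #include "llvm/IR/Intrinsics.h"
  #include "llvm/IR/LLVMContext.h"

  // Insert llvm.experimental.convergence.loop at the top of a loop header
  // and tie it to ParentToken via a "convergencectrl" operand bundle.
  llvm::CallBase *emitLoopToken(llvm::BasicBlock *Header,
                                llvm::Value *ParentToken) {
    llvm::IRBuilder<> B(Header, Header->getFirstInsertionPt());
    llvm::CallInst *Loop = B.CreateIntrinsic(
        llvm::Intrinsic::experimental_convergence_loop, {}, {});
    // addOperandBundle returns a fresh instruction, so every use of the
    // original call must be rewired before erasing it.
    llvm::OperandBundleDef OB("convergencectrl",
                              std::vector<llvm::Value *>{ParentToken});
    llvm::CallBase *Tied = llvm::CallBase::addOperandBundle(
        Loop, llvm::LLVMContext::OB_convergencectrl, OB, Loop);
    Loop->replaceAllUsesWith(Tied);
    Loop->eraseFromParent();
    return Tied;
  }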