1 //===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This contains code to emit Stmt nodes as LLVM code.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CodeGenFunction.h"
15 #include "CGDebugInfo.h"
16 #include "CodeGenModule.h"
17 #include "TargetInfo.h"
18 #include "clang/AST/StmtVisitor.h"
19 #include "clang/Basic/Builtins.h"
20 #include "clang/Basic/PrettyStackTrace.h"
21 #include "clang/Basic/TargetInfo.h"
22 #include "clang/Sema/LoopHint.h"
23 #include "clang/Sema/SemaDiagnostic.h"
24 #include "llvm/ADT/StringExtras.h"
25 #include "llvm/IR/CallSite.h"
26 #include "llvm/IR/DataLayout.h"
27 #include "llvm/IR/InlineAsm.h"
28 #include "llvm/IR/Intrinsics.h"
29 #include "llvm/IR/MDBuilder.h"
30 
31 using namespace clang;
32 using namespace CodeGen;
33 
34 //===----------------------------------------------------------------------===//
35 //                              Statement Emission
36 //===----------------------------------------------------------------------===//
37 
38 void CodeGenFunction::EmitStopPoint(const Stmt *S) {
39   if (CGDebugInfo *DI = getDebugInfo()) {
40     SourceLocation Loc;
41     Loc = S->getLocStart();
42     DI->EmitLocation(Builder, Loc);
43 
44     LastStopPoint = Loc;
45   }
46 }
47 
48 void CodeGenFunction::EmitStmt(const Stmt *S) {
49   assert(S && "Null statement?");
50   PGO.setCurrentStmt(S);
51 
52   // These statements have their own debug info handling.
53   if (EmitSimpleStmt(S))
54     return;
55 
56   // Check if we are generating unreachable code.
57   if (!HaveInsertPoint()) {
58     // If so, and the statement doesn't contain a label, then we do not need to
59     // generate actual code. This is safe because (1) the current point is
60     // unreachable, so we don't need to execute the code, and (2) we've already
61     // handled the statements which update internal data structures (like the
62     // local variable map) which could be used by subsequent statements.
63     if (!ContainsLabel(S)) {
64       // Verify that any decl statements were handled as simple, they may be in
65       // scope of subsequent reachable statements.
66       assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
67       return;
68     }
69 
70     // Otherwise, make a new block to hold the code.
71     EnsureInsertPoint();
72   }
73 
74   // Generate a stoppoint if we are emitting debug info.
75   EmitStopPoint(S);
76 
77   switch (S->getStmtClass()) {
78   case Stmt::NoStmtClass:
79   case Stmt::CXXCatchStmtClass:
80   case Stmt::SEHExceptStmtClass:
81   case Stmt::SEHFinallyStmtClass:
82   case Stmt::MSDependentExistsStmtClass:
83     llvm_unreachable("invalid statement class to emit generically");
84   case Stmt::NullStmtClass:
85   case Stmt::CompoundStmtClass:
86   case Stmt::DeclStmtClass:
87   case Stmt::LabelStmtClass:
88   case Stmt::AttributedStmtClass:
89   case Stmt::GotoStmtClass:
90   case Stmt::BreakStmtClass:
91   case Stmt::ContinueStmtClass:
92   case Stmt::DefaultStmtClass:
93   case Stmt::CaseStmtClass:
94   case Stmt::SEHLeaveStmtClass:
95     llvm_unreachable("should have emitted these statements as simple");
96 
97 #define STMT(Type, Base)
98 #define ABSTRACT_STMT(Op)
99 #define EXPR(Type, Base) \
100   case Stmt::Type##Class:
101 #include "clang/AST/StmtNodes.inc"
102   {
103     // Remember the block we came in on.
104     llvm::BasicBlock *incoming = Builder.GetInsertBlock();
105     assert(incoming && "expression emission must have an insertion point");
106 
107     EmitIgnoredExpr(cast<Expr>(S));
108 
109     llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
110     assert(outgoing && "expression emission cleared block!");
111 
112     // The expression emitters assume (reasonably!) that the insertion
113     // point is always set.  To maintain that, the call-emission code
114     // for noreturn functions has to enter a new block with no
115     // predecessors.  We want to kill that block and mark the current
116     // insertion point unreachable in the common case of a call like
117     // "exit();".  Since expression emission doesn't otherwise create
118     // blocks with no predecessors, we can just test for that.
119     // However, we must be careful not to do this to our incoming
120     // block, because *statement* emission does sometimes create
121     // reachable blocks which will have no predecessors until later in
122     // the function.  This occurs with, e.g., labels that are not
123     // reachable by fallthrough.
124     if (incoming != outgoing && outgoing->use_empty()) {
125       outgoing->eraseFromParent();
126       Builder.ClearInsertionPoint();
127     }
128     break;
129   }
130 
131   case Stmt::IndirectGotoStmtClass:
132     EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;
133 
134   case Stmt::IfStmtClass:       EmitIfStmt(cast<IfStmt>(*S));             break;
135   case Stmt::WhileStmtClass:    EmitWhileStmt(cast<WhileStmt>(*S));       break;
136   case Stmt::DoStmtClass:       EmitDoStmt(cast<DoStmt>(*S));             break;
137   case Stmt::ForStmtClass:      EmitForStmt(cast<ForStmt>(*S));           break;
138 
139   case Stmt::ReturnStmtClass:   EmitReturnStmt(cast<ReturnStmt>(*S));     break;
140 
141   case Stmt::SwitchStmtClass:   EmitSwitchStmt(cast<SwitchStmt>(*S));     break;
142   case Stmt::GCCAsmStmtClass:   // Intentional fall-through.
143   case Stmt::MSAsmStmtClass:    EmitAsmStmt(cast<AsmStmt>(*S));           break;
144   case Stmt::CoroutineBodyStmtClass:
145     EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
146     break;
147   case Stmt::CoreturnStmtClass:
148     CGM.ErrorUnsupported(S, "coroutine");
149     break;
150   case Stmt::CapturedStmtClass: {
151     const CapturedStmt *CS = cast<CapturedStmt>(S);
152     EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
153     }
154     break;
155   case Stmt::ObjCAtTryStmtClass:
156     EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
157     break;
158   case Stmt::ObjCAtCatchStmtClass:
159     llvm_unreachable(
160                     "@catch statements should be handled by EmitObjCAtTryStmt");
161   case Stmt::ObjCAtFinallyStmtClass:
162     llvm_unreachable(
163                   "@finally statements should be handled by EmitObjCAtTryStmt");
164   case Stmt::ObjCAtThrowStmtClass:
165     EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
166     break;
167   case Stmt::ObjCAtSynchronizedStmtClass:
168     EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
169     break;
170   case Stmt::ObjCForCollectionStmtClass:
171     EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
172     break;
173   case Stmt::ObjCAutoreleasePoolStmtClass:
174     EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
175     break;
176 
177   case Stmt::CXXTryStmtClass:
178     EmitCXXTryStmt(cast<CXXTryStmt>(*S));
179     break;
180   case Stmt::CXXForRangeStmtClass:
181     EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S));
182     break;
183   case Stmt::SEHTryStmtClass:
184     EmitSEHTryStmt(cast<SEHTryStmt>(*S));
185     break;
186   case Stmt::OMPParallelDirectiveClass:
187     EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
188     break;
189   case Stmt::OMPSimdDirectiveClass:
190     EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
191     break;
192   case Stmt::OMPForDirectiveClass:
193     EmitOMPForDirective(cast<OMPForDirective>(*S));
194     break;
195   case Stmt::OMPForSimdDirectiveClass:
196     EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
197     break;
198   case Stmt::OMPSectionsDirectiveClass:
199     EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
200     break;
201   case Stmt::OMPSectionDirectiveClass:
202     EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
203     break;
204   case Stmt::OMPSingleDirectiveClass:
205     EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
206     break;
207   case Stmt::OMPMasterDirectiveClass:
208     EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
209     break;
210   case Stmt::OMPCriticalDirectiveClass:
211     EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
212     break;
213   case Stmt::OMPParallelForDirectiveClass:
214     EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
215     break;
216   case Stmt::OMPParallelForSimdDirectiveClass:
217     EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
218     break;
219   case Stmt::OMPParallelSectionsDirectiveClass:
220     EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
221     break;
222   case Stmt::OMPTaskDirectiveClass:
223     EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
224     break;
225   case Stmt::OMPTaskyieldDirectiveClass:
226     EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
227     break;
228   case Stmt::OMPBarrierDirectiveClass:
229     EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
230     break;
231   case Stmt::OMPTaskwaitDirectiveClass:
232     EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
233     break;
234   case Stmt::OMPTaskgroupDirectiveClass:
235     EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
236     break;
237   case Stmt::OMPFlushDirectiveClass:
238     EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
239     break;
240   case Stmt::OMPOrderedDirectiveClass:
241     EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
242     break;
243   case Stmt::OMPAtomicDirectiveClass:
244     EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
245     break;
246   case Stmt::OMPTargetDirectiveClass:
247     EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
248     break;
249   case Stmt::OMPTeamsDirectiveClass:
250     EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
251     break;
252   case Stmt::OMPCancellationPointDirectiveClass:
253     EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
254     break;
255   case Stmt::OMPCancelDirectiveClass:
256     EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
257     break;
258   case Stmt::OMPTargetDataDirectiveClass:
259     EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
260     break;
261   case Stmt::OMPTargetEnterDataDirectiveClass:
262     EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
263     break;
264   case Stmt::OMPTargetExitDataDirectiveClass:
265     EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
266     break;
267   case Stmt::OMPTargetParallelDirectiveClass:
268     EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
269     break;
270   case Stmt::OMPTargetParallelForDirectiveClass:
271     EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
272     break;
273   case Stmt::OMPTaskLoopDirectiveClass:
274     EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
275     break;
276   case Stmt::OMPTaskLoopSimdDirectiveClass:
277     EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
278     break;
279   case Stmt::OMPDistributeDirectiveClass:
280     EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
281     break;
282   case Stmt::OMPTargetUpdateDirectiveClass:
283     EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
284     break;
285   case Stmt::OMPDistributeParallelForDirectiveClass:
286     EmitOMPDistributeParallelForDirective(
287         cast<OMPDistributeParallelForDirective>(*S));
288     break;
289   case Stmt::OMPDistributeParallelForSimdDirectiveClass:
290     EmitOMPDistributeParallelForSimdDirective(
291         cast<OMPDistributeParallelForSimdDirective>(*S));
292     break;
293   case Stmt::OMPDistributeSimdDirectiveClass:
294     EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
295     break;
296   case Stmt::OMPTargetParallelForSimdDirectiveClass:
297     EmitOMPTargetParallelForSimdDirective(
298         cast<OMPTargetParallelForSimdDirective>(*S));
299     break;
300   case Stmt::OMPTargetSimdDirectiveClass:
301     EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
302     break;
303   case Stmt::OMPTeamsDistributeDirectiveClass:
304     EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
305     break;
306   case Stmt::OMPTeamsDistributeSimdDirectiveClass:
307     EmitOMPTeamsDistributeSimdDirective(
308         cast<OMPTeamsDistributeSimdDirective>(*S));
309     break;
310   case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
311     EmitOMPTeamsDistributeParallelForSimdDirective(
312         cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
313     break;
314   }
315 }
316 
317 bool CodeGenFunction::EmitSimpleStmt(const Stmt *S) {
318   switch (S->getStmtClass()) {
319   default: return false;
320   case Stmt::NullStmtClass: break;
321   case Stmt::CompoundStmtClass: EmitCompoundStmt(cast<CompoundStmt>(*S)); break;
322   case Stmt::DeclStmtClass:     EmitDeclStmt(cast<DeclStmt>(*S));         break;
323   case Stmt::LabelStmtClass:    EmitLabelStmt(cast<LabelStmt>(*S));       break;
324   case Stmt::AttributedStmtClass:
325                             EmitAttributedStmt(cast<AttributedStmt>(*S)); break;
326   case Stmt::GotoStmtClass:     EmitGotoStmt(cast<GotoStmt>(*S));         break;
327   case Stmt::BreakStmtClass:    EmitBreakStmt(cast<BreakStmt>(*S));       break;
328   case Stmt::ContinueStmtClass: EmitContinueStmt(cast<ContinueStmt>(*S)); break;
329   case Stmt::DefaultStmtClass:  EmitDefaultStmt(cast<DefaultStmt>(*S));   break;
330   case Stmt::CaseStmtClass:     EmitCaseStmt(cast<CaseStmt>(*S));         break;
331   case Stmt::SEHLeaveStmtClass: EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S)); break;
332   }
333 
334   return true;
335 }
336 
337 /// EmitCompoundStmt - Emit a compound statement {..} node.  If GetLast is true,
338 /// this captures the expression result of the last sub-statement and returns it
339 /// (for use by the statement expression extension).
340 Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
341                                           AggValueSlot AggSlot) {
342   PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
343                              "LLVM IR generation of compound statement ('{}')");
344 
345   // Keep track of the current cleanup stack depth, including debug scopes.
346   LexicalScope Scope(*this, S.getSourceRange());
347 
348   return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
349 }
350 
351 Address
352 CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
353                                               bool GetLast,
354                                               AggValueSlot AggSlot) {
355 
356   for (CompoundStmt::const_body_iterator I = S.body_begin(),
357        E = S.body_end()-GetLast; I != E; ++I)
358     EmitStmt(*I);
359 
360   Address RetAlloca = Address::invalid();
361   if (GetLast) {
362     // We have to special case labels here.  They are statements, but when put
363     // at the end of a statement expression, they yield the value of their
364     // subexpression.  Handle this by walking through all labels we encounter,
365     // emitting them before we evaluate the subexpr.
366     const Stmt *LastStmt = S.body_back();
367     while (const LabelStmt *LS = dyn_cast<LabelStmt>(LastStmt)) {
368       EmitLabel(LS->getDecl());
369       LastStmt = LS->getSubStmt();
370     }
371 
372     EnsureInsertPoint();
373 
374     QualType ExprTy = cast<Expr>(LastStmt)->getType();
375     if (hasAggregateEvaluationKind(ExprTy)) {
376       EmitAggExpr(cast<Expr>(LastStmt), AggSlot);
377     } else {
378       // We can't return an RValue here because there might be cleanups at
379       // the end of the StmtExpr.  Because of that, we have to emit the result
380       // here into a temporary alloca.
381       RetAlloca = CreateMemTemp(ExprTy);
382       EmitAnyExprToMem(cast<Expr>(LastStmt), RetAlloca, Qualifiers(),
383                        /*IsInit*/false);
384     }
385 
386   }
387 
388   return RetAlloca;
389 }
390 
391 void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
392   llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());
393 
394   // If there is a cleanup stack, then we it isn't worth trying to
395   // simplify this block (we would need to remove it from the scope map
396   // and cleanup entry).
397   if (!EHStack.empty())
398     return;
399 
400   // Can only simplify direct branches.
401   if (!BI || !BI->isUnconditional())
402     return;
403 
404   // Can only simplify empty blocks.
405   if (BI->getIterator() != BB->begin())
406     return;
407 
408   BB->replaceAllUsesWith(BI->getSuccessor(0));
409   BI->eraseFromParent();
410   BB->eraseFromParent();
411 }
412 
413 void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
414   llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
415 
416   // Fall out of the current block (if necessary).
417   EmitBranch(BB);
418 
419   if (IsFinished && BB->use_empty()) {
420     delete BB;
421     return;
422   }
423 
424   // Place the block after the current block, if possible, or else at
425   // the end of the function.
426   if (CurBB && CurBB->getParent())
427     CurFn->getBasicBlockList().insertAfter(CurBB->getIterator(), BB);
428   else
429     CurFn->getBasicBlockList().push_back(BB);
430   Builder.SetInsertPoint(BB);
431 }
432 
433 void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
434   // Emit a branch from the current block to the target one if this
435   // was a real block.  If this was just a fall-through block after a
436   // terminator, don't emit it.
437   llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
438 
439   if (!CurBB || CurBB->getTerminator()) {
440     // If there is no insert point or the previous block is already
441     // terminated, don't touch it.
442   } else {
443     // Otherwise, create a fall-through branch.
444     Builder.CreateBr(Target);
445   }
446 
447   Builder.ClearInsertionPoint();
448 }
449 
450 void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
451   bool inserted = false;
452   for (llvm::User *u : block->users()) {
453     if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
454       CurFn->getBasicBlockList().insertAfter(insn->getParent()->getIterator(),
455                                              block);
456       inserted = true;
457       break;
458     }
459   }
460 
461   if (!inserted)
462     CurFn->getBasicBlockList().push_back(block);
463 
464   Builder.SetInsertPoint(block);
465 }
466 
467 CodeGenFunction::JumpDest
468 CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
469   JumpDest &Dest = LabelMap[D];
470   if (Dest.isValid()) return Dest;
471 
472   // Create, but don't insert, the new block.
473   Dest = JumpDest(createBasicBlock(D->getName()),
474                   EHScopeStack::stable_iterator::invalid(),
475                   NextCleanupDestIndex++);
476   return Dest;
477 }
478 
479 void CodeGenFunction::EmitLabel(const LabelDecl *D) {
480   // Add this label to the current lexical scope if we're within any
481   // normal cleanups.  Jumps "in" to this label --- when permitted by
482   // the language --- may need to be routed around such cleanups.
483   if (EHStack.hasNormalCleanups() && CurLexicalScope)
484     CurLexicalScope->addLabel(D);
485 
486   JumpDest &Dest = LabelMap[D];
487 
488   // If we didn't need a forward reference to this label, just go
489   // ahead and create a destination at the current scope.
490   if (!Dest.isValid()) {
491     Dest = getJumpDestInCurrentScope(D->getName());
492 
493   // Otherwise, we need to give this label a target depth and remove
494   // it from the branch-fixups list.
495   } else {
496     assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
497     Dest.setScopeDepth(EHStack.stable_begin());
498     ResolveBranchFixups(Dest.getBlock());
499   }
500 
501   EmitBlock(Dest.getBlock());
502   incrementProfileCounter(D->getStmt());
503 }
504 
505 /// Change the cleanup scope of the labels in this lexical scope to
506 /// match the scope of the enclosing context.
507 void CodeGenFunction::LexicalScope::rescopeLabels() {
508   assert(!Labels.empty());
509   EHScopeStack::stable_iterator innermostScope
510     = CGF.EHStack.getInnermostNormalCleanup();
511 
512   // Change the scope depth of all the labels.
513   for (SmallVectorImpl<const LabelDecl*>::const_iterator
514          i = Labels.begin(), e = Labels.end(); i != e; ++i) {
515     assert(CGF.LabelMap.count(*i));
516     JumpDest &dest = CGF.LabelMap.find(*i)->second;
517     assert(dest.getScopeDepth().isValid());
518     assert(innermostScope.encloses(dest.getScopeDepth()));
519     dest.setScopeDepth(innermostScope);
520   }
521 
522   // Reparent the labels if the new scope also has cleanups.
523   if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
524     ParentScope->Labels.append(Labels.begin(), Labels.end());
525   }
526 }
527 
528 
529 void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
530   EmitLabel(S.getDecl());
531   EmitStmt(S.getSubStmt());
532 }
533 
534 void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
535   const Stmt *SubStmt = S.getSubStmt();
536   switch (SubStmt->getStmtClass()) {
537   case Stmt::DoStmtClass:
538     EmitDoStmt(cast<DoStmt>(*SubStmt), S.getAttrs());
539     break;
540   case Stmt::ForStmtClass:
541     EmitForStmt(cast<ForStmt>(*SubStmt), S.getAttrs());
542     break;
543   case Stmt::WhileStmtClass:
544     EmitWhileStmt(cast<WhileStmt>(*SubStmt), S.getAttrs());
545     break;
546   case Stmt::CXXForRangeStmtClass:
547     EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*SubStmt), S.getAttrs());
548     break;
549   default:
550     EmitStmt(SubStmt);
551   }
552 }
553 
554 void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
555   // If this code is reachable then emit a stop point (if generating
556   // debug info). We have to do this ourselves because we are on the
557   // "simple" statement path.
558   if (HaveInsertPoint())
559     EmitStopPoint(&S);
560 
561   EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
562 }
563 
564 
565 void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
566   if (const LabelDecl *Target = S.getConstantTarget()) {
567     EmitBranchThroughCleanup(getJumpDestForLabel(Target));
568     return;
569   }
570 
571   // Ensure that we have an i8* for our PHI node.
572   llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
573                                          Int8PtrTy, "addr");
574   llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
575 
576   // Get the basic block for the indirect goto.
577   llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();
578 
579   // The first instruction in the block has to be the PHI for the switch dest,
580   // add an entry for this branch.
581   cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);
582 
583   EmitBranch(IndGotoBB);
584 }
585 
586 void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
587   // C99 6.8.4.1: The first substatement is executed if the expression compares
588   // unequal to 0.  The condition must be a scalar type.
589   LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());
590 
591   if (S.getInit())
592     EmitStmt(S.getInit());
593 
594   if (S.getConditionVariable())
595     EmitAutoVarDecl(*S.getConditionVariable());
596 
597   // If the condition constant folds and can be elided, try to avoid emitting
598   // the condition and the dead arm of the if/else.
599   bool CondConstant;
600   if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
601                                    S.isConstexpr())) {
602     // Figure out which block (then or else) is executed.
603     const Stmt *Executed = S.getThen();
604     const Stmt *Skipped  = S.getElse();
605     if (!CondConstant)  // Condition false?
606       std::swap(Executed, Skipped);
607 
608     // If the skipped block has no labels in it, just emit the executed block.
609     // This avoids emitting dead code and simplifies the CFG substantially.
610     if (S.isConstexpr() || !ContainsLabel(Skipped)) {
611       if (CondConstant)
612         incrementProfileCounter(&S);
613       if (Executed) {
614         RunCleanupsScope ExecutedScope(*this);
615         EmitStmt(Executed);
616       }
617       return;
618     }
619   }
620 
621   // Otherwise, the condition did not fold, or we couldn't elide it.  Just emit
622   // the conditional branch.
623   llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
624   llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
625   llvm::BasicBlock *ElseBlock = ContBlock;
626   if (S.getElse())
627     ElseBlock = createBasicBlock("if.else");
628 
629   EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock,
630                        getProfileCount(S.getThen()));
631 
632   // Emit the 'then' code.
633   EmitBlock(ThenBlock);
634   incrementProfileCounter(&S);
635   {
636     RunCleanupsScope ThenScope(*this);
637     EmitStmt(S.getThen());
638   }
639   EmitBranch(ContBlock);
640 
641   // Emit the 'else' code if present.
642   if (const Stmt *Else = S.getElse()) {
643     {
644       // There is no need to emit line number for an unconditional branch.
645       auto NL = ApplyDebugLocation::CreateEmpty(*this);
646       EmitBlock(ElseBlock);
647     }
648     {
649       RunCleanupsScope ElseScope(*this);
650       EmitStmt(Else);
651     }
652     {
653       // There is no need to emit line number for an unconditional branch.
654       auto NL = ApplyDebugLocation::CreateEmpty(*this);
655       EmitBranch(ContBlock);
656     }
657   }
658 
659   // Emit the continuation block for code after the if.
660   EmitBlock(ContBlock, true);
661 }
662 
663 void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
664                                     ArrayRef<const Attr *> WhileAttrs) {
665   // Emit the header for the loop, which will also become
666   // the continue target.
667   JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
668   EmitBlock(LoopHeader.getBlock());
669 
670   const SourceRange &R = S.getSourceRange();
671   LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), WhileAttrs,
672                  SourceLocToDebugLoc(R.getBegin()),
673                  SourceLocToDebugLoc(R.getEnd()));
674 
675   // Create an exit block for when the condition fails, which will
676   // also become the break target.
677   JumpDest LoopExit = getJumpDestInCurrentScope("while.end");
678 
679   // Store the blocks to use for break and continue.
680   BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));
681 
682   // C++ [stmt.while]p2:
683   //   When the condition of a while statement is a declaration, the
684   //   scope of the variable that is declared extends from its point
685   //   of declaration (3.3.2) to the end of the while statement.
686   //   [...]
687   //   The object created in a condition is destroyed and created
688   //   with each iteration of the loop.
689   RunCleanupsScope ConditionScope(*this);
690 
691   if (S.getConditionVariable())
692     EmitAutoVarDecl(*S.getConditionVariable());
693 
694   // Evaluate the conditional in the while header.  C99 6.8.5.1: The
695   // evaluation of the controlling expression takes place before each
696   // execution of the loop body.
697   llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
698 
699   // while(1) is common, avoid extra exit blocks.  Be sure
700   // to correctly handle break/continue though.
701   bool EmitBoolCondBranch = true;
702   if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
703     if (C->isOne())
704       EmitBoolCondBranch = false;
705 
706   // As long as the condition is true, go to the loop body.
707   llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
708   if (EmitBoolCondBranch) {
709     llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
710     if (ConditionScope.requiresCleanups())
711       ExitBlock = createBasicBlock("while.exit");
712     Builder.CreateCondBr(
713         BoolCondVal, LoopBody, ExitBlock,
714         createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));
715 
716     if (ExitBlock != LoopExit.getBlock()) {
717       EmitBlock(ExitBlock);
718       EmitBranchThroughCleanup(LoopExit);
719     }
720   }
721 
722   // Emit the loop body.  We have to emit this in a cleanup scope
723   // because it might be a singleton DeclStmt.
724   {
725     RunCleanupsScope BodyScope(*this);
726     EmitBlock(LoopBody);
727     incrementProfileCounter(&S);
728     EmitStmt(S.getBody());
729   }
730 
731   BreakContinueStack.pop_back();
732 
733   // Immediately force cleanup.
734   ConditionScope.ForceCleanup();
735 
736   EmitStopPoint(&S);
737   // Branch to the loop header again.
738   EmitBranch(LoopHeader.getBlock());
739 
740   LoopStack.pop();
741 
742   // Emit the exit block.
743   EmitBlock(LoopExit.getBlock(), true);
744 
745   // The LoopHeader typically is just a branch if we skipped emitting
746   // a branch, try to erase it.
747   if (!EmitBoolCondBranch)
748     SimplifyForwardingBlocks(LoopHeader.getBlock());
749 }
750 
751 void CodeGenFunction::EmitDoStmt(const DoStmt &S,
752                                  ArrayRef<const Attr *> DoAttrs) {
753   JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
754   JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");
755 
756   uint64_t ParentCount = getCurrentProfileCount();
757 
758   // Store the blocks to use for break and continue.
759   BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));
760 
761   // Emit the body of the loop.
762   llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
763 
764   const SourceRange &R = S.getSourceRange();
765   LoopStack.push(LoopBody, CGM.getContext(), DoAttrs,
766                  SourceLocToDebugLoc(R.getBegin()),
767                  SourceLocToDebugLoc(R.getEnd()));
768 
769   EmitBlockWithFallThrough(LoopBody, &S);
770   {
771     RunCleanupsScope BodyScope(*this);
772     EmitStmt(S.getBody());
773   }
774 
775   EmitBlock(LoopCond.getBlock());
776 
777   // C99 6.8.5.2: "The evaluation of the controlling expression takes place
778   // after each execution of the loop body."
779 
780   // Evaluate the conditional in the while header.
781   // C99 6.8.5p2/p4: The first substatement is executed if the expression
782   // compares unequal to 0.  The condition must be a scalar type.
783   llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
784 
785   BreakContinueStack.pop_back();
786 
787   // "do {} while (0)" is common in macros, avoid extra blocks.  Be sure
788   // to correctly handle break/continue though.
789   bool EmitBoolCondBranch = true;
790   if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
791     if (C->isZero())
792       EmitBoolCondBranch = false;
793 
794   // As long as the condition is true, iterate the loop.
795   if (EmitBoolCondBranch) {
796     uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
797     Builder.CreateCondBr(
798         BoolCondVal, LoopBody, LoopExit.getBlock(),
799         createProfileWeightsForLoop(S.getCond(), BackedgeCount));
800   }
801 
802   LoopStack.pop();
803 
804   // Emit the exit block.
805   EmitBlock(LoopExit.getBlock());
806 
807   // The DoCond block typically is just a branch if we skipped
808   // emitting a branch, try to erase it.
809   if (!EmitBoolCondBranch)
810     SimplifyForwardingBlocks(LoopCond.getBlock());
811 }
812 
813 void CodeGenFunction::EmitForStmt(const ForStmt &S,
814                                   ArrayRef<const Attr *> ForAttrs) {
815   JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
816 
817   LexicalScope ForScope(*this, S.getSourceRange());
818 
819   // Evaluate the first part before the loop.
820   if (S.getInit())
821     EmitStmt(S.getInit());
822 
823   // Start the loop with a block that tests the condition.
824   // If there's an increment, the continue scope will be overwritten
825   // later.
826   JumpDest Continue = getJumpDestInCurrentScope("for.cond");
827   llvm::BasicBlock *CondBlock = Continue.getBlock();
828   EmitBlock(CondBlock);
829 
830   const SourceRange &R = S.getSourceRange();
831   LoopStack.push(CondBlock, CGM.getContext(), ForAttrs,
832                  SourceLocToDebugLoc(R.getBegin()),
833                  SourceLocToDebugLoc(R.getEnd()));
834 
835   // If the for loop doesn't have an increment we can just use the
836   // condition as the continue block.  Otherwise we'll need to create
837   // a block for it (in the current scope, i.e. in the scope of the
838   // condition), and that we will become our continue block.
839   if (S.getInc())
840     Continue = getJumpDestInCurrentScope("for.inc");
841 
842   // Store the blocks to use for break and continue.
843   BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
844 
845   // Create a cleanup scope for the condition variable cleanups.
846   LexicalScope ConditionScope(*this, S.getSourceRange());
847 
848   if (S.getCond()) {
849     // If the for statement has a condition scope, emit the local variable
850     // declaration.
851     if (S.getConditionVariable()) {
852       EmitAutoVarDecl(*S.getConditionVariable());
853     }
854 
855     llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
856     // If there are any cleanups between here and the loop-exit scope,
857     // create a block to stage a loop exit along.
858     if (ForScope.requiresCleanups())
859       ExitBlock = createBasicBlock("for.cond.cleanup");
860 
861     // As long as the condition is true, iterate the loop.
862     llvm::BasicBlock *ForBody = createBasicBlock("for.body");
863 
864     // C99 6.8.5p2/p4: The first substatement is executed if the expression
865     // compares unequal to 0.  The condition must be a scalar type.
866     llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
867     Builder.CreateCondBr(
868         BoolCondVal, ForBody, ExitBlock,
869         createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));
870 
871     if (ExitBlock != LoopExit.getBlock()) {
872       EmitBlock(ExitBlock);
873       EmitBranchThroughCleanup(LoopExit);
874     }
875 
876     EmitBlock(ForBody);
877   } else {
878     // Treat it as a non-zero constant.  Don't even create a new block for the
879     // body, just fall into it.
880   }
881   incrementProfileCounter(&S);
882 
883   {
884     // Create a separate cleanup scope for the body, in case it is not
885     // a compound statement.
886     RunCleanupsScope BodyScope(*this);
887     EmitStmt(S.getBody());
888   }
889 
890   // If there is an increment, emit it next.
891   if (S.getInc()) {
892     EmitBlock(Continue.getBlock());
893     EmitStmt(S.getInc());
894   }
895 
896   BreakContinueStack.pop_back();
897 
898   ConditionScope.ForceCleanup();
899 
900   EmitStopPoint(&S);
901   EmitBranch(CondBlock);
902 
903   ForScope.ForceCleanup();
904 
905   LoopStack.pop();
906 
907   // Emit the fall-through block.
908   EmitBlock(LoopExit.getBlock(), true);
909 }
910 
911 void
912 CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
913                                      ArrayRef<const Attr *> ForAttrs) {
914   JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
915 
916   LexicalScope ForScope(*this, S.getSourceRange());
917 
918   // Evaluate the first pieces before the loop.
919   EmitStmt(S.getRangeStmt());
920   EmitStmt(S.getBeginStmt());
921   EmitStmt(S.getEndStmt());
922 
923   // Start the loop with a block that tests the condition.
924   // If there's an increment, the continue scope will be overwritten
925   // later.
926   llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
927   EmitBlock(CondBlock);
928 
929   const SourceRange &R = S.getSourceRange();
930   LoopStack.push(CondBlock, CGM.getContext(), ForAttrs,
931                  SourceLocToDebugLoc(R.getBegin()),
932                  SourceLocToDebugLoc(R.getEnd()));
933 
934   // If there are any cleanups between here and the loop-exit scope,
935   // create a block to stage a loop exit along.
936   llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
937   if (ForScope.requiresCleanups())
938     ExitBlock = createBasicBlock("for.cond.cleanup");
939 
940   // The loop body, consisting of the specified body and the loop variable.
941   llvm::BasicBlock *ForBody = createBasicBlock("for.body");
942 
943   // The body is executed if the expression, contextually converted
944   // to bool, is true.
945   llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
946   Builder.CreateCondBr(
947       BoolCondVal, ForBody, ExitBlock,
948       createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));
949 
950   if (ExitBlock != LoopExit.getBlock()) {
951     EmitBlock(ExitBlock);
952     EmitBranchThroughCleanup(LoopExit);
953   }
954 
955   EmitBlock(ForBody);
956   incrementProfileCounter(&S);
957 
958   // Create a block for the increment. In case of a 'continue', we jump there.
959   JumpDest Continue = getJumpDestInCurrentScope("for.inc");
960 
961   // Store the blocks to use for break and continue.
962   BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
963 
964   {
965     // Create a separate cleanup scope for the loop variable and body.
966     LexicalScope BodyScope(*this, S.getSourceRange());
967     EmitStmt(S.getLoopVarStmt());
968     EmitStmt(S.getBody());
969   }
970 
971   EmitStopPoint(&S);
972   // If there is an increment, emit it next.
973   EmitBlock(Continue.getBlock());
974   EmitStmt(S.getInc());
975 
976   BreakContinueStack.pop_back();
977 
978   EmitBranch(CondBlock);
979 
980   ForScope.ForceCleanup();
981 
982   LoopStack.pop();
983 
984   // Emit the fall-through block.
985   EmitBlock(LoopExit.getBlock(), true);
986 }
987 
988 void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
989   if (RV.isScalar()) {
990     Builder.CreateStore(RV.getScalarVal(), ReturnValue);
991   } else if (RV.isAggregate()) {
992     EmitAggregateCopy(ReturnValue, RV.getAggregateAddress(), Ty);
993   } else {
994     EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
995                        /*init*/ true);
996   }
997   EmitBranchThroughCleanup(ReturnBlock);
998 }
999 
1000 /// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
1001 /// if the function returns void, or may be missing one if the function returns
1002 /// non-void.  Fun stuff :).
1003 void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
1004   // Returning from an outlined SEH helper is UB, and we already warn on it.
1005   if (IsOutlinedSEHHelper) {
1006     Builder.CreateUnreachable();
1007     Builder.ClearInsertionPoint();
1008   }
1009 
1010   // Emit the result value, even if unused, to evalute the side effects.
1011   const Expr *RV = S.getRetValue();
1012 
1013   // Treat block literals in a return expression as if they appeared
1014   // in their own scope.  This permits a small, easily-implemented
1015   // exception to our over-conservative rules about not jumping to
1016   // statements following block literals with non-trivial cleanups.
1017   RunCleanupsScope cleanupScope(*this);
1018   if (const ExprWithCleanups *cleanups =
1019         dyn_cast_or_null<ExprWithCleanups>(RV)) {
1020     enterFullExpression(cleanups);
1021     RV = cleanups->getSubExpr();
1022   }
1023 
1024   // FIXME: Clean this up by using an LValue for ReturnTemp,
1025   // EmitStoreThroughLValue, and EmitAnyExpr.
1026   if (getLangOpts().ElideConstructors &&
1027       S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable()) {
1028     // Apply the named return value optimization for this return statement,
1029     // which means doing nothing: the appropriate result has already been
1030     // constructed into the NRVO variable.
1031 
1032     // If there is an NRVO flag for this variable, set it to 1 into indicate
1033     // that the cleanup code should not destroy the variable.
1034     if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
1035       Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
1036   } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
1037     // Make sure not to return anything, but evaluate the expression
1038     // for side effects.
1039     if (RV)
1040       EmitAnyExpr(RV);
1041   } else if (!RV) {
1042     // Do nothing (return value is left uninitialized)
1043   } else if (FnRetTy->isReferenceType()) {
1044     // If this function returns a reference, take the address of the expression
1045     // rather than the value.
1046     RValue Result = EmitReferenceBindingToExpr(RV);
1047     Builder.CreateStore(Result.getScalarVal(), ReturnValue);
1048   } else {
1049     switch (getEvaluationKind(RV->getType())) {
1050     case TEK_Scalar:
1051       Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
1052       break;
1053     case TEK_Complex:
1054       EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
1055                                 /*isInit*/ true);
1056       break;
1057     case TEK_Aggregate:
1058       EmitAggExpr(RV, AggValueSlot::forAddr(ReturnValue,
1059                                             Qualifiers(),
1060                                             AggValueSlot::IsDestructed,
1061                                             AggValueSlot::DoesNotNeedGCBarriers,
1062                                             AggValueSlot::IsNotAliased));
1063       break;
1064     }
1065   }
1066 
1067   ++NumReturnExprs;
1068   if (!RV || RV->isEvaluatable(getContext()))
1069     ++NumSimpleReturnExprs;
1070 
1071   cleanupScope.ForceCleanup();
1072   EmitBranchThroughCleanup(ReturnBlock);
1073 }
1074 
1075 void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
1076   // As long as debug info is modeled with instructions, we have to ensure we
1077   // have a place to insert here and write the stop point here.
1078   if (HaveInsertPoint())
1079     EmitStopPoint(&S);
1080 
1081   for (const auto *I : S.decls())
1082     EmitDecl(*I);
1083 }
1084 
1085 void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
1086   assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");
1087 
1088   // If this code is reachable then emit a stop point (if generating
1089   // debug info). We have to do this ourselves because we are on the
1090   // "simple" statement path.
1091   if (HaveInsertPoint())
1092     EmitStopPoint(&S);
1093 
1094   EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
1095 }
1096 
1097 void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
1098   assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
1099 
1100   // If this code is reachable then emit a stop point (if generating
1101   // debug info). We have to do this ourselves because we are on the
1102   // "simple" statement path.
1103   if (HaveInsertPoint())
1104     EmitStopPoint(&S);
1105 
1106   EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
1107 }
1108 
1109 /// EmitCaseStmtRange - If case statement range is not too big then
1110 /// add multiple cases to switch instruction, one for each value within
1111 /// the range. If range is too big then emit "if" condition check.
1112 void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
1113   assert(S.getRHS() && "Expected RHS value in CaseStmt");
1114 
1115   llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
1116   llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());
1117 
1118   // Emit the code for this case. We do this first to make sure it is
1119   // properly chained from our predecessor before generating the
1120   // switch machinery to enter this block.
1121   llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1122   EmitBlockWithFallThrough(CaseDest, &S);
1123   EmitStmt(S.getSubStmt());
1124 
1125   // If range is empty, do nothing.
1126   if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
1127     return;
1128 
1129   llvm::APInt Range = RHS - LHS;
1130   // FIXME: parameters such as this should not be hardcoded.
1131   if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
1132     // Range is small enough to add multiple switch instruction cases.
1133     uint64_t Total = getProfileCount(&S);
1134     unsigned NCases = Range.getZExtValue() + 1;
1135     // We only have one region counter for the entire set of cases here, so we
1136     // need to divide the weights evenly between the generated cases, ensuring
1137     // that the total weight is preserved. E.g., a weight of 5 over three cases
1138     // will be distributed as weights of 2, 2, and 1.
1139     uint64_t Weight = Total / NCases, Rem = Total % NCases;
1140     for (unsigned I = 0; I != NCases; ++I) {
1141       if (SwitchWeights)
1142         SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
1143       if (Rem)
1144         Rem--;
1145       SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
1146       LHS++;
1147     }
1148     return;
1149   }
1150 
1151   // The range is too big. Emit "if" condition into a new block,
1152   // making sure to save and restore the current insertion point.
1153   llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();
1154 
1155   // Push this test onto the chain of range checks (which terminates
1156   // in the default basic block). The switch's default will be changed
1157   // to the top of this chain after switch emission is complete.
1158   llvm::BasicBlock *FalseDest = CaseRangeBlock;
1159   CaseRangeBlock = createBasicBlock("sw.caserange");
1160 
1161   CurFn->getBasicBlockList().push_back(CaseRangeBlock);
1162   Builder.SetInsertPoint(CaseRangeBlock);
1163 
1164   // Emit range check.
1165   llvm::Value *Diff =
1166     Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
1167   llvm::Value *Cond =
1168     Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");
1169 
1170   llvm::MDNode *Weights = nullptr;
1171   if (SwitchWeights) {
1172     uint64_t ThisCount = getProfileCount(&S);
1173     uint64_t DefaultCount = (*SwitchWeights)[0];
1174     Weights = createProfileWeights(ThisCount, DefaultCount);
1175 
1176     // Since we're chaining the switch default through each large case range, we
1177     // need to update the weight for the default, ie, the first case, to include
1178     // this case.
1179     (*SwitchWeights)[0] += ThisCount;
1180   }
1181   Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);
1182 
1183   // Restore the appropriate insertion point.
1184   if (RestoreBB)
1185     Builder.SetInsertPoint(RestoreBB);
1186   else
1187     Builder.ClearInsertionPoint();
1188 }
1189 
1190 void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
1191   // If there is no enclosing switch instance that we're aware of, then this
1192   // case statement and its block can be elided.  This situation only happens
1193   // when we've constant-folded the switch, are emitting the constant case,
1194   // and part of the constant case includes another case statement.  For
1195   // instance: switch (4) { case 4: do { case 5: } while (1); }
1196   if (!SwitchInsn) {
1197     EmitStmt(S.getSubStmt());
1198     return;
1199   }
1200 
1201   // Handle case ranges.
1202   if (S.getRHS()) {
1203     EmitCaseStmtRange(S);
1204     return;
1205   }
1206 
1207   llvm::ConstantInt *CaseVal =
1208     Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
1209 
1210   // If the body of the case is just a 'break', try to not emit an empty block.
1211   // If we're profiling or we're not optimizing, leave the block in for better
1212   // debug and coverage analysis.
1213   if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
1214       CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1215       isa<BreakStmt>(S.getSubStmt())) {
1216     JumpDest Block = BreakContinueStack.back().BreakBlock;
1217 
1218     // Only do this optimization if there are no cleanups that need emitting.
1219     if (isObviouslyBranchWithoutCleanups(Block)) {
1220       if (SwitchWeights)
1221         SwitchWeights->push_back(getProfileCount(&S));
1222       SwitchInsn->addCase(CaseVal, Block.getBlock());
1223 
1224       // If there was a fallthrough into this case, make sure to redirect it to
1225       // the end of the switch as well.
1226       if (Builder.GetInsertBlock()) {
1227         Builder.CreateBr(Block.getBlock());
1228         Builder.ClearInsertionPoint();
1229       }
1230       return;
1231     }
1232   }
1233 
1234   llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1235   EmitBlockWithFallThrough(CaseDest, &S);
1236   if (SwitchWeights)
1237     SwitchWeights->push_back(getProfileCount(&S));
1238   SwitchInsn->addCase(CaseVal, CaseDest);
1239 
1240   // Recursively emitting the statement is acceptable, but is not wonderful for
1241   // code where we have many case statements nested together, i.e.:
1242   //  case 1:
1243   //    case 2:
1244   //      case 3: etc.
1245   // Handling this recursively will create a new block for each case statement
1246   // that falls through to the next case which is IR intensive.  It also causes
1247   // deep recursion which can run into stack depth limitations.  Handle
1248   // sequential non-range case statements specially.
1249   const CaseStmt *CurCase = &S;
1250   const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
1251 
1252   // Otherwise, iteratively add consecutive cases to this switch stmt.
1253   while (NextCase && NextCase->getRHS() == nullptr) {
1254     CurCase = NextCase;
1255     llvm::ConstantInt *CaseVal =
1256       Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
1257 
1258     if (SwitchWeights)
1259       SwitchWeights->push_back(getProfileCount(NextCase));
1260     if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
1261       CaseDest = createBasicBlock("sw.bb");
1262       EmitBlockWithFallThrough(CaseDest, &S);
1263     }
1264 
1265     SwitchInsn->addCase(CaseVal, CaseDest);
1266     NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
1267   }
1268 
1269   // Normal default recursion for non-cases.
1270   EmitStmt(CurCase->getSubStmt());
1271 }
1272 
1273 void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S) {
1274   // If there is no enclosing switch instance that we're aware of, then this
1275   // default statement can be elided. This situation only happens when we've
1276   // constant-folded the switch.
1277   if (!SwitchInsn) {
1278     EmitStmt(S.getSubStmt());
1279     return;
1280   }
1281 
1282   llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
1283   assert(DefaultBlock->empty() &&
1284          "EmitDefaultStmt: Default block already defined?");
1285 
1286   EmitBlockWithFallThrough(DefaultBlock, &S);
1287 
1288   EmitStmt(S.getSubStmt());
1289 }
1290 
1291 /// CollectStatementsForCase - Given the body of a 'switch' statement and a
1292 /// constant value that is being switched on, see if we can dead code eliminate
1293 /// the body of the switch to a simple series of statements to emit.  Basically,
1294 /// on a switch (5) we want to find these statements:
1295 ///    case 5:
1296 ///      printf(...);    <--
1297 ///      ++i;            <--
1298 ///      break;
1299 ///
1300 /// and add them to the ResultStmts vector.  If it is unsafe to do this
1301 /// transformation (for example, one of the elided statements contains a label
1302 /// that might be jumped to), return CSFC_Failure.  If we handled it and 'S'
1303 /// should include statements after it (e.g. the printf() line is a substmt of
1304 /// the case) then return CSFC_FallThrough.  If we handled it and found a break
1305 /// statement, then return CSFC_Success.
1306 ///
1307 /// If Case is non-null, then we are looking for the specified case, checking
1308 /// that nothing we jump over contains labels.  If Case is null, then we found
1309 /// the case and are looking for the break.
1310 ///
1311 /// If the recursive walk actually finds our Case, then we set FoundCase to
1312 /// true.
1313 ///
1314 enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
1315 static CSFC_Result CollectStatementsForCase(const Stmt *S,
1316                                             const SwitchCase *Case,
1317                                             bool &FoundCase,
1318                               SmallVectorImpl<const Stmt*> &ResultStmts) {
1319   // If this is a null statement, just succeed.
1320   if (!S)
1321     return Case ? CSFC_Success : CSFC_FallThrough;
1322 
1323   // If this is the switchcase (case 4: or default) that we're looking for, then
1324   // we're in business.  Just add the substatement.
1325   if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
1326     if (S == Case) {
1327       FoundCase = true;
1328       return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
1329                                       ResultStmts);
1330     }
1331 
1332     // Otherwise, this is some other case or default statement, just ignore it.
1333     return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
1334                                     ResultStmts);
1335   }
1336 
1337   // If we are in the live part of the code and we found our break statement,
1338   // return a success!
1339   if (!Case && isa<BreakStmt>(S))
1340     return CSFC_Success;
1341 
1342   // If this is a switch statement, then it might contain the SwitchCase, the
1343   // break, or neither.
1344   if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
1345     // Handle this as two cases: we might be looking for the SwitchCase (if so
1346     // the skipped statements must be skippable) or we might already have it.
1347     CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
1348     bool StartedInLiveCode = FoundCase;
1349     unsigned StartSize = ResultStmts.size();
1350 
1351     // If we've not found the case yet, scan through looking for it.
1352     if (Case) {
1353       // Keep track of whether we see a skipped declaration.  The code could be
1354       // using the declaration even if it is skipped, so we can't optimize out
1355       // the decl if the kept statements might refer to it.
1356       bool HadSkippedDecl = false;
1357 
1358       // If we're looking for the case, just see if we can skip each of the
1359       // substatements.
1360       for (; Case && I != E; ++I) {
1361         HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);
1362 
1363         switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
1364         case CSFC_Failure: return CSFC_Failure;
1365         case CSFC_Success:
1366           // A successful result means that either 1) that the statement doesn't
1367           // have the case and is skippable, or 2) does contain the case value
1368           // and also contains the break to exit the switch.  In the later case,
1369           // we just verify the rest of the statements are elidable.
1370           if (FoundCase) {
1371             // If we found the case and skipped declarations, we can't do the
1372             // optimization.
1373             if (HadSkippedDecl)
1374               return CSFC_Failure;
1375 
1376             for (++I; I != E; ++I)
1377               if (CodeGenFunction::ContainsLabel(*I, true))
1378                 return CSFC_Failure;
1379             return CSFC_Success;
1380           }
1381           break;
1382         case CSFC_FallThrough:
1383           // If we have a fallthrough condition, then we must have found the
1384           // case started to include statements.  Consider the rest of the
1385           // statements in the compound statement as candidates for inclusion.
1386           assert(FoundCase && "Didn't find case but returned fallthrough?");
1387           // We recursively found Case, so we're not looking for it anymore.
1388           Case = nullptr;
1389 
1390           // If we found the case and skipped declarations, we can't do the
1391           // optimization.
1392           if (HadSkippedDecl)
1393             return CSFC_Failure;
1394           break;
1395         }
1396       }
1397 
1398       if (!FoundCase)
1399         return CSFC_Success;
1400 
1401       assert(!HadSkippedDecl && "fallthrough after skipping decl");
1402     }
1403 
1404     // If we have statements in our range, then we know that the statements are
1405     // live and need to be added to the set of statements we're tracking.
1406     bool AnyDecls = false;
1407     for (; I != E; ++I) {
1408       AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I);
1409 
1410       switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
1411       case CSFC_Failure: return CSFC_Failure;
1412       case CSFC_FallThrough:
1413         // A fallthrough result means that the statement was simple and just
1414         // included in ResultStmt, keep adding them afterwards.
1415         break;
1416       case CSFC_Success:
1417         // A successful result means that we found the break statement and
1418         // stopped statement inclusion.  We just ensure that any leftover stmts
1419         // are skippable and return success ourselves.
1420         for (++I; I != E; ++I)
1421           if (CodeGenFunction::ContainsLabel(*I, true))
1422             return CSFC_Failure;
1423         return CSFC_Success;
1424       }
1425     }
1426 
1427     // If we're about to fall out of a scope without hitting a 'break;', we
1428     // can't perform the optimization if there were any decls in that scope
1429     // (we'd lose their end-of-lifetime).
1430     if (AnyDecls) {
1431       // If the entire compound statement was live, there's one more thing we
1432       // can try before giving up: emit the whole thing as a single statement.
1433       // We can do that unless the statement contains a 'break;'.
1434       // FIXME: Such a break must be at the end of a construct within this one.
1435       // We could emit this by just ignoring the BreakStmts entirely.
1436       if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
1437         ResultStmts.resize(StartSize);
1438         ResultStmts.push_back(S);
1439       } else {
1440         return CSFC_Failure;
1441       }
1442     }
1443 
1444     return CSFC_FallThrough;
1445   }
1446 
1447   // Okay, this is some other statement that we don't handle explicitly, like a
1448   // for statement or increment etc.  If we are skipping over this statement,
1449   // just verify it doesn't have labels, which would make it invalid to elide.
1450   if (Case) {
1451     if (CodeGenFunction::ContainsLabel(S, true))
1452       return CSFC_Failure;
1453     return CSFC_Success;
1454   }
1455 
1456   // Otherwise, we want to include this statement.  Everything is cool with that
1457   // so long as it doesn't contain a break out of the switch we're in.
1458   if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
1459 
1460   // Otherwise, everything is great.  Include the statement and tell the caller
1461   // that we fall through and include the next statement as well.
1462   ResultStmts.push_back(S);
1463   return CSFC_FallThrough;
1464 }
1465 
1466 /// FindCaseStatementsForValue - Find the case statement being jumped to and
1467 /// then invoke CollectStatementsForCase to find the list of statements to emit
1468 /// for a switch on constant.  See the comment above CollectStatementsForCase
1469 /// for more details.
1470 static bool FindCaseStatementsForValue(const SwitchStmt &S,
1471                                        const llvm::APSInt &ConstantCondValue,
1472                                 SmallVectorImpl<const Stmt*> &ResultStmts,
1473                                        ASTContext &C,
1474                                        const SwitchCase *&ResultCase) {
1475   // First step, find the switch case that is being branched to.  We can do this
1476   // efficiently by scanning the SwitchCase list.
1477   const SwitchCase *Case = S.getSwitchCaseList();
1478   const DefaultStmt *DefaultCase = nullptr;
1479 
1480   for (; Case; Case = Case->getNextSwitchCase()) {
1481     // It's either a default or case.  Just remember the default statement in
1482     // case we're not jumping to any numbered cases.
1483     if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
1484       DefaultCase = DS;
1485       continue;
1486     }
1487 
1488     // Check to see if this case is the one we're looking for.
1489     const CaseStmt *CS = cast<CaseStmt>(Case);
1490     // Don't handle case ranges yet.
1491     if (CS->getRHS()) return false;
1492 
1493     // If we found our case, remember it as 'case'.
1494     if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
1495       break;
1496   }
1497 
1498   // If we didn't find a matching case, we use a default if it exists, or we
1499   // elide the whole switch body!
1500   if (!Case) {
1501     // It is safe to elide the body of the switch if it doesn't contain labels
1502     // etc.  If it is safe, return successfully with an empty ResultStmts list.
1503     if (!DefaultCase)
1504       return !CodeGenFunction::ContainsLabel(&S);
1505     Case = DefaultCase;
1506   }
1507 
1508   // Ok, we know which case is being jumped to, try to collect all the
1509   // statements that follow it.  This can fail for a variety of reasons.  Also,
1510   // check to see that the recursive walk actually found our case statement.
1511   // Insane cases like this can fail to find it in the recursive walk since we
1512   // don't handle every stmt kind:
1513   // switch (4) {
1514   //   while (1) {
1515   //     case 4: ...
1516   bool FoundCase = false;
1517   ResultCase = Case;
1518   return CollectStatementsForCase(S.getBody(), Case, FoundCase,
1519                                   ResultStmts) != CSFC_Failure &&
1520          FoundCase;
1521 }
1522 
1523 void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
1524   // Handle nested switch statements.
1525   llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
1526   SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
1527   llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
1528 
1529   // See if we can constant fold the condition of the switch and therefore only
1530   // emit the live case statement (if any) of the switch.
1531   llvm::APSInt ConstantCondValue;
1532   if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
1533     SmallVector<const Stmt*, 4> CaseStmts;
1534     const SwitchCase *Case = nullptr;
1535     if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
1536                                    getContext(), Case)) {
1537       if (Case)
1538         incrementProfileCounter(Case);
1539       RunCleanupsScope ExecutedScope(*this);
1540 
1541       if (S.getInit())
1542         EmitStmt(S.getInit());
1543 
1544       // Emit the condition variable if needed inside the entire cleanup scope
1545       // used by this special case for constant folded switches.
1546       if (S.getConditionVariable())
1547         EmitAutoVarDecl(*S.getConditionVariable());
1548 
1549       // At this point, we are no longer "within" a switch instance, so
1550       // we can temporarily enforce this to ensure that any embedded case
1551       // statements are not emitted.
1552       SwitchInsn = nullptr;
1553 
1554       // Okay, we can dead code eliminate everything except this case.  Emit the
1555       // specified series of statements and we're good.
1556       for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
1557         EmitStmt(CaseStmts[i]);
1558       incrementProfileCounter(&S);
1559 
1560       // Now we want to restore the saved switch instance so that nested
1561       // switches continue to function properly
1562       SwitchInsn = SavedSwitchInsn;
1563 
1564       return;
1565     }
1566   }
1567 
1568   JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
1569 
1570   RunCleanupsScope ConditionScope(*this);
1571 
1572   if (S.getInit())
1573     EmitStmt(S.getInit());
1574 
1575   if (S.getConditionVariable())
1576     EmitAutoVarDecl(*S.getConditionVariable());
1577   llvm::Value *CondV = EmitScalarExpr(S.getCond());
1578 
1579   // Create basic block to hold stuff that comes after switch
1580   // statement. We also need to create a default block now so that
1581   // explicit case ranges tests can have a place to jump to on
1582   // failure.
1583   llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
1584   SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
1585   if (PGO.haveRegionCounts()) {
1586     // Walk the SwitchCase list to find how many there are.
1587     uint64_t DefaultCount = 0;
1588     unsigned NumCases = 0;
1589     for (const SwitchCase *Case = S.getSwitchCaseList();
1590          Case;
1591          Case = Case->getNextSwitchCase()) {
1592       if (isa<DefaultStmt>(Case))
1593         DefaultCount = getProfileCount(Case);
1594       NumCases += 1;
1595     }
1596     SwitchWeights = new SmallVector<uint64_t, 16>();
1597     SwitchWeights->reserve(NumCases);
1598     // The default needs to be first. We store the edge count, so we already
1599     // know the right weight.
1600     SwitchWeights->push_back(DefaultCount);
1601   }
1602   CaseRangeBlock = DefaultBlock;
1603 
1604   // Clear the insertion point to indicate we are in unreachable code.
1605   Builder.ClearInsertionPoint();
1606 
1607   // All break statements jump to NextBlock. If BreakContinueStack is non-empty
1608   // then reuse last ContinueBlock.
1609   JumpDest OuterContinue;
1610   if (!BreakContinueStack.empty())
1611     OuterContinue = BreakContinueStack.back().ContinueBlock;
1612 
1613   BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
1614 
1615   // Emit switch body.
1616   EmitStmt(S.getBody());
1617 
1618   BreakContinueStack.pop_back();
1619 
1620   // Update the default block in case explicit case range tests have
1621   // been chained on top.
1622   SwitchInsn->setDefaultDest(CaseRangeBlock);
1623 
1624   // If a default was never emitted:
1625   if (!DefaultBlock->getParent()) {
1626     // If we have cleanups, emit the default block so that there's a
1627     // place to jump through the cleanups from.
1628     if (ConditionScope.requiresCleanups()) {
1629       EmitBlock(DefaultBlock);
1630 
1631     // Otherwise, just forward the default block to the switch end.
1632     } else {
1633       DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
1634       delete DefaultBlock;
1635     }
1636   }
1637 
1638   ConditionScope.ForceCleanup();
1639 
1640   // Emit continuation.
1641   EmitBlock(SwitchExit.getBlock(), true);
1642   incrementProfileCounter(&S);
1643 
1644   // If the switch has a condition wrapped by __builtin_unpredictable,
1645   // create metadata that specifies that the switch is unpredictable.
1646   // Don't bother if not optimizing because that metadata would not be used.
1647   auto *Call = dyn_cast<CallExpr>(S.getCond());
1648   if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
1649     auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
1650     if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
1651       llvm::MDBuilder MDHelper(getLLVMContext());
1652       SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
1653                               MDHelper.createUnpredictable());
1654     }
1655   }
1656 
1657   if (SwitchWeights) {
1658     assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
1659            "switch weights do not match switch cases");
1660     // If there's only one jump destination there's no sense weighting it.
1661     if (SwitchWeights->size() > 1)
1662       SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
1663                               createProfileWeights(*SwitchWeights));
1664     delete SwitchWeights;
1665   }
1666   SwitchInsn = SavedSwitchInsn;
1667   SwitchWeights = SavedSwitchWeights;
1668   CaseRangeBlock = SavedCRBlock;
1669 }
1670 
1671 static std::string
1672 SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
1673                  SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=nullptr) {
1674   std::string Result;
1675 
1676   while (*Constraint) {
1677     switch (*Constraint) {
1678     default:
1679       Result += Target.convertConstraint(Constraint);
1680       break;
1681     // Ignore these
1682     case '*':
1683     case '?':
1684     case '!':
1685     case '=': // Will see this and the following in mult-alt constraints.
1686     case '+':
1687       break;
1688     case '#': // Ignore the rest of the constraint alternative.
1689       while (Constraint[1] && Constraint[1] != ',')
1690         Constraint++;
1691       break;
1692     case '&':
1693     case '%':
1694       Result += *Constraint;
1695       while (Constraint[1] && Constraint[1] == *Constraint)
1696         Constraint++;
1697       break;
1698     case ',':
1699       Result += "|";
1700       break;
1701     case 'g':
1702       Result += "imr";
1703       break;
1704     case '[': {
1705       assert(OutCons &&
1706              "Must pass output names to constraints with a symbolic name");
1707       unsigned Index;
1708       bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
1709       assert(result && "Could not resolve symbolic name"); (void)result;
1710       Result += llvm::utostr(Index);
1711       break;
1712     }
1713     }
1714 
1715     Constraint++;
1716   }
1717 
1718   return Result;
1719 }
1720 
1721 /// AddVariableConstraints - Look at AsmExpr and if it is a variable declared
1722 /// as using a particular register add that as a constraint that will be used
1723 /// in this asm stmt.
1724 static std::string
1725 AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
1726                        const TargetInfo &Target, CodeGenModule &CGM,
1727                        const AsmStmt &Stmt, const bool EarlyClobber) {
1728   const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
1729   if (!AsmDeclRef)
1730     return Constraint;
1731   const ValueDecl &Value = *AsmDeclRef->getDecl();
1732   const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
1733   if (!Variable)
1734     return Constraint;
1735   if (Variable->getStorageClass() != SC_Register)
1736     return Constraint;
1737   AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
1738   if (!Attr)
1739     return Constraint;
1740   StringRef Register = Attr->getLabel();
1741   assert(Target.isValidGCCRegisterName(Register));
1742   // We're using validateOutputConstraint here because we only care if
1743   // this is a register constraint.
1744   TargetInfo::ConstraintInfo Info(Constraint, "");
1745   if (Target.validateOutputConstraint(Info) &&
1746       !Info.allowsRegister()) {
1747     CGM.ErrorUnsupported(&Stmt, "__asm__");
1748     return Constraint;
1749   }
1750   // Canonicalize the register here before returning it.
1751   Register = Target.getNormalizedGCCRegisterName(Register);
1752   return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
1753 }
1754 
1755 llvm::Value*
1756 CodeGenFunction::EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
1757                                     LValue InputValue, QualType InputType,
1758                                     std::string &ConstraintStr,
1759                                     SourceLocation Loc) {
1760   llvm::Value *Arg;
1761   if (Info.allowsRegister() || !Info.allowsMemory()) {
1762     if (CodeGenFunction::hasScalarEvaluationKind(InputType)) {
1763       Arg = EmitLoadOfLValue(InputValue, Loc).getScalarVal();
1764     } else {
1765       llvm::Type *Ty = ConvertType(InputType);
1766       uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
1767       if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
1768         Ty = llvm::IntegerType::get(getLLVMContext(), Size);
1769         Ty = llvm::PointerType::getUnqual(Ty);
1770 
1771         Arg = Builder.CreateLoad(Builder.CreateBitCast(InputValue.getAddress(),
1772                                                        Ty));
1773       } else {
1774         Arg = InputValue.getPointer();
1775         ConstraintStr += '*';
1776       }
1777     }
1778   } else {
1779     Arg = InputValue.getPointer();
1780     ConstraintStr += '*';
1781   }
1782 
1783   return Arg;
1784 }
1785 
1786 llvm::Value* CodeGenFunction::EmitAsmInput(
1787                                          const TargetInfo::ConstraintInfo &Info,
1788                                            const Expr *InputExpr,
1789                                            std::string &ConstraintStr) {
1790   // If this can't be a register or memory, i.e., has to be a constant
1791   // (immediate or symbolic), try to emit it as such.
1792   if (!Info.allowsRegister() && !Info.allowsMemory()) {
1793     llvm::APSInt Result;
1794     if (InputExpr->EvaluateAsInt(Result, getContext()))
1795       return llvm::ConstantInt::get(getLLVMContext(), Result);
1796     assert(!Info.requiresImmediateConstant() &&
1797            "Required-immediate inlineasm arg isn't constant?");
1798   }
1799 
1800   if (Info.allowsRegister() || !Info.allowsMemory())
1801     if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
1802       return EmitScalarExpr(InputExpr);
1803   if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
1804     return EmitScalarExpr(InputExpr);
1805   InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
1806   LValue Dest = EmitLValue(InputExpr);
1807   return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
1808                             InputExpr->getExprLoc());
1809 }
1810 
1811 /// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
1812 /// asm call instruction.  The !srcloc MDNode contains a list of constant
1813 /// integers which are the source locations of the start of each line in the
1814 /// asm.
1815 static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
1816                                       CodeGenFunction &CGF) {
1817   SmallVector<llvm::Metadata *, 8> Locs;
1818   // Add the location of the first line to the MDNode.
1819   Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
1820       CGF.Int32Ty, Str->getLocStart().getRawEncoding())));
1821   StringRef StrVal = Str->getString();
1822   if (!StrVal.empty()) {
1823     const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
1824     const LangOptions &LangOpts = CGF.CGM.getLangOpts();
1825     unsigned StartToken = 0;
1826     unsigned ByteOffset = 0;
1827 
1828     // Add the location of the start of each subsequent line of the asm to the
1829     // MDNode.
1830     for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
1831       if (StrVal[i] != '\n') continue;
1832       SourceLocation LineLoc = Str->getLocationOfByte(
1833           i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
1834       Locs.push_back(llvm::ConstantAsMetadata::get(
1835           llvm::ConstantInt::get(CGF.Int32Ty, LineLoc.getRawEncoding())));
1836     }
1837   }
1838 
1839   return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
1840 }
1841 
1842 void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
1843   // Assemble the final asm string.
1844   std::string AsmString = S.generateAsmString(getContext());
1845 
1846   // Get all the output and input constraints together.
1847   SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
1848   SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
1849 
1850   for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
1851     StringRef Name;
1852     if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
1853       Name = GAS->getOutputName(i);
1854     TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
1855     bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
1856     assert(IsValid && "Failed to parse output constraint");
1857     OutputConstraintInfos.push_back(Info);
1858   }
1859 
1860   for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
1861     StringRef Name;
1862     if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
1863       Name = GAS->getInputName(i);
1864     TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
1865     bool IsValid =
1866       getTarget().validateInputConstraint(OutputConstraintInfos, Info);
1867     assert(IsValid && "Failed to parse input constraint"); (void)IsValid;
1868     InputConstraintInfos.push_back(Info);
1869   }
1870 
1871   std::string Constraints;
1872 
1873   std::vector<LValue> ResultRegDests;
1874   std::vector<QualType> ResultRegQualTys;
1875   std::vector<llvm::Type *> ResultRegTypes;
1876   std::vector<llvm::Type *> ResultTruncRegTypes;
1877   std::vector<llvm::Type *> ArgTypes;
1878   std::vector<llvm::Value*> Args;
1879 
1880   // Keep track of inout constraints.
1881   std::string InOutConstraints;
1882   std::vector<llvm::Value*> InOutArgs;
1883   std::vector<llvm::Type*> InOutArgTypes;
1884 
1885   // An inline asm can be marked readonly if it meets the following conditions:
1886   //  - it doesn't have any sideeffects
1887   //  - it doesn't clobber memory
1888   //  - it doesn't return a value by-reference
1889   // It can be marked readnone if it doesn't have any input memory constraints
1890   // in addition to meeting the conditions listed above.
1891   bool ReadOnly = true, ReadNone = true;
1892 
1893   for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
1894     TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
1895 
1896     // Simplify the output constraint.
1897     std::string OutputConstraint(S.getOutputConstraint(i));
1898     OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
1899                                           getTarget());
1900 
1901     const Expr *OutExpr = S.getOutputExpr(i);
1902     OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
1903 
1904     OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
1905                                               getTarget(), CGM, S,
1906                                               Info.earlyClobber());
1907 
1908     LValue Dest = EmitLValue(OutExpr);
1909     if (!Constraints.empty())
1910       Constraints += ',';
1911 
1912     // If this is a register output, then make the inline asm return it
1913     // by-value.  If this is a memory result, return the value by-reference.
1914     if (!Info.allowsMemory() && hasScalarEvaluationKind(OutExpr->getType())) {
1915       Constraints += "=" + OutputConstraint;
1916       ResultRegQualTys.push_back(OutExpr->getType());
1917       ResultRegDests.push_back(Dest);
1918       ResultRegTypes.push_back(ConvertTypeForMem(OutExpr->getType()));
1919       ResultTruncRegTypes.push_back(ResultRegTypes.back());
1920 
1921       // If this output is tied to an input, and if the input is larger, then
1922       // we need to set the actual result type of the inline asm node to be the
1923       // same as the input type.
1924       if (Info.hasMatchingInput()) {
1925         unsigned InputNo;
1926         for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
1927           TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
1928           if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
1929             break;
1930         }
1931         assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
1932 
1933         QualType InputTy = S.getInputExpr(InputNo)->getType();
1934         QualType OutputType = OutExpr->getType();
1935 
1936         uint64_t InputSize = getContext().getTypeSize(InputTy);
1937         if (getContext().getTypeSize(OutputType) < InputSize) {
1938           // Form the asm to return the value as a larger integer or fp type.
1939           ResultRegTypes.back() = ConvertType(InputTy);
1940         }
1941       }
1942       if (llvm::Type* AdjTy =
1943             getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
1944                                                  ResultRegTypes.back()))
1945         ResultRegTypes.back() = AdjTy;
1946       else {
1947         CGM.getDiags().Report(S.getAsmLoc(),
1948                               diag::err_asm_invalid_type_in_input)
1949             << OutExpr->getType() << OutputConstraint;
1950       }
1951     } else {
1952       ArgTypes.push_back(Dest.getAddress().getType());
1953       Args.push_back(Dest.getPointer());
1954       Constraints += "=*";
1955       Constraints += OutputConstraint;
1956       ReadOnly = ReadNone = false;
1957     }
1958 
1959     if (Info.isReadWrite()) {
1960       InOutConstraints += ',';
1961 
1962       const Expr *InputExpr = S.getOutputExpr(i);
1963       llvm::Value *Arg = EmitAsmInputLValue(Info, Dest, InputExpr->getType(),
1964                                             InOutConstraints,
1965                                             InputExpr->getExprLoc());
1966 
1967       if (llvm::Type* AdjTy =
1968           getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
1969                                                Arg->getType()))
1970         Arg = Builder.CreateBitCast(Arg, AdjTy);
1971 
1972       if (Info.allowsRegister())
1973         InOutConstraints += llvm::utostr(i);
1974       else
1975         InOutConstraints += OutputConstraint;
1976 
1977       InOutArgTypes.push_back(Arg->getType());
1978       InOutArgs.push_back(Arg);
1979     }
1980   }
1981 
1982   // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
1983   // to the return value slot. Only do this when returning in registers.
1984   if (isa<MSAsmStmt>(&S)) {
1985     const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
1986     if (RetAI.isDirect() || RetAI.isExtend()) {
1987       // Make a fake lvalue for the return value slot.
1988       LValue ReturnSlot = MakeAddrLValue(ReturnValue, FnRetTy);
1989       CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
1990           *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
1991           ResultRegDests, AsmString, S.getNumOutputs());
1992       SawAsmBlock = true;
1993     }
1994   }
1995 
1996   for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
1997     const Expr *InputExpr = S.getInputExpr(i);
1998 
1999     TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
2000 
2001     if (Info.allowsMemory())
2002       ReadNone = false;
2003 
2004     if (!Constraints.empty())
2005       Constraints += ',';
2006 
2007     // Simplify the input constraint.
2008     std::string InputConstraint(S.getInputConstraint(i));
2009     InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
2010                                          &OutputConstraintInfos);
2011 
2012     InputConstraint = AddVariableConstraints(
2013         InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
2014         getTarget(), CGM, S, false /* No EarlyClobber */);
2015 
2016     llvm::Value *Arg = EmitAsmInput(Info, InputExpr, Constraints);
2017 
2018     // If this input argument is tied to a larger output result, extend the
2019     // input to be the same size as the output.  The LLVM backend wants to see
2020     // the input and output of a matching constraint be the same size.  Note
2021     // that GCC does not define what the top bits are here.  We use zext because
2022     // that is usually cheaper, but LLVM IR should really get an anyext someday.
2023     if (Info.hasTiedOperand()) {
2024       unsigned Output = Info.getTiedOperand();
2025       QualType OutputType = S.getOutputExpr(Output)->getType();
2026       QualType InputTy = InputExpr->getType();
2027 
2028       if (getContext().getTypeSize(OutputType) >
2029           getContext().getTypeSize(InputTy)) {
2030         // Use ptrtoint as appropriate so that we can do our extension.
2031         if (isa<llvm::PointerType>(Arg->getType()))
2032           Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
2033         llvm::Type *OutputTy = ConvertType(OutputType);
2034         if (isa<llvm::IntegerType>(OutputTy))
2035           Arg = Builder.CreateZExt(Arg, OutputTy);
2036         else if (isa<llvm::PointerType>(OutputTy))
2037           Arg = Builder.CreateZExt(Arg, IntPtrTy);
2038         else {
2039           assert(OutputTy->isFloatingPointTy() && "Unexpected output type");
2040           Arg = Builder.CreateFPExt(Arg, OutputTy);
2041         }
2042       }
2043     }
2044     if (llvm::Type* AdjTy =
2045               getTargetHooks().adjustInlineAsmType(*this, InputConstraint,
2046                                                    Arg->getType()))
2047       Arg = Builder.CreateBitCast(Arg, AdjTy);
2048     else
2049       CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
2050           << InputExpr->getType() << InputConstraint;
2051 
2052     ArgTypes.push_back(Arg->getType());
2053     Args.push_back(Arg);
2054     Constraints += InputConstraint;
2055   }
2056 
2057   // Append the "input" part of inout constraints last.
2058   for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
2059     ArgTypes.push_back(InOutArgTypes[i]);
2060     Args.push_back(InOutArgs[i]);
2061   }
2062   Constraints += InOutConstraints;
2063 
2064   // Clobbers
2065   for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
2066     StringRef Clobber = S.getClobber(i);
2067 
2068     if (Clobber == "memory")
2069       ReadOnly = ReadNone = false;
2070     else if (Clobber != "cc")
2071       Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
2072 
2073     if (!Constraints.empty())
2074       Constraints += ',';
2075 
2076     Constraints += "~{";
2077     Constraints += Clobber;
2078     Constraints += '}';
2079   }
2080 
2081   // Add machine specific clobbers
2082   std::string MachineClobbers = getTarget().getClobbers();
2083   if (!MachineClobbers.empty()) {
2084     if (!Constraints.empty())
2085       Constraints += ',';
2086     Constraints += MachineClobbers;
2087   }
2088 
2089   llvm::Type *ResultType;
2090   if (ResultRegTypes.empty())
2091     ResultType = VoidTy;
2092   else if (ResultRegTypes.size() == 1)
2093     ResultType = ResultRegTypes[0];
2094   else
2095     ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
2096 
2097   llvm::FunctionType *FTy =
2098     llvm::FunctionType::get(ResultType, ArgTypes, false);
2099 
2100   bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
2101   llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
2102     llvm::InlineAsm::AD_Intel : llvm::InlineAsm::AD_ATT;
2103   llvm::InlineAsm *IA =
2104     llvm::InlineAsm::get(FTy, AsmString, Constraints, HasSideEffect,
2105                          /* IsAlignStack */ false, AsmDialect);
2106   llvm::CallInst *Result = Builder.CreateCall(IA, Args);
2107   Result->addAttribute(llvm::AttributeSet::FunctionIndex,
2108                        llvm::Attribute::NoUnwind);
2109 
2110   // Attach readnone and readonly attributes.
2111   if (!HasSideEffect) {
2112     if (ReadNone)
2113       Result->addAttribute(llvm::AttributeSet::FunctionIndex,
2114                            llvm::Attribute::ReadNone);
2115     else if (ReadOnly)
2116       Result->addAttribute(llvm::AttributeSet::FunctionIndex,
2117                            llvm::Attribute::ReadOnly);
2118   }
2119 
2120   // Slap the source location of the inline asm into a !srcloc metadata on the
2121   // call.
2122   if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S)) {
2123     Result->setMetadata("srcloc", getAsmSrcLocInfo(gccAsmStmt->getAsmString(),
2124                                                    *this));
2125   } else {
2126     // At least put the line number on MS inline asm blobs.
2127     auto Loc = llvm::ConstantInt::get(Int32Ty, S.getAsmLoc().getRawEncoding());
2128     Result->setMetadata("srcloc",
2129                         llvm::MDNode::get(getLLVMContext(),
2130                                           llvm::ConstantAsMetadata::get(Loc)));
2131   }
2132 
2133   if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
2134     // Conservatively, mark all inline asm blocks in CUDA as convergent
2135     // (meaning, they may call an intrinsically convergent op, such as bar.sync,
2136     // and so can't have certain optimizations applied around them).
2137     Result->addAttribute(llvm::AttributeSet::FunctionIndex,
2138                          llvm::Attribute::Convergent);
2139   }
2140 
2141   // Extract all of the register value results from the asm.
2142   std::vector<llvm::Value*> RegResults;
2143   if (ResultRegTypes.size() == 1) {
2144     RegResults.push_back(Result);
2145   } else {
2146     for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
2147       llvm::Value *Tmp = Builder.CreateExtractValue(Result, i, "asmresult");
2148       RegResults.push_back(Tmp);
2149     }
2150   }
2151 
2152   assert(RegResults.size() == ResultRegTypes.size());
2153   assert(RegResults.size() == ResultTruncRegTypes.size());
2154   assert(RegResults.size() == ResultRegDests.size());
2155   for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2156     llvm::Value *Tmp = RegResults[i];
2157 
2158     // If the result type of the LLVM IR asm doesn't match the result type of
2159     // the expression, do the conversion.
2160     if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
2161       llvm::Type *TruncTy = ResultTruncRegTypes[i];
2162 
2163       // Truncate the integer result to the right size, note that TruncTy can be
2164       // a pointer.
2165       if (TruncTy->isFloatingPointTy())
2166         Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
2167       else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2168         uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
2169         Tmp = Builder.CreateTrunc(Tmp,
2170                    llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize));
2171         Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
2172       } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2173         uint64_t TmpSize =CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
2174         Tmp = Builder.CreatePtrToInt(Tmp,
2175                    llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize));
2176         Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2177       } else if (TruncTy->isIntegerTy()) {
2178         Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2179       } else if (TruncTy->isVectorTy()) {
2180         Tmp = Builder.CreateBitCast(Tmp, TruncTy);
2181       }
2182     }
2183 
2184     EmitStoreThroughLValue(RValue::get(Tmp), ResultRegDests[i]);
2185   }
2186 }
2187 
2188 LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
2189   const RecordDecl *RD = S.getCapturedRecordDecl();
2190   QualType RecordTy = getContext().getRecordType(RD);
2191 
2192   // Initialize the captured struct.
2193   LValue SlotLV =
2194     MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
2195 
2196   RecordDecl::field_iterator CurField = RD->field_begin();
2197   for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
2198                                                  E = S.capture_init_end();
2199        I != E; ++I, ++CurField) {
2200     LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
2201     if (CurField->hasCapturedVLAType()) {
2202       auto VAT = CurField->getCapturedVLAType();
2203       EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
2204     } else {
2205       EmitInitializerForField(*CurField, LV, *I, None);
2206     }
2207   }
2208 
2209   return SlotLV;
2210 }
2211 
2212 /// Generate an outlined function for the body of a CapturedStmt, store any
2213 /// captured variables into the captured struct, and call the outlined function.
2214 llvm::Function *
2215 CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
2216   LValue CapStruct = InitCapturedStruct(S);
2217 
2218   // Emit the CapturedDecl
2219   CodeGenFunction CGF(CGM, true);
2220   CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
2221   llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
2222   delete CGF.CapturedStmtInfo;
2223 
2224   // Emit call to the helper function.
2225   EmitCallOrInvoke(F, CapStruct.getPointer());
2226 
2227   return F;
2228 }
2229 
2230 Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
2231   LValue CapStruct = InitCapturedStruct(S);
2232   return CapStruct.getAddress();
2233 }
2234 
2235 /// Creates the outlined function for a CapturedStmt.
2236 llvm::Function *
2237 CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
2238   assert(CapturedStmtInfo &&
2239     "CapturedStmtInfo should be set when generating the captured function");
2240   const CapturedDecl *CD = S.getCapturedDecl();
2241   const RecordDecl *RD = S.getCapturedRecordDecl();
2242   SourceLocation Loc = S.getLocStart();
2243   assert(CD->hasBody() && "missing CapturedDecl body");
2244 
2245   // Build the argument list.
2246   ASTContext &Ctx = CGM.getContext();
2247   FunctionArgList Args;
2248   Args.append(CD->param_begin(), CD->param_end());
2249 
2250   // Create the function declaration.
2251   FunctionType::ExtInfo ExtInfo;
2252   const CGFunctionInfo &FuncInfo =
2253     CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
2254   llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
2255 
2256   llvm::Function *F =
2257     llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
2258                            CapturedStmtInfo->getHelperName(), &CGM.getModule());
2259   CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
2260   if (CD->isNothrow())
2261     F->addFnAttr(llvm::Attribute::NoUnwind);
2262 
2263   // Generate the function.
2264   StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args,
2265                 CD->getLocation(),
2266                 CD->getBody()->getLocStart());
2267   // Set the context parameter in CapturedStmtInfo.
2268   Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
2269   CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
2270 
2271   // Initialize variable-length arrays.
2272   LValue Base = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(),
2273                                            Ctx.getTagDeclType(RD));
2274   for (auto *FD : RD->fields()) {
2275     if (FD->hasCapturedVLAType()) {
2276       auto *ExprArg = EmitLoadOfLValue(EmitLValueForField(Base, FD),
2277                                        S.getLocStart()).getScalarVal();
2278       auto VAT = FD->getCapturedVLAType();
2279       VLASizeMap[VAT->getSizeExpr()] = ExprArg;
2280     }
2281   }
2282 
2283   // If 'this' is captured, load it into CXXThisValue.
2284   if (CapturedStmtInfo->isCXXThisExprCaptured()) {
2285     FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
2286     LValue ThisLValue = EmitLValueForField(Base, FD);
2287     CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
2288   }
2289 
2290   PGO.assignRegionCounters(GlobalDecl(CD), F);
2291   CapturedStmtInfo->EmitBody(*this, CD->getBody());
2292   FinishFunction(CD->getBodyRBrace());
2293 
2294   return F;
2295 }
2296