1 //===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This contains code to emit Stmt nodes as LLVM code.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CGDebugInfo.h"
15 #include "CodeGenModule.h"
16 #include "CodeGenFunction.h"
17 #include "TargetInfo.h"
18 #include "clang/AST/StmtVisitor.h"
19 #include "clang/Basic/PrettyStackTrace.h"
20 #include "clang/Basic/TargetInfo.h"
21 #include "llvm/ADT/StringExtras.h"
22 #include "llvm/InlineAsm.h"
23 #include "llvm/Intrinsics.h"
24 #include "llvm/Target/TargetData.h"
25 using namespace clang;
26 using namespace CodeGen;
27 
28 //===----------------------------------------------------------------------===//
29 //                              Statement Emission
30 //===----------------------------------------------------------------------===//
31 
32 void CodeGenFunction::EmitStopPoint(const Stmt *S) {
33   if (CGDebugInfo *DI = getDebugInfo()) {
34     SourceLocation Loc;
35     if (isa<DeclStmt>(S))
36       Loc = S->getLocEnd();
37     else
38       Loc = S->getLocStart();
39     DI->EmitLocation(Builder, Loc);
40   }
41 }
42 
43 void CodeGenFunction::EmitStmt(const Stmt *S) {
44   assert(S && "Null statement?");
45 
46   // These statements have their own debug info handling.
47   if (EmitSimpleStmt(S))
48     return;
49 
50   // Check if we are generating unreachable code.
51   if (!HaveInsertPoint()) {
52     // If so, and the statement doesn't contain a label, then we do not need to
53     // generate actual code. This is safe because (1) the current point is
54     // unreachable, so we don't need to execute the code, and (2) we've already
55     // handled the statements which update internal data structures (like the
56     // local variable map) which could be used by subsequent statements.
57     if (!ContainsLabel(S)) {
58       // Verify that any decl statements were handled as simple, they may be in
59       // scope of subsequent reachable statements.
60       assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
61       return;
62     }
63 
64     // Otherwise, make a new block to hold the code.
65     EnsureInsertPoint();
66   }
67 
68   // Generate a stoppoint if we are emitting debug info.
69   EmitStopPoint(S);
70 
71   switch (S->getStmtClass()) {
72   case Stmt::NoStmtClass:
73   case Stmt::CXXCatchStmtClass:
74   case Stmt::SEHExceptStmtClass:
75   case Stmt::SEHFinallyStmtClass:
76   case Stmt::MSDependentExistsStmtClass:
77     llvm_unreachable("invalid statement class to emit generically");
78   case Stmt::NullStmtClass:
79   case Stmt::CompoundStmtClass:
80   case Stmt::DeclStmtClass:
81   case Stmt::LabelStmtClass:
82   case Stmt::GotoStmtClass:
83   case Stmt::BreakStmtClass:
84   case Stmt::ContinueStmtClass:
85   case Stmt::DefaultStmtClass:
86   case Stmt::CaseStmtClass:
87     llvm_unreachable("should have emitted these statements as simple");
88 
89 #define STMT(Type, Base)
90 #define ABSTRACT_STMT(Op)
91 #define EXPR(Type, Base) \
92   case Stmt::Type##Class:
93 #include "clang/AST/StmtNodes.inc"
94   {
95     // Remember the block we came in on.
96     llvm::BasicBlock *incoming = Builder.GetInsertBlock();
97     assert(incoming && "expression emission must have an insertion point");
98 
99     EmitIgnoredExpr(cast<Expr>(S));
100 
101     llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
102     assert(outgoing && "expression emission cleared block!");
103 
104     // The expression emitters assume (reasonably!) that the insertion
105     // point is always set.  To maintain that, the call-emission code
106     // for noreturn functions has to enter a new block with no
107     // predecessors.  We want to kill that block and mark the current
108     // insertion point unreachable in the common case of a call like
109     // "exit();".  Since expression emission doesn't otherwise create
110     // blocks with no predecessors, we can just test for that.
111     // However, we must be careful not to do this to our incoming
112     // block, because *statement* emission does sometimes create
113     // reachable blocks which will have no predecessors until later in
114     // the function.  This occurs with, e.g., labels that are not
115     // reachable by fallthrough.
116     if (incoming != outgoing && outgoing->use_empty()) {
117       outgoing->eraseFromParent();
118       Builder.ClearInsertionPoint();
119     }
120     break;
121   }
122 
123   case Stmt::IndirectGotoStmtClass:
124     EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;
125 
126   case Stmt::IfStmtClass:       EmitIfStmt(cast<IfStmt>(*S));             break;
127   case Stmt::WhileStmtClass:    EmitWhileStmt(cast<WhileStmt>(*S));       break;
128   case Stmt::DoStmtClass:       EmitDoStmt(cast<DoStmt>(*S));             break;
129   case Stmt::ForStmtClass:      EmitForStmt(cast<ForStmt>(*S));           break;
130 
131   case Stmt::ReturnStmtClass:   EmitReturnStmt(cast<ReturnStmt>(*S));     break;
132 
133   case Stmt::SwitchStmtClass:   EmitSwitchStmt(cast<SwitchStmt>(*S));     break;
134   case Stmt::AsmStmtClass:      EmitAsmStmt(cast<AsmStmt>(*S));           break;
135 
136   case Stmt::ObjCAtTryStmtClass:
137     EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
138     break;
139   case Stmt::ObjCAtCatchStmtClass:
140     llvm_unreachable(
141                     "@catch statements should be handled by EmitObjCAtTryStmt");
142   case Stmt::ObjCAtFinallyStmtClass:
143     llvm_unreachable(
144                   "@finally statements should be handled by EmitObjCAtTryStmt");
145   case Stmt::ObjCAtThrowStmtClass:
146     EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
147     break;
148   case Stmt::ObjCAtSynchronizedStmtClass:
149     EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
150     break;
151   case Stmt::ObjCForCollectionStmtClass:
152     EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
153     break;
154   case Stmt::ObjCAutoreleasePoolStmtClass:
155     EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
156     break;
157 
158   case Stmt::CXXTryStmtClass:
159     EmitCXXTryStmt(cast<CXXTryStmt>(*S));
160     break;
161   case Stmt::CXXForRangeStmtClass:
162     EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S));
163   case Stmt::SEHTryStmtClass:
164     // FIXME Not yet implemented
165     break;
166   }
167 }
168 
169 bool CodeGenFunction::EmitSimpleStmt(const Stmt *S) {
170   switch (S->getStmtClass()) {
171   default: return false;
172   case Stmt::NullStmtClass: break;
173   case Stmt::CompoundStmtClass: EmitCompoundStmt(cast<CompoundStmt>(*S)); break;
174   case Stmt::DeclStmtClass:     EmitDeclStmt(cast<DeclStmt>(*S));         break;
175   case Stmt::LabelStmtClass:    EmitLabelStmt(cast<LabelStmt>(*S));       break;
176   case Stmt::GotoStmtClass:     EmitGotoStmt(cast<GotoStmt>(*S));         break;
177   case Stmt::BreakStmtClass:    EmitBreakStmt(cast<BreakStmt>(*S));       break;
178   case Stmt::ContinueStmtClass: EmitContinueStmt(cast<ContinueStmt>(*S)); break;
179   case Stmt::DefaultStmtClass:  EmitDefaultStmt(cast<DefaultStmt>(*S));   break;
180   case Stmt::CaseStmtClass:     EmitCaseStmt(cast<CaseStmt>(*S));         break;
181   }
182 
183   return true;
184 }
185 
186 /// EmitCompoundStmt - Emit a compound statement {..} node.  If GetLast is true,
187 /// this captures the expression result of the last sub-statement and returns it
188 /// (for use by the statement expression extension).
189 RValue CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
190                                          AggValueSlot AggSlot) {
191   PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
192                              "LLVM IR generation of compound statement ('{}')");
193 
194   // Keep track of the current cleanup stack depth, including debug scopes.
195   LexicalScope Scope(*this, S.getSourceRange());
196 
197   for (CompoundStmt::const_body_iterator I = S.body_begin(),
198        E = S.body_end()-GetLast; I != E; ++I)
199     EmitStmt(*I);
200 
201   RValue RV;
202   if (!GetLast)
203     RV = RValue::get(0);
204   else {
205     // We have to special case labels here.  They are statements, but when put
206     // at the end of a statement expression, they yield the value of their
207     // subexpression.  Handle this by walking through all labels we encounter,
208     // emitting them before we evaluate the subexpr.
209     const Stmt *LastStmt = S.body_back();
210     while (const LabelStmt *LS = dyn_cast<LabelStmt>(LastStmt)) {
211       EmitLabel(LS->getDecl());
212       LastStmt = LS->getSubStmt();
213     }
214 
215     EnsureInsertPoint();
216 
217     RV = EmitAnyExpr(cast<Expr>(LastStmt), AggSlot);
218   }
219 
220   return RV;
221 }
222 
223 void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
224   llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());
225 
226   // If there is a cleanup stack, then we it isn't worth trying to
227   // simplify this block (we would need to remove it from the scope map
228   // and cleanup entry).
229   if (!EHStack.empty())
230     return;
231 
232   // Can only simplify direct branches.
233   if (!BI || !BI->isUnconditional())
234     return;
235 
236   BB->replaceAllUsesWith(BI->getSuccessor(0));
237   BI->eraseFromParent();
238   BB->eraseFromParent();
239 }
240 
241 void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
242   llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
243 
244   // Fall out of the current block (if necessary).
245   EmitBranch(BB);
246 
247   if (IsFinished && BB->use_empty()) {
248     delete BB;
249     return;
250   }
251 
252   // Place the block after the current block, if possible, or else at
253   // the end of the function.
254   if (CurBB && CurBB->getParent())
255     CurFn->getBasicBlockList().insertAfter(CurBB, BB);
256   else
257     CurFn->getBasicBlockList().push_back(BB);
258   Builder.SetInsertPoint(BB);
259 }
260 
261 void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
262   // Emit a branch from the current block to the target one if this
263   // was a real block.  If this was just a fall-through block after a
264   // terminator, don't emit it.
265   llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
266 
267   if (!CurBB || CurBB->getTerminator()) {
268     // If there is no insert point or the previous block is already
269     // terminated, don't touch it.
270   } else {
271     // Otherwise, create a fall-through branch.
272     Builder.CreateBr(Target);
273   }
274 
275   Builder.ClearInsertionPoint();
276 }
277 
278 void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
279   bool inserted = false;
280   for (llvm::BasicBlock::use_iterator
281          i = block->use_begin(), e = block->use_end(); i != e; ++i) {
282     if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(*i)) {
283       CurFn->getBasicBlockList().insertAfter(insn->getParent(), block);
284       inserted = true;
285       break;
286     }
287   }
288 
289   if (!inserted)
290     CurFn->getBasicBlockList().push_back(block);
291 
292   Builder.SetInsertPoint(block);
293 }
294 
295 CodeGenFunction::JumpDest
296 CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
297   JumpDest &Dest = LabelMap[D];
298   if (Dest.isValid()) return Dest;
299 
300   // Create, but don't insert, the new block.
301   Dest = JumpDest(createBasicBlock(D->getName()),
302                   EHScopeStack::stable_iterator::invalid(),
303                   NextCleanupDestIndex++);
304   return Dest;
305 }
306 
307 void CodeGenFunction::EmitLabel(const LabelDecl *D) {
308   JumpDest &Dest = LabelMap[D];
309 
310   // If we didn't need a forward reference to this label, just go
311   // ahead and create a destination at the current scope.
312   if (!Dest.isValid()) {
313     Dest = getJumpDestInCurrentScope(D->getName());
314 
315   // Otherwise, we need to give this label a target depth and remove
316   // it from the branch-fixups list.
317   } else {
318     assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
319     Dest = JumpDest(Dest.getBlock(),
320                     EHStack.stable_begin(),
321                     Dest.getDestIndex());
322 
323     ResolveBranchFixups(Dest.getBlock());
324   }
325 
326   EmitBlock(Dest.getBlock());
327 }
328 
329 
330 void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
331   EmitLabel(S.getDecl());
332   EmitStmt(S.getSubStmt());
333 }
334 
335 void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
336   // If this code is reachable then emit a stop point (if generating
337   // debug info). We have to do this ourselves because we are on the
338   // "simple" statement path.
339   if (HaveInsertPoint())
340     EmitStopPoint(&S);
341 
342   EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
343 }
344 
345 
346 void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
347   if (const LabelDecl *Target = S.getConstantTarget()) {
348     EmitBranchThroughCleanup(getJumpDestForLabel(Target));
349     return;
350   }
351 
352   // Ensure that we have an i8* for our PHI node.
353   llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
354                                          Int8PtrTy, "addr");
355   llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
356 
357 
358   // Get the basic block for the indirect goto.
359   llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();
360 
361   // The first instruction in the block has to be the PHI for the switch dest,
362   // add an entry for this branch.
363   cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);
364 
365   EmitBranch(IndGotoBB);
366 }
367 
368 void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
369   // C99 6.8.4.1: The first substatement is executed if the expression compares
370   // unequal to 0.  The condition must be a scalar type.
371   RunCleanupsScope ConditionScope(*this);
372 
373   if (S.getConditionVariable())
374     EmitAutoVarDecl(*S.getConditionVariable());
375 
376   // If the condition constant folds and can be elided, try to avoid emitting
377   // the condition and the dead arm of the if/else.
378   bool CondConstant;
379   if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant)) {
380     // Figure out which block (then or else) is executed.
381     const Stmt *Executed = S.getThen();
382     const Stmt *Skipped  = S.getElse();
383     if (!CondConstant)  // Condition false?
384       std::swap(Executed, Skipped);
385 
386     // If the skipped block has no labels in it, just emit the executed block.
387     // This avoids emitting dead code and simplifies the CFG substantially.
388     if (!ContainsLabel(Skipped)) {
389       if (Executed) {
390         RunCleanupsScope ExecutedScope(*this);
391         EmitStmt(Executed);
392       }
393       return;
394     }
395   }
396 
397   // Otherwise, the condition did not fold, or we couldn't elide it.  Just emit
398   // the conditional branch.
399   llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
400   llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
401   llvm::BasicBlock *ElseBlock = ContBlock;
402   if (S.getElse())
403     ElseBlock = createBasicBlock("if.else");
404   EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock);
405 
406   // Emit the 'then' code.
407   EmitBlock(ThenBlock);
408   {
409     RunCleanupsScope ThenScope(*this);
410     EmitStmt(S.getThen());
411   }
412   EmitBranch(ContBlock);
413 
414   // Emit the 'else' code if present.
415   if (const Stmt *Else = S.getElse()) {
416     // There is no need to emit line number for unconditional branch.
417     if (getDebugInfo())
418       Builder.SetCurrentDebugLocation(llvm::DebugLoc());
419     EmitBlock(ElseBlock);
420     {
421       RunCleanupsScope ElseScope(*this);
422       EmitStmt(Else);
423     }
424     // There is no need to emit line number for unconditional branch.
425     if (getDebugInfo())
426       Builder.SetCurrentDebugLocation(llvm::DebugLoc());
427     EmitBranch(ContBlock);
428   }
429 
430   // Emit the continuation block for code after the if.
431   EmitBlock(ContBlock, true);
432 }
433 
434 void CodeGenFunction::EmitWhileStmt(const WhileStmt &S) {
435   // Emit the header for the loop, which will also become
436   // the continue target.
437   JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
438   EmitBlock(LoopHeader.getBlock());
439 
440   // Create an exit block for when the condition fails, which will
441   // also become the break target.
442   JumpDest LoopExit = getJumpDestInCurrentScope("while.end");
443 
444   // Store the blocks to use for break and continue.
445   BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));
446 
447   // C++ [stmt.while]p2:
448   //   When the condition of a while statement is a declaration, the
449   //   scope of the variable that is declared extends from its point
450   //   of declaration (3.3.2) to the end of the while statement.
451   //   [...]
452   //   The object created in a condition is destroyed and created
453   //   with each iteration of the loop.
454   RunCleanupsScope ConditionScope(*this);
455 
456   if (S.getConditionVariable())
457     EmitAutoVarDecl(*S.getConditionVariable());
458 
459   // Evaluate the conditional in the while header.  C99 6.8.5.1: The
460   // evaluation of the controlling expression takes place before each
461   // execution of the loop body.
462   llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
463 
464   // while(1) is common, avoid extra exit blocks.  Be sure
465   // to correctly handle break/continue though.
466   bool EmitBoolCondBranch = true;
467   if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
468     if (C->isOne())
469       EmitBoolCondBranch = false;
470 
471   // As long as the condition is true, go to the loop body.
472   llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
473   if (EmitBoolCondBranch) {
474     llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
475     if (ConditionScope.requiresCleanups())
476       ExitBlock = createBasicBlock("while.exit");
477 
478     Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
479 
480     if (ExitBlock != LoopExit.getBlock()) {
481       EmitBlock(ExitBlock);
482       EmitBranchThroughCleanup(LoopExit);
483     }
484   }
485 
486   // Emit the loop body.  We have to emit this in a cleanup scope
487   // because it might be a singleton DeclStmt.
488   {
489     RunCleanupsScope BodyScope(*this);
490     EmitBlock(LoopBody);
491     EmitStmt(S.getBody());
492   }
493 
494   BreakContinueStack.pop_back();
495 
496   // Immediately force cleanup.
497   ConditionScope.ForceCleanup();
498 
499   // Branch to the loop header again.
500   EmitBranch(LoopHeader.getBlock());
501 
502   // Emit the exit block.
503   EmitBlock(LoopExit.getBlock(), true);
504 
505   // The LoopHeader typically is just a branch if we skipped emitting
506   // a branch, try to erase it.
507   if (!EmitBoolCondBranch)
508     SimplifyForwardingBlocks(LoopHeader.getBlock());
509 }
510 
511 void CodeGenFunction::EmitDoStmt(const DoStmt &S) {
512   JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
513   JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");
514 
515   // Store the blocks to use for break and continue.
516   BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));
517 
518   // Emit the body of the loop.
519   llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
520   EmitBlock(LoopBody);
521   {
522     RunCleanupsScope BodyScope(*this);
523     EmitStmt(S.getBody());
524   }
525 
526   BreakContinueStack.pop_back();
527 
528   EmitBlock(LoopCond.getBlock());
529 
530   // C99 6.8.5.2: "The evaluation of the controlling expression takes place
531   // after each execution of the loop body."
532 
533   // Evaluate the conditional in the while header.
534   // C99 6.8.5p2/p4: The first substatement is executed if the expression
535   // compares unequal to 0.  The condition must be a scalar type.
536   llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
537 
538   // "do {} while (0)" is common in macros, avoid extra blocks.  Be sure
539   // to correctly handle break/continue though.
540   bool EmitBoolCondBranch = true;
541   if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
542     if (C->isZero())
543       EmitBoolCondBranch = false;
544 
545   // As long as the condition is true, iterate the loop.
546   if (EmitBoolCondBranch)
547     Builder.CreateCondBr(BoolCondVal, LoopBody, LoopExit.getBlock());
548 
549   // Emit the exit block.
550   EmitBlock(LoopExit.getBlock());
551 
552   // The DoCond block typically is just a branch if we skipped
553   // emitting a branch, try to erase it.
554   if (!EmitBoolCondBranch)
555     SimplifyForwardingBlocks(LoopCond.getBlock());
556 }
557 
558 void CodeGenFunction::EmitForStmt(const ForStmt &S) {
559   JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
560 
561   RunCleanupsScope ForScope(*this);
562 
563   CGDebugInfo *DI = getDebugInfo();
564   if (DI)
565     DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());
566 
567   // Evaluate the first part before the loop.
568   if (S.getInit())
569     EmitStmt(S.getInit());
570 
571   // Start the loop with a block that tests the condition.
572   // If there's an increment, the continue scope will be overwritten
573   // later.
574   JumpDest Continue = getJumpDestInCurrentScope("for.cond");
575   llvm::BasicBlock *CondBlock = Continue.getBlock();
576   EmitBlock(CondBlock);
577 
578   // Create a cleanup scope for the condition variable cleanups.
579   RunCleanupsScope ConditionScope(*this);
580 
581   llvm::Value *BoolCondVal = 0;
582   if (S.getCond()) {
583     // If the for statement has a condition scope, emit the local variable
584     // declaration.
585     llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
586     if (S.getConditionVariable()) {
587       EmitAutoVarDecl(*S.getConditionVariable());
588     }
589 
590     // If there are any cleanups between here and the loop-exit scope,
591     // create a block to stage a loop exit along.
592     if (ForScope.requiresCleanups())
593       ExitBlock = createBasicBlock("for.cond.cleanup");
594 
595     // As long as the condition is true, iterate the loop.
596     llvm::BasicBlock *ForBody = createBasicBlock("for.body");
597 
598     // C99 6.8.5p2/p4: The first substatement is executed if the expression
599     // compares unequal to 0.  The condition must be a scalar type.
600     BoolCondVal = EvaluateExprAsBool(S.getCond());
601     Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock);
602 
603     if (ExitBlock != LoopExit.getBlock()) {
604       EmitBlock(ExitBlock);
605       EmitBranchThroughCleanup(LoopExit);
606     }
607 
608     EmitBlock(ForBody);
609   } else {
610     // Treat it as a non-zero constant.  Don't even create a new block for the
611     // body, just fall into it.
612   }
613 
614   // If the for loop doesn't have an increment we can just use the
615   // condition as the continue block.  Otherwise we'll need to create
616   // a block for it (in the current scope, i.e. in the scope of the
617   // condition), and that we will become our continue block.
618   if (S.getInc())
619     Continue = getJumpDestInCurrentScope("for.inc");
620 
621   // Store the blocks to use for break and continue.
622   BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
623 
624   {
625     // Create a separate cleanup scope for the body, in case it is not
626     // a compound statement.
627     RunCleanupsScope BodyScope(*this);
628     EmitStmt(S.getBody());
629   }
630 
631   // If there is an increment, emit it next.
632   if (S.getInc()) {
633     EmitBlock(Continue.getBlock());
634     EmitStmt(S.getInc());
635   }
636 
637   BreakContinueStack.pop_back();
638 
639   ConditionScope.ForceCleanup();
640   EmitBranch(CondBlock);
641 
642   ForScope.ForceCleanup();
643 
644   if (DI)
645     DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd());
646 
647   // Emit the fall-through block.
648   EmitBlock(LoopExit.getBlock(), true);
649 }
650 
651 void CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S) {
652   JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
653 
654   RunCleanupsScope ForScope(*this);
655 
656   CGDebugInfo *DI = getDebugInfo();
657   if (DI)
658     DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());
659 
660   // Evaluate the first pieces before the loop.
661   EmitStmt(S.getRangeStmt());
662   EmitStmt(S.getBeginEndStmt());
663 
664   // Start the loop with a block that tests the condition.
665   // If there's an increment, the continue scope will be overwritten
666   // later.
667   llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
668   EmitBlock(CondBlock);
669 
670   // If there are any cleanups between here and the loop-exit scope,
671   // create a block to stage a loop exit along.
672   llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
673   if (ForScope.requiresCleanups())
674     ExitBlock = createBasicBlock("for.cond.cleanup");
675 
676   // The loop body, consisting of the specified body and the loop variable.
677   llvm::BasicBlock *ForBody = createBasicBlock("for.body");
678 
679   // The body is executed if the expression, contextually converted
680   // to bool, is true.
681   llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
682   Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock);
683 
684   if (ExitBlock != LoopExit.getBlock()) {
685     EmitBlock(ExitBlock);
686     EmitBranchThroughCleanup(LoopExit);
687   }
688 
689   EmitBlock(ForBody);
690 
691   // Create a block for the increment. In case of a 'continue', we jump there.
692   JumpDest Continue = getJumpDestInCurrentScope("for.inc");
693 
694   // Store the blocks to use for break and continue.
695   BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
696 
697   {
698     // Create a separate cleanup scope for the loop variable and body.
699     RunCleanupsScope BodyScope(*this);
700     EmitStmt(S.getLoopVarStmt());
701     EmitStmt(S.getBody());
702   }
703 
704   // If there is an increment, emit it next.
705   EmitBlock(Continue.getBlock());
706   EmitStmt(S.getInc());
707 
708   BreakContinueStack.pop_back();
709 
710   EmitBranch(CondBlock);
711 
712   ForScope.ForceCleanup();
713 
714   if (DI)
715     DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd());
716 
717   // Emit the fall-through block.
718   EmitBlock(LoopExit.getBlock(), true);
719 }
720 
721 void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
722   if (RV.isScalar()) {
723     Builder.CreateStore(RV.getScalarVal(), ReturnValue);
724   } else if (RV.isAggregate()) {
725     EmitAggregateCopy(ReturnValue, RV.getAggregateAddr(), Ty);
726   } else {
727     StoreComplexToAddr(RV.getComplexVal(), ReturnValue, false);
728   }
729   EmitBranchThroughCleanup(ReturnBlock);
730 }
731 
732 /// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
733 /// if the function returns void, or may be missing one if the function returns
734 /// non-void.  Fun stuff :).
735 void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
736   // Emit the result value, even if unused, to evalute the side effects.
737   const Expr *RV = S.getRetValue();
738 
739   // FIXME: Clean this up by using an LValue for ReturnTemp,
740   // EmitStoreThroughLValue, and EmitAnyExpr.
741   if (S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable() &&
742       !Target.useGlobalsForAutomaticVariables()) {
743     // Apply the named return value optimization for this return statement,
744     // which means doing nothing: the appropriate result has already been
745     // constructed into the NRVO variable.
746 
747     // If there is an NRVO flag for this variable, set it to 1 into indicate
748     // that the cleanup code should not destroy the variable.
749     if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
750       Builder.CreateStore(Builder.getTrue(), NRVOFlag);
751   } else if (!ReturnValue) {
752     // Make sure not to return anything, but evaluate the expression
753     // for side effects.
754     if (RV)
755       EmitAnyExpr(RV);
756   } else if (RV == 0) {
757     // Do nothing (return value is left uninitialized)
758   } else if (FnRetTy->isReferenceType()) {
759     // If this function returns a reference, take the address of the expression
760     // rather than the value.
761     RValue Result = EmitReferenceBindingToExpr(RV, /*InitializedDecl=*/0);
762     Builder.CreateStore(Result.getScalarVal(), ReturnValue);
763   } else if (!hasAggregateLLVMType(RV->getType())) {
764     Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
765   } else if (RV->getType()->isAnyComplexType()) {
766     EmitComplexExprIntoAddr(RV, ReturnValue, false);
767   } else {
768     CharUnits Alignment = getContext().getTypeAlignInChars(RV->getType());
769     EmitAggExpr(RV, AggValueSlot::forAddr(ReturnValue, Alignment, Qualifiers(),
770                                           AggValueSlot::IsDestructed,
771                                           AggValueSlot::DoesNotNeedGCBarriers,
772                                           AggValueSlot::IsNotAliased));
773   }
774 
775   EmitBranchThroughCleanup(ReturnBlock);
776 }
777 
778 void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
779   // As long as debug info is modeled with instructions, we have to ensure we
780   // have a place to insert here and write the stop point here.
781   if (getDebugInfo() && HaveInsertPoint())
782     EmitStopPoint(&S);
783 
784   for (DeclStmt::const_decl_iterator I = S.decl_begin(), E = S.decl_end();
785        I != E; ++I)
786     EmitDecl(**I);
787 }
788 
789 void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
790   assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");
791 
792   // If this code is reachable then emit a stop point (if generating
793   // debug info). We have to do this ourselves because we are on the
794   // "simple" statement path.
795   if (HaveInsertPoint())
796     EmitStopPoint(&S);
797 
798   JumpDest Block = BreakContinueStack.back().BreakBlock;
799   EmitBranchThroughCleanup(Block);
800 }
801 
802 void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
803   assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
804 
805   // If this code is reachable then emit a stop point (if generating
806   // debug info). We have to do this ourselves because we are on the
807   // "simple" statement path.
808   if (HaveInsertPoint())
809     EmitStopPoint(&S);
810 
811   JumpDest Block = BreakContinueStack.back().ContinueBlock;
812   EmitBranchThroughCleanup(Block);
813 }
814 
815 /// EmitCaseStmtRange - If case statement range is not too big then
816 /// add multiple cases to switch instruction, one for each value within
817 /// the range. If range is too big then emit "if" condition check.
818 void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
819   assert(S.getRHS() && "Expected RHS value in CaseStmt");
820 
821   llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
822   llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());
823 
824   // Emit the code for this case. We do this first to make sure it is
825   // properly chained from our predecessor before generating the
826   // switch machinery to enter this block.
827   EmitBlock(createBasicBlock("sw.bb"));
828   llvm::BasicBlock *CaseDest = Builder.GetInsertBlock();
829   EmitStmt(S.getSubStmt());
830 
831   // If range is empty, do nothing.
832   if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
833     return;
834 
835   llvm::APInt Range = RHS - LHS;
836   // FIXME: parameters such as this should not be hardcoded.
837   if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
838     // Range is small enough to add multiple switch instruction cases.
839     for (unsigned i = 0, e = Range.getZExtValue() + 1; i != e; ++i) {
840       SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
841       LHS++;
842     }
843     return;
844   }
845 
846   // The range is too big. Emit "if" condition into a new block,
847   // making sure to save and restore the current insertion point.
848   llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();
849 
850   // Push this test onto the chain of range checks (which terminates
851   // in the default basic block). The switch's default will be changed
852   // to the top of this chain after switch emission is complete.
853   llvm::BasicBlock *FalseDest = CaseRangeBlock;
854   CaseRangeBlock = createBasicBlock("sw.caserange");
855 
856   CurFn->getBasicBlockList().push_back(CaseRangeBlock);
857   Builder.SetInsertPoint(CaseRangeBlock);
858 
859   // Emit range check.
860   llvm::Value *Diff =
861     Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
862   llvm::Value *Cond =
863     Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");
864   Builder.CreateCondBr(Cond, CaseDest, FalseDest);
865 
866   // Restore the appropriate insertion point.
867   if (RestoreBB)
868     Builder.SetInsertPoint(RestoreBB);
869   else
870     Builder.ClearInsertionPoint();
871 }
872 
873 void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
874   // If there is no enclosing switch instance that we're aware of, then this
875   // case statement and its block can be elided.  This situation only happens
876   // when we've constant-folded the switch, are emitting the constant case,
877   // and part of the constant case includes another case statement.  For
878   // instance: switch (4) { case 4: do { case 5: } while (1); }
879   if (!SwitchInsn) {
880     EmitStmt(S.getSubStmt());
881     return;
882   }
883 
884   // Handle case ranges.
885   if (S.getRHS()) {
886     EmitCaseStmtRange(S);
887     return;
888   }
889 
890   llvm::ConstantInt *CaseVal =
891     Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
892 
893   // If the body of the case is just a 'break', and if there was no fallthrough,
894   // try to not emit an empty block.
895   if (isa<BreakStmt>(S.getSubStmt())) {
896     JumpDest Block = BreakContinueStack.back().BreakBlock;
897 
898     // Only do this optimization if there are no cleanups that need emitting.
899     if (isObviouslyBranchWithoutCleanups(Block)) {
900       SwitchInsn->addCase(CaseVal, Block.getBlock());
901 
902       // If there was a fallthrough into this case, make sure to redirect it to
903       // the end of the switch as well.
904       if (Builder.GetInsertBlock()) {
905         Builder.CreateBr(Block.getBlock());
906         Builder.ClearInsertionPoint();
907       }
908       return;
909     }
910   }
911 
912   EmitBlock(createBasicBlock("sw.bb"));
913   llvm::BasicBlock *CaseDest = Builder.GetInsertBlock();
914   SwitchInsn->addCase(CaseVal, CaseDest);
915 
916   // Recursively emitting the statement is acceptable, but is not wonderful for
917   // code where we have many case statements nested together, i.e.:
918   //  case 1:
919   //    case 2:
920   //      case 3: etc.
921   // Handling this recursively will create a new block for each case statement
922   // that falls through to the next case which is IR intensive.  It also causes
923   // deep recursion which can run into stack depth limitations.  Handle
924   // sequential non-range case statements specially.
925   const CaseStmt *CurCase = &S;
926   const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
927 
928   // Otherwise, iteratively add consecutive cases to this switch stmt.
929   while (NextCase && NextCase->getRHS() == 0) {
930     CurCase = NextCase;
931     llvm::ConstantInt *CaseVal =
932       Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
933     SwitchInsn->addCase(CaseVal, CaseDest);
934     NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
935   }
936 
937   // Normal default recursion for non-cases.
938   EmitStmt(CurCase->getSubStmt());
939 }
940 
941 void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S) {
942   llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
943   assert(DefaultBlock->empty() &&
944          "EmitDefaultStmt: Default block already defined?");
945   EmitBlock(DefaultBlock);
946   EmitStmt(S.getSubStmt());
947 }
948 
949 /// CollectStatementsForCase - Given the body of a 'switch' statement and a
950 /// constant value that is being switched on, see if we can dead code eliminate
951 /// the body of the switch to a simple series of statements to emit.  Basically,
952 /// on a switch (5) we want to find these statements:
953 ///    case 5:
954 ///      printf(...);    <--
955 ///      ++i;            <--
956 ///      break;
957 ///
958 /// and add them to the ResultStmts vector.  If it is unsafe to do this
959 /// transformation (for example, one of the elided statements contains a label
960 /// that might be jumped to), return CSFC_Failure.  If we handled it and 'S'
961 /// should include statements after it (e.g. the printf() line is a substmt of
962 /// the case) then return CSFC_FallThrough.  If we handled it and found a break
963 /// statement, then return CSFC_Success.
964 ///
965 /// If Case is non-null, then we are looking for the specified case, checking
966 /// that nothing we jump over contains labels.  If Case is null, then we found
967 /// the case and are looking for the break.
968 ///
969 /// If the recursive walk actually finds our Case, then we set FoundCase to
970 /// true.
971 ///
972 enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
973 static CSFC_Result CollectStatementsForCase(const Stmt *S,
974                                             const SwitchCase *Case,
975                                             bool &FoundCase,
976                               SmallVectorImpl<const Stmt*> &ResultStmts) {
977   // If this is a null statement, just succeed.
978   if (S == 0)
979     return Case ? CSFC_Success : CSFC_FallThrough;
980 
981   // If this is the switchcase (case 4: or default) that we're looking for, then
982   // we're in business.  Just add the substatement.
983   if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
984     if (S == Case) {
985       FoundCase = true;
986       return CollectStatementsForCase(SC->getSubStmt(), 0, FoundCase,
987                                       ResultStmts);
988     }
989 
990     // Otherwise, this is some other case or default statement, just ignore it.
991     return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
992                                     ResultStmts);
993   }
994 
995   // If we are in the live part of the code and we found our break statement,
996   // return a success!
997   if (Case == 0 && isa<BreakStmt>(S))
998     return CSFC_Success;
999 
1000   // If this is a switch statement, then it might contain the SwitchCase, the
1001   // break, or neither.
1002   if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
1003     // Handle this as two cases: we might be looking for the SwitchCase (if so
1004     // the skipped statements must be skippable) or we might already have it.
1005     CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
1006     if (Case) {
1007       // Keep track of whether we see a skipped declaration.  The code could be
1008       // using the declaration even if it is skipped, so we can't optimize out
1009       // the decl if the kept statements might refer to it.
1010       bool HadSkippedDecl = false;
1011 
1012       // If we're looking for the case, just see if we can skip each of the
1013       // substatements.
1014       for (; Case && I != E; ++I) {
1015         HadSkippedDecl |= isa<DeclStmt>(*I);
1016 
1017         switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
1018         case CSFC_Failure: return CSFC_Failure;
1019         case CSFC_Success:
1020           // A successful result means that either 1) that the statement doesn't
1021           // have the case and is skippable, or 2) does contain the case value
1022           // and also contains the break to exit the switch.  In the later case,
1023           // we just verify the rest of the statements are elidable.
1024           if (FoundCase) {
1025             // If we found the case and skipped declarations, we can't do the
1026             // optimization.
1027             if (HadSkippedDecl)
1028               return CSFC_Failure;
1029 
1030             for (++I; I != E; ++I)
1031               if (CodeGenFunction::ContainsLabel(*I, true))
1032                 return CSFC_Failure;
1033             return CSFC_Success;
1034           }
1035           break;
1036         case CSFC_FallThrough:
1037           // If we have a fallthrough condition, then we must have found the
1038           // case started to include statements.  Consider the rest of the
1039           // statements in the compound statement as candidates for inclusion.
1040           assert(FoundCase && "Didn't find case but returned fallthrough?");
1041           // We recursively found Case, so we're not looking for it anymore.
1042           Case = 0;
1043 
1044           // If we found the case and skipped declarations, we can't do the
1045           // optimization.
1046           if (HadSkippedDecl)
1047             return CSFC_Failure;
1048           break;
1049         }
1050       }
1051     }
1052 
1053     // If we have statements in our range, then we know that the statements are
1054     // live and need to be added to the set of statements we're tracking.
1055     for (; I != E; ++I) {
1056       switch (CollectStatementsForCase(*I, 0, FoundCase, ResultStmts)) {
1057       case CSFC_Failure: return CSFC_Failure;
1058       case CSFC_FallThrough:
1059         // A fallthrough result means that the statement was simple and just
1060         // included in ResultStmt, keep adding them afterwards.
1061         break;
1062       case CSFC_Success:
1063         // A successful result means that we found the break statement and
1064         // stopped statement inclusion.  We just ensure that any leftover stmts
1065         // are skippable and return success ourselves.
1066         for (++I; I != E; ++I)
1067           if (CodeGenFunction::ContainsLabel(*I, true))
1068             return CSFC_Failure;
1069         return CSFC_Success;
1070       }
1071     }
1072 
1073     return Case ? CSFC_Success : CSFC_FallThrough;
1074   }
1075 
1076   // Okay, this is some other statement that we don't handle explicitly, like a
1077   // for statement or increment etc.  If we are skipping over this statement,
1078   // just verify it doesn't have labels, which would make it invalid to elide.
1079   if (Case) {
1080     if (CodeGenFunction::ContainsLabel(S, true))
1081       return CSFC_Failure;
1082     return CSFC_Success;
1083   }
1084 
1085   // Otherwise, we want to include this statement.  Everything is cool with that
1086   // so long as it doesn't contain a break out of the switch we're in.
1087   if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
1088 
1089   // Otherwise, everything is great.  Include the statement and tell the caller
1090   // that we fall through and include the next statement as well.
1091   ResultStmts.push_back(S);
1092   return CSFC_FallThrough;
1093 }
1094 
1095 /// FindCaseStatementsForValue - Find the case statement being jumped to and
1096 /// then invoke CollectStatementsForCase to find the list of statements to emit
1097 /// for a switch on constant.  See the comment above CollectStatementsForCase
1098 /// for more details.
1099 static bool FindCaseStatementsForValue(const SwitchStmt &S,
1100                                        const llvm::APInt &ConstantCondValue,
1101                                 SmallVectorImpl<const Stmt*> &ResultStmts,
1102                                        ASTContext &C) {
1103   // First step, find the switch case that is being branched to.  We can do this
1104   // efficiently by scanning the SwitchCase list.
1105   const SwitchCase *Case = S.getSwitchCaseList();
1106   const DefaultStmt *DefaultCase = 0;
1107 
1108   for (; Case; Case = Case->getNextSwitchCase()) {
1109     // It's either a default or case.  Just remember the default statement in
1110     // case we're not jumping to any numbered cases.
1111     if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
1112       DefaultCase = DS;
1113       continue;
1114     }
1115 
1116     // Check to see if this case is the one we're looking for.
1117     const CaseStmt *CS = cast<CaseStmt>(Case);
1118     // Don't handle case ranges yet.
1119     if (CS->getRHS()) return false;
1120 
1121     // If we found our case, remember it as 'case'.
1122     if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
1123       break;
1124   }
1125 
1126   // If we didn't find a matching case, we use a default if it exists, or we
1127   // elide the whole switch body!
1128   if (Case == 0) {
1129     // It is safe to elide the body of the switch if it doesn't contain labels
1130     // etc.  If it is safe, return successfully with an empty ResultStmts list.
1131     if (DefaultCase == 0)
1132       return !CodeGenFunction::ContainsLabel(&S);
1133     Case = DefaultCase;
1134   }
1135 
1136   // Ok, we know which case is being jumped to, try to collect all the
1137   // statements that follow it.  This can fail for a variety of reasons.  Also,
1138   // check to see that the recursive walk actually found our case statement.
1139   // Insane cases like this can fail to find it in the recursive walk since we
1140   // don't handle every stmt kind:
1141   // switch (4) {
1142   //   while (1) {
1143   //     case 4: ...
1144   bool FoundCase = false;
1145   return CollectStatementsForCase(S.getBody(), Case, FoundCase,
1146                                   ResultStmts) != CSFC_Failure &&
1147          FoundCase;
1148 }
1149 
1150 void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
1151   JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
1152 
1153   RunCleanupsScope ConditionScope(*this);
1154 
1155   if (S.getConditionVariable())
1156     EmitAutoVarDecl(*S.getConditionVariable());
1157 
1158   // Handle nested switch statements.
1159   llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
1160   llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
1161 
1162   // See if we can constant fold the condition of the switch and therefore only
1163   // emit the live case statement (if any) of the switch.
1164   llvm::APInt ConstantCondValue;
1165   if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
1166     SmallVector<const Stmt*, 4> CaseStmts;
1167     if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
1168                                    getContext())) {
1169       RunCleanupsScope ExecutedScope(*this);
1170 
1171       // At this point, we are no longer "within" a switch instance, so
1172       // we can temporarily enforce this to ensure that any embedded case
1173       // statements are not emitted.
1174       SwitchInsn = 0;
1175 
1176       // Okay, we can dead code eliminate everything except this case.  Emit the
1177       // specified series of statements and we're good.
1178       for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
1179         EmitStmt(CaseStmts[i]);
1180 
1181       // Now we want to restore the saved switch instance so that nested switches
1182       // continue to function properly
1183       SwitchInsn = SavedSwitchInsn;
1184 
1185       return;
1186     }
1187   }
1188 
1189   llvm::Value *CondV = EmitScalarExpr(S.getCond());
1190 
1191   // Create basic block to hold stuff that comes after switch
1192   // statement. We also need to create a default block now so that
1193   // explicit case ranges tests can have a place to jump to on
1194   // failure.
1195   llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
1196   SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
1197   CaseRangeBlock = DefaultBlock;
1198 
1199   // Clear the insertion point to indicate we are in unreachable code.
1200   Builder.ClearInsertionPoint();
1201 
1202   // All break statements jump to NextBlock. If BreakContinueStack is non empty
1203   // then reuse last ContinueBlock.
1204   JumpDest OuterContinue;
1205   if (!BreakContinueStack.empty())
1206     OuterContinue = BreakContinueStack.back().ContinueBlock;
1207 
1208   BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
1209 
1210   // Emit switch body.
1211   EmitStmt(S.getBody());
1212 
1213   BreakContinueStack.pop_back();
1214 
1215   // Update the default block in case explicit case range tests have
1216   // been chained on top.
1217   SwitchInsn->setDefaultDest(CaseRangeBlock);
1218 
1219   // If a default was never emitted:
1220   if (!DefaultBlock->getParent()) {
1221     // If we have cleanups, emit the default block so that there's a
1222     // place to jump through the cleanups from.
1223     if (ConditionScope.requiresCleanups()) {
1224       EmitBlock(DefaultBlock);
1225 
1226     // Otherwise, just forward the default block to the switch end.
1227     } else {
1228       DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
1229       delete DefaultBlock;
1230     }
1231   }
1232 
1233   ConditionScope.ForceCleanup();
1234 
1235   // Emit continuation.
1236   EmitBlock(SwitchExit.getBlock(), true);
1237 
1238   SwitchInsn = SavedSwitchInsn;
1239   CaseRangeBlock = SavedCRBlock;
1240 }
1241 
1242 static std::string
1243 SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
1244                  SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=0) {
1245   std::string Result;
1246 
1247   while (*Constraint) {
1248     switch (*Constraint) {
1249     default:
1250       Result += Target.convertConstraint(Constraint);
1251       break;
1252     // Ignore these
1253     case '*':
1254     case '?':
1255     case '!':
1256     case '=': // Will see this and the following in mult-alt constraints.
1257     case '+':
1258       break;
1259     case ',':
1260       Result += "|";
1261       break;
1262     case 'g':
1263       Result += "imr";
1264       break;
1265     case '[': {
1266       assert(OutCons &&
1267              "Must pass output names to constraints with a symbolic name");
1268       unsigned Index;
1269       bool result = Target.resolveSymbolicName(Constraint,
1270                                                &(*OutCons)[0],
1271                                                OutCons->size(), Index);
1272       assert(result && "Could not resolve symbolic name"); (void)result;
1273       Result += llvm::utostr(Index);
1274       break;
1275     }
1276     }
1277 
1278     Constraint++;
1279   }
1280 
1281   return Result;
1282 }
1283 
1284 /// AddVariableConstraints - Look at AsmExpr and if it is a variable declared
1285 /// as using a particular register add that as a constraint that will be used
1286 /// in this asm stmt.
1287 static std::string
1288 AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
1289                        const TargetInfo &Target, CodeGenModule &CGM,
1290                        const AsmStmt &Stmt) {
1291   const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
1292   if (!AsmDeclRef)
1293     return Constraint;
1294   const ValueDecl &Value = *AsmDeclRef->getDecl();
1295   const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
1296   if (!Variable)
1297     return Constraint;
1298   AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
1299   if (!Attr)
1300     return Constraint;
1301   StringRef Register = Attr->getLabel();
1302   assert(Target.isValidGCCRegisterName(Register));
1303   // We're using validateOutputConstraint here because we only care if
1304   // this is a register constraint.
1305   TargetInfo::ConstraintInfo Info(Constraint, "");
1306   if (Target.validateOutputConstraint(Info) &&
1307       !Info.allowsRegister()) {
1308     CGM.ErrorUnsupported(&Stmt, "__asm__");
1309     return Constraint;
1310   }
1311   // Canonicalize the register here before returning it.
1312   Register = Target.getNormalizedGCCRegisterName(Register);
1313   return "{" + Register.str() + "}";
1314 }
1315 
1316 llvm::Value*
1317 CodeGenFunction::EmitAsmInputLValue(const AsmStmt &S,
1318                                     const TargetInfo::ConstraintInfo &Info,
1319                                     LValue InputValue, QualType InputType,
1320                                     std::string &ConstraintStr) {
1321   llvm::Value *Arg;
1322   if (Info.allowsRegister() || !Info.allowsMemory()) {
1323     if (!CodeGenFunction::hasAggregateLLVMType(InputType)) {
1324       Arg = EmitLoadOfLValue(InputValue).getScalarVal();
1325     } else {
1326       llvm::Type *Ty = ConvertType(InputType);
1327       uint64_t Size = CGM.getTargetData().getTypeSizeInBits(Ty);
1328       if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
1329         Ty = llvm::IntegerType::get(getLLVMContext(), Size);
1330         Ty = llvm::PointerType::getUnqual(Ty);
1331 
1332         Arg = Builder.CreateLoad(Builder.CreateBitCast(InputValue.getAddress(),
1333                                                        Ty));
1334       } else {
1335         Arg = InputValue.getAddress();
1336         ConstraintStr += '*';
1337       }
1338     }
1339   } else {
1340     Arg = InputValue.getAddress();
1341     ConstraintStr += '*';
1342   }
1343 
1344   return Arg;
1345 }
1346 
1347 llvm::Value* CodeGenFunction::EmitAsmInput(const AsmStmt &S,
1348                                          const TargetInfo::ConstraintInfo &Info,
1349                                            const Expr *InputExpr,
1350                                            std::string &ConstraintStr) {
1351   if (Info.allowsRegister() || !Info.allowsMemory())
1352     if (!CodeGenFunction::hasAggregateLLVMType(InputExpr->getType()))
1353       return EmitScalarExpr(InputExpr);
1354 
1355   InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
1356   LValue Dest = EmitLValue(InputExpr);
1357   return EmitAsmInputLValue(S, Info, Dest, InputExpr->getType(), ConstraintStr);
1358 }
1359 
1360 /// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
1361 /// asm call instruction.  The !srcloc MDNode contains a list of constant
1362 /// integers which are the source locations of the start of each line in the
1363 /// asm.
1364 static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
1365                                       CodeGenFunction &CGF) {
1366   SmallVector<llvm::Value *, 8> Locs;
1367   // Add the location of the first line to the MDNode.
1368   Locs.push_back(llvm::ConstantInt::get(CGF.Int32Ty,
1369                                         Str->getLocStart().getRawEncoding()));
1370   StringRef StrVal = Str->getString();
1371   if (!StrVal.empty()) {
1372     const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
1373     const LangOptions &LangOpts = CGF.CGM.getLangOptions();
1374 
1375     // Add the location of the start of each subsequent line of the asm to the
1376     // MDNode.
1377     for (unsigned i = 0, e = StrVal.size()-1; i != e; ++i) {
1378       if (StrVal[i] != '\n') continue;
1379       SourceLocation LineLoc = Str->getLocationOfByte(i+1, SM, LangOpts,
1380                                                       CGF.Target);
1381       Locs.push_back(llvm::ConstantInt::get(CGF.Int32Ty,
1382                                             LineLoc.getRawEncoding()));
1383     }
1384   }
1385 
1386   return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
1387 }
1388 
1389 void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
1390   // Analyze the asm string to decompose it into its pieces.  We know that Sema
1391   // has already done this, so it is guaranteed to be successful.
1392   SmallVector<AsmStmt::AsmStringPiece, 4> Pieces;
1393   unsigned DiagOffs;
1394   S.AnalyzeAsmString(Pieces, getContext(), DiagOffs);
1395 
1396   // Assemble the pieces into the final asm string.
1397   std::string AsmString;
1398   for (unsigned i = 0, e = Pieces.size(); i != e; ++i) {
1399     if (Pieces[i].isString())
1400       AsmString += Pieces[i].getString();
1401     else if (Pieces[i].getModifier() == '\0')
1402       AsmString += '$' + llvm::utostr(Pieces[i].getOperandNo());
1403     else
1404       AsmString += "${" + llvm::utostr(Pieces[i].getOperandNo()) + ':' +
1405                    Pieces[i].getModifier() + '}';
1406   }
1407 
1408   // Get all the output and input constraints together.
1409   SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
1410   SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
1411 
1412   for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
1413     TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i),
1414                                     S.getOutputName(i));
1415     bool IsValid = Target.validateOutputConstraint(Info); (void)IsValid;
1416     assert(IsValid && "Failed to parse output constraint");
1417     OutputConstraintInfos.push_back(Info);
1418   }
1419 
1420   for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
1421     TargetInfo::ConstraintInfo Info(S.getInputConstraint(i),
1422                                     S.getInputName(i));
1423     bool IsValid = Target.validateInputConstraint(OutputConstraintInfos.data(),
1424                                                   S.getNumOutputs(), Info);
1425     assert(IsValid && "Failed to parse input constraint"); (void)IsValid;
1426     InputConstraintInfos.push_back(Info);
1427   }
1428 
1429   std::string Constraints;
1430 
1431   std::vector<LValue> ResultRegDests;
1432   std::vector<QualType> ResultRegQualTys;
1433   std::vector<llvm::Type *> ResultRegTypes;
1434   std::vector<llvm::Type *> ResultTruncRegTypes;
1435   std::vector<llvm::Type*> ArgTypes;
1436   std::vector<llvm::Value*> Args;
1437 
1438   // Keep track of inout constraints.
1439   std::string InOutConstraints;
1440   std::vector<llvm::Value*> InOutArgs;
1441   std::vector<llvm::Type*> InOutArgTypes;
1442 
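  // Process the output operands.  Register outputs become results of the
  // asm call itself; memory outputs are passed indirectly as "=*" arguments.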
1443   for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
1444     TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
1445 
1446     // Simplify the output constraint.
1447     std::string OutputConstraint(S.getOutputConstraint(i));
1448     OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1, Target);
1449 
1450     const Expr *OutExpr = S.getOutputExpr(i);
1451     OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
1452 
1453     OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
1454                                               Target, CGM, S);
1455 
1456     LValue Dest = EmitLValue(OutExpr);
1457     if (!Constraints.empty())
1458       Constraints += ',';
1459 
1460     // If this is a register output, then make the inline asm return it
1461     // by-value.  If this is a memory result, return the value by-reference.
1462     if (!Info.allowsMemory() && !hasAggregateLLVMType(OutExpr->getType())) {
1463       Constraints += "=" + OutputConstraint;
1464       ResultRegQualTys.push_back(OutExpr->getType());
1465       ResultRegDests.push_back(Dest);
1466       ResultRegTypes.push_back(ConvertTypeForMem(OutExpr->getType()));
1467       ResultTruncRegTypes.push_back(ResultRegTypes.back());
1468 
1469       // If this output is tied to an input, and if the input is larger, then
1470       // we need to set the actual result type of the inline asm node to be the
1471       // same as the input type.
1472       if (Info.hasMatchingInput()) {
1473         unsigned InputNo;
1474         for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
1475           TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
1476           if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
1477             break;
1478         }
1479         assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
1480 
1481         QualType InputTy = S.getInputExpr(InputNo)->getType();
1482         QualType OutputType = OutExpr->getType();
1483 
1484         uint64_t InputSize = getContext().getTypeSize(InputTy);
1485         if (getContext().getTypeSize(OutputType) < InputSize) {
1486           // Form the asm to return the value as a larger integer or fp type.
1487           ResultRegTypes.back() = ConvertType(InputTy);
1488         }
1489       }
1490       if (llvm::Type* AdjTy =
1491             getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
1492                                                  ResultRegTypes.back()))
1493         ResultRegTypes.back() = AdjTy;
1494     } else {
1495       ArgTypes.push_back(Dest.getAddress()->getType());
1496       Args.push_back(Dest.getAddress());
1497       Constraints += "=*";
1498       Constraints += OutputConstraint;
1499     }
1500 
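    // A read-write ("+") output also acts as an input.  Emit its value now;
    // the corresponding constraint and argument are appended after the
    // explicit inputs.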
1501     if (Info.isReadWrite()) {
1502       InOutConstraints += ',';
1503 
1504       const Expr *InputExpr = S.getOutputExpr(i);
1505       llvm::Value *Arg = EmitAsmInputLValue(S, Info, Dest, InputExpr->getType(),
1506                                             InOutConstraints);
1507 
1508       if (Info.allowsRegister())
1509         InOutConstraints += llvm::utostr(i);
1510       else
1511         InOutConstraints += OutputConstraint;
1512 
1513       InOutArgTypes.push_back(Arg->getType());
1514       InOutArgs.push_back(Arg);
1515     }
1516   }
1517 
1518   unsigned NumConstraints = S.getNumOutputs() + S.getNumInputs();
1519 
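  // Process the explicit input operands.  Each one contributes a constraint
  // and an argument, and may need to be extended to match a tied output.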
1520   for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
1521     const Expr *InputExpr = S.getInputExpr(i);
1522 
1523     TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
1524 
1525     if (!Constraints.empty())
1526       Constraints += ',';
1527 
1528     // Simplify the input constraint.
1529     std::string InputConstraint(S.getInputConstraint(i));
1530     InputConstraint = SimplifyConstraint(InputConstraint.c_str(), Target,
1531                                          &OutputConstraintInfos);
1532 
    InputConstraint =
      AddVariableConstraints(InputConstraint,
                             *InputExpr->IgnoreParenNoopCasts(getContext()),
                             Target, CGM, S);
1537 
1538     llvm::Value *Arg = EmitAsmInput(S, Info, InputExpr, Constraints);
1539 
1540     // If this input argument is tied to a larger output result, extend the
1541     // input to be the same size as the output.  The LLVM backend wants to see
1542     // the input and output of a matching constraint be the same size.  Note
1543     // that GCC does not define what the top bits are here.  We use zext because
1544     // that is usually cheaper, but LLVM IR should really get an anyext someday.
1545     if (Info.hasTiedOperand()) {
1546       unsigned Output = Info.getTiedOperand();
1547       QualType OutputType = S.getOutputExpr(Output)->getType();
1548       QualType InputTy = InputExpr->getType();
1549 
1550       if (getContext().getTypeSize(OutputType) >
1551           getContext().getTypeSize(InputTy)) {
1552         // Use ptrtoint as appropriate so that we can do our extension.
1553         if (isa<llvm::PointerType>(Arg->getType()))
1554           Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
1555         llvm::Type *OutputTy = ConvertType(OutputType);
1556         if (isa<llvm::IntegerType>(OutputTy))
1557           Arg = Builder.CreateZExt(Arg, OutputTy);
1558         else if (isa<llvm::PointerType>(OutputTy))
1559           Arg = Builder.CreateZExt(Arg, IntPtrTy);
1560         else {
1561           assert(OutputTy->isFloatingPointTy() && "Unexpected output type");
1562           Arg = Builder.CreateFPExt(Arg, OutputTy);
1563         }
1564       }
1565     }
1566     if (llvm::Type* AdjTy =
1567               getTargetHooks().adjustInlineAsmType(*this, InputConstraint,
1568                                                    Arg->getType()))
1569       Arg = Builder.CreateBitCast(Arg, AdjTy);
1570 
1571     ArgTypes.push_back(Arg->getType());
1572     Args.push_back(Arg);
1573     Constraints += InputConstraint;
1574   }
1575 
1576   // Append the "input" part of inout constraints last.
1577   for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
1578     ArgTypes.push_back(InOutArgTypes[i]);
1579     Args.push_back(InOutArgs[i]);
1580   }
1581   Constraints += InOutConstraints;
1582 
1583   // Clobbers
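  // Each clobbered register is encoded as a "~{reg}" constraint; the special
  // "memory" and "cc" clobbers are passed through without normalization.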
1584   for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
1585     StringRef Clobber = S.getClobber(i)->getString();
1586 
    if (Clobber != "memory" && Clobber != "cc")
      Clobber = Target.getNormalizedGCCRegisterName(Clobber);
1589 
1590     if (i != 0 || NumConstraints != 0)
1591       Constraints += ',';
1592 
1593     Constraints += "~{";
1594     Constraints += Clobber;
1595     Constraints += '}';
1596   }
1597 
1598   // Add machine specific clobbers
1599   std::string MachineClobbers = Target.getClobbers();
1600   if (!MachineClobbers.empty()) {
1601     if (!Constraints.empty())
1602       Constraints += ',';
1603     Constraints += MachineClobbers;
1604   }
1605 
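  // Compute the return type of the asm call: void if there are no register
  // outputs, the single result type if there is exactly one, or a struct of
  // all of the register result types.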
1606   llvm::Type *ResultType;
1607   if (ResultRegTypes.empty())
1608     ResultType = VoidTy;
1609   else if (ResultRegTypes.size() == 1)
1610     ResultType = ResultRegTypes[0];
1611   else
1612     ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
1613 
1614   llvm::FunctionType *FTy =
1615     llvm::FunctionType::get(ResultType, ArgTypes, false);
1616 
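  // Mark the asm as having side effects if it is volatile or has no outputs;
  // GCC treats an asm with no output operands as implicitly volatile.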
1617   llvm::InlineAsm *IA =
1618     llvm::InlineAsm::get(FTy, AsmString, Constraints,
1619                          S.isVolatile() || S.getNumOutputs() == 0);
1620   llvm::CallInst *Result = Builder.CreateCall(IA, Args);
1621   Result->addAttribute(~0, llvm::Attribute::NoUnwind);
1622 
1623   // Slap the source location of the inline asm into a !srcloc metadata on the
1624   // call.
1625   Result->setMetadata("srcloc", getAsmSrcLocInfo(S.getAsmString(), *this));
1626 
1627   // Extract all of the register value results from the asm.
1628   std::vector<llvm::Value*> RegResults;
1629   if (ResultRegTypes.size() == 1) {
1630     RegResults.push_back(Result);
1631   } else {
1632     for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
1633       llvm::Value *Tmp = Builder.CreateExtractValue(Result, i, "asmresult");
1634       RegResults.push_back(Tmp);
1635     }
1636   }
1637 
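  // Store the register results back into their output l-values, converting
  // each value first if the type returned by the asm differs from the type
  // of the destination.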
1638   for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
1639     llvm::Value *Tmp = RegResults[i];
1640 
1641     // If the result type of the LLVM IR asm doesn't match the result type of
1642     // the expression, do the conversion.
1643     if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
1644       llvm::Type *TruncTy = ResultTruncRegTypes[i];
1645 
      // Convert the result to the right size; note that TruncTy can be a
      // pointer, floating-point, or vector type as well as an integer.
1648       if (TruncTy->isFloatingPointTy())
1649         Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
1650       else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
1651         uint64_t ResSize = CGM.getTargetData().getTypeSizeInBits(TruncTy);
1652         Tmp = Builder.CreateTrunc(Tmp,
1653                    llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize));
1654         Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
1655       } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
        uint64_t TmpSize =
          CGM.getTargetData().getTypeSizeInBits(Tmp->getType());
1657         Tmp = Builder.CreatePtrToInt(Tmp,
1658                    llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize));
1659         Tmp = Builder.CreateTrunc(Tmp, TruncTy);
1660       } else if (TruncTy->isIntegerTy()) {
1661         Tmp = Builder.CreateTrunc(Tmp, TruncTy);
1662       } else if (TruncTy->isVectorTy()) {
1663         Tmp = Builder.CreateBitCast(Tmp, TruncTy);
1664       }
1665     }
1666 
1667     EmitStoreThroughLValue(RValue::get(Tmp), ResultRegDests[i]);
1668   }
1669 }
1670