1 //===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This contains code to emit Stmt nodes as LLVM code.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CGDebugInfo.h"
15 #include "CodeGenModule.h"
16 #include "CodeGenFunction.h"
17 #include "clang/AST/StmtVisitor.h"
18 #include "clang/Basic/PrettyStackTrace.h"
19 #include "clang/Basic/TargetInfo.h"
20 #include "llvm/ADT/StringExtras.h"
21 #include "llvm/InlineAsm.h"
22 #include "llvm/Intrinsics.h"
23 #include "llvm/Target/TargetData.h"
24 using namespace clang;
25 using namespace CodeGen;
26 
27 //===----------------------------------------------------------------------===//
28 //                              Statement Emission
29 //===----------------------------------------------------------------------===//
30 
31 void CodeGenFunction::EmitStopPoint(const Stmt *S) {
32   if (CGDebugInfo *DI = getDebugInfo()) {
33     if (isa<DeclStmt>(S))
34       DI->setLocation(S->getLocEnd());
35     else
36       DI->setLocation(S->getLocStart());
37     DI->EmitStopPoint(CurFn, Builder);
38   }
39 }
40 
41 void CodeGenFunction::EmitStmt(const Stmt *S) {
42   assert(S && "Null statement?");
43 
44   // Check if we can handle this without bothering to generate an
45   // insert point or debug info.
46   if (EmitSimpleStmt(S))
47     return;
48 
49   // Check if we are generating unreachable code.
50   if (!HaveInsertPoint()) {
51     // If so, and the statement doesn't contain a label, then we do not need to
52     // generate actual code. This is safe because (1) the current point is
53     // unreachable, so we don't need to execute the code, and (2) we've already
54     // handled the statements which update internal data structures (like the
55     // local variable map) which could be used by subsequent statements.
56     if (!ContainsLabel(S)) {
57       // Verify that any decl statements were handled as simple, since they may
58       // be in the scope of subsequent reachable statements.
59       assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
60       return;
61     }
62 
63     // Otherwise, make a new block to hold the code.
64     EnsureInsertPoint();
65   }
66 
67   // Generate a stoppoint if we are emitting debug info.
68   EmitStopPoint(S);
69 
70   switch (S->getStmtClass()) {
71   default:
72     // Must be an expression in a stmt context.  Emit the value (to get
73     // side-effects) and ignore the result.
74     if (!isa<Expr>(S))
75       ErrorUnsupported(S, "statement");
76 
77     EmitAnyExpr(cast<Expr>(S), 0, false, true);
78 
79     // Expression emitters don't handle unreachable blocks yet, so look for one
80     // explicitly here. This handles the common case of a call to a noreturn
81     // function.
82     // We can't erase blocks with an associated cleanup size here since the
83     // memory might be reused, leaving the old cleanup info pointing at a new
84     // block.
85     if (llvm::BasicBlock *CurBB = Builder.GetInsertBlock()) {
86       if (CurBB->empty() && CurBB->use_empty() && !BlockScopes.count(CurBB)) {
87         CurBB->eraseFromParent();
88         Builder.ClearInsertionPoint();
89       }
90     }
91     break;
92   case Stmt::IndirectGotoStmtClass:
93     EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;
94 
95   case Stmt::IfStmtClass:       EmitIfStmt(cast<IfStmt>(*S));             break;
96   case Stmt::WhileStmtClass:    EmitWhileStmt(cast<WhileStmt>(*S));       break;
97   case Stmt::DoStmtClass:       EmitDoStmt(cast<DoStmt>(*S));             break;
98   case Stmt::ForStmtClass:      EmitForStmt(cast<ForStmt>(*S));           break;
99 
100   case Stmt::ReturnStmtClass:   EmitReturnStmt(cast<ReturnStmt>(*S));     break;
101 
102   case Stmt::SwitchStmtClass:   EmitSwitchStmt(cast<SwitchStmt>(*S));     break;
103   case Stmt::AsmStmtClass:      EmitAsmStmt(cast<AsmStmt>(*S));           break;
104 
105   case Stmt::ObjCAtTryStmtClass:
106     EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
107     break;
108   case Stmt::ObjCAtCatchStmtClass:
109     assert(0 && "@catch statements should be handled by EmitObjCAtTryStmt");
110     break;
111   case Stmt::ObjCAtFinallyStmtClass:
112     assert(0 && "@finally statements should be handled by EmitObjCAtTryStmt");
113     break;
114   case Stmt::ObjCAtThrowStmtClass:
115     EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
116     break;
117   case Stmt::ObjCAtSynchronizedStmtClass:
118     EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
119     break;
120   case Stmt::ObjCForCollectionStmtClass:
121     EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
122     break;
123 
124   case Stmt::CXXTryStmtClass:
125     EmitCXXTryStmt(cast<CXXTryStmt>(*S));
126     break;
127   }
128 }
129 
130 bool CodeGenFunction::EmitSimpleStmt(const Stmt *S) {
131   switch (S->getStmtClass()) {
132   default: return false;
133   case Stmt::NullStmtClass: break;
134   case Stmt::CompoundStmtClass: EmitCompoundStmt(cast<CompoundStmt>(*S)); break;
135   case Stmt::DeclStmtClass:     EmitDeclStmt(cast<DeclStmt>(*S));         break;
136   case Stmt::LabelStmtClass:    EmitLabelStmt(cast<LabelStmt>(*S));       break;
137   case Stmt::GotoStmtClass:     EmitGotoStmt(cast<GotoStmt>(*S));         break;
138   case Stmt::BreakStmtClass:    EmitBreakStmt(cast<BreakStmt>(*S));       break;
139   case Stmt::ContinueStmtClass: EmitContinueStmt(cast<ContinueStmt>(*S)); break;
140   case Stmt::DefaultStmtClass:  EmitDefaultStmt(cast<DefaultStmt>(*S));   break;
141   case Stmt::CaseStmtClass:     EmitCaseStmt(cast<CaseStmt>(*S));         break;
142   }
143 
144   return true;
145 }
146 
147 /// EmitCompoundStmt - Emit a compound statement {..} node.  If GetLast is true,
148 /// this captures the expression result of the last sub-statement and returns it
149 /// (for use by the statement expression extension).
150 RValue CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
151                                          llvm::Value *AggLoc, bool isAggVol) {
152   PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
153                              "LLVM IR generation of compound statement ('{}')");
154 
155   CGDebugInfo *DI = getDebugInfo();
156   if (DI) {
157     DI->setLocation(S.getLBracLoc());
158     DI->EmitRegionStart(CurFn, Builder);
159   }
160 
161   // Keep track of the current cleanup stack depth.
162   CleanupScope Scope(*this);
163 
164   for (CompoundStmt::const_body_iterator I = S.body_begin(),
165        E = S.body_end()-GetLast; I != E; ++I)
166     EmitStmt(*I);
167 
168   if (DI) {
169     DI->setLocation(S.getRBracLoc());
170     DI->EmitRegionEnd(CurFn, Builder);
171   }
172 
173   RValue RV;
174   if (!GetLast)
175     RV = RValue::get(0);
176   else {
177     // We have to special case labels here.  They are statements, but when put
178     // at the end of a statement expression, they yield the value of their
179     // subexpression.  Handle this by walking through all labels we encounter,
180     // emitting them before we evaluate the subexpr.
181     const Stmt *LastStmt = S.body_back();
182     while (const LabelStmt *LS = dyn_cast<LabelStmt>(LastStmt)) {
183       EmitLabel(*LS);
184       LastStmt = LS->getSubStmt();
185     }
186 
187     EnsureInsertPoint();
188 
189     RV = EmitAnyExpr(cast<Expr>(LastStmt), AggLoc);
190   }
191 
192   return RV;
193 }
194 
195 void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
196   llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());
197 
198   // If there is a cleanup stack, then it isn't worth trying to
199   // simplify this block (we would need to remove it from the scope map
200   // and cleanup entry).
201   if (!CleanupEntries.empty())
202     return;
203 
204   // Can only simplify direct branches.
205   if (!BI || !BI->isUnconditional())
206     return;
207 
208   BB->replaceAllUsesWith(BI->getSuccessor(0));
209   BI->eraseFromParent();
210   BB->eraseFromParent();
211 }
212 
213 void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
214   llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
215 
216   // Fall out of the current block (if necessary).
217   EmitBranch(BB);
218 
219   if (IsFinished && BB->use_empty()) {
220     delete BB;
221     return;
222   }
223 
224   // If necessary, associate the block with the cleanup stack size.
225   if (!CleanupEntries.empty()) {
226     // Check if the basic block has already been inserted.
227     BlockScopeMap::iterator I = BlockScopes.find(BB);
228     if (I != BlockScopes.end()) {
229       assert(I->second == CleanupEntries.size() - 1);
230     } else {
231       BlockScopes[BB] = CleanupEntries.size() - 1;
232       CleanupEntries.back().Blocks.push_back(BB);
233     }
234   }
235 
236   // Place the block after the current block, if possible, or else at
237   // the end of the function.
238   if (CurBB && CurBB->getParent())
239     CurFn->getBasicBlockList().insertAfter(CurBB, BB);
240   else
241     CurFn->getBasicBlockList().push_back(BB);
242   Builder.SetInsertPoint(BB);
243 }
244 
245 void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
246   // Emit a branch from the current block to the target one if this
247   // was a real block.  If this was just a fall-through block after a
248   // terminator, don't emit it.
249   llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
250 
251   if (!CurBB || CurBB->getTerminator()) {
252     // If there is no insert point or the previous block is already
253     // terminated, don't touch it.
254   } else {
255     // Otherwise, create a fall-through branch.
256     Builder.CreateBr(Target);
257   }
258 
259   Builder.ClearInsertionPoint();
260 }
261 
262 void CodeGenFunction::EmitLabel(const LabelStmt &S) {
263   EmitBlock(getBasicBlockForLabel(&S));
264 }
265 
266 
267 void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
268   EmitLabel(S);
269   EmitStmt(S.getSubStmt());
270 }
271 
272 void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
273   // If this code is reachable then emit a stop point (if generating
274   // debug info). We have to do this ourselves because we are on the
275   // "simple" statement path.
276   if (HaveInsertPoint())
277     EmitStopPoint(&S);
278 
279   EmitBranchThroughCleanup(getBasicBlockForLabel(S.getLabel()));
280 }
281 
282 
283 void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
284   // Ensure that we have an i8* for our PHI node.
285   llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
286                                          llvm::Type::getInt8PtrTy(VMContext),
287                                           "addr");
288   llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
289 
290 
291   // Get the basic block for the indirect goto.
292   llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();
293 
294   // The first instruction in the block has to be the PHI for the switch dest;
295   // add an entry for this branch.
296   cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);
297 
298   EmitBranch(IndGotoBB);
299 }
300 
301 void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
302   // C99 6.8.4.1: The first substatement is executed if the expression compares
303   // unequal to 0.  The condition must be a scalar type.
304   CleanupScope ConditionScope(*this);
305 
306   if (S.getConditionVariable())
307     EmitLocalBlockVarDecl(*S.getConditionVariable());
308 
309   // If the condition constant folds and can be elided, try to avoid emitting
310   // the condition and the dead arm of the if/else.
311   if (int Cond = ConstantFoldsToSimpleInteger(S.getCond())) {
312     // Figure out which block (then or else) is executed.
313     const Stmt *Executed = S.getThen(), *Skipped  = S.getElse();
314     if (Cond == -1)  // Condition false?
315       std::swap(Executed, Skipped);
316 
317     // If the skipped block has no labels in it, just emit the executed block.
318     // This avoids emitting dead code and simplifies the CFG substantially.
319     if (!ContainsLabel(Skipped)) {
320       if (Executed) {
321         CleanupScope ExecutedScope(*this);
322         EmitStmt(Executed);
323       }
324       return;
325     }
326   }
327 
328   // Otherwise, the condition did not fold, or we couldn't elide it.  Just emit
329   // the conditional branch.
330   llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
331   llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
332   llvm::BasicBlock *ElseBlock = ContBlock;
333   if (S.getElse())
334     ElseBlock = createBasicBlock("if.else");
335   EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock);
336 
337   // Emit the 'then' code.
338   EmitBlock(ThenBlock);
339   {
340     CleanupScope ThenScope(*this);
341     EmitStmt(S.getThen());
342   }
343   EmitBranch(ContBlock);
344 
345   // Emit the 'else' code if present.
346   if (const Stmt *Else = S.getElse()) {
347     EmitBlock(ElseBlock);
348     {
349       CleanupScope ElseScope(*this);
350       EmitStmt(Else);
351     }
352     EmitBranch(ContBlock);
353   }
354 
355   // Emit the continuation block for code after the if.
356   EmitBlock(ContBlock, true);
357 }
358 
359 void CodeGenFunction::EmitWhileStmt(const WhileStmt &S) {
360   // Emit the header for the loop and insert it, which will create an
361   // unconditional branch to it.
362   llvm::BasicBlock *LoopHeader = createBasicBlock("while.cond");
363   EmitBlock(LoopHeader);
364 
365   // Create an exit block for when the condition fails, and a block for the
366   // body of the loop.
367   llvm::BasicBlock *ExitBlock = createBasicBlock("while.end");
368   llvm::BasicBlock *LoopBody  = createBasicBlock("while.body");
369   llvm::BasicBlock *CleanupBlock = 0;
370   llvm::BasicBlock *EffectiveExitBlock = ExitBlock;
371 
372   // Store the blocks to use for break and continue.
373   BreakContinueStack.push_back(BreakContinue(ExitBlock, LoopHeader));
374 
375   // C++ [stmt.while]p2:
376   //   When the condition of a while statement is a declaration, the
377   //   scope of the variable that is declared extends from its point
378   //   of declaration (3.3.2) to the end of the while statement.
379   //   [...]
380   //   The object created in a condition is destroyed and created
381   //   with each iteration of the loop.
382   CleanupScope ConditionScope(*this);
383 
384   if (S.getConditionVariable()) {
385     EmitLocalBlockVarDecl(*S.getConditionVariable());
386 
387     // If this condition variable requires cleanups, create a basic
388     // block to handle those cleanups.
389     if (ConditionScope.requiresCleanups()) {
390       CleanupBlock = createBasicBlock("while.cleanup");
391       EffectiveExitBlock = CleanupBlock;
392     }
393   }
394 
395   // Evaluate the conditional in the while header.  C99 6.8.5.1: The
396   // evaluation of the controlling expression takes place before each
397   // execution of the loop body.
398   llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
399 
400   // while(1) is common, avoid extra exit blocks.  Be sure
401   // to correctly handle break/continue though.
402   bool EmitBoolCondBranch = true;
403   if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
404     if (C->isOne())
405       EmitBoolCondBranch = false;
406 
407   // As long as the condition is true, go to the loop body.
408   if (EmitBoolCondBranch)
409     Builder.CreateCondBr(BoolCondVal, LoopBody, EffectiveExitBlock);
410 
411   // Emit the loop body.
412   {
413     CleanupScope BodyScope(*this);
414     EmitBlock(LoopBody);
415     EmitStmt(S.getBody());
416   }
417 
418   BreakContinueStack.pop_back();
419 
420   if (CleanupBlock) {
421     // If we have a cleanup block, jump there to perform cleanups
422     // before looping.
423     EmitBranch(CleanupBlock);
424 
425     // Emit the cleanup block, performing cleanups for the condition
426     // and then jumping to either the loop header or the exit block.
427     EmitBlock(CleanupBlock);
428     ConditionScope.ForceCleanup();
429     Builder.CreateCondBr(BoolCondVal, LoopHeader, ExitBlock);
430   } else {
431     // Cycle to the condition.
432     EmitBranch(LoopHeader);
433   }
434 
435   // Emit the exit block.
436   EmitBlock(ExitBlock, true);
437 
438 
439   // If we skipped emitting the conditional branch, the LoopHeader is typically
440   // just an unconditional branch; try to erase it.
441   if (!EmitBoolCondBranch && !CleanupBlock)
442     SimplifyForwardingBlocks(LoopHeader);
443 }
444 
445 void CodeGenFunction::EmitDoStmt(const DoStmt &S) {
446   // Emit the body for the loop and insert it, which will create an
447   // unconditional branch to it.
448   llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
449   llvm::BasicBlock *AfterDo = createBasicBlock("do.end");
450   EmitBlock(LoopBody);
451 
452   llvm::BasicBlock *DoCond = createBasicBlock("do.cond");
453 
454   // Store the blocks to use for break and continue.
455   BreakContinueStack.push_back(BreakContinue(AfterDo, DoCond));
456 
457   // Emit the body of the loop into the block.
458   EmitStmt(S.getBody());
459 
460   BreakContinueStack.pop_back();
461 
462   EmitBlock(DoCond);
463 
464   // C99 6.8.5.2: "The evaluation of the controlling expression takes place
465   // after each execution of the loop body."
466 
467   // Evaluate the controlling expression in the "do.cond" block.
468   // C99 6.8.5p2/p4: The first substatement is executed if the expression
469   // compares unequal to 0.  The condition must be a scalar type.
470   llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
471 
472   // "do {} while (0)" is common in macros, avoid extra blocks.  Be sure
473   // to correctly handle break/continue though.
474   bool EmitBoolCondBranch = true;
475   if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
476     if (C->isZero())
477       EmitBoolCondBranch = false;
478 
479   // As long as the condition is true, iterate the loop.
480   if (EmitBoolCondBranch)
481     Builder.CreateCondBr(BoolCondVal, LoopBody, AfterDo);
482 
483   // Emit the exit block.
484   EmitBlock(AfterDo);
485 
486   // If we skipped emitting the conditional branch, the DoCond block is
487   // typically just an unconditional branch; try to erase it.
488   if (!EmitBoolCondBranch)
489     SimplifyForwardingBlocks(DoCond);
490 }
491 
492 void CodeGenFunction::EmitForStmt(const ForStmt &S) {
493   CleanupScope ForScope(*this);
494 
495   // Evaluate the first part before the loop.
496   if (S.getInit())
497     EmitStmt(S.getInit());
498 
499   // Start the loop with a block that tests the condition.
500   llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
501   llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
502   llvm::BasicBlock *IncBlock = 0;
503   llvm::BasicBlock *CondCleanup = 0;
504   llvm::BasicBlock *EffectiveExitBlock = AfterFor;
505   EmitBlock(CondBlock);
506 
507   // Create a cleanup scope for the condition variable cleanups.
508   CleanupScope ConditionScope(*this);
509 
510   llvm::Value *BoolCondVal = 0;
511   if (S.getCond()) {
512     // If the for statement has a condition scope, emit the local variable
513     // declaration.
514     if (S.getConditionVariable()) {
515       EmitLocalBlockVarDecl(*S.getConditionVariable());
516 
517       if (ConditionScope.requiresCleanups()) {
518         CondCleanup = createBasicBlock("for.cond.cleanup");
519         EffectiveExitBlock = CondCleanup;
520       }
521     }
522 
523     // As long as the condition is true, iterate the loop.
524     llvm::BasicBlock *ForBody = createBasicBlock("for.body");
525 
526     // C99 6.8.5p2/p4: The first substatement is executed if the expression
527     // compares unequal to 0.  The condition must be a scalar type.
528     BoolCondVal = EvaluateExprAsBool(S.getCond());
529     Builder.CreateCondBr(BoolCondVal, ForBody, EffectiveExitBlock);
530 
531     EmitBlock(ForBody);
532   } else {
533     // Treat it as a non-zero constant.  Don't even create a new block for the
534     // body, just fall into it.
535   }
536 
537   // If the for loop doesn't have an increment we can just use the
538   // condition as the continue block.
539   llvm::BasicBlock *ContinueBlock;
540   if (S.getInc())
541     ContinueBlock = IncBlock = createBasicBlock("for.inc");
542   else
543     ContinueBlock = CondBlock;
544 
545   // Store the blocks to use for break and continue.
546   BreakContinueStack.push_back(BreakContinue(AfterFor, ContinueBlock));
547 
548   // If the condition is true, execute the body of the for stmt.
549   CGDebugInfo *DI = getDebugInfo();
550   if (DI) {
551     DI->setLocation(S.getSourceRange().getBegin());
552     DI->EmitRegionStart(CurFn, Builder);
553   }
554 
555   {
556     // Create a separate cleanup scope for the body, in case it is not
557     // a compound statement.
558     CleanupScope BodyScope(*this);
559     EmitStmt(S.getBody());
560   }
561 
562   // If there is an increment, emit it next.
563   if (S.getInc()) {
564     EmitBlock(IncBlock);
565     EmitStmt(S.getInc());
566   }
567 
568   BreakContinueStack.pop_back();
569 
570   // Finally, branch back up to the condition for the next iteration.
571   if (CondCleanup) {
572     // Branch to the cleanup block.
573     EmitBranch(CondCleanup);
574 
575     // Emit the cleanup block, which branches back to the loop body or
576     // outside of the for statement once it is done.
577     EmitBlock(CondCleanup);
578     ConditionScope.ForceCleanup();
579     Builder.CreateCondBr(BoolCondVal, CondBlock, AfterFor);
580   } else
581     EmitBranch(CondBlock);
582   if (DI) {
583     DI->setLocation(S.getSourceRange().getEnd());
584     DI->EmitRegionEnd(CurFn, Builder);
585   }
586 
587   // Emit the fall-through block.
588   EmitBlock(AfterFor, true);
589 }
590 
591 void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
592   if (RV.isScalar()) {
593     Builder.CreateStore(RV.getScalarVal(), ReturnValue);
594   } else if (RV.isAggregate()) {
595     EmitAggregateCopy(ReturnValue, RV.getAggregateAddr(), Ty);
596   } else {
597     StoreComplexToAddr(RV.getComplexVal(), ReturnValue, false);
598   }
599   EmitBranchThroughCleanup(ReturnBlock);
600 }
601 
602 /// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
603 /// if the function returns void, or may be missing one if the function returns
604 /// non-void.  Fun stuff :).
605 void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
606   // Emit the result value, even if unused, to evaluate the side effects.
607   const Expr *RV = S.getRetValue();
608 
609   // FIXME: Clean this up by using an LValue for ReturnTemp,
610   // EmitStoreThroughLValue, and EmitAnyExpr.
611   if (S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable() &&
612       !Target.useGlobalsForAutomaticVariables()) {
613     // Apply the named return value optimization for this return statement,
614     // which means doing nothing: the appropriate result has already been
615     // constructed into the NRVO variable.
616 
617     // If there is an NRVO flag for this variable, set it to 1 to indicate
618     // that the cleanup code should not destroy the variable.
619     if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()]) {
620       const llvm::Type *BoolTy = llvm::Type::getInt1Ty(VMContext);
621       llvm::Value *One = llvm::ConstantInt::get(BoolTy, 1);
622       Builder.CreateStore(One, NRVOFlag);
623     }
624   } else if (!ReturnValue) {
625     // Make sure not to return anything, but evaluate the expression
626     // for side effects.
627     if (RV)
628       EmitAnyExpr(RV);
629   } else if (RV == 0) {
630     // Do nothing (return value is left uninitialized)
631   } else if (FnRetTy->isReferenceType()) {
632     // If this function returns a reference, take the address of the expression
633     // rather than the value.
634     RValue Result = EmitReferenceBindingToExpr(RV, false);
635     Builder.CreateStore(Result.getScalarVal(), ReturnValue);
636   } else if (!hasAggregateLLVMType(RV->getType())) {
637     Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
638   } else if (RV->getType()->isAnyComplexType()) {
639     EmitComplexExprIntoAddr(RV, ReturnValue, false);
640   } else {
641     EmitAggExpr(RV, ReturnValue, false);
642   }
643 
644   EmitBranchThroughCleanup(ReturnBlock);
645 }
646 
647 void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
648   // As long as debug info is modeled with instructions, we have to ensure we
649   // have a place to insert here and write the stop point here.
650   if (getDebugInfo()) {
651     EnsureInsertPoint();
652     EmitStopPoint(&S);
653   }
654 
655   for (DeclStmt::const_decl_iterator I = S.decl_begin(), E = S.decl_end();
656        I != E; ++I)
657     EmitDecl(**I);
658 }
659 
660 void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
661   assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");
662 
663   // If this code is reachable then emit a stop point (if generating
664   // debug info). We have to do this ourselves because we are on the
665   // "simple" statement path.
666   if (HaveInsertPoint())
667     EmitStopPoint(&S);
668 
669   llvm::BasicBlock *Block = BreakContinueStack.back().BreakBlock;
670   EmitBranchThroughCleanup(Block);
671 }
672 
673 void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
674   assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
675 
676   // If this code is reachable then emit a stop point (if generating
677   // debug info). We have to do this ourselves because we are on the
678   // "simple" statement path.
679   if (HaveInsertPoint())
680     EmitStopPoint(&S);
681 
682   llvm::BasicBlock *Block = BreakContinueStack.back().ContinueBlock;
683   EmitBranchThroughCleanup(Block);
684 }
685 
686 /// EmitCaseStmtRange - If the case statement range is not too big, add
687 /// multiple cases to the switch instruction, one for each value within the
688 /// range. If the range is too big, emit an "if" condition check instead.
689 void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
690   assert(S.getRHS() && "Expected RHS value in CaseStmt");
691 
692   llvm::APSInt LHS = S.getLHS()->EvaluateAsInt(getContext());
693   llvm::APSInt RHS = S.getRHS()->EvaluateAsInt(getContext());
694 
695   // Emit the code for this case. We do this first to make sure it is
696   // properly chained from our predecessor before generating the
697   // switch machinery to enter this block.
698   EmitBlock(createBasicBlock("sw.bb"));
699   llvm::BasicBlock *CaseDest = Builder.GetInsertBlock();
700   EmitStmt(S.getSubStmt());
701 
702   // If range is empty, do nothing.
703   if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
704     return;
705 
706   llvm::APInt Range = RHS - LHS;
707   // FIXME: parameters such as this should not be hardcoded.
708   if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
709     // Range is small enough to add multiple switch instruction cases.
710     for (unsigned i = 0, e = Range.getZExtValue() + 1; i != e; ++i) {
711       SwitchInsn->addCase(llvm::ConstantInt::get(VMContext, LHS), CaseDest);
712       LHS++;
713     }
714     return;
715   }
716 
717   // The range is too big. Emit "if" condition into a new block,
718   // making sure to save and restore the current insertion point.
719   llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();
720 
721   // Push this test onto the chain of range checks (which terminates
722   // in the default basic block). The switch's default will be changed
723   // to the top of this chain after switch emission is complete.
724   llvm::BasicBlock *FalseDest = CaseRangeBlock;
725   CaseRangeBlock = createBasicBlock("sw.caserange");
726 
727   CurFn->getBasicBlockList().push_back(CaseRangeBlock);
728   Builder.SetInsertPoint(CaseRangeBlock);
729 
730   // Emit range check.
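  // A single unsigned comparison covers both ends of the range: if the switch
  // value is below LHS, the subtraction wraps around to a large unsigned
  // number, so "Diff ule Range" holds exactly when LHS <= value <= RHS.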
731   llvm::Value *Diff =
732     Builder.CreateSub(SwitchInsn->getCondition(),
733                       llvm::ConstantInt::get(VMContext, LHS),  "tmp");
734   llvm::Value *Cond =
735     Builder.CreateICmpULE(Diff,
736                           llvm::ConstantInt::get(VMContext, Range), "tmp");
737   Builder.CreateCondBr(Cond, CaseDest, FalseDest);
738 
739   // Restore the appropriate insertion point.
740   if (RestoreBB)
741     Builder.SetInsertPoint(RestoreBB);
742   else
743     Builder.ClearInsertionPoint();
744 }
745 
746 void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
747   if (S.getRHS()) {
748     EmitCaseStmtRange(S);
749     return;
750   }
751 
752   EmitBlock(createBasicBlock("sw.bb"));
753   llvm::BasicBlock *CaseDest = Builder.GetInsertBlock();
754   llvm::APSInt CaseVal = S.getLHS()->EvaluateAsInt(getContext());
755   SwitchInsn->addCase(llvm::ConstantInt::get(VMContext, CaseVal), CaseDest);
756 
757   // Recursively emitting the statement is acceptable, but is not wonderful for
758   // code where we have many case statements nested together, i.e.:
759   //  case 1:
760   //    case 2:
761   //      case 3: etc.
762   // Handling this recursively will create a new block for each case statement
763   // that falls through to the next case which is IR intensive.  It also causes
764   // deep recursion which can run into stack depth limitations.  Handle
765   // sequential non-range case statements specially.
766   const CaseStmt *CurCase = &S;
767   const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
768 
769   // Otherwise, iteratively add consecutive cases to this switch stmt.
770   while (NextCase && NextCase->getRHS() == 0) {
771     CurCase = NextCase;
772     CaseVal = CurCase->getLHS()->EvaluateAsInt(getContext());
773     SwitchInsn->addCase(llvm::ConstantInt::get(VMContext, CaseVal), CaseDest);
774 
775     NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
776   }
777 
778   // Normal default recursion for non-cases.
779   EmitStmt(CurCase->getSubStmt());
780 }
781 
782 void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S) {
783   llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
784   assert(DefaultBlock->empty() &&
785          "EmitDefaultStmt: Default block already defined?");
786   EmitBlock(DefaultBlock);
787   EmitStmt(S.getSubStmt());
788 }
789 
790 void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
791   CleanupScope ConditionScope(*this);
792 
793   if (S.getConditionVariable())
794     EmitLocalBlockVarDecl(*S.getConditionVariable());
795 
796   llvm::Value *CondV = EmitScalarExpr(S.getCond());
797 
798   // Handle nested switch statements.
799   llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
800   llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
801 
802   // Create a basic block to hold stuff that comes after the switch
803   // statement. We also need to create a default block now so that
804   // explicit case range tests can have a place to jump to on
805   // failure.
806   llvm::BasicBlock *NextBlock = createBasicBlock("sw.epilog");
807   llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
808   SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
809   CaseRangeBlock = DefaultBlock;
810 
811   // Clear the insertion point to indicate we are in unreachable code.
812   Builder.ClearInsertionPoint();
813 
814   // All break statements jump to NextBlock. If BreakContinueStack is non-empty
815   // then reuse the last ContinueBlock.
816   llvm::BasicBlock *ContinueBlock = 0;
817   if (!BreakContinueStack.empty())
818     ContinueBlock = BreakContinueStack.back().ContinueBlock;
819 
820   // Ensure that any VLAs created between there and here are undone.
821   BreakContinueStack.push_back(BreakContinue(NextBlock, ContinueBlock));
822 
823   // Emit switch body.
824   EmitStmt(S.getBody());
825 
826   BreakContinueStack.pop_back();
827 
828   // Update the default block in case explicit case range tests have
829   // been chained on top.
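  // (Successor 0 of a SwitchInst is its default destination.)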
830   SwitchInsn->setSuccessor(0, CaseRangeBlock);
831 
832   // If a default was never emitted then reroute any jumps to it and
833   // discard it.
834   if (!DefaultBlock->getParent()) {
835     DefaultBlock->replaceAllUsesWith(NextBlock);
836     delete DefaultBlock;
837   }
838 
839   // Emit continuation.
840   EmitBlock(NextBlock, true);
841 
842   SwitchInsn = SavedSwitchInsn;
843   CaseRangeBlock = SavedCRBlock;
844 }
845 
846 static std::string
847 SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
848                  llvm::SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=0) {
849   std::string Result;
850 
851   while (*Constraint) {
852     switch (*Constraint) {
853     default:
854       Result += Target.convertConstraint(*Constraint);
855       break;
856     // Ignore these
857     case '*':
858     case '?':
859     case '!':
860       break;
861     case 'g':
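      // GCC's 'g' allows any general register, memory, or immediate operand;
      // expand it to the equivalent LLVM constraint letters.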
862       Result += "imr";
863       break;
864     case '[': {
865       assert(OutCons &&
866              "Must pass output names to constraints with a symbolic name");
867       unsigned Index;
868       bool result = Target.resolveSymbolicName(Constraint,
869                                                &(*OutCons)[0],
870                                                OutCons->size(), Index);
871       assert(result && "Could not resolve symbolic name"); (void)result;
872       Result += llvm::utostr(Index);
873       break;
874     }
875     }
876 
877     Constraint++;
878   }
879 
880   return Result;
881 }
882 
883 llvm::Value* CodeGenFunction::EmitAsmInput(const AsmStmt &S,
884                                          const TargetInfo::ConstraintInfo &Info,
885                                            const Expr *InputExpr,
886                                            std::string &ConstraintStr) {
887   llvm::Value *Arg;
888   if (Info.allowsRegister() || !Info.allowsMemory()) {
889     if (!CodeGenFunction::hasAggregateLLVMType(InputExpr->getType())) {
890       Arg = EmitScalarExpr(InputExpr);
891     } else {
892       InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
893       LValue Dest = EmitLValue(InputExpr);
894 
895       const llvm::Type *Ty = ConvertType(InputExpr->getType());
896       uint64_t Size = CGM.getTargetData().getTypeSizeInBits(Ty);
897       if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
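        // The aggregate is small enough to be passed in a register: load it as
        // a single integer of the same width through a pointer bitcast.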
898         Ty = llvm::IntegerType::get(VMContext, Size);
899         Ty = llvm::PointerType::getUnqual(Ty);
900 
901         Arg = Builder.CreateLoad(Builder.CreateBitCast(Dest.getAddress(), Ty));
902       } else {
903         Arg = Dest.getAddress();
904         ConstraintStr += '*';
905       }
906     }
907   } else {
908     InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
909     LValue Dest = EmitLValue(InputExpr);
910     Arg = Dest.getAddress();
911     ConstraintStr += '*';
912   }
913 
914   return Arg;
915 }
916 
917 void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
918   // Analyze the asm string to decompose it into its pieces.  We know that Sema
919   // has already done this, so it is guaranteed to be successful.
920   llvm::SmallVector<AsmStmt::AsmStringPiece, 4> Pieces;
921   unsigned DiagOffs;
922   S.AnalyzeAsmString(Pieces, getContext(), DiagOffs);
923 
924   // Assemble the pieces into the final asm string.
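  // Operand references use LLVM asm operand syntax: a piece referring to
  // operand N with no modifier becomes "$N", and one with modifier M becomes
  // "${N:M}".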
925   std::string AsmString;
926   for (unsigned i = 0, e = Pieces.size(); i != e; ++i) {
927     if (Pieces[i].isString())
928       AsmString += Pieces[i].getString();
929     else if (Pieces[i].getModifier() == '\0')
930       AsmString += '$' + llvm::utostr(Pieces[i].getOperandNo());
931     else
932       AsmString += "${" + llvm::utostr(Pieces[i].getOperandNo()) + ':' +
933                    Pieces[i].getModifier() + '}';
934   }
935 
936   // Get all the output and input constraints together.
937   llvm::SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
938   llvm::SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
939 
940   for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
941     TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i),
942                                     S.getOutputName(i));
943     bool IsValid = Target.validateOutputConstraint(Info); (void)IsValid;
944     assert(IsValid && "Failed to parse output constraint");
945     OutputConstraintInfos.push_back(Info);
946   }
947 
948   for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
949     TargetInfo::ConstraintInfo Info(S.getInputConstraint(i),
950                                     S.getInputName(i));
951     bool IsValid = Target.validateInputConstraint(OutputConstraintInfos.data(),
952                                                   S.getNumOutputs(), Info);
953     assert(IsValid && "Failed to parse input constraint"); (void)IsValid;
954     InputConstraintInfos.push_back(Info);
955   }
956 
957   std::string Constraints;
958 
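  // ResultRegTypes are the types the inline asm call actually returns;
  // ResultTruncRegTypes are the (possibly narrower) types we truncate the
  // results back to before storing them into ResultRegDests.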
959   std::vector<LValue> ResultRegDests;
960   std::vector<QualType> ResultRegQualTys;
961   std::vector<const llvm::Type *> ResultRegTypes;
962   std::vector<const llvm::Type *> ResultTruncRegTypes;
963   std::vector<const llvm::Type*> ArgTypes;
964   std::vector<llvm::Value*> Args;
965 
966   // Keep track of inout constraints.
967   std::string InOutConstraints;
968   std::vector<llvm::Value*> InOutArgs;
969   std::vector<const llvm::Type*> InOutArgTypes;
970 
971   for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
972     TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
973 
974     // Simplify the output constraint.
975     std::string OutputConstraint(S.getOutputConstraint(i));
976     OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1, Target);
977 
978     const Expr *OutExpr = S.getOutputExpr(i);
979     OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
980 
981     LValue Dest = EmitLValue(OutExpr);
982     if (!Constraints.empty())
983       Constraints += ',';
984 
985     // If this is a register output, then make the inline asm return it
986     // by-value.  If this is a memory result, return the value by-reference.
987     if (!Info.allowsMemory() && !hasAggregateLLVMType(OutExpr->getType())) {
988       Constraints += "=" + OutputConstraint;
989       ResultRegQualTys.push_back(OutExpr->getType());
990       ResultRegDests.push_back(Dest);
991       ResultRegTypes.push_back(ConvertTypeForMem(OutExpr->getType()));
992       ResultTruncRegTypes.push_back(ResultRegTypes.back());
993 
994       // If this output is tied to an input, and if the input is larger, then
995       // we need to set the actual result type of the inline asm node to be the
996       // same as the input type.
997       if (Info.hasMatchingInput()) {
998         unsigned InputNo;
999         for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
1000           TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
1001           if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
1002             break;
1003         }
1004         assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
1005 
1006         QualType InputTy = S.getInputExpr(InputNo)->getType();
1007         QualType OutputType = OutExpr->getType();
1008 
1009         uint64_t InputSize = getContext().getTypeSize(InputTy);
1010         if (getContext().getTypeSize(OutputType) < InputSize) {
1011           // Form the asm to return the value as a larger integer or fp type.
1012           ResultRegTypes.back() = ConvertType(InputTy);
1013         }
1014       }
1015     } else {
1016       ArgTypes.push_back(Dest.getAddress()->getType());
1017       Args.push_back(Dest.getAddress());
1018       Constraints += "=*";
1019       Constraints += OutputConstraint;
1020     }
1021 
1022     if (Info.isReadWrite()) {
1023       InOutConstraints += ',';
1024 
1025       const Expr *InputExpr = S.getOutputExpr(i);
1026       llvm::Value *Arg = EmitAsmInput(S, Info, InputExpr, InOutConstraints);
1027 
1028       if (Info.allowsRegister())
1029         InOutConstraints += llvm::utostr(i);
1030       else
1031         InOutConstraints += OutputConstraint;
1032 
1033       InOutArgTypes.push_back(Arg->getType());
1034       InOutArgs.push_back(Arg);
1035     }
1036   }
1037 
1038   unsigned NumConstraints = S.getNumOutputs() + S.getNumInputs();
1039 
1040   for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
1041     const Expr *InputExpr = S.getInputExpr(i);
1042 
1043     TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
1044 
1045     if (!Constraints.empty())
1046       Constraints += ',';
1047 
1048     // Simplify the input constraint.
1049     std::string InputConstraint(S.getInputConstraint(i));
1050     InputConstraint = SimplifyConstraint(InputConstraint.c_str(), Target,
1051                                          &OutputConstraintInfos);
1052 
1053     llvm::Value *Arg = EmitAsmInput(S, Info, InputExpr, Constraints);
1054 
1055     // If this input argument is tied to a larger output result, extend the
1056     // input to be the same size as the output.  The LLVM backend wants to see
1057     // the input and output of a matching constraint be the same size.  Note
1058     // that GCC does not define what the top bits are here.  We use zext because
1059     // that is usually cheaper, but LLVM IR should really get an anyext someday.
1060     if (Info.hasTiedOperand()) {
1061       unsigned Output = Info.getTiedOperand();
1062       QualType OutputType = S.getOutputExpr(Output)->getType();
1063       QualType InputTy = InputExpr->getType();
1064 
1065       if (getContext().getTypeSize(OutputType) >
1066           getContext().getTypeSize(InputTy)) {
1067         // Use ptrtoint as appropriate so that we can do our extension.
1068         if (isa<llvm::PointerType>(Arg->getType()))
1069           Arg = Builder.CreatePtrToInt(Arg,
1070                            llvm::IntegerType::get(VMContext, LLVMPointerWidth));
1071         const llvm::Type *OutputTy = ConvertType(OutputType);
1072         if (isa<llvm::IntegerType>(OutputTy))
1073           Arg = Builder.CreateZExt(Arg, OutputTy);
1074         else
1075           Arg = Builder.CreateFPExt(Arg, OutputTy);
1076       }
1077     }
1078 
1079 
1080     ArgTypes.push_back(Arg->getType());
1081     Args.push_back(Arg);
1082     Constraints += InputConstraint;
1083   }
1084 
1085   // Append the "input" part of inout constraints last.
1086   for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
1087     ArgTypes.push_back(InOutArgTypes[i]);
1088     Args.push_back(InOutArgs[i]);
1089   }
1090   Constraints += InOutConstraints;
1091 
1092   // Clobbers
1093   for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
1094     llvm::StringRef Clobber = S.getClobber(i)->getString();
1095 
1096     Clobber = Target.getNormalizedGCCRegisterName(Clobber);
1097 
1098     if (i != 0 || NumConstraints != 0)
1099       Constraints += ',';
1100 
1101     Constraints += "~{";
1102     Constraints += Clobber;
1103     Constraints += '}';
1104   }
1105 
1106   // Add machine specific clobbers
1107   std::string MachineClobbers = Target.getClobbers();
1108   if (!MachineClobbers.empty()) {
1109     if (!Constraints.empty())
1110       Constraints += ',';
1111     Constraints += MachineClobbers;
1112   }
1113 
1114   const llvm::Type *ResultType;
1115   if (ResultRegTypes.empty())
1116     ResultType = llvm::Type::getVoidTy(VMContext);
1117   else if (ResultRegTypes.size() == 1)
1118     ResultType = ResultRegTypes[0];
1119   else
1120     ResultType = llvm::StructType::get(VMContext, ResultRegTypes);
1121 
1122   const llvm::FunctionType *FTy =
1123     llvm::FunctionType::get(ResultType, ArgTypes, false);
1124 
1125   llvm::InlineAsm *IA =
1126     llvm::InlineAsm::get(FTy, AsmString, Constraints,
1127                          S.isVolatile() || S.getNumOutputs() == 0);
1128   llvm::CallInst *Result = Builder.CreateCall(IA, Args.begin(), Args.end());
1129   Result->addAttribute(~0, llvm::Attribute::NoUnwind);
1130 
1131   // Slap the source location of the inline asm into a !srcloc metadata on the
1132   // call.
1133   unsigned LocID = S.getAsmString()->getLocStart().getRawEncoding();
1134   llvm::Value *LocIDC =
1135     llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), LocID);
1136   Result->setMetadata("srcloc", llvm::MDNode::get(VMContext, &LocIDC, 1));
1137 
1138   // Extract all of the register value results from the asm.
1139   std::vector<llvm::Value*> RegResults;
1140   if (ResultRegTypes.size() == 1) {
1141     RegResults.push_back(Result);
1142   } else {
1143     for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
1144       llvm::Value *Tmp = Builder.CreateExtractValue(Result, i, "asmresult");
1145       RegResults.push_back(Tmp);
1146     }
1147   }
1148 
1149   for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
1150     llvm::Value *Tmp = RegResults[i];
1151 
1152     // If the result type of the LLVM IR asm doesn't match the result type of
1153     // the expression, do the conversion.
1154     if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
1155       const llvm::Type *TruncTy = ResultTruncRegTypes[i];
1156 
1157       // Truncate the integer result to the right size; note that TruncTy can be
1158       // a pointer.
1159       if (TruncTy->isFloatingPointTy())
1160         Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
1161       else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
1162         uint64_t ResSize = CGM.getTargetData().getTypeSizeInBits(TruncTy);
1163         Tmp = Builder.CreateTrunc(Tmp, llvm::IntegerType::get(VMContext,
1164                                                             (unsigned)ResSize));
1165         Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
1166       } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
1167         uint64_t TmpSize =CGM.getTargetData().getTypeSizeInBits(Tmp->getType());
1168         Tmp = Builder.CreatePtrToInt(Tmp, llvm::IntegerType::get(VMContext,
1169                                                             (unsigned)TmpSize));
1170         Tmp = Builder.CreateTrunc(Tmp, TruncTy);
1171       } else if (TruncTy->isIntegerTy()) {
1172         Tmp = Builder.CreateTrunc(Tmp, TruncTy);
1173       }
1174     }
1175 
1176     EmitStoreThroughLValue(RValue::get(Tmp), ResultRegDests[i],
1177                            ResultRegQualTys[i]);
1178   }
1179 }
1180