//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGException.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
  : CodeGenTypeCache(cgm), CGM(cgm),
    Target(CGM.getContext().Target), Builder(cgm.getModule().getContext()),
    BlockInfo(0), BlockPointer(0),
    NormalCleanupDest(0), EHCleanupDest(0), NextCleanupDestIndex(1),
    ExceptionSlot(0), DebugInfo(0), IndirectBranch(0),
    SwitchInsn(0), CaseRangeBlock(0),
    DidCallStackSave(false), UnreachableBlock(0),
    CXXThisDecl(0), CXXThisValue(0), CXXVTTDecl(0), CXXVTTValue(0),
    OutermostConditional(0), TerminateLandingPad(0), TerminateHandler(0),
    TrapBB(0) {

  CatchUndefined = getContext().getLangOptions().CatchUndefined;
  CGM.getCXXABI().getMangleContext().startNewFunction();
}

ASTContext &CodeGenFunction::getContext() const {
  return CGM.getContext();
}


const llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

const llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

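/// hasAggregateLLVMType - Return true if the specified AST type is lowered as
/// an aggregate in LLVM IR: records, arrays, complex types, and Objective-C
/// object types.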
bool CodeGenFunction::hasAggregateLLVMType(QualType T) {
  return T->isRecordType() || T->isArrayType() || T->isAnyComplexType() ||
    T->isObjCObjectType();
}

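/// EmitReturnBlock - Emit the unified return block, trying to avoid emitting
/// it as a separate block (or emitting it at all) when it is not needed.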
void CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point; reuse it if it is empty or there are no
    // explicit jumps to the return block.
    if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
      ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
      delete ReturnBlock.getBlock();
    } else
      EmitBlock(ReturnBlock.getBlock());
    return;
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock.getBlock()->hasOneUse()) {
    llvm::BranchInst *BI =
      dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->use_begin());
    if (BI && BI->isUnconditional() &&
        BI->getSuccessor(0) == ReturnBlock.getBlock()) {
      // Reset insertion point and delete the branch.
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.getBlock();
      return;
    }
  }

  // FIXME: We are at an unreachable point; there is no reason to emit the
  // block unless it has uses. However, we still need a place to put the
  // debug region.end for now.

  EmitBlock(ReturnBlock.getBlock());
}

static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  if (!BB) return;
  if (!BB->use_empty())
    return CGF.CurFn->getBasicBlockList().push_back(BB);
  delete BB;
}

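/// FinishFunction - Complete IR generation of the current function: emit the
/// return block and function epilog, finish debug info, and tear down
/// per-function state such as the alloca insertion point.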
void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");

  // Emit function epilog (to return).
  EmitReturnBlock();

  if (ShouldInstrumentFunction())
    EmitFunctionInstrumentation("__cyg_profile_func_exit");

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo()) {
    DI->setLocation(EndLoc);
    DI->EmitFunctionEnd(Builder);
  }

  EmitFunctionEpilog(*CurFnInfo);
  EmitEndEHSpec(CurCodeDecl);

  assert(EHStack.empty() &&
         "did not remove all scopes from cleanup stack!");

  // If someone did an indirect goto, emit the indirect goto block at the end of
  // the function.
  if (IndirectBranch) {
    EmitBlock(IndirectBranch->getParent());
    Builder.ClearInsertionPoint();
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = 0;
  Ptr->eraseFromParent();

  // If someone took the address of a label but never did an indirect goto, we
  // made a zero-entry PHI node, which is illegal; zap it now.
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  EmitIfUsed(*this, RethrowBlock.getBlock());
  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();
}

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls.
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions)
    return false;
  if (CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}

/// EmitFunctionInstrumentation - Emit LLVM code to call the specified
/// instrumentation function with the current function and the call site, if
/// function instrumentation is enabled.
void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
  const llvm::PointerType *PointerTy;
  const llvm::FunctionType *FunctionTy;
  std::vector<const llvm::Type*> ProfileFuncArgs;

  // void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site);
  PointerTy = Int8PtrTy;
  ProfileFuncArgs.push_back(PointerTy);
  ProfileFuncArgs.push_back(PointerTy);
  FunctionTy = llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
                                       ProfileFuncArgs, false);

  llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn);
  llvm::CallInst *CallSite = Builder.CreateCall(
    CGM.getIntrinsic(llvm::Intrinsic::returnaddress, 0, 0),
    llvm::ConstantInt::get(Int32Ty, 0),
    "callsite");

  Builder.CreateCall2(F,
                      llvm::ConstantExpr::getBitCast(CurFn, PointerTy),
                      CallSite);
}

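/// EmitMCountInstrumentation - Emit a call to the target's mcount-style
/// profiling hook; used when profiling instrumentation is enabled.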
void CodeGenFunction::EmitMCountInstrumentation() {
  llvm::FunctionType *FTy =
    llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()), false);

  llvm::Constant *MCountFn = CGM.CreateRuntimeFunction(FTy,
                                                       Target.getMCountName());
  Builder.CreateCall(MCountFn);
}

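/// StartFunction - Emit the standard function prologue: set up per-function
/// state, create the entry block and the alloca insertion point marker, emit
/// debug info and instrumentation calls, and emit the ABI-specific prolog for
/// the arguments and return value.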
void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
                                    llvm::Function *Fn,
                                    const FunctionArgList &Args,
                                    SourceLocation StartLoc,
                                    CallingConv CC) {
  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = CurFuncDecl = D;
  FnRetTy = RetTy;
  CurFn = Fn;
  assert(CurFn->isDeclaration() && "Function already has body?");

  // Pass inline keyword to optimizer if it appears explicitly on any
  // declaration.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
    for (FunctionDecl::redecl_iterator RI = FD->redecls_begin(),
           RE = FD->redecls_end(); RI != RE; ++RI)
      if (RI->isInlineSpecified()) {
        Fn->addFnAttr(llvm::Attribute::InlineHint);
        break;
      }

  if (getContext().getLangOptions().OpenCL) {
    // Add metadata for a kernel function.
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      if (FD->hasAttr<OpenCLKernelAttr>()) {
        llvm::LLVMContext &Context = getLLVMContext();
        llvm::NamedMDNode *OpenCLMetadata =
          CGM.getModule().getOrInsertNamedMetadata("opencl.kernels");

        llvm::Value *Op = Fn;
        OpenCLMetadata->addOperand(llvm::MDNode::get(Context, &Op, 1));
      }
  }

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entry block
  // later.  Don't create this with the builder, because we don't want it
  // folded.
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "", EntryBB);
  if (Builder.isNamePreserving())
    AllocaInsertPt->setName("allocapt");

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    // FIXME: what is going on here and why does it ignore all these
    // interesting type properties?
    QualType FnType =
      getContext().getFunctionType(RetTy, 0, 0,
                                   FunctionProtoType::ExtProtoInfo());

    DI->setLocation(StartLoc);
    DI->EmitFunctionStart(GD, FnType, CurFn, Builder);
  }

  if (ShouldInstrumentFunction())
    EmitFunctionInstrumentation("__cyg_profile_func_enter");

  if (CGM.getCodeGenOpts().InstrumentForProfiling)
    EmitMCountInstrumentation();

  // FIXME: Leaked.
  // CC info is ignored, hopefully?
  CurFnInfo = &CGM.getTypes().getFunctionInfo(FnRetTy, Args,
                                              FunctionType::ExtInfo().withCallingConv(CC));

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = 0;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
             hasAggregateLLVMType(CurFnInfo->getReturnType())) {
    // Indirect aggregate return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
    ReturnValue = CurFn->arg_begin();
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");
  }

  EmitStartEHSpec(CurCodeDecl);
  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance())
    CGM.getCXXABI().EmitInstanceFunctionProlog(*this);

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size.
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i) {
    QualType Ty = i->second;

    if (Ty->isVariablyModifiedType())
      EmitVLASize(Ty);
  }
}

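/// EmitFunctionBody - Emit the body of the current function; constructors and
/// destructors are dispatched separately by GenerateCode.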
void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args) {
  const FunctionDecl *FD = cast<FunctionDecl>(CurGD.getDecl());
  assert(FD->getBody());
  EmitStmt(FD->getBody());
}

/// Tries to mark the given function nounwind based on the
/// non-existence of any throwing calls within it.  We believe this is
/// lightweight enough to do at -O0.
static void TryMarkNoThrow(llvm::Function *F) {
  // LLVM treats 'nounwind' on a function as part of the type, so we
  // can't do this on functions that can be overridden.
  if (F->mayBeOverridden()) return;

  for (llvm::Function::iterator FI = F->begin(), FE = F->end(); FI != FE; ++FI)
    for (llvm::BasicBlock::iterator
           BI = FI->begin(), BE = FI->end(); BI != BE; ++BI)
      if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(&*BI))
        if (!Call->doesNotThrow())
          return;
  F->setDoesNotThrow(true);
}

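/// GenerateCode - Emit the body of the given function declaration into the
/// given LLVM function, bracketed by the standard prologue and epilogue.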
void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  // Check if we should generate debug info for this function.
  if (CGM.getDebugInfo() && !FD->hasAttr<NoDebugAttr>())
    DebugInfo = CGM.getDebugInfo();

  FunctionArgList Args;
  QualType ResTy = FD->getResultType();

  CurGD = GD;
  if (isa<CXXMethodDecl>(FD) && cast<CXXMethodDecl>(FD)->isInstance())
    CGM.getCXXABI().BuildInstanceFunctionParams(*this, ResTy, Args);

  if (FD->getNumParams()) {
    const FunctionProtoType* FProto = FD->getType()->getAs<FunctionProtoType>();
    assert(FProto && "Function def must have prototype!");

    for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i)
      Args.push_back(std::make_pair(FD->getParamDecl(i),
                                    FProto->getArgType(i)));
  }

  SourceRange BodyRange;
  if (Stmt *Body = FD->getBody()) BodyRange = Body->getSourceRange();

  // Emit the standard function prologue.
  StartFunction(GD, ResTy, Fn, Args, BodyRange.getBegin(), CC_Default);

  // Generate the body of the function.
  if (isa<CXXDestructorDecl>(FD))
    EmitDestructorBody(Args);
  else if (isa<CXXConstructorDecl>(FD))
    EmitConstructorBody(Args);
  else
    EmitFunctionBody(Args);

  // Emit the standard function epilogue.
  FinishFunction(BodyRange.getEnd());

  // If we haven't marked the function nothrow through other means, do
  // a quick pass now to see if we can.
  if (!CurFn->doesNotThrow())
    TryMarkNoThrow(CurFn);
}

/// ContainsLabel - Return true if the statement contains a label in it.  If
/// this statement is not executed normally, then not containing a label means
/// that we can just remove the code.
bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
  // Null statement, not a label!
  if (S == 0) return false;

  // If this is a label, we have to emit the code; consider something like:
  // if (0) {  ...  foo:  bar(); }  goto foo;
  //
  // TODO: If anyone cared, we could track __label__'s, since we know that you
  // can't jump to one from outside their declared region.
  if (isa<LabelStmt>(S))
    return true;

  // If this is a case/default statement, and we haven't seen a switch, we have
  // to emit the code.
  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
    return true;

  // If this is a switch statement, we want to ignore cases below it.
  if (isa<SwitchStmt>(S))
    IgnoreCaseStmts = true;

  // Scan subexpressions for verboten labels.
  for (Stmt::const_child_range I = S->children(); I; ++I)
    if (ContainsLabel(*I, IgnoreCaseStmts))
      return true;

  return false;
}

/// containsBreak - Return true if the statement contains a break out of it.
/// If the statement (recursively) contains a switch or loop with a break
/// inside of it, this is fine.
bool CodeGenFunction::containsBreak(const Stmt *S) {
  // Null statement, not a break!
  if (S == 0) return false;

  // If this is a switch or loop that defines its own break scope, then we can
  // include it and anything inside of it.
  if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
      isa<ForStmt>(S))
    return false;

  if (isa<BreakStmt>(S))
    return true;

  // Scan subexpressions for verboten breaks.
  for (Stmt::const_child_range I = S->children(); I; ++I)
    if (containsBreak(*I))
      return true;

  return false;
}


/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false.  If it
/// constant folds, return true and set the boolean result in ResultBool.
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
                                                   bool &ResultBool) {
  llvm::APInt ResultInt;
  if (!ConstantFoldsToSimpleInteger(Cond, ResultInt))
    return false;

  ResultBool = ResultInt.getBoolValue();
  return true;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false.  If it
/// constant folds, return true and set the folded value in ResultInt.
bool CodeGenFunction::
ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APInt &ResultInt) {
  // FIXME: Rename and handle conversion of other evaluatable things
  // to bool.
  Expr::EvalResult Result;
  if (!Cond->Evaluate(Result, getContext()) || !Result.Val.isInt() ||
      Result.HasSideEffects)
    return false;  // Not foldable, not an integer, or not fully evaluatable.

  if (CodeGenFunction::ContainsLabel(Cond))
    return false;  // Contains a label.

  ResultInt = Result.Val.getInt();
  return true;
}



/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
/// statement) to the specified blocks.  Based on the condition, this might try
/// to simplify the codegen of the conditional based on the branch.
///
void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
                                           llvm::BasicBlock *TrueBlock,
                                           llvm::BasicBlock *FalseBlock) {
  if (const ParenExpr *PE = dyn_cast<ParenExpr>(Cond))
    return EmitBranchOnBoolExpr(PE->getSubExpr(), TrueBlock, FalseBlock);

  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
    // Handle X && Y in a condition.
    if (CondBOp->getOpcode() == BO_LAnd) {
      // If we have "1 && X", simplify the code.  "0 && X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          ConstantBool) {
        // br(1 && X) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      }

      // If we have "X && 1", simplify the code to use an uncond branch.
      // "X && 0" would have been constant folded to 0.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          ConstantBool) {
        // br(X && 1) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
      }

      // Emit the LHS as a conditional.  If the LHS conditional is false, we
      // want to jump to the FalseBlock.
      llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");

      ConditionalEvaluation eval(*this);
      EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock);
      EmitBlock(LHSTrue);

      // Any temporaries created here are conditional.
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      eval.end(*this);

      return;
    }

    if (CondBOp->getOpcode() == BO_LOr) {
      // If we have "0 || X", simplify the code.  "1 || X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          !ConstantBool) {
        // br(0 || X) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      }

      // If we have "X || 0", simplify the code to use an uncond branch.
      // "X || 1" would have been constant folded to 1.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          !ConstantBool) {
        // br(X || 0) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
      }

      // Emit the LHS as a conditional.  If the LHS conditional is true, we
      // want to jump to the TrueBlock.
      llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");

      ConditionalEvaluation eval(*this);
      EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse);
      EmitBlock(LHSFalse);

      // Any temporaries created here are conditional.
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      eval.end(*this);

      return;
    }
  }

  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
    // br(!x, t, f) -> br(x, f, t)
    if (CondUOp->getOpcode() == UO_LNot)
      return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock);
  }

  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
    // Handle ?: operator.

    // Just ignore GNU ?: extension.
    if (CondOp->getLHS()) {
      // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
      llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
      llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");

      ConditionalEvaluation cond(*this);
      EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock);

      cond.begin(*this);
      EmitBlock(LHSBlock);
      EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock);
      cond.end(*this);

      cond.begin(*this);
      EmitBlock(RHSBlock);
      EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock);
      cond.end(*this);

      return;
    }
  }

  // Emit the code with the fully general case.
  llvm::Value *CondV = EvaluateExprAsBool(Cond);
  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock);
}

/// ErrorUnsupported - Print out an error that codegen doesn't support the
/// specified stmt yet.
void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type,
                                       bool OmitOnError) {
  CGM.ErrorUnsupported(S, Type, OmitOnError);
}

/// emitNonZeroVLAInit - Emit the "zero" initialization of a
/// variable-length array whose elements have a non-zero bit-pattern.
///
/// \param baseType - the base element type of the array
/// \param dest - a pointer to the VLA to be initialized
/// \param src - a char* pointing to the bit-pattern for a single
/// base element of the array
/// \param sizeInChars - the total size of the VLA, in chars
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
                               llvm::Value *dest, llvm::Value *src,
                               llvm::Value *sizeInChars) {
  std::pair<CharUnits,CharUnits> baseSizeAndAlign
    = CGF.getContext().getTypeInfoInChars(baseType);

  CGBuilderTy &Builder = CGF.Builder;

  llvm::Value *baseSizeInChars
    = llvm::ConstantInt::get(CGF.IntPtrTy, baseSizeAndAlign.first.getQuantity());

  const llvm::Type *i8p = Builder.getInt8PtrTy();

  llvm::Value *begin = Builder.CreateBitCast(dest, i8p, "vla.begin");
  llvm::Value *end = Builder.CreateInBoundsGEP(dest, sizeInChars, "vla.end");

  llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
  llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");

  // Make a loop over the VLA.  C99 guarantees that the VLA element
  // count must be nonzero.
  CGF.EmitBlock(loopBB);

  llvm::PHINode *cur = Builder.CreatePHI(i8p, "vla.cur");
  cur->reserveOperandSpace(2);
  cur->addIncoming(begin, originBB);

  // memcpy the individual element bit-pattern.
  Builder.CreateMemCpy(cur, src, baseSizeInChars,
                       baseSizeAndAlign.second.getQuantity(),
                       /*volatile*/ false);

  // Go to the next element.
  llvm::Value *next = Builder.CreateConstInBoundsGEP1_32(cur, 1, "vla.next");

  // Leave if that's the end of the VLA.
  llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
  Builder.CreateCondBr(done, contBB, loopBB);
  cur->addIncoming(next, loopBB);

  CGF.EmitBlock(contBB);
}

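/// EmitNullInitialization - Fill the given memory with the "null" value of the
/// given type, using memset when that value is all zeros and a copy from a
/// null constant otherwise (e.g. for types containing pointers to data
/// members).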
void
CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
  // Ignore empty classes in C++.
  if (getContext().getLangOptions().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
        return;
    }
  }

  // Cast the dest ptr to the appropriate i8 pointer type.
  unsigned DestAS =
    cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
  const llvm::Type *BP = Builder.getInt8PtrTy(DestAS);
  if (DestPtr->getType() != BP)
    DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");

  // Get size and alignment info for this aggregate.
  std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty);
  uint64_t Size = TypeInfo.first / 8;
  unsigned Align = TypeInfo.second / 8;

  llvm::Value *SizeVal;
  const VariableArrayType *vla;

  // Don't bother emitting a zero-byte memset.
  if (Size == 0) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (const VariableArrayType *vlaType =
          dyn_cast_or_null<VariableArrayType>(
                                          getContext().getAsArrayType(Ty))) {
      SizeVal = GetVLASize(vlaType);
      vla = vlaType;
    } else {
      return;
    }
  } else {
    SizeVal = llvm::ConstantInt::get(IntPtrTy, Size);
    vla = 0;
  }

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  if (!CGM.getTypes().isZeroInitializable(Ty)) {
    // For a VLA, emit a single element, then splat that over the VLA.
    if (vla) Ty = getContext().getBaseElementType(vla);

    llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);

    llvm::GlobalVariable *NullVariable =
      new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
                               /*isConstant=*/true,
                               llvm::GlobalVariable::PrivateLinkage,
                               NullConstant, llvm::Twine());
    llvm::Value *SrcPtr =
      Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy());

    if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);

    // Get and call the appropriate llvm.memcpy overload.
    Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align, false);
    return;
  }

  // Otherwise, just memset the whole thing to zero.  This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, Align, false);
}

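/// GetAddrOfLabel - Return the address of the given label for use by the GNU
/// address-of-label extension, registering its block with the function's
/// indirect branch.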
llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
  // Make sure that there is a block for the indirect goto.
  if (IndirectBranch == 0)
    GetIndirectGotoBlock();

  llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();

  // Make sure the indirect branch includes all of the address-taken blocks.
  IndirectBranch->addDestination(BB);
  return llvm::BlockAddress::get(CurFn, BB);
}

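/// GetIndirectGotoBlock - Lazily create and return the single block containing
/// the indirect branch that all indirect gotos in this function jump through.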
llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
  // If we already made the indirect branch for indirect goto, return its block.
  if (IndirectBranch) return IndirectBranch->getParent();

  CGBuilderTy TmpBuilder(createBasicBlock("indirectgoto"));

  // Create the PHI node that indirect gotos will add entries to.
  llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, "indirect.goto.dest");

  // Create the indirect branch instruction.
  IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
  return IndirectBranch->getParent();
}

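/// GetVLASize - Return the previously emitted size of the given variable array
/// type; EmitVLASize must already have been called for it.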
llvm::Value *CodeGenFunction::GetVLASize(const VariableArrayType *VAT) {
  llvm::Value *&SizeEntry = VLASizeMap[VAT->getSizeExpr()];

  assert(SizeEntry && "Did not emit size for type");
  return SizeEntry;
}

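/// EmitVLASize - Emit and cache the sizes of any variable-length arrays nested
/// in the given variably modified type.  Returns the size of the VLA in chars
/// when Ty is itself a variable array type, and null otherwise.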
llvm::Value *CodeGenFunction::EmitVLASize(QualType Ty) {
  assert(Ty->isVariablyModifiedType() &&
         "Must pass variably modified type to EmitVLASizes!");

  EnsureInsertPoint();

  if (const VariableArrayType *VAT = getContext().getAsVariableArrayType(Ty)) {
    // An unknown size indication requires no size computation.
    if (!VAT->getSizeExpr())
      return 0;
    llvm::Value *&SizeEntry = VLASizeMap[VAT->getSizeExpr()];

    if (!SizeEntry) {
      const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());

      // Get the element size.
      QualType ElemTy = VAT->getElementType();
      llvm::Value *ElemSize;
      if (ElemTy->isVariableArrayType())
        ElemSize = EmitVLASize(ElemTy);
      else
        ElemSize = llvm::ConstantInt::get(SizeTy,
            getContext().getTypeSizeInChars(ElemTy).getQuantity());

      llvm::Value *NumElements = EmitScalarExpr(VAT->getSizeExpr());
      NumElements = Builder.CreateIntCast(NumElements, SizeTy, false, "tmp");

      SizeEntry = Builder.CreateMul(ElemSize, NumElements);
    }

    return SizeEntry;
  }

  if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
    EmitVLASize(AT->getElementType());
    return 0;
  }

  if (const ParenType *PT = dyn_cast<ParenType>(Ty)) {
    EmitVLASize(PT->getInnerType());
    return 0;
  }

  const PointerType *PT = Ty->getAs<PointerType>();
  assert(PT && "unknown VM type!");
  EmitVLASize(PT->getPointeeType());
  return 0;
}

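/// EmitVAListRef - Emit a reference to a va_list: the expression's value when
/// the builtin va_list is an array type (and so decays to a pointer), and the
/// address of the lvalue otherwise.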
llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) {
  if (getContext().getBuiltinVaListType()->isArrayType())
    return EmitScalarExpr(E);
  return EmitLValue(E).getAddress();
}

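/// EmitDeclRefExprDbgValue - Emit global variable debug info for the
/// declaration referenced by E, using the given constant initializer.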
void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
                                              llvm::Constant *Init) {
  assert(Init && "Invalid DeclRefExpr initializer!");
  if (CGDebugInfo *Dbg = getDebugInfo())
    Dbg->EmitGlobalVariable(E->getDecl(), Init);
}

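/// protectFromPeepholes - Protect a scalar rvalue from being picked up by
/// IR-gen peepholes (currently just trunc(zext) folding) by inserting an
/// extra bitcast over it; unprotectFromPeepholes removes that bitcast again.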
CodeGenFunction::PeepholeProtection
CodeGenFunction::protectFromPeepholes(RValue rvalue) {
  // At the moment, the only aggressive peephole we do in IR gen
  // is trunc(zext) folding, but if we add more, we can easily
  // extend this protection.

  if (!rvalue.isScalar()) return PeepholeProtection();
  llvm::Value *value = rvalue.getScalarVal();
  if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();

  // Just make an extra bitcast.
  assert(HaveInsertPoint());
  llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
                                                  Builder.GetInsertBlock());

  PeepholeProtection protection;
  protection.Inst = inst;
  return protection;
}

void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
  if (!protection.Inst) return;

  // In theory, we could try to duplicate the peepholes now, but whatever.
  protection.Inst->eraseFromParent();
}