1 //===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This coordinates the per-function state used while generating code.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CodeGenFunction.h"
15 #include "CodeGenModule.h"
16 #include "CGDebugInfo.h"
17 #include "CGException.h"
18 #include "clang/Basic/TargetInfo.h"
19 #include "clang/AST/APValue.h"
20 #include "clang/AST/ASTContext.h"
21 #include "clang/AST/Decl.h"
22 #include "clang/AST/DeclCXX.h"
23 #include "clang/AST/StmtCXX.h"
24 #include "clang/Frontend/CodeGenOptions.h"
25 #include "llvm/Target/TargetData.h"
26 #include "llvm/Intrinsics.h"
27 using namespace clang;
28 using namespace CodeGen;
29 
30 CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
31   : BlockFunction(cgm, *this, Builder), CGM(cgm),
32     Target(CGM.getContext().Target),
33     Builder(cgm.getModule().getContext()),
34     ExceptionSlot(0), DebugInfo(0), IndirectBranch(0),
35     SwitchInsn(0), CaseRangeBlock(0), InvokeDest(0),
36     DidCallStackSave(false), UnreachableBlock(0),
37     CXXThisDecl(0), CXXThisValue(0), CXXVTTDecl(0), CXXVTTValue(0),
38     ConditionalBranchLevel(0), TerminateLandingPad(0), TerminateHandler(0),
39     TrapBB(0) {
40 
41   // Get some frequently used types.
42   LLVMPointerWidth = Target.getPointerWidth(0);
43   llvm::LLVMContext &LLVMContext = CGM.getLLVMContext();
44   IntPtrTy = llvm::IntegerType::get(LLVMContext, LLVMPointerWidth);
45   Int32Ty  = llvm::Type::getInt32Ty(LLVMContext);
46   Int64Ty  = llvm::Type::getInt64Ty(LLVMContext);
47 
48   Exceptions = getContext().getLangOptions().Exceptions;
49   CatchUndefined = getContext().getLangOptions().CatchUndefined;
50   CGM.getMangleContext().startNewFunction();
51 }
52 
53 ASTContext &CodeGenFunction::getContext() const {
54   return CGM.getContext();
55 }
56 
57 
58 llvm::Value *CodeGenFunction::GetAddrOfLocalVar(const VarDecl *VD) {
59   llvm::Value *Res = LocalDeclMap[VD];
60   assert(Res && "Invalid argument to GetAddrOfLocalVar(), no decl!");
61   return Res;
62 }
63 
64 llvm::Constant *
65 CodeGenFunction::GetAddrOfStaticLocalVar(const VarDecl *BVD) {
66   return cast<llvm::Constant>(GetAddrOfLocalVar(BVD));
67 }
68 
69 const llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
70   return CGM.getTypes().ConvertTypeForMem(T);
71 }
72 
73 const llvm::Type *CodeGenFunction::ConvertType(QualType T) {
74   return CGM.getTypes().ConvertType(T);
75 }
76 
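/// hasAggregateLLVMType - Return true if the given type is one that codegen
/// treats as an aggregate, i.e. a value manipulated in memory rather than as
/// a scalar rvalue: records, arrays, complex types, and member function
/// pointers.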
77 bool CodeGenFunction::hasAggregateLLVMType(QualType T) {
78   return T->isRecordType() || T->isArrayType() || T->isAnyComplexType() ||
79     T->isMemberFunctionPointerType();
80 }
81 
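/// EmitReturnBlock - Emit the unified return block, trying to fold it into
/// the current insertion point or its single predecessor when possible so
/// that simple functions don't get a separate return block.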
82 void CodeGenFunction::EmitReturnBlock() {
83   // For cleanliness, we try to avoid emitting the return block for
84   // simple cases.
85   llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
86 
87   if (CurBB) {
88     assert(!CurBB->getTerminator() && "Unexpected terminated block.");
89 
    // We have a valid insert point; reuse it if it is empty or there are no
    // explicit jumps to the return block.
92     if (CurBB->empty() || ReturnBlock.Block->use_empty()) {
93       ReturnBlock.Block->replaceAllUsesWith(CurBB);
94       delete ReturnBlock.Block;
95     } else
96       EmitBlock(ReturnBlock.Block);
97     return;
98   }
99 
  // Otherwise, if the return block is the target of a single direct branch,
  // then we can just put the code in that block instead.  This cleans up
  // functions that started with a unified return block.
103   if (ReturnBlock.Block->hasOneUse()) {
104     llvm::BranchInst *BI =
105       dyn_cast<llvm::BranchInst>(*ReturnBlock.Block->use_begin());
106     if (BI && BI->isUnconditional() &&
107         BI->getSuccessor(0) == ReturnBlock.Block) {
108       // Reset insertion point and delete the branch.
109       Builder.SetInsertPoint(BI->getParent());
110       BI->eraseFromParent();
111       delete ReturnBlock.Block;
112       return;
113     }
114   }
115 
  // FIXME: We are at an unreachable point; there is no reason to emit the
  // block unless it has uses.  However, we still need a place to put the
  // debug region.end for now.
119 
120   EmitBlock(ReturnBlock.Block);
121 }
122 
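/// EmitIfUsed - Append the block to the current function if anything
/// references it; otherwise just delete it.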
123 static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
124   if (!BB) return;
125   if (!BB->use_empty())
126     return CGF.CurFn->getBasicBlockList().push_back(BB);
127   delete BB;
128 }
129 
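/// FinishFunction - Complete IR generation of the current function: emit the
/// return block and function epilog, pop any remaining EH scope state, remove
/// the alloca insertion-point marker, and emit the deferred terminate and
/// unreachable blocks if anything used them.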
130 void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
131   assert(BreakContinueStack.empty() &&
132          "mismatched push/pop in break/continue stack!");
133 
134   // Emit function epilog (to return).
135   EmitReturnBlock();
136 
137   EmitFunctionInstrumentation("__cyg_profile_func_exit");
138 
139   // Emit debug descriptor for function end.
140   if (CGDebugInfo *DI = getDebugInfo()) {
141     DI->setLocation(EndLoc);
142     DI->EmitRegionEnd(CurFn, Builder);
143   }
144 
145   EmitFunctionEpilog(*CurFnInfo);
146   EmitEndEHSpec(CurCodeDecl);
147 
148   assert(EHStack.empty() &&
149          "did not remove all scopes from cleanup stack!");
150 
151   // If someone did an indirect goto, emit the indirect goto block at the end of
152   // the function.
153   if (IndirectBranch) {
154     EmitBlock(IndirectBranch->getParent());
155     Builder.ClearInsertionPoint();
156   }
157 
158   // Remove the AllocaInsertPt instruction, which is just a convenience for us.
159   llvm::Instruction *Ptr = AllocaInsertPt;
160   AllocaInsertPt = 0;
161   Ptr->eraseFromParent();
162 
  // If someone took the address of a label but never did an indirect goto, we
  // made a zero-entry PHI node, which is illegal; zap it now.
165   if (IndirectBranch) {
166     llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
167     if (PN->getNumIncomingValues() == 0) {
168       PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
169       PN->eraseFromParent();
170     }
171   }
172 
173   EmitIfUsed(*this, TerminateLandingPad);
174   EmitIfUsed(*this, TerminateHandler);
175   EmitIfUsed(*this, UnreachableBlock);
176 }
177 
/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls.
180 bool CodeGenFunction::ShouldInstrumentFunction() {
181   if (!CGM.getCodeGenOpts().InstrumentFunctions)
182     return false;
183   if (CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
184     return false;
185   return true;
186 }
187 
188 /// EmitFunctionInstrumentation - Emit LLVM code to call the specified
189 /// instrumentation function with the current function and the call site, if
190 /// function instrumentation is enabled.
191 void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
192   if (!ShouldInstrumentFunction())
193     return;
194 
195   const llvm::PointerType *PointerTy;
196   const llvm::FunctionType *FunctionTy;
197   std::vector<const llvm::Type*> ProfileFuncArgs;
198 
199   // void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site);
200   PointerTy = llvm::Type::getInt8PtrTy(VMContext);
201   ProfileFuncArgs.push_back(PointerTy);
202   ProfileFuncArgs.push_back(PointerTy);
203   FunctionTy = llvm::FunctionType::get(
204     llvm::Type::getVoidTy(VMContext),
205     ProfileFuncArgs, false);
206 
207   llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn);
208   llvm::CallInst *CallSite = Builder.CreateCall(
209     CGM.getIntrinsic(llvm::Intrinsic::returnaddress, 0, 0),
210     llvm::ConstantInt::get(Int32Ty, 0),
211     "callsite");
212 
213   Builder.CreateCall2(F,
214                       llvm::ConstantExpr::getBitCast(CurFn, PointerTy),
215                       CallSite);
216 }
217 
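/// StartFunction - Emit the standard prologue for the function: set up the
/// per-function state, create the entry block and the alloca insertion-point
/// marker, emit debug info and instrumentation calls, and set up the return
/// value slot, EH state, and implicit 'this'/VTT values where present.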
218 void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
219                                     llvm::Function *Fn,
220                                     const FunctionArgList &Args,
221                                     SourceLocation StartLoc) {
222   const Decl *D = GD.getDecl();
223 
224   DidCallStackSave = false;
225   CurCodeDecl = CurFuncDecl = D;
226   FnRetTy = RetTy;
227   CurFn = Fn;
228   assert(CurFn->isDeclaration() && "Function already has body?");
229 
  // Pass the inline keyword to the optimizer if it appears explicitly on any
  // declaration.
232   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
233     for (FunctionDecl::redecl_iterator RI = FD->redecls_begin(),
234            RE = FD->redecls_end(); RI != RE; ++RI)
235       if (RI->isInlineSpecified()) {
236         Fn->addFnAttr(llvm::Attribute::InlineHint);
237         break;
238       }
239 
240   llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
241 
  // Create a marker to make it easy to insert allocas into the entry block
  // later.  Don't create this with the builder, because we don't want it
  // folded.
245   llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
246   AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "", EntryBB);
247   if (Builder.isNamePreserving())
248     AllocaInsertPt->setName("allocapt");
249 
250   ReturnBlock = getJumpDestInCurrentScope("return");
251 
252   Builder.SetInsertPoint(EntryBB);
253 
254   QualType FnType = getContext().getFunctionType(RetTy, 0, 0, false, 0,
255                                                  false, false, 0, 0,
256                                                  /*FIXME?*/
257                                                  FunctionType::ExtInfo());
258 
259   // Emit subprogram debug descriptor.
260   if (CGDebugInfo *DI = getDebugInfo()) {
261     DI->setLocation(StartLoc);
262     DI->EmitFunctionStart(GD, FnType, CurFn, Builder);
263   }
264 
265   EmitFunctionInstrumentation("__cyg_profile_func_enter");
266 
267   // FIXME: Leaked.
268   // CC info is ignored, hopefully?
269   CurFnInfo = &CGM.getTypes().getFunctionInfo(FnRetTy, Args,
270                                               FunctionType::ExtInfo());
271 
272   if (RetTy->isVoidType()) {
273     // Void type; nothing to return.
274     ReturnValue = 0;
275   } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
276              hasAggregateLLVMType(CurFnInfo->getReturnType())) {
277     // Indirect aggregate return; emit returned value directly into sret slot.
278     // This reduces code size, and affects correctness in C++.
279     ReturnValue = CurFn->arg_begin();
280   } else {
281     ReturnValue = CreateIRTemp(RetTy, "retval");
282   }
283 
284   EmitStartEHSpec(CurCodeDecl);
285   EmitFunctionProlog(*CurFnInfo, CurFn, Args);
286 
287   if (CXXThisDecl)
288     CXXThisValue = Builder.CreateLoad(LocalDeclMap[CXXThisDecl], "this");
289   if (CXXVTTDecl)
290     CXXVTTValue = Builder.CreateLoad(LocalDeclMap[CXXVTTDecl], "vtt");
291 
292   // If any of the arguments have a variably modified type, make sure to
293   // emit the type size.
294   for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
295        i != e; ++i) {
296     QualType Ty = i->second;
297 
298     if (Ty->isVariablyModifiedType())
299       EmitVLASize(Ty);
300   }
301 }
302 
303 void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args) {
304   const FunctionDecl *FD = cast<FunctionDecl>(CurGD.getDecl());
305   assert(FD->getBody());
306   EmitStmt(FD->getBody());
307 }
308 
309 void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn) {
310   const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
311 
312   // Check if we should generate debug info for this function.
313   if (CGM.getDebugInfo() && !FD->hasAttr<NoDebugAttr>())
314     DebugInfo = CGM.getDebugInfo();
315 
316   FunctionArgList Args;
317 
318   CurGD = GD;
319   if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
320     if (MD->isInstance()) {
321       // Create the implicit 'this' decl.
322       // FIXME: I'm not entirely sure I like using a fake decl just for code
323       // generation. Maybe we can come up with a better way?
324       CXXThisDecl = ImplicitParamDecl::Create(getContext(), 0,
325                                               FD->getLocation(),
326                                               &getContext().Idents.get("this"),
327                                               MD->getThisType(getContext()));
328       Args.push_back(std::make_pair(CXXThisDecl, CXXThisDecl->getType()));
329 
330       // Check if we need a VTT parameter as well.
331       if (CodeGenVTables::needsVTTParameter(GD)) {
332         // FIXME: The comment about using a fake decl above applies here too.
333         QualType T = getContext().getPointerType(getContext().VoidPtrTy);
334         CXXVTTDecl =
335           ImplicitParamDecl::Create(getContext(), 0, FD->getLocation(),
336                                     &getContext().Idents.get("vtt"), T);
337         Args.push_back(std::make_pair(CXXVTTDecl, CXXVTTDecl->getType()));
338       }
339     }
340   }
341 
342   if (FD->getNumParams()) {
343     const FunctionProtoType* FProto = FD->getType()->getAs<FunctionProtoType>();
344     assert(FProto && "Function def must have prototype!");
345 
346     for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i)
347       Args.push_back(std::make_pair(FD->getParamDecl(i),
348                                     FProto->getArgType(i)));
349   }
350 
351   SourceRange BodyRange;
352   if (Stmt *Body = FD->getBody()) BodyRange = Body->getSourceRange();
353 
354   // Emit the standard function prologue.
355   StartFunction(GD, FD->getResultType(), Fn, Args, BodyRange.getBegin());
356 
357   // Generate the body of the function.
358   if (isa<CXXDestructorDecl>(FD))
359     EmitDestructorBody(Args);
360   else if (isa<CXXConstructorDecl>(FD))
361     EmitConstructorBody(Args);
362   else
363     EmitFunctionBody(Args);
364 
365   // Emit the standard function epilogue.
366   FinishFunction(BodyRange.getEnd());
367 
368   // Destroy the 'this' declaration.
369   if (CXXThisDecl)
370     CXXThisDecl->Destroy(getContext());
371 
372   // Destroy the VTT declaration.
373   if (CXXVTTDecl)
374     CXXVTTDecl->Destroy(getContext());
375 }
376 
/// ContainsLabel - Return true if the statement contains a label in it.  If
/// this statement is not executed normally, then not containing a label means
/// that we can just remove the code.
380 bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
381   // Null statement, not a label!
382   if (S == 0) return false;
383 
384   // If this is a label, we have to emit the code, consider something like:
385   // if (0) {  ...  foo:  bar(); }  goto foo;
386   if (isa<LabelStmt>(S))
387     return true;
388 
389   // If this is a case/default statement, and we haven't seen a switch, we have
390   // to emit the code.
391   if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
392     return true;
393 
394   // If this is a switch statement, we want to ignore cases below it.
395   if (isa<SwitchStmt>(S))
396     IgnoreCaseStmts = true;
397 
398   // Scan subexpressions for verboten labels.
399   for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
400        I != E; ++I)
401     if (ContainsLabel(*I, IgnoreCaseStmts))
402       return true;
403 
404   return false;
405 }
406 
407 
/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return 0.  If it
/// constant folds to 'true' and does not contain a label, return 1; if it
/// constant folds to 'false' and does not contain a label, return -1.
412 int CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond) {
413   // FIXME: Rename and handle conversion of other evaluatable things
414   // to bool.
415   Expr::EvalResult Result;
416   if (!Cond->Evaluate(Result, getContext()) || !Result.Val.isInt() ||
417       Result.HasSideEffects)
418     return 0;  // Not foldable, not integer or not fully evaluatable.
419 
420   if (CodeGenFunction::ContainsLabel(Cond))
421     return 0;  // Contains a label.
422 
423   return Result.Val.getInt().getBoolValue() ? 1 : -1;
424 }
425 
426 
427 /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
428 /// statement) to the specified blocks.  Based on the condition, this might try
429 /// to simplify the codegen of the conditional based on the branch.
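///
/// For example, "if (x && y)" is emitted as a branch on x to a block that
/// branches on y, so that y is only evaluated when x is true.  Constant
/// operands of && and || are folded away, '!' just swaps the true and false
/// blocks, and '?:' branches to separate blocks for each arm.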
430 ///
431 void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
432                                            llvm::BasicBlock *TrueBlock,
433                                            llvm::BasicBlock *FalseBlock) {
434   if (const ParenExpr *PE = dyn_cast<ParenExpr>(Cond))
435     return EmitBranchOnBoolExpr(PE->getSubExpr(), TrueBlock, FalseBlock);
436 
437   if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
438     // Handle X && Y in a condition.
439     if (CondBOp->getOpcode() == BinaryOperator::LAnd) {
440       // If we have "1 && X", simplify the code.  "0 && X" would have constant
441       // folded if the case was simple enough.
442       if (ConstantFoldsToSimpleInteger(CondBOp->getLHS()) == 1) {
443         // br(1 && X) -> br(X).
444         return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
445       }
446 
447       // If we have "X && 1", simplify the code to use an uncond branch.
448       // "X && 0" would have been constant folded to 0.
449       if (ConstantFoldsToSimpleInteger(CondBOp->getRHS()) == 1) {
450         // br(X && 1) -> br(X).
451         return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
452       }
453 
454       // Emit the LHS as a conditional.  If the LHS conditional is false, we
455       // want to jump to the FalseBlock.
456       llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
457       EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock);
458       EmitBlock(LHSTrue);
459 
460       // Any temporaries created here are conditional.
461       BeginConditionalBranch();
462       EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
463       EndConditionalBranch();
464 
465       return;
466     } else if (CondBOp->getOpcode() == BinaryOperator::LOr) {
467       // If we have "0 || X", simplify the code.  "1 || X" would have constant
468       // folded if the case was simple enough.
469       if (ConstantFoldsToSimpleInteger(CondBOp->getLHS()) == -1) {
470         // br(0 || X) -> br(X).
471         return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
472       }
473 
474       // If we have "X || 0", simplify the code to use an uncond branch.
475       // "X || 1" would have been constant folded to 1.
476       if (ConstantFoldsToSimpleInteger(CondBOp->getRHS()) == -1) {
477         // br(X || 0) -> br(X).
478         return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
479       }
480 
481       // Emit the LHS as a conditional.  If the LHS conditional is true, we
482       // want to jump to the TrueBlock.
483       llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
484       EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse);
485       EmitBlock(LHSFalse);
486 
487       // Any temporaries created here are conditional.
488       BeginConditionalBranch();
489       EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
490       EndConditionalBranch();
491 
492       return;
493     }
494   }
495 
496   if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
497     // br(!x, t, f) -> br(x, f, t)
498     if (CondUOp->getOpcode() == UnaryOperator::LNot)
499       return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock);
500   }
501 
502   if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
503     // Handle ?: operator.
504 
    // The GNU ?: extension (omitted LHS) just falls through to the general
    // lowering below.
506     if (CondOp->getLHS()) {
507       // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
508       llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
509       llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
510       EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock);
511       EmitBlock(LHSBlock);
512       EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock);
513       EmitBlock(RHSBlock);
514       EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock);
515       return;
516     }
517   }
518 
519   // Emit the code with the fully general case.
520   llvm::Value *CondV = EvaluateExprAsBool(Cond);
521   Builder.CreateCondBr(CondV, TrueBlock, FalseBlock);
522 }
523 
524 /// ErrorUnsupported - Print out an error that codegen doesn't support the
525 /// specified stmt yet.
526 void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type,
527                                        bool OmitOnError) {
528   CGM.ErrorUnsupported(S, Type, OmitOnError);
529 }
530 
531 void
532 CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
  // If the type contains a pointer to a data member, we can't memset it to
  // zero.  Instead, create a null constant and copy it to the destination.
535   if (CGM.getTypes().ContainsPointerToDataMember(Ty)) {
536     llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);
537 
538     llvm::GlobalVariable *NullVariable =
539       new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
540                                /*isConstant=*/true,
541                                llvm::GlobalVariable::PrivateLinkage,
542                                NullConstant, llvm::Twine());
543     EmitAggregateCopy(DestPtr, NullVariable, Ty, /*isVolatile=*/false);
544     return;
545   }
546 
548   // Ignore empty classes in C++.
549   if (getContext().getLangOptions().CPlusPlus) {
550     if (const RecordType *RT = Ty->getAs<RecordType>()) {
551       if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
552         return;
553     }
554   }
555 
556   // Otherwise, just memset the whole thing to zero.  This is legal
557   // because in LLVM, all default initializers (other than the ones we just
558   // handled above) are guaranteed to have a bit pattern of all zeros.
559   const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
560   if (DestPtr->getType() != BP)
561     DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");
562 
563   // Get size and alignment info for this aggregate.
564   std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty);
565 
566   // Don't bother emitting a zero-byte memset.
567   if (TypeInfo.first == 0)
568     return;
569 
  // FIXME: Handle variable-sized types.
571   Builder.CreateCall5(CGM.getMemSetFn(BP, IntPtrTy), DestPtr,
572                  llvm::Constant::getNullValue(llvm::Type::getInt8Ty(VMContext)),
                      // TypeInfo.first and TypeInfo.second are the size and
                      // alignment in bits, respectively.
574                       llvm::ConstantInt::get(IntPtrTy, TypeInfo.first/8),
575                       llvm::ConstantInt::get(Int32Ty, TypeInfo.second/8),
576                       llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext),
577                                              0));
578 }
579 
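/// GetAddrOfLabel - Return a BlockAddress for the given label, as used by the
/// GNU address-of-label extension (&&label).  The label's block is also added
/// as a destination of the function's shared indirect branch.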
580 llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelStmt *L) {
581   // Make sure that there is a block for the indirect goto.
582   if (IndirectBranch == 0)
583     GetIndirectGotoBlock();
584 
585   llvm::BasicBlock *BB = getJumpDestForLabel(L).Block;
586 
587   // Make sure the indirect branch includes all of the address-taken blocks.
588   IndirectBranch->addDestination(BB);
589   return llvm::BlockAddress::get(CurFn, BB);
590 }
591 
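/// GetIndirectGotoBlock - Get or create the single block holding the
/// function's indirectbr instruction.  Indirect gotos record their target in
/// the block's PHI node and branch here; address-taken labels are registered
/// as destinations in GetAddrOfLabel.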
592 llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
593   // If we already made the indirect branch for indirect goto, return its block.
594   if (IndirectBranch) return IndirectBranch->getParent();
595 
596   CGBuilderTy TmpBuilder(createBasicBlock("indirectgoto"));
597 
598   const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
599 
600   // Create the PHI node that indirect gotos will add entries to.
601   llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, "indirect.goto.dest");
602 
603   // Create the indirect branch instruction.
604   IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
605   return IndirectBranch->getParent();
606 }
607 
608 llvm::Value *CodeGenFunction::GetVLASize(const VariableArrayType *VAT) {
609   llvm::Value *&SizeEntry = VLASizeMap[VAT->getSizeExpr()];
610 
611   assert(SizeEntry && "Did not emit size for type");
612   return SizeEntry;
613 }
614 
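/// EmitVLASize - Emit (and cache) the runtime size computation for a variably
/// modified type.  For a variable array type such as 'int[n]' this returns
/// the size in bytes, e.g. n * sizeof(int); for pointers to or arrays of
/// variably modified types it just emits the sizes of any nested VLAs and
/// returns 0.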
615 llvm::Value *CodeGenFunction::EmitVLASize(QualType Ty) {
616   assert(Ty->isVariablyModifiedType() &&
617          "Must pass variably modified type to EmitVLASizes!");
618 
619   EnsureInsertPoint();
620 
621   if (const VariableArrayType *VAT = getContext().getAsVariableArrayType(Ty)) {
622     llvm::Value *&SizeEntry = VLASizeMap[VAT->getSizeExpr()];
623 
624     if (!SizeEntry) {
625       const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
626 
      // Get the element size.
628       QualType ElemTy = VAT->getElementType();
629       llvm::Value *ElemSize;
630       if (ElemTy->isVariableArrayType())
631         ElemSize = EmitVLASize(ElemTy);
632       else
633         ElemSize = llvm::ConstantInt::get(SizeTy,
634             getContext().getTypeSizeInChars(ElemTy).getQuantity());
635 
636       llvm::Value *NumElements = EmitScalarExpr(VAT->getSizeExpr());
637       NumElements = Builder.CreateIntCast(NumElements, SizeTy, false, "tmp");
638 
639       SizeEntry = Builder.CreateMul(ElemSize, NumElements);
640     }
641 
642     return SizeEntry;
643   }
644 
645   if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
646     EmitVLASize(AT->getElementType());
647     return 0;
648   }
649 
650   const PointerType *PT = Ty->getAs<PointerType>();
651   assert(PT && "unknown VM type!");
652   EmitVLASize(PT->getPointeeType());
653   return 0;
654 }
655 
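/// EmitVAListRef - Emit a reference to a va_list.  If the target's va_list is
/// an array type, the expression has already decayed to a pointer to the
/// first element and can be emitted as a scalar; otherwise the va_list is an
/// lvalue whose address we take.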
656 llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) {
657   if (CGM.getContext().getBuiltinVaListType()->isArrayType())
658     return EmitScalarExpr(E);
659   return EmitLValue(E).getAddress();
660 }
661 
662 /// Pops cleanup blocks until the given savepoint is reached.
663 void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old) {
664   assert(Old.isValid());
665 
666   EHScopeStack::iterator E = EHStack.find(Old);
667   while (EHStack.begin() != E)
668     PopCleanupBlock();
669 }
670 
671 /// Destroys a cleanup if it was unused.
672 static void DestroyCleanup(CodeGenFunction &CGF,
673                            llvm::BasicBlock *Entry,
674                            llvm::BasicBlock *Exit) {
675   assert(Entry->use_empty() && "destroying cleanup with uses!");
676   assert(Exit->getTerminator() == 0 &&
677          "exit has terminator but entry has no predecessors!");
678 
679   // This doesn't always remove the entire cleanup, but it's much
680   // safer as long as we don't know what blocks belong to the cleanup.
681   // A *much* better approach if we care about this inefficiency would
682   // be to lazily emit the cleanup.
683 
684   // If the exit block is distinct from the entry, give it a branch to
685   // an unreachable destination.  This preserves the well-formedness
686   // of the IR.
687   if (Entry != Exit)
688     llvm::BranchInst::Create(CGF.getUnreachableBlock(), Exit);
689 
690   assert(!Entry->getParent() && "cleanup entry already positioned?");
691   // We can't just delete the entry; we have to kill any references to
692   // its instructions in other blocks.
693   for (llvm::BasicBlock::iterator I = Entry->begin(), E = Entry->end();
694          I != E; ++I)
695     if (!I->use_empty())
696       I->replaceAllUsesWith(llvm::UndefValue::get(I->getType()));
697   delete Entry;
698 }
699 
700 /// Creates a switch instruction to thread branches out of the given
701 /// block (which is the exit block of a cleanup).
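/// The switch dispatches on an i32 "cleanup.dst" slot; each branch threaded
/// through the cleanup stores the index of its destination into the slot
/// before entering the cleanup.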
702 static void CreateCleanupSwitch(CodeGenFunction &CGF,
703                                 llvm::BasicBlock *Block) {
704   if (Block->getTerminator()) {
705     assert(isa<llvm::SwitchInst>(Block->getTerminator()) &&
706            "cleanup block already has a terminator, but it isn't a switch");
707     return;
708   }
709 
710   llvm::Value *DestCodePtr
711     = CGF.CreateTempAlloca(CGF.Builder.getInt32Ty(), "cleanup.dst");
712   CGBuilderTy Builder(Block);
713   llvm::Value *DestCode = Builder.CreateLoad(DestCodePtr, "tmp");
714 
715   // Create a switch instruction to determine where to jump next.
716   Builder.CreateSwitch(DestCode, CGF.getUnreachableBlock());
717 }
718 
719 /// Attempts to reduce a cleanup's entry block to a fallthrough.  This
720 /// is basically llvm::MergeBlockIntoPredecessor, except
721 /// simplified/optimized for the tighter constraints on cleanup
722 /// blocks.
723 static void SimplifyCleanupEntry(CodeGenFunction &CGF,
724                                  llvm::BasicBlock *Entry) {
725   llvm::BasicBlock *Pred = Entry->getSinglePredecessor();
726   if (!Pred) return;
727 
728   llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Pred->getTerminator());
729   if (!Br || Br->isConditional()) return;
730   assert(Br->getSuccessor(0) == Entry);
731 
732   // If we were previously inserting at the end of the cleanup entry
733   // block, we'll need to continue inserting at the end of the
734   // predecessor.
735   bool WasInsertBlock = CGF.Builder.GetInsertBlock() == Entry;
736   assert(!WasInsertBlock || CGF.Builder.GetInsertPoint() == Entry->end());
737 
738   // Kill the branch.
739   Br->eraseFromParent();
740 
741   // Merge the blocks.
742   Pred->getInstList().splice(Pred->end(), Entry->getInstList());
743 
744   // Kill the entry block.
745   Entry->eraseFromParent();
746 
747   if (WasInsertBlock)
748     CGF.Builder.SetInsertPoint(Pred);
749 }
750 
/// Attempts to reduce a cleanup's exit switch to an unconditional
/// branch.
753 static void SimplifyCleanupExit(llvm::BasicBlock *Exit) {
754   llvm::TerminatorInst *Terminator = Exit->getTerminator();
755   assert(Terminator && "completed cleanup exit has no terminator");
756 
757   llvm::SwitchInst *Switch = dyn_cast<llvm::SwitchInst>(Terminator);
758   if (!Switch) return;
759   if (Switch->getNumCases() != 2) return; // default + 1
760 
761   llvm::LoadInst *Cond = cast<llvm::LoadInst>(Switch->getCondition());
762   llvm::AllocaInst *CondVar = cast<llvm::AllocaInst>(Cond->getPointerOperand());
763 
764   // Replace the switch instruction with an unconditional branch.
765   llvm::BasicBlock *Dest = Switch->getSuccessor(1); // default is 0
766   Switch->eraseFromParent();
767   llvm::BranchInst::Create(Dest, Exit);
768 
769   // Delete all uses of the condition variable.
770   Cond->eraseFromParent();
771   while (!CondVar->use_empty())
772     cast<llvm::StoreInst>(*CondVar->use_begin())->eraseFromParent();
773 
774   // Delete the condition variable itself.
775   CondVar->eraseFromParent();
776 }
777 
778 /// Threads a branch fixup through a cleanup block.
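/// The fixup's most recent branch is redirected into the cleanup's entry
/// block, a case carrying control on to the fixup's destination is added to
/// the exit switch, and the fixup is updated so it can be threaded through
/// any enclosing cleanups in the same way.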
779 static void ThreadFixupThroughCleanup(CodeGenFunction &CGF,
780                                       BranchFixup &Fixup,
781                                       llvm::BasicBlock *Entry,
782                                       llvm::BasicBlock *Exit) {
783   if (!Exit->getTerminator())
784     CreateCleanupSwitch(CGF, Exit);
785 
786   // Find the switch and its destination index alloca.
787   llvm::SwitchInst *Switch = cast<llvm::SwitchInst>(Exit->getTerminator());
788   llvm::Value *DestCodePtr =
789     cast<llvm::LoadInst>(Switch->getCondition())->getPointerOperand();
790 
791   // Compute the index of the new case we're adding to the switch.
792   unsigned Index = Switch->getNumCases();
793 
794   const llvm::IntegerType *i32 = llvm::Type::getInt32Ty(CGF.getLLVMContext());
795   llvm::ConstantInt *IndexV = llvm::ConstantInt::get(i32, Index);
796 
797   // Set the index in the origin block.
798   new llvm::StoreInst(IndexV, DestCodePtr, Fixup.Origin);
799 
800   // Add a case to the switch.
801   Switch->addCase(IndexV, Fixup.Destination);
802 
803   // Change the last branch to point to the cleanup entry block.
804   Fixup.LatestBranch->setSuccessor(Fixup.LatestBranchIndex, Entry);
805 
806   // And finally, update the fixup.
807   Fixup.LatestBranch = Switch;
808   Fixup.LatestBranchIndex = Index;
809 }
810 
811 /// Try to simplify both the entry and exit edges of a cleanup.
812 static void SimplifyCleanupEdges(CodeGenFunction &CGF,
813                                  llvm::BasicBlock *Entry,
814                                  llvm::BasicBlock *Exit) {
815 
816   // Given their current implementations, it's important to run these
817   // in this order: SimplifyCleanupEntry will delete Entry if it can
818   // be merged into its predecessor, which will then break
819   // SimplifyCleanupExit if (as is common) Entry == Exit.
820 
821   SimplifyCleanupExit(Exit);
822   SimplifyCleanupEntry(CGF, Entry);
823 }
824 
825 /// Pops a cleanup block.  If the block includes a normal cleanup, the
826 /// current insertion point is threaded through the cleanup, as are
827 /// any branch fixups on the cleanup.
828 void CodeGenFunction::PopCleanupBlock() {
829   assert(!EHStack.empty() && "cleanup stack is empty!");
830   assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!");
831   EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
832   assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups());
833 
834   // Handle the EH cleanup if (1) there is one and (2) it's different
835   // from the normal cleanup.
836   if (Scope.isEHCleanup() &&
837       Scope.getEHEntry() != Scope.getNormalEntry()) {
838     llvm::BasicBlock *EHEntry = Scope.getEHEntry();
839     llvm::BasicBlock *EHExit = Scope.getEHExit();
840 
841     if (EHEntry->use_empty()) {
842       DestroyCleanup(*this, EHEntry, EHExit);
843     } else {
844       // TODO: this isn't really the ideal location to put this EH
845       // cleanup, but lazy emission is a better solution than trying
846       // to pick a better spot.
847       CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
848       EmitBlock(EHEntry);
849       Builder.restoreIP(SavedIP);
850 
851       SimplifyCleanupEdges(*this, EHEntry, EHExit);
852     }
853   }
854 
855   // If we only have an EH cleanup, we don't really need to do much
856   // here.  Branch fixups just naturally drop down to the enclosing
857   // cleanup scope.
858   if (!Scope.isNormalCleanup()) {
859     EHStack.popCleanup();
860     assert(EHStack.getNumBranchFixups() == 0 || EHStack.hasNormalCleanups());
861     return;
862   }
863 
864   // Check whether the scope has any fixups that need to be threaded.
865   unsigned FixupDepth = Scope.getFixupDepth();
866   bool HasFixups = EHStack.getNumBranchFixups() != FixupDepth;
867 
868   // Grab the entry and exit blocks.
869   llvm::BasicBlock *Entry = Scope.getNormalEntry();
870   llvm::BasicBlock *Exit = Scope.getNormalExit();
871 
872   // Check whether anything's been threaded through the cleanup already.
873   assert((Exit->getTerminator() == 0) == Entry->use_empty() &&
874          "cleanup entry/exit mismatch");
875   bool HasExistingBranches = !Entry->use_empty();
876 
877   // Check whether we need to emit a "fallthrough" branch through the
878   // cleanup for the current insertion point.
879   llvm::BasicBlock *FallThrough = Builder.GetInsertBlock();
880   if (FallThrough && FallThrough->getTerminator())
881     FallThrough = 0;
882 
883   // If *nothing* is using the cleanup, kill it.
884   if (!FallThrough && !HasFixups && !HasExistingBranches) {
885     EHStack.popCleanup();
886     DestroyCleanup(*this, Entry, Exit);
887     return;
888   }
889 
890   // Otherwise, add the block to the function.
891   EmitBlock(Entry);
892 
893   if (FallThrough)
894     Builder.SetInsertPoint(Exit);
895   else
896     Builder.ClearInsertionPoint();
897 
898   // Fast case: if we don't have to add any fixups, and either
899   // we don't have a fallthrough or the cleanup wasn't previously
900   // used, then the setup above is sufficient.
901   if (!HasFixups) {
902     if (!FallThrough) {
903       assert(HasExistingBranches && "no reason for cleanup but didn't kill before");
904       EHStack.popCleanup();
905       SimplifyCleanupEdges(*this, Entry, Exit);
906       return;
907     } else if (!HasExistingBranches) {
908       assert(FallThrough && "no reason for cleanup but didn't kill before");
909       // We can't simplify the exit edge in this case because we're
910       // already inserting at the end of the exit block.
911       EHStack.popCleanup();
912       SimplifyCleanupEntry(*this, Entry);
913       return;
914     }
915   }
916 
917   // Otherwise we're going to have to thread things through the cleanup.
918   llvm::SmallVector<BranchFixup*, 8> Fixups;
919 
920   // Synthesize a fixup for the current insertion point.
921   BranchFixup Cur;
922   if (FallThrough) {
923     Cur.Destination = createBasicBlock("cleanup.cont");
924     Cur.LatestBranch = FallThrough->getTerminator();
925     Cur.LatestBranchIndex = 0;
926     Cur.Origin = Cur.LatestBranch;
927 
928     // Restore fixup invariant.  EmitBlock added a branch to the cleanup
929     // which we need to redirect to the destination.
930     cast<llvm::BranchInst>(Cur.LatestBranch)->setSuccessor(0, Cur.Destination);
931 
932     Fixups.push_back(&Cur);
933   } else {
934     Cur.Destination = 0;
935   }
936 
937   // Collect any "real" fixups we need to thread.
938   for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
939         I != E; ++I)
940     if (EHStack.getBranchFixup(I).Destination)
941       Fixups.push_back(&EHStack.getBranchFixup(I));
942 
943   assert(!Fixups.empty() && "no fixups, invariants broken!");
944 
945   // If there's only a single fixup to thread through, do so with
946   // unconditional branches.  This only happens if there's a single
947   // branch and no fallthrough.
948   if (Fixups.size() == 1 && !HasExistingBranches) {
949     Fixups[0]->LatestBranch->setSuccessor(Fixups[0]->LatestBranchIndex, Entry);
950     llvm::BranchInst *Br =
951       llvm::BranchInst::Create(Fixups[0]->Destination, Exit);
952     Fixups[0]->LatestBranch = Br;
953     Fixups[0]->LatestBranchIndex = 0;
954 
  // Otherwise, force a switch instruction and thread everything through
  // the switch.
957   } else {
958     CreateCleanupSwitch(*this, Exit);
959     for (unsigned I = 0, E = Fixups.size(); I != E; ++I)
960       ThreadFixupThroughCleanup(*this, *Fixups[I], Entry, Exit);
961   }
962 
963   // Emit the fallthrough destination block if necessary.
964   if (Cur.Destination)
965     EmitBlock(Cur.Destination);
966 
967   // We're finally done with the cleanup.
968   EHStack.popCleanup();
969 }
970 
971 void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
972   if (!HaveInsertPoint())
973     return;
974 
975   // Create the branch.
976   llvm::BranchInst *BI = Builder.CreateBr(Dest.Block);
977 
978   // If we're not in a cleanup scope, we don't need to worry about
979   // fixups.
980   if (!EHStack.hasNormalCleanups()) {
981     Builder.ClearInsertionPoint();
982     return;
983   }
984 
985   // Initialize a fixup.
986   BranchFixup Fixup;
987   Fixup.Destination = Dest.Block;
988   Fixup.Origin = BI;
989   Fixup.LatestBranch = BI;
990   Fixup.LatestBranchIndex = 0;
991 
992   // If we can't resolve the destination cleanup scope, just add this
993   // to the current cleanup scope.
994   if (!Dest.ScopeDepth.isValid()) {
995     EHStack.addBranchFixup() = Fixup;
996     Builder.ClearInsertionPoint();
997     return;
998   }
999 
1000   for (EHScopeStack::iterator I = EHStack.begin(),
1001          E = EHStack.find(Dest.ScopeDepth); I != E; ++I) {
1002     if (isa<EHCleanupScope>(*I)) {
1003       EHCleanupScope &Scope = cast<EHCleanupScope>(*I);
1004       if (Scope.isNormalCleanup())
1005         ThreadFixupThroughCleanup(*this, Fixup, Scope.getNormalEntry(),
1006                                   Scope.getNormalExit());
1007     }
1008   }
1009 
1010   Builder.ClearInsertionPoint();
1011 }
1012 
1013 void CodeGenFunction::EmitBranchThroughEHCleanup(JumpDest Dest) {
1014   if (!HaveInsertPoint())
1015     return;
1016 
1017   // Create the branch.
1018   llvm::BranchInst *BI = Builder.CreateBr(Dest.Block);
1019 
1020   // If we're not in a cleanup scope, we don't need to worry about
1021   // fixups.
1022   if (!EHStack.hasEHCleanups()) {
1023     Builder.ClearInsertionPoint();
1024     return;
1025   }
1026 
1027   // Initialize a fixup.
1028   BranchFixup Fixup;
1029   Fixup.Destination = Dest.Block;
1030   Fixup.Origin = BI;
1031   Fixup.LatestBranch = BI;
1032   Fixup.LatestBranchIndex = 0;
1033 
1034   // We should never get invalid scope depths for these: invalid scope
1035   // depths only arise for as-yet-unemitted labels, and we can't do an
1036   // EH-unwind to one of those.
1037   assert(Dest.ScopeDepth.isValid() && "invalid scope depth on EH dest?");
1038 
1039   for (EHScopeStack::iterator I = EHStack.begin(),
1040          E = EHStack.find(Dest.ScopeDepth); I != E; ++I) {
1041     if (isa<EHCleanupScope>(*I)) {
1042       EHCleanupScope &Scope = cast<EHCleanupScope>(*I);
1043       if (Scope.isEHCleanup())
1044         ThreadFixupThroughCleanup(*this, Fixup, Scope.getEHEntry(),
1045                                   Scope.getEHExit());
1046     }
1047   }
1048 
1049   Builder.ClearInsertionPoint();
1050 }
1051