//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGDebugInfo.h"
#include "CGException.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
  : BlockFunction(cgm, *this, Builder), CGM(cgm),
    Target(CGM.getContext().Target),
    Builder(cgm.getModule().getContext()),
    ExceptionSlot(0), DebugInfo(0), IndirectBranch(0),
    SwitchInsn(0), CaseRangeBlock(0), InvokeDest(0),
    DidCallStackSave(false), UnreachableBlock(0),
    CXXThisDecl(0), CXXThisValue(0), CXXVTTDecl(0), CXXVTTValue(0),
    ConditionalBranchLevel(0), TerminateLandingPad(0), TerminateHandler(0),
    TrapBB(0) {

  // Get some frequently used types.
  LLVMPointerWidth = Target.getPointerWidth(0);
  llvm::LLVMContext &LLVMContext = CGM.getLLVMContext();
  IntPtrTy = llvm::IntegerType::get(LLVMContext, LLVMPointerWidth);
  Int32Ty  = llvm::Type::getInt32Ty(LLVMContext);
  Int64Ty  = llvm::Type::getInt64Ty(LLVMContext);

  Exceptions = getContext().getLangOptions().Exceptions;
  CatchUndefined = getContext().getLangOptions().CatchUndefined;
  CGM.getMangleContext().startNewFunction();
}

ASTContext &CodeGenFunction::getContext() const {
  return CGM.getContext();
}


llvm::Value *CodeGenFunction::GetAddrOfLocalVar(const VarDecl *VD) {
  llvm::Value *Res = LocalDeclMap[VD];
  assert(Res && "Invalid argument to GetAddrOfLocalVar(), no decl!");
  return Res;
}

llvm::Constant *
CodeGenFunction::GetAddrOfStaticLocalVar(const VarDecl *BVD) {
  return cast<llvm::Constant>(GetAddrOfLocalVar(BVD));
}

const llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

const llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

bool CodeGenFunction::hasAggregateLLVMType(QualType T) {
  return T->isRecordType() || T->isArrayType() || T->isAnyComplexType() ||
    T->isMemberFunctionPointerType();
}

void CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point, reuse it if it is empty or there are no
    // explicit jumps to the return block.
    if (CurBB->empty() || ReturnBlock.Block->use_empty()) {
      ReturnBlock.Block->replaceAllUsesWith(CurBB);
      delete ReturnBlock.Block;
    } else
      EmitBlock(ReturnBlock.Block);
    return;
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock.Block->hasOneUse()) {
    llvm::BranchInst *BI =
      dyn_cast<llvm::BranchInst>(*ReturnBlock.Block->use_begin());
    if (BI && BI->isUnconditional() &&
        BI->getSuccessor(0) == ReturnBlock.Block) {
      // Reset insertion point and delete the branch.
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.Block;
      return;
    }
  }

  // FIXME: We are at an unreachable point; there is no reason to emit the
  // block unless it has uses.  However, we still need a place to put the
  // debug region.end for now.

  EmitBlock(ReturnBlock.Block);
}

static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  if (!BB) return;
  if (!BB->use_empty())
    return CGF.CurFn->getBasicBlockList().push_back(BB);
  delete BB;
}

void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");

  // Emit function epilog (to return).
  EmitReturnBlock();

  EmitFunctionInstrumentation("__cyg_profile_func_exit");

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo()) {
    DI->setLocation(EndLoc);
    DI->EmitRegionEnd(CurFn, Builder);
  }

  EmitFunctionEpilog(*CurFnInfo);
  EmitEndEHSpec(CurCodeDecl);

  assert(EHStack.empty() &&
         "did not remove all scopes from cleanup stack!");

  // If someone did an indirect goto, emit the indirect goto block at the end of
  // the function.
  if (IndirectBranch) {
    EmitBlock(IndirectBranch->getParent());
    Builder.ClearInsertionPoint();
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = 0;
  Ptr->eraseFromParent();

  // If someone took the address of a label but never did an indirect goto, we
  // made a zero-entry PHI node, which is illegal; zap it now.
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();
}

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls, i.e. when
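/// -finstrument-functions is in effect and the function is not marked with
/// __attribute__((no_instrument_function)).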
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions)
    return false;
  if (CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}

/// EmitFunctionInstrumentation - Emit LLVM code to call the specified
/// instrumentation function with the current function and the call site, if
/// function instrumentation is enabled.
void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
  if (!ShouldInstrumentFunction())
    return;

  const llvm::PointerType *PointerTy;
  const llvm::FunctionType *FunctionTy;
  std::vector<const llvm::Type*> ProfileFuncArgs;

  // void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site);
  PointerTy = llvm::Type::getInt8PtrTy(VMContext);
  ProfileFuncArgs.push_back(PointerTy);
  ProfileFuncArgs.push_back(PointerTy);
  FunctionTy = llvm::FunctionType::get(
    llvm::Type::getVoidTy(VMContext),
    ProfileFuncArgs, false);

  llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn);
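
  // The call site is approximated by our own return address, obtained with
  // the llvm.returnaddress intrinsic at depth 0; this identifies the point
  // in the caller from which we were called.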
  llvm::CallInst *CallSite = Builder.CreateCall(
    CGM.getIntrinsic(llvm::Intrinsic::returnaddress, 0, 0),
    llvm::ConstantInt::get(Int32Ty, 0),
    "callsite");

  Builder.CreateCall2(F,
                      llvm::ConstantExpr::getBitCast(CurFn, PointerTy),
                      CallSite);
}

void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
                                    llvm::Function *Fn,
                                    const FunctionArgList &Args,
                                    SourceLocation StartLoc) {
  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = CurFuncDecl = D;
  FnRetTy = RetTy;
  CurFn = Fn;
  assert(CurFn->isDeclaration() && "Function already has body?");

  // Pass inline keyword to optimizer if it appears explicitly on any
  // declaration.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
    for (FunctionDecl::redecl_iterator RI = FD->redecls_begin(),
           RE = FD->redecls_end(); RI != RE; ++RI)
      if (RI->isInlineSpecified()) {
        Fn->addFnAttr(llvm::Attribute::InlineHint);
        break;
      }

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entry block
  // later.  Don't create this with the builder, because we don't want it
  // folded.
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "", EntryBB);
  if (Builder.isNamePreserving())
    AllocaInsertPt->setName("allocapt");

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  QualType FnType = getContext().getFunctionType(RetTy, 0, 0, false, 0,
                                                 false, false, 0, 0,
                                                 /*FIXME?*/
                                                 FunctionType::ExtInfo());

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    DI->setLocation(StartLoc);
    DI->EmitFunctionStart(GD, FnType, CurFn, Builder);
  }

  EmitFunctionInstrumentation("__cyg_profile_func_enter");

  // FIXME: Leaked.
  // CC info is ignored, hopefully?
  CurFnInfo = &CGM.getTypes().getFunctionInfo(FnRetTy, Args,
                                              FunctionType::ExtInfo());

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = 0;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
             hasAggregateLLVMType(CurFnInfo->getReturnType())) {
    // Indirect aggregate return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
    ReturnValue = CurFn->arg_begin();
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");
  }

  EmitStartEHSpec(CurCodeDecl);
  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (CXXThisDecl)
    CXXThisValue = Builder.CreateLoad(LocalDeclMap[CXXThisDecl], "this");
  if (CXXVTTDecl)
    CXXVTTValue = Builder.CreateLoad(LocalDeclMap[CXXVTTDecl], "vtt");

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size.
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i) {
    QualType Ty = i->second;

    if (Ty->isVariablyModifiedType())
      EmitVLASize(Ty);
  }
}

void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args) {
  const FunctionDecl *FD = cast<FunctionDecl>(CurGD.getDecl());
  assert(FD->getBody());
  EmitStmt(FD->getBody());
}

void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  // Check if we should generate debug info for this function.
  if (CGM.getDebugInfo() && !FD->hasAttr<NoDebugAttr>())
    DebugInfo = CGM.getDebugInfo();

  FunctionArgList Args;

  CurGD = GD;
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
    if (MD->isInstance()) {
      // Create the implicit 'this' decl.
      // FIXME: I'm not entirely sure I like using a fake decl just for code
      // generation. Maybe we can come up with a better way?
      CXXThisDecl = ImplicitParamDecl::Create(getContext(), 0,
                                              FD->getLocation(),
                                              &getContext().Idents.get("this"),
                                              MD->getThisType(getContext()));
      Args.push_back(std::make_pair(CXXThisDecl, CXXThisDecl->getType()));

      // Check if we need a VTT parameter as well.
      if (CodeGenVTables::needsVTTParameter(GD)) {
        // FIXME: The comment about using a fake decl above applies here too.
        QualType T = getContext().getPointerType(getContext().VoidPtrTy);
        CXXVTTDecl =
          ImplicitParamDecl::Create(getContext(), 0, FD->getLocation(),
                                    &getContext().Idents.get("vtt"), T);
        Args.push_back(std::make_pair(CXXVTTDecl, CXXVTTDecl->getType()));
      }
    }
  }

  if (FD->getNumParams()) {
    const FunctionProtoType* FProto = FD->getType()->getAs<FunctionProtoType>();
    assert(FProto && "Function def must have prototype!");

    for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i)
      Args.push_back(std::make_pair(FD->getParamDecl(i),
                                    FProto->getArgType(i)));
  }

  SourceRange BodyRange;
  if (Stmt *Body = FD->getBody()) BodyRange = Body->getSourceRange();

  // Emit the standard function prologue.
  StartFunction(GD, FD->getResultType(), Fn, Args, BodyRange.getBegin());

  // Generate the body of the function.
  if (isa<CXXDestructorDecl>(FD))
    EmitDestructorBody(Args);
  else if (isa<CXXConstructorDecl>(FD))
    EmitConstructorBody(Args);
  else
    EmitFunctionBody(Args);

  // Emit the standard function epilogue.
  FinishFunction(BodyRange.getEnd());

  // Destroy the 'this' declaration.
  if (CXXThisDecl)
    CXXThisDecl->Destroy(getContext());

  // Destroy the VTT declaration.
  if (CXXVTTDecl)
    CXXVTTDecl->Destroy(getContext());
}

/// ContainsLabel - Return true if the statement contains a label.  If this
/// statement is not executed normally, and it does not contain a label, we
/// can safely remove the code.
bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
  // Null statement, not a label!
  if (S == 0) return false;

  // If this is a label, we have to emit the code, consider something like:
  // if (0) {  ...  foo:  bar(); }  goto foo;
  if (isa<LabelStmt>(S))
    return true;

  // If this is a case/default statement, and we haven't seen a switch, we have
  // to emit the code.
  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
    return true;

  // If this is a switch statement, we want to ignore cases below it.
  if (isa<SwitchStmt>(S))
    IgnoreCaseStmts = true;

  // Scan subexpressions for verboten labels.
  for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
       I != E; ++I)
    if (ContainsLabel(*I, IgnoreCaseStmts))
      return true;

  return false;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold to
/// a constant, or if it does but contains a label, return 0.  If it constant
/// folds to 'true' and does not contain a label, return 1; if it constant
/// folds to 'false' and does not contain a label, return -1.
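///
/// For example, '2 < 3' folds to true and yields 1, '2 > 3' folds to false
/// and yields -1, and 'x < 3' does not fold and yields 0.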
int CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond) {
  // FIXME: Rename and handle conversion of other evaluatable things
  // to bool.
  Expr::EvalResult Result;
  if (!Cond->Evaluate(Result, getContext()) || !Result.Val.isInt() ||
      Result.HasSideEffects)
    return 0;  // Not foldable, not integer or not fully evaluatable.

  if (CodeGenFunction::ContainsLabel(Cond))
    return 0;  // Contains a label.

  return Result.Val.getInt().getBoolValue() ? 1 : -1;
}

/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
/// statement) to the specified blocks.  Where possible, this simplifies the
/// codegen of the conditional based on the form of the condition.
///
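/// For example, for 'if (x && y)' this emits a branch on 'x' to a block that
/// then branches on 'y', rather than first materializing 'x && y' as an i1
/// value and branching on that.
///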
void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
                                           llvm::BasicBlock *TrueBlock,
                                           llvm::BasicBlock *FalseBlock) {
  if (const ParenExpr *PE = dyn_cast<ParenExpr>(Cond))
    return EmitBranchOnBoolExpr(PE->getSubExpr(), TrueBlock, FalseBlock);

  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
    // Handle X && Y in a condition.
    if (CondBOp->getOpcode() == BinaryOperator::LAnd) {
      // If we have "1 && X", simplify the code.  "0 && X" would have constant
      // folded if the case was simple enough.
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS()) == 1) {
        // br(1 && X) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      }

      // If we have "X && 1", simplify the code to use an uncond branch.
      // "X && 0" would have been constant folded to 0.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS()) == 1) {
        // br(X && 1) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
      }

      // Emit the LHS as a conditional.  If the LHS conditional is false, we
      // want to jump to the FalseBlock.
      llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
      EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock);
      EmitBlock(LHSTrue);

      // Any temporaries created here are conditional.
      BeginConditionalBranch();
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      EndConditionalBranch();

      return;
    } else if (CondBOp->getOpcode() == BinaryOperator::LOr) {
      // If we have "0 || X", simplify the code.  "1 || X" would have constant
      // folded if the case was simple enough.
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS()) == -1) {
        // br(0 || X) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      }

      // If we have "X || 0", simplify the code to use an uncond branch.
      // "X || 1" would have been constant folded to 1.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS()) == -1) {
        // br(X || 0) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
      }

      // Emit the LHS as a conditional.  If the LHS conditional is true, we
      // want to jump to the TrueBlock.
      llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
      EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse);
      EmitBlock(LHSFalse);

      // Any temporaries created here are conditional.
      BeginConditionalBranch();
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      EndConditionalBranch();

      return;
    }
  }

  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
    // br(!x, t, f) -> br(x, f, t)
    if (CondUOp->getOpcode() == UnaryOperator::LNot)
      return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock);
  }

  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
    // Handle ?: operator.

    // Just ignore GNU ?: extension.
    if (CondOp->getLHS()) {
      // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
      llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
      llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
      EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock);
      EmitBlock(LHSBlock);
      EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock);
      EmitBlock(RHSBlock);
      EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock);
      return;
    }
  }

  // Emit the code with the fully general case.
  llvm::Value *CondV = EvaluateExprAsBool(Cond);
  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock);
}

/// ErrorUnsupported - Print out an error that codegen doesn't support the
/// specified stmt yet.
void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type,
                                       bool OmitOnError) {
  CGM.ErrorUnsupported(S, Type, OmitOnError);
}

void
CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
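  // (Under the Itanium C++ ABI, for example, a null pointer to data member is
  // represented as -1, not 0, so an all-zero bit pattern would not be a null
  // 'int S::*'.)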
  if (CGM.getTypes().ContainsPointerToDataMember(Ty)) {
    llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);

    llvm::GlobalVariable *NullVariable =
      new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
                               /*isConstant=*/true,
                               llvm::GlobalVariable::PrivateLinkage,
                               NullConstant, llvm::Twine());
    EmitAggregateCopy(DestPtr, NullVariable, Ty, /*isVolatile=*/false);
    return;
  }

  // Ignore empty classes in C++.
  if (getContext().getLangOptions().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
        return;
    }
  }

  // Otherwise, just memset the whole thing to zero.  This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
  if (DestPtr->getType() != BP)
    DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");

  // Get size and alignment info for this aggregate.
  std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty);

  // Don't bother emitting a zero-byte memset.
  if (TypeInfo.first == 0)
    return;

  // FIXME: Handle variable sized types.
  Builder.CreateCall5(CGM.getMemSetFn(BP, IntPtrTy), DestPtr,
                 llvm::Constant::getNullValue(llvm::Type::getInt8Ty(VMContext)),
                      // TypeInfo.first describes size in bits.
                      llvm::ConstantInt::get(IntPtrTy, TypeInfo.first/8),
                      llvm::ConstantInt::get(Int32Ty, TypeInfo.second/8),
                      llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext),
                                             0));
}
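
/// GetAddrOfLabel - Return the address of the given label as an i8* block
/// address constant, as used by the GNU address-of-label extension
/// ('void *p = &&target;') together with indirect goto.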
llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelStmt *L) {
  // Make sure that there is a block for the indirect goto.
  if (IndirectBranch == 0)
    GetIndirectGotoBlock();

  llvm::BasicBlock *BB = getJumpDestForLabel(L).Block;

  // Make sure the indirect branch includes all of the address-taken blocks.
  IndirectBranch->addDestination(BB);
  return llvm::BlockAddress::get(CurFn, BB);
}

llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
  // If we already made the indirect branch for indirect goto, return its block.
  if (IndirectBranch) return IndirectBranch->getParent();

  CGBuilderTy TmpBuilder(createBasicBlock("indirectgoto"));

  const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);

  // Create the PHI node that indirect gotos will add entries to.
  llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, "indirect.goto.dest");

  // Create the indirect branch instruction.
  IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
  return IndirectBranch->getParent();
}

llvm::Value *CodeGenFunction::GetVLASize(const VariableArrayType *VAT) {
  llvm::Value *&SizeEntry = VLASizeMap[VAT->getSizeExpr()];

  assert(SizeEntry && "Did not emit size for type");
  return SizeEntry;
}
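
/// EmitVLASize - Emit and cache the size computation for the given variably
/// modified type.  For a variable array type such as 'int a[n]' this returns
/// the size in bytes (here 'n * sizeof(int)'), cached under the array's size
/// expression; for other variably modified types it emits the sizes of any
/// nested variable arrays and returns 0.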
llvm::Value *CodeGenFunction::EmitVLASize(QualType Ty) {
  assert(Ty->isVariablyModifiedType() &&
         "Must pass variably modified type to EmitVLASize!");

  EnsureInsertPoint();

  if (const VariableArrayType *VAT = getContext().getAsVariableArrayType(Ty)) {
    llvm::Value *&SizeEntry = VLASizeMap[VAT->getSizeExpr()];

    if (!SizeEntry) {
      const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());

      // Get the element size.
      QualType ElemTy = VAT->getElementType();
      llvm::Value *ElemSize;
      if (ElemTy->isVariableArrayType())
        ElemSize = EmitVLASize(ElemTy);
      else
        ElemSize = llvm::ConstantInt::get(SizeTy,
            getContext().getTypeSizeInChars(ElemTy).getQuantity());

      llvm::Value *NumElements = EmitScalarExpr(VAT->getSizeExpr());
      NumElements = Builder.CreateIntCast(NumElements, SizeTy, false, "tmp");

      SizeEntry = Builder.CreateMul(ElemSize, NumElements);
    }

    return SizeEntry;
  }

  if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
    EmitVLASize(AT->getElementType());
    return 0;
  }

  const PointerType *PT = Ty->getAs<PointerType>();
  assert(PT && "unknown VM type!");
  EmitVLASize(PT->getPointeeType());
  return 0;
}
658 
659 llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) {
660   if (CGM.getContext().getBuiltinVaListType()->isArrayType())
661     return EmitScalarExpr(E);
662   return EmitLValue(E).getAddress();
663 }
664 
665 /// Pops cleanup blocks until the given savepoint is reached.
666 void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old) {
667   assert(Old.isValid());
668 
669   EHScopeStack::iterator E = EHStack.find(Old);
670   while (EHStack.begin() != E)
671     PopCleanupBlock();
672 }
673 
674 /// Destroys a cleanup if it was unused.
675 static void DestroyCleanup(CodeGenFunction &CGF,
676                            llvm::BasicBlock *Entry,
677                            llvm::BasicBlock *Exit) {
678   assert(Entry->use_empty() && "destroying cleanup with uses!");
679   assert(Exit->getTerminator() == 0 &&
680          "exit has terminator but entry has no predecessors!");
681 
682   // This doesn't always remove the entire cleanup, but it's much
683   // safer as long as we don't know what blocks belong to the cleanup.
684   // A *much* better approach if we care about this inefficiency would
685   // be to lazily emit the cleanup.
686 
687   // If the exit block is distinct from the entry, give it a branch to
688   // an unreachable destination.  This preserves the well-formedness
689   // of the IR.
690   if (Entry != Exit)
691     llvm::BranchInst::Create(CGF.getUnreachableBlock(), Exit);
692 
693   assert(!Entry->getParent() && "cleanup entry already positioned?");
694   // We can't just delete the entry; we have to kill any references to
695   // its instructions in other blocks.
696   for (llvm::BasicBlock::iterator I = Entry->begin(), E = Entry->end();
697          I != E; ++I)
698     if (!I->use_empty())
699       I->replaceAllUsesWith(llvm::UndefValue::get(I->getType()));
700   delete Entry;
701 }
702 
703 /// Creates a switch instruction to thread branches out of the given
704 /// block (which is the exit block of a cleanup).
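///
/// Once fixups have been threaded through it (see
/// ThreadFixupThroughCleanup), the block ends in something like:
///
///   %tmp = load i32* %cleanup.dst
///   switch i32 %tmp, label %unreachable [ i32 1, label %fixup.dest ]
///
/// where the default successor is the function's unreachable block and each
/// case names a fixup's destination.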
static void CreateCleanupSwitch(CodeGenFunction &CGF,
                                llvm::BasicBlock *Block) {
  if (Block->getTerminator()) {
    assert(isa<llvm::SwitchInst>(Block->getTerminator()) &&
           "cleanup block already has a terminator, but it isn't a switch");
    return;
  }

  llvm::Value *DestCodePtr
    = CGF.CreateTempAlloca(CGF.Builder.getInt32Ty(), "cleanup.dst");
  CGBuilderTy Builder(Block);
  llvm::Value *DestCode = Builder.CreateLoad(DestCodePtr, "tmp");

  // Create a switch instruction to determine where to jump next.
  Builder.CreateSwitch(DestCode, CGF.getUnreachableBlock());
}

/// Attempts to reduce a cleanup's entry block to a fallthrough.  This
/// is basically llvm::MergeBlockIntoPredecessor, except
/// simplified/optimized for the tighter constraints on cleanup
/// blocks.
static void SimplifyCleanupEntry(CodeGenFunction &CGF,
                                 llvm::BasicBlock *Entry) {
  llvm::BasicBlock *Pred = Entry->getSinglePredecessor();
  if (!Pred) return;

  llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Pred->getTerminator());
  if (!Br || Br->isConditional()) return;
  assert(Br->getSuccessor(0) == Entry);

  // If we were previously inserting at the end of the cleanup entry
  // block, we'll need to continue inserting at the end of the
  // predecessor.
  bool WasInsertBlock = CGF.Builder.GetInsertBlock() == Entry;
  assert(!WasInsertBlock || CGF.Builder.GetInsertPoint() == Entry->end());

  // Kill the branch.
  Br->eraseFromParent();

  // Merge the blocks.
  Pred->getInstList().splice(Pred->end(), Entry->getInstList());

  // Kill the entry block.
  Entry->eraseFromParent();

  if (WasInsertBlock)
    CGF.Builder.SetInsertPoint(Pred);
}

/// Attempts to reduce a cleanup's exit switch to an unconditional
/// branch.
static void SimplifyCleanupExit(llvm::BasicBlock *Exit) {
  llvm::TerminatorInst *Terminator = Exit->getTerminator();
  assert(Terminator && "completed cleanup exit has no terminator");

  llvm::SwitchInst *Switch = dyn_cast<llvm::SwitchInst>(Terminator);
  if (!Switch) return;
  if (Switch->getNumCases() != 2) return; // default + 1

  llvm::LoadInst *Cond = cast<llvm::LoadInst>(Switch->getCondition());
  llvm::AllocaInst *CondVar = cast<llvm::AllocaInst>(Cond->getPointerOperand());

  // Replace the switch instruction with an unconditional branch.
  llvm::BasicBlock *Dest = Switch->getSuccessor(1); // default is 0
  Switch->eraseFromParent();
  llvm::BranchInst::Create(Dest, Exit);

  // Delete all uses of the condition variable.
  Cond->eraseFromParent();
  while (!CondVar->use_empty())
    cast<llvm::StoreInst>(*CondVar->use_begin())->eraseFromParent();

  // Delete the condition variable itself.
  CondVar->eraseFromParent();
}

/// Threads a branch fixup through a cleanup block.
static void ThreadFixupThroughCleanup(CodeGenFunction &CGF,
                                      BranchFixup &Fixup,
                                      llvm::BasicBlock *Entry,
                                      llvm::BasicBlock *Exit) {
  if (!Exit->getTerminator())
    CreateCleanupSwitch(CGF, Exit);

  // Find the switch and its destination index alloca.
  llvm::SwitchInst *Switch = cast<llvm::SwitchInst>(Exit->getTerminator());
  llvm::Value *DestCodePtr =
    cast<llvm::LoadInst>(Switch->getCondition())->getPointerOperand();

  // Compute the index of the new case we're adding to the switch.
  unsigned Index = Switch->getNumCases();

  const llvm::IntegerType *i32 = llvm::Type::getInt32Ty(CGF.getLLVMContext());
  llvm::ConstantInt *IndexV = llvm::ConstantInt::get(i32, Index);

  // Set the index in the origin block.
  new llvm::StoreInst(IndexV, DestCodePtr, Fixup.Origin);

  // Add a case to the switch.
  Switch->addCase(IndexV, Fixup.Destination);

  // Change the last branch to point to the cleanup entry block.
  Fixup.LatestBranch->setSuccessor(Fixup.LatestBranchIndex, Entry);

  // And finally, update the fixup.
  Fixup.LatestBranch = Switch;
  Fixup.LatestBranchIndex = Index;
}

/// Try to simplify both the entry and exit edges of a cleanup.
static void SimplifyCleanupEdges(CodeGenFunction &CGF,
                                 llvm::BasicBlock *Entry,
                                 llvm::BasicBlock *Exit) {

  // Given their current implementations, it's important to run these
  // in this order: SimplifyCleanupEntry will delete Entry if it can
  // be merged into its predecessor, which will then break
  // SimplifyCleanupExit if (as is common) Entry == Exit.

  SimplifyCleanupExit(Exit);
  SimplifyCleanupEntry(CGF, Entry);
}

static void EmitLazyCleanup(CodeGenFunction &CGF,
                            EHScopeStack::LazyCleanup *Fn,
                            bool ForEH) {
  if (ForEH) CGF.EHStack.pushTerminate();
  Fn->Emit(CGF, ForEH);
  if (ForEH) CGF.EHStack.popTerminate();
  assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?");
}

static void SplitAndEmitLazyCleanup(CodeGenFunction &CGF,
                                    EHScopeStack::LazyCleanup *Fn,
                                    bool ForEH,
                                    llvm::BasicBlock *Entry) {
  assert(Entry && "no entry block for cleanup");

  // Remove the switch and load from the end of the entry block.
  llvm::Instruction *Switch = &Entry->getInstList().back();
  Entry->getInstList().remove(Switch);
  assert(isa<llvm::SwitchInst>(Switch));
  llvm::Instruction *Load = &Entry->getInstList().back();
  Entry->getInstList().remove(Load);
  assert(isa<llvm::LoadInst>(Load));

  assert(Entry->getInstList().empty() &&
         "lazy cleanup block not empty after removing load/switch pair?");

  // Emit the actual cleanup at the end of the entry block.
  CGF.Builder.SetInsertPoint(Entry);
  EmitLazyCleanup(CGF, Fn, ForEH);

  // Put the load and switch at the end of the exit block.
  llvm::BasicBlock *Exit = CGF.Builder.GetInsertBlock();
  Exit->getInstList().push_back(Load);
  Exit->getInstList().push_back(Switch);

  // Clean up the edges if possible.
  SimplifyCleanupEdges(CGF, Entry, Exit);

  CGF.Builder.ClearInsertionPoint();
}

static void PopLazyCleanupBlock(CodeGenFunction &CGF) {
  assert(isa<EHLazyCleanupScope>(*CGF.EHStack.begin()) && "top not a cleanup!");
  EHLazyCleanupScope &Scope = cast<EHLazyCleanupScope>(*CGF.EHStack.begin());
  assert(Scope.getFixupDepth() <= CGF.EHStack.getNumBranchFixups());

  // Check whether we need an EH cleanup.  This is only true if we've
  // generated a lazy EH cleanup block.
  llvm::BasicBlock *EHEntry = Scope.getEHBlock();
  bool RequiresEHCleanup = (EHEntry != 0);

  // Check the three conditions which might require a normal cleanup:

  // - whether there are branch fix-ups through this cleanup
  unsigned FixupDepth = Scope.getFixupDepth();
  bool HasFixups = CGF.EHStack.getNumBranchFixups() != FixupDepth;

  // - whether control has already been threaded through this cleanup
  llvm::BasicBlock *NormalEntry = Scope.getNormalBlock();
  bool HasExistingBranches = (NormalEntry != 0);

  // - whether there's a fallthrough
  llvm::BasicBlock *FallthroughSource = CGF.Builder.GetInsertBlock();
  bool HasFallthrough = (FallthroughSource != 0);

  bool RequiresNormalCleanup = false;
  if (Scope.isNormalCleanup() &&
      (HasFixups || HasExistingBranches || HasFallthrough)) {
    RequiresNormalCleanup = true;
  }

  // If we don't need the cleanup at all, we're done.
  if (!RequiresNormalCleanup && !RequiresEHCleanup) {
    CGF.EHStack.popCleanup();
    assert(CGF.EHStack.getNumBranchFixups() == 0 ||
           CGF.EHStack.hasNormalCleanups());
    return;
  }

  // Copy the cleanup emission data out.  Note that SmallVector
  // guarantees maximal alignment for its buffer regardless of its
  // type parameter.
  llvm::SmallVector<char, 8*sizeof(void*)> CleanupBuffer;
  CleanupBuffer.reserve(Scope.getCleanupSize());
  memcpy(CleanupBuffer.data(),
         Scope.getCleanupBuffer(), Scope.getCleanupSize());
  CleanupBuffer.set_size(Scope.getCleanupSize());
  EHScopeStack::LazyCleanup *Fn =
    reinterpret_cast<EHScopeStack::LazyCleanup*>(CleanupBuffer.data());

  // We're done with the scope; pop it off so we can emit the cleanups.
  CGF.EHStack.popCleanup();

  if (RequiresNormalCleanup) {
    // If we have a fallthrough and no other need for the cleanup,
    // emit it directly.
    if (HasFallthrough && !HasFixups && !HasExistingBranches) {
      EmitLazyCleanup(CGF, Fn, /*ForEH*/ false);

    // Otherwise, the best approach is to thread everything through
    // the cleanup block and then try to clean up after ourselves.
    } else {
      // Force the entry block to exist.
      if (!HasExistingBranches) {
        NormalEntry = CGF.createBasicBlock("cleanup");
        CreateCleanupSwitch(CGF, NormalEntry);
      }

      CGF.EmitBlock(NormalEntry);

      // Thread the fallthrough edge through the (momentarily trivial)
      // cleanup.
      llvm::BasicBlock *FallthroughDestination = 0;
      if (HasFallthrough) {
        assert(isa<llvm::BranchInst>(FallthroughSource->getTerminator()));
        FallthroughDestination = CGF.createBasicBlock("cleanup.cont");

        BranchFixup Fix;
        Fix.Destination = FallthroughDestination;
        Fix.LatestBranch = FallthroughSource->getTerminator();
        Fix.LatestBranchIndex = 0;
        Fix.Origin = Fix.LatestBranch;

        // Restore fixup invariant.  EmitBlock added a branch to the
        // cleanup which we need to redirect to the destination.
        cast<llvm::BranchInst>(Fix.LatestBranch)
          ->setSuccessor(0, Fix.Destination);

        ThreadFixupThroughCleanup(CGF, Fix, NormalEntry, NormalEntry);
      }

      // Thread any "real" fixups we need to thread.
      for (unsigned I = FixupDepth, E = CGF.EHStack.getNumBranchFixups();
           I != E; ++I)
        if (CGF.EHStack.getBranchFixup(I).Destination)
          ThreadFixupThroughCleanup(CGF, CGF.EHStack.getBranchFixup(I),
                                    NormalEntry, NormalEntry);

      SplitAndEmitLazyCleanup(CGF, Fn, /*ForEH*/ false, NormalEntry);

      if (HasFallthrough)
        CGF.EmitBlock(FallthroughDestination);
    }
  }

  // Emit the EH cleanup if required.
  if (RequiresEHCleanup) {
    CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();
    CGF.EmitBlock(EHEntry);
    SplitAndEmitLazyCleanup(CGF, Fn, /*ForEH*/ true, EHEntry);
    CGF.Builder.restoreIP(SavedIP);
  }
}

/// Pops a cleanup block.  If the block includes a normal cleanup, the
/// current insertion point is threaded through the cleanup, as are
/// any branch fixups on the cleanup.
void CodeGenFunction::PopCleanupBlock() {
  assert(!EHStack.empty() && "cleanup stack is empty!");
  if (isa<EHLazyCleanupScope>(*EHStack.begin()))
    return PopLazyCleanupBlock(*this);

  assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!");
  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
  assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups());

  // Handle the EH cleanup if (1) there is one and (2) it's different
  // from the normal cleanup.
  if (Scope.isEHCleanup() &&
      Scope.getEHEntry() != Scope.getNormalEntry()) {
    llvm::BasicBlock *EHEntry = Scope.getEHEntry();
    llvm::BasicBlock *EHExit = Scope.getEHExit();

    if (EHEntry->use_empty()) {
      DestroyCleanup(*this, EHEntry, EHExit);
    } else {
      // TODO: this isn't really the ideal location to put this EH
      // cleanup, but lazy emission is a better solution than trying
      // to pick a better spot.
      CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
      EmitBlock(EHEntry);
      Builder.restoreIP(SavedIP);

      SimplifyCleanupEdges(*this, EHEntry, EHExit);
    }
  }

  // If we only have an EH cleanup, we don't really need to do much
  // here.  Branch fixups just naturally drop down to the enclosing
  // cleanup scope.
  if (!Scope.isNormalCleanup()) {
    EHStack.popCleanup();
    assert(EHStack.getNumBranchFixups() == 0 || EHStack.hasNormalCleanups());
    return;
  }

  // Check whether the scope has any fixups that need to be threaded.
  unsigned FixupDepth = Scope.getFixupDepth();
  bool HasFixups = EHStack.getNumBranchFixups() != FixupDepth;

  // Grab the entry and exit blocks.
  llvm::BasicBlock *Entry = Scope.getNormalEntry();
  llvm::BasicBlock *Exit = Scope.getNormalExit();

  // Check whether anything's been threaded through the cleanup already.
  assert((Exit->getTerminator() == 0) == Entry->use_empty() &&
         "cleanup entry/exit mismatch");
  bool HasExistingBranches = !Entry->use_empty();

  // Check whether we need to emit a "fallthrough" branch through the
  // cleanup for the current insertion point.
  llvm::BasicBlock *FallThrough = Builder.GetInsertBlock();
  if (FallThrough && FallThrough->getTerminator())
    FallThrough = 0;

  // If *nothing* is using the cleanup, kill it.
  if (!FallThrough && !HasFixups && !HasExistingBranches) {
    EHStack.popCleanup();
    DestroyCleanup(*this, Entry, Exit);
    return;
  }

  // Otherwise, add the block to the function.
  EmitBlock(Entry);

  if (FallThrough)
    Builder.SetInsertPoint(Exit);
  else
    Builder.ClearInsertionPoint();

  // Fast case: if we don't have to add any fixups, and either
  // we don't have a fallthrough or the cleanup wasn't previously
  // used, then the setup above is sufficient.
  if (!HasFixups) {
    if (!FallThrough) {
      assert(HasExistingBranches &&
             "no reason for cleanup but didn't kill before");
      EHStack.popCleanup();
      SimplifyCleanupEdges(*this, Entry, Exit);
      return;
    } else if (!HasExistingBranches) {
      assert(FallThrough && "no reason for cleanup but didn't kill before");
      // We can't simplify the exit edge in this case because we're
      // already inserting at the end of the exit block.
      EHStack.popCleanup();
      SimplifyCleanupEntry(*this, Entry);
      return;
    }
  }

  // Otherwise we're going to have to thread things through the cleanup.
  llvm::SmallVector<BranchFixup*, 8> Fixups;

  // Synthesize a fixup for the current insertion point.
  BranchFixup Cur;
  if (FallThrough) {
    Cur.Destination = createBasicBlock("cleanup.cont");
    Cur.LatestBranch = FallThrough->getTerminator();
    Cur.LatestBranchIndex = 0;
    Cur.Origin = Cur.LatestBranch;

    // Restore fixup invariant.  EmitBlock added a branch to the cleanup
    // which we need to redirect to the destination.
    cast<llvm::BranchInst>(Cur.LatestBranch)->setSuccessor(0, Cur.Destination);

    Fixups.push_back(&Cur);
  } else {
    Cur.Destination = 0;
  }

  // Collect any "real" fixups we need to thread.
  for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
        I != E; ++I)
    if (EHStack.getBranchFixup(I).Destination)
      Fixups.push_back(&EHStack.getBranchFixup(I));

  assert(!Fixups.empty() && "no fixups, invariants broken!");

  // If there's only a single fixup to thread through, do so with
  // unconditional branches.  This only happens if there's a single
  // branch and no fallthrough.
  if (Fixups.size() == 1 && !HasExistingBranches) {
    Fixups[0]->LatestBranch->setSuccessor(Fixups[0]->LatestBranchIndex, Entry);
    llvm::BranchInst *Br =
      llvm::BranchInst::Create(Fixups[0]->Destination, Exit);
    Fixups[0]->LatestBranch = Br;
    Fixups[0]->LatestBranchIndex = 0;

  // Otherwise, force a switch statement and thread everything through
  // the switch.
  } else {
    CreateCleanupSwitch(*this, Exit);
    for (unsigned I = 0, E = Fixups.size(); I != E; ++I)
      ThreadFixupThroughCleanup(*this, *Fixups[I], Entry, Exit);
  }

  // Emit the fallthrough destination block if necessary.
  if (Cur.Destination)
    EmitBlock(Cur.Destination);

  // We're finally done with the cleanup.
  EHStack.popCleanup();
}
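
/// EmitBranchThroughCleanup - Emit a branch from the current insertion point
/// to the given destination, threading it through all enclosing normal
/// cleanups (e.g. for a 'goto' or 'return' that leaves scopes with pending
/// cleanups).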
void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
  if (!HaveInsertPoint())
    return;

  // Create the branch.
  llvm::BranchInst *BI = Builder.CreateBr(Dest.Block);

  // If we're not in a cleanup scope, we don't need to worry about
  // fixups.
  if (!EHStack.hasNormalCleanups()) {
    Builder.ClearInsertionPoint();
    return;
  }

  // Initialize a fixup.
  BranchFixup Fixup;
  Fixup.Destination = Dest.Block;
  Fixup.Origin = BI;
  Fixup.LatestBranch = BI;
  Fixup.LatestBranchIndex = 0;

  // If we can't resolve the destination cleanup scope, just add this
  // to the current cleanup scope.
  if (!Dest.ScopeDepth.isValid()) {
    EHStack.addBranchFixup() = Fixup;
    Builder.ClearInsertionPoint();
    return;
  }

  for (EHScopeStack::iterator I = EHStack.begin(),
         E = EHStack.find(Dest.ScopeDepth); I != E; ++I) {
    if (isa<EHCleanupScope>(*I)) {
      EHCleanupScope &Scope = cast<EHCleanupScope>(*I);
      if (Scope.isNormalCleanup())
        ThreadFixupThroughCleanup(*this, Fixup, Scope.getNormalEntry(),
                                  Scope.getNormalExit());
    } else if (isa<EHLazyCleanupScope>(*I)) {
      EHLazyCleanupScope &Scope = cast<EHLazyCleanupScope>(*I);
      if (Scope.isNormalCleanup()) {
        llvm::BasicBlock *Block = Scope.getNormalBlock();
        if (!Block) {
          Block = createBasicBlock("cleanup");
          Scope.setNormalBlock(Block);
        }
        ThreadFixupThroughCleanup(*this, Fixup, Block, Block);
      }
    }
  }

  Builder.ClearInsertionPoint();
}
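
/// EmitBranchThroughEHCleanup - Like EmitBranchThroughCleanup, but threads
/// the branch through enclosing EH cleanups rather than normal ones.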
void CodeGenFunction::EmitBranchThroughEHCleanup(JumpDest Dest) {
  if (!HaveInsertPoint())
    return;

  // Create the branch.
  llvm::BranchInst *BI = Builder.CreateBr(Dest.Block);

  // If we're not in a cleanup scope, we don't need to worry about
  // fixups.
  if (!EHStack.hasEHCleanups()) {
    Builder.ClearInsertionPoint();
    return;
  }

  // Initialize a fixup.
  BranchFixup Fixup;
  Fixup.Destination = Dest.Block;
  Fixup.Origin = BI;
  Fixup.LatestBranch = BI;
  Fixup.LatestBranchIndex = 0;

  // We should never get invalid scope depths for these: invalid scope
  // depths only arise for as-yet-unemitted labels, and we can't do an
  // EH-unwind to one of those.
  assert(Dest.ScopeDepth.isValid() && "invalid scope depth on EH dest?");

  for (EHScopeStack::iterator I = EHStack.begin(),
         E = EHStack.find(Dest.ScopeDepth); I != E; ++I) {
    if (isa<EHCleanupScope>(*I)) {
      EHCleanupScope &Scope = cast<EHCleanupScope>(*I);
      if (Scope.isEHCleanup())
        ThreadFixupThroughCleanup(*this, Fixup, Scope.getEHEntry(),
                                  Scope.getEHExit());
    } else if (isa<EHLazyCleanupScope>(*I)) {
      EHLazyCleanupScope &Scope = cast<EHLazyCleanupScope>(*I);
      if (Scope.isEHCleanup()) {
        llvm::BasicBlock *Block = Scope.getEHBlock();
        if (!Block) {
          Block = createBasicBlock("eh.cleanup");
          Scope.setEHBlock(Block);
        }
        ThreadFixupThroughCleanup(*this, Fixup, Block, Block);
      }
    }
  }

  Builder.ClearInsertionPoint();
}