//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGDebugInfo.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "llvm/Support/CFG.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;

/// CodeGenFunction - Set up the per-function emission state.  Most members
/// start out null/cleared here and are initialized for real by
/// StartFunction(); only cached, function-independent values (the LLVM type
/// for 'int' and the target pointer width) are computed up front.
CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
  : BlockFunction(cgm, *this, Builder), CGM(cgm),
    Target(CGM.getContext().Target),
    Builder(cgm.getModule().getContext()),
    DebugInfo(0), SwitchInsn(0), CaseRangeBlock(0), InvokeDest(0),
    CXXThisDecl(0) {
  LLVMIntTy = ConvertType(getContext().IntTy);
  LLVMPointerWidth = Target.getPointerWidth(0);
}

/// getContext - Return the ASTContext of the module we are emitting into.
ASTContext &CodeGenFunction::getContext() const {
  return CGM.getContext();
}


/// getBasicBlockForLabel - Return the LLVM block for the given label
/// statement, creating (but not inserting) it on first request so that
/// forward gotos can reference labels that have not been emitted yet.
llvm::BasicBlock *CodeGenFunction::getBasicBlockForLabel(const LabelStmt *S) {
  llvm::BasicBlock *&BB = LabelMap[S];
  if (BB) return BB;

  // Create, but don't insert, the new block.
  return BB = createBasicBlock(S->getName());
}

/// GetAddrOfLocalVar - Return the address of a local variable previously
/// registered in LocalDeclMap; it is an error to ask for a decl that has no
/// emitted storage.
llvm::Value *CodeGenFunction::GetAddrOfLocalVar(const VarDecl *VD) {
  llvm::Value *Res = LocalDeclMap[VD];
  assert(Res && "Invalid argument to GetAddrOfLocalVar(), no decl!");
  return Res;
}

/// GetAddrOfStaticLocalVar - Like GetAddrOfLocalVar, but for function-local
/// statics, whose address is known to be an llvm::Constant (a global).
llvm::Constant *
CodeGenFunction::GetAddrOfStaticLocalVar(const VarDecl *BVD) {
  return cast<llvm::Constant>(GetAddrOfLocalVar(BVD));
}

/// ConvertTypeForMem - Convert a clang type to the LLVM type used for its
/// in-memory representation (delegates to CodeGenTypes).
const llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

/// ConvertType - Convert a clang type to its LLVM value type (delegates to
/// CodeGenTypes).
const llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

/// hasAggregateLLVMType - Return true if the type is treated as an aggregate
/// for code generation purposes (i.e. its values are not held in LLVM
/// scalar registers).  Implemented by excluding every known scalar-like
/// category.
bool CodeGenFunction::hasAggregateLLVMType(QualType T) {
  // FIXME: Use positive checks instead of negative ones to be more robust in
  // the face of extension.
  return !T->hasPointerRepresentation() && !T->isRealType() &&
    !T->isVoidType() && !T->isVectorType() && !T->isFunctionType() &&
    !T->isBlockPointerType();
}

/// EmitReturnBlock - Emit the unified return block, trying to fold it into
/// its predecessor (or delete it entirely) when the CFG makes that possible,
/// so simple functions don't get a gratuitous extra block.
void CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point, reuse it if it is empty or there are no
    // explicit jumps to the return block.
    if (CurBB->empty() || ReturnBlock->use_empty()) {
      // Redirect any references to the (never-inserted) return block into
      // the current block and discard it.
      ReturnBlock->replaceAllUsesWith(CurBB);
      delete ReturnBlock;
    } else
      EmitBlock(ReturnBlock);
    return;
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock->hasOneUse()) {
    llvm::BranchInst *BI =
      dyn_cast<llvm::BranchInst>(*ReturnBlock->use_begin());
    if (BI && BI->isUnconditional() && BI->getSuccessor(0) == ReturnBlock) {
      // Reset insertion point and delete the branch.
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock;
      return;
    }
  }

  // FIXME: We are at an unreachable point, there is no reason to emit the block
  // unless it has uses. However, we still need a place to put the debug
  // region.end for now.

  EmitBlock(ReturnBlock);
}

/// FinishFunction - Complete IR generation of the current function: flush
/// pending indirect-goto switches, emit the return block and function
/// epilog, close the debug region, and remove the alloca insertion marker.
void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  // Finish emission of indirect switches.
  EmitIndirectSwitches();

  // All scoped state pushed during the body must have been popped by now.
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");
  assert(BlockScopes.empty() &&
         "did not remove all blocks from block scope map!");
  assert(CleanupEntries.empty() &&
         "mismatched push/pop in cleanup stack!");

  // Emit function epilog (to return).
  EmitReturnBlock();

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo()) {
    DI->setLocation(EndLoc);
    DI->EmitRegionEnd(CurFn, Builder);
  }

  EmitFunctionEpilog(*CurFnInfo, ReturnValue);

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = 0;
  Ptr->eraseFromParent();
}

/// StartFunction - Begin IR generation for a function body: create the entry
/// block and alloca insertion marker, the (not-yet-inserted) return block
/// and return-value slot, the debug descriptor, and the argument prolog.
/// Must be paired with FinishFunction.
void CodeGenFunction::StartFunction(const Decl *D, QualType RetTy,
                                    llvm::Function *Fn,
                                    const FunctionArgList &Args,
                                    SourceLocation StartLoc) {
  DidCallStackSave = false;
  CurCodeDecl = CurFuncDecl = D;
  FnRetTy = RetTy;
  CurFn = Fn;
  assert(CurFn->isDeclaration() && "Function already has body?");

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entryblock
  // later.  Don't create this with the builder, because we don't want it
  // folded.  (A no-op bitcast of undef serves as the marker.)
  llvm::Value *Undef = VMContext.getUndef(llvm::Type::Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, llvm::Type::Int32Ty, "",
                                         EntryBB);
  if (Builder.isNamePreserving())
    AllocaInsertPt->setName("allocapt");

  // Return block is created now but only inserted by EmitReturnBlock (or
  // folded away there).  Non-void functions also get a "retval" alloca.
  ReturnBlock = createBasicBlock("return");
  ReturnValue = 0;
  if (!RetTy->isVoidType())
    ReturnValue = CreateTempAlloca(ConvertType(RetTy), "retval");

  Builder.SetInsertPoint(EntryBB);

  // Emit subprogram debug descriptor.
  // FIXME: The cast here is a huge hack.
  if (CGDebugInfo *DI = getDebugInfo()) {
    DI->setLocation(StartLoc);
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
      DI->EmitFunctionStart(CGM.getMangledName(FD), RetTy, CurFn, Builder);
    } else {
      // Just use LLVM function name.
      DI->EmitFunctionStart(Fn->getName().c_str(),
                            RetTy, CurFn, Builder);
    }
  }

  // FIXME: Leaked.
  CurFnInfo = &CGM.getTypes().getFunctionInfo(FnRetTy, Args);
  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size.
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i) {
    QualType Ty = i->second;

    if (Ty->isVariablyModifiedType())
      EmitVLASize(Ty);
  }
}

/// GenerateCode - Emit the body of the given function declaration into Fn.
/// Builds the argument list (including an implicit 'this' for instance
/// methods) and drives StartFunction / EmitStmt / FinishFunction.
void CodeGenFunction::GenerateCode(const FunctionDecl *FD,
                                   llvm::Function *Fn) {
  // Check if we should generate debug info for this function.
  if (CGM.getDebugInfo() && !FD->hasAttr<NodebugAttr>())
    DebugInfo = CGM.getDebugInfo();

  FunctionArgList Args;

  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
    if (MD->isInstance()) {
      // Create the implicit 'this' decl.
      // FIXME: I'm not entirely sure I like using a fake decl just for code
      // generation. Maybe we can come up with a better way?
      CXXThisDecl = ImplicitParamDecl::Create(getContext(), 0, SourceLocation(),
                                              &getContext().Idents.get("this"),
                                              MD->getThisType(getContext()));
      Args.push_back(std::make_pair(CXXThisDecl, CXXThisDecl->getType()));
    }
  }

  if (FD->getNumParams()) {
    const FunctionProtoType* FProto = FD->getType()->getAsFunctionProtoType();
    assert(FProto && "Function def must have prototype!");

    for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i)
      Args.push_back(std::make_pair(FD->getParamDecl(i),
                                    FProto->getArgType(i)));
  }

  // FIXME: Support CXXTryStmt here, too.
  if (const CompoundStmt *S = FD->getCompoundBody()) {
    StartFunction(FD, FD->getResultType(), Fn, Args, S->getLBracLoc());
    EmitStmt(S);
    FinishFunction(S->getRBracLoc());
  }

  // Destroy the 'this' declaration.
  if (CXXThisDecl)
    CXXThisDecl->Destroy(getContext());
}

/// ContainsLabel - Return true if the statement contains a label in it.  If
/// this statement is not executed normally, it not containing a label means
/// that we can just remove the code.
bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
  // Null statement, not a label!
  if (S == 0) return false;

  // If this is a label, we have to emit the code, consider something like:
  // if (0) { ...  foo: bar(); }  goto foo;
  if (isa<LabelStmt>(S))
    return true;

  // If this is a case/default statement, and we haven't seen a switch, we have
  // to emit the code.
  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
    return true;

  // If this is a switch statement, we want to ignore cases below it.
  if (isa<SwitchStmt>(S))
    IgnoreCaseStmts = true;

  // Scan subexpressions for verboten labels.
  for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
       I != E; ++I)
    if (ContainsLabel(*I, IgnoreCaseStmts))
      return true;

  return false;
}


/// ConstantFoldsToSimpleInteger - If the specified expression does not fold to
/// a constant, or if it does but contains a label, return 0.  If it constant
/// folds to 'true' and does not contain a label, return 1, if it constant folds
/// to 'false' and does not contain a label, return -1.
int CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond) {
  // FIXME: Rename and handle conversion of other evaluatable things
  // to bool.
  Expr::EvalResult Result;
  if (!Cond->Evaluate(Result, getContext()) || !Result.Val.isInt() ||
      Result.HasSideEffects)
    return 0;  // Not foldable, not integer or not fully evaluatable.

  if (CodeGenFunction::ContainsLabel(Cond))
    return 0;  // Contains a label.

  return Result.Val.getInt().getBoolValue() ? 1 : -1;
}


/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
/// statement) to the specified blocks.  Based on the condition, this might try
/// to simplify the codegen of the conditional based on the branch.
///
void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
                                           llvm::BasicBlock *TrueBlock,
                                           llvm::BasicBlock *FalseBlock) {
  // Parens add nothing; branch on the inner expression.
  if (const ParenExpr *PE = dyn_cast<ParenExpr>(Cond))
    return EmitBranchOnBoolExpr(PE->getSubExpr(), TrueBlock, FalseBlock);

  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
    // Handle X && Y in a condition.
    if (CondBOp->getOpcode() == BinaryOperator::LAnd) {
      // If we have "1 && X", simplify the code.  "0 && X" would have constant
      // folded if the case was simple enough.
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS()) == 1) {
        // br(1 && X) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      }

      // If we have "X && 1", simplify the code to use an uncond branch.
      // "X && 0" would have been constant folded to 0.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS()) == 1) {
        // br(X && 1) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
      }

      // Emit the LHS as a conditional.  If the LHS conditional is false, we
      // want to jump to the FalseBlock.  Only if it is true do we evaluate
      // the RHS (short-circuit semantics).
      llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
      EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock);
      EmitBlock(LHSTrue);

      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      return;
    } else if (CondBOp->getOpcode() == BinaryOperator::LOr) {
      // If we have "0 || X", simplify the code.  "1 || X" would have constant
      // folded if the case was simple enough.
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS()) == -1) {
        // br(0 || X) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      }

      // If we have "X || 0", simplify the code to use an uncond branch.
      // "X || 1" would have been constant folded to 1.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS()) == -1) {
        // br(X || 0) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
      }

      // Emit the LHS as a conditional.  If the LHS conditional is true, we
      // want to jump to the TrueBlock.  Only if it is false do we evaluate
      // the RHS (short-circuit semantics).
      llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
      EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse);
      EmitBlock(LHSFalse);

      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      return;
    }
  }

  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
    // br(!x, t, f) -> br(x, f, t)
    if (CondUOp->getOpcode() == UnaryOperator::LNot)
      return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock);
  }

  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
    // Handle ?: operator.

    // Just ignore GNU ?: extension.
    if (CondOp->getLHS()) {
      // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
      llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
      llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
      EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock);
      EmitBlock(LHSBlock);
      EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock);
      EmitBlock(RHSBlock);
      EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock);
      return;
    }
  }

  // Emit the code with the fully general case.
  llvm::Value *CondV = EvaluateExprAsBool(Cond);
  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock);
}

/// getCGRecordLayout - Return record layout info.  Ty must be a RecordType.
const CGRecordLayout *CodeGenFunction::getCGRecordLayout(CodeGenTypes &CGT,
                                                         QualType Ty) {
  const RecordType *RTy = Ty->getAsRecordType();
  assert (RTy && "Unexpected type. RecordType expected here.");

  return CGT.getCGRecordLayout(RTy->getDecl());
}

/// ErrorUnsupported - Print out an error that codegen doesn't support the
/// specified stmt yet.
void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type,
                                       bool OmitOnError) {
  // Delegates to the module-level diagnostic helper.
  CGM.ErrorUnsupported(S, Type, OmitOnError);
}

/// GetIDForAddrOfLabel - Return a stable small-integer ID for a label whose
/// address is taken (used as the case value in the indirect-goto switch).
unsigned CodeGenFunction::GetIDForAddrOfLabel(const LabelStmt *L) {
  // Use LabelIDs.size() as the new ID if one hasn't been assigned.
  return LabelIDs.insert(std::make_pair(L, LabelIDs.size())).first->second;
}

/// EmitMemSetToZero - Emit a memset of zero over an object of type Ty
/// located at DestPtr.
void CodeGenFunction::EmitMemSetToZero(llvm::Value *DestPtr, QualType Ty) {
  // memset wants an i8*, so bitcast the destination if needed.
  const llvm::Type *BP = VMContext.getPointerTypeUnqual(llvm::Type::Int8Ty);
  if (DestPtr->getType() != BP)
    DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");

  // Get size and alignment info for this aggregate.
  std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty);

  // Don't bother emitting a zero-byte memset.
  if (TypeInfo.first == 0)
    return;

  // FIXME: Handle variable sized types.
  const llvm::Type *IntPtr = VMContext.getIntegerType(LLVMPointerWidth);

  Builder.CreateCall4(CGM.getMemSetFn(), DestPtr,
                      getLLVMContext().getNullValue(llvm::Type::Int8Ty),
                      // TypeInfo.first describes size in bits.
                      VMContext.getConstantInt(IntPtr, TypeInfo.first/8),
                      // TypeInfo.second is alignment, also in bits.
                      VMContext.getConstantInt(llvm::Type::Int32Ty,
                                               TypeInfo.second/8));
}

/// EmitIndirectSwitches - Fix up the switch instructions emitted for
/// indirect gotos: give each one a default destination and one case per
/// address-taken label (keyed by the IDs from GetIDForAddrOfLabel).
void CodeGenFunction::EmitIndirectSwitches() {
  llvm::BasicBlock *Default;

  if (IndirectSwitches.empty())
    return;

  if (!LabelIDs.empty()) {
    // Any label works as the default; an indirect goto to a non-label
    // address is undefined anyway.
    Default = getBasicBlockForLabel(LabelIDs.begin()->first);
  } else {
    // No possible targets for indirect goto, just emit an infinite
    // loop.
    Default = createBasicBlock("indirectgoto.loop", CurFn);
    llvm::BranchInst::Create(Default, Default);
  }

  for (std::vector<llvm::SwitchInst*>::iterator i = IndirectSwitches.begin(),
         e = IndirectSwitches.end(); i != e; ++i) {
    llvm::SwitchInst *I = *i;

    I->setSuccessor(0, Default);
    for (std::map<const LabelStmt*,unsigned>::iterator LI = LabelIDs.begin(),
           LE = LabelIDs.end(); LI != LE; ++LI) {
      I->addCase(VMContext.getConstantInt(llvm::Type::Int32Ty,
                                          LI->second),
                 getBasicBlockForLabel(LI->first));
    }
  }
}

/// GetVLASize - Return the previously-computed runtime size (in bytes) for
/// the given variable array type; EmitVLASize must have run for it already.
llvm::Value *CodeGenFunction::GetVLASize(const VariableArrayType *VAT) {
  llvm::Value *&SizeEntry = VLASizeMap[VAT];

  assert(SizeEntry && "Did not emit size for type");
  return SizeEntry;
}

/// EmitVLASize - Compute and cache the runtime size of a variably modified
/// type.  Returns the size value for a variable array type, or 0 after
/// recursing into the element/pointee type for other variably modified
/// types.
llvm::Value *CodeGenFunction::EmitVLASize(QualType Ty) {
  assert(Ty->isVariablyModifiedType() &&
         "Must pass variably modified type to EmitVLASizes!");

  EnsureInsertPoint();

  if (const VariableArrayType *VAT = getContext().getAsVariableArrayType(Ty)) {
    llvm::Value *&SizeEntry = VLASizeMap[VAT];

    // Only compute each VLA's size once; later references reuse the cache.
    if (!SizeEntry) {
      // Get the element size;
      llvm::Value *ElemSize;

      QualType ElemTy = VAT->getElementType();

      const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());

      if (ElemTy->isVariableArrayType())
        // Multidimensional VLA: element size is itself computed at runtime.
        ElemSize = EmitVLASize(ElemTy);
      else {
        // getTypeSize() is in bits; sizes here are in bytes.
        ElemSize = VMContext.getConstantInt(SizeTy,
                                            getContext().getTypeSize(ElemTy) / 8);
      }

      llvm::Value *NumElements = EmitScalarExpr(VAT->getSizeExpr());
      NumElements = Builder.CreateIntCast(NumElements, SizeTy, false, "tmp");

      SizeEntry = Builder.CreateMul(ElemSize, NumElements);
    }

    return SizeEntry;
  } else if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
    // Constant-size array of a variably modified element type.
    EmitVLASize(AT->getElementType());
  } else if (const PointerType *PT = Ty->getAsPointerType())
    // Pointer to a variably modified type.
    EmitVLASize(PT->getPointeeType());
  else {
    assert(0 && "unknown VM type!");
  }

  return 0;
}

/// EmitVAListRef - Emit a reference to a va_list expression; array-typed
/// va_lists decay to a pointer (scalar), others are lvalues.
llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) {
  if (CGM.getContext().getBuiltinVaListType()->isArrayType()) {
    return EmitScalarExpr(E);
  }
  return EmitLValue(E).getAddress();
}

/// PushCleanupBlock - Push a new cleanup scope whose cleanup code lives in
/// CleanupBlock.  Paired with PopCleanupBlock / EmitCleanupBlock.
void CodeGenFunction::PushCleanupBlock(llvm::BasicBlock *CleanupBlock)
{
  CleanupEntries.push_back(CleanupEntry(CleanupBlock));
}

/// EmitCleanupBlocks - Emit (and pop) every cleanup scope pushed since the
/// cleanup stack had OldCleanupStackSize entries.
void CodeGenFunction::EmitCleanupBlocks(size_t OldCleanupStackSize)
{
  assert(CleanupEntries.size() >= OldCleanupStackSize &&
         "Cleanup stack mismatch!");

  while (CleanupEntries.size() > OldCleanupStackSize)
    EmitCleanupBlock();
}

/// PopCleanupBlock - Pop the innermost cleanup scope and resolve its branch
/// fixups (branches that were emitted inside the scope but target blocks
/// outside it).  If any fixups remain, a "cleanup.switch" block is built
/// that dispatches on a stored destination code after the cleanup runs.
CodeGenFunction::CleanupBlockInfo CodeGenFunction::PopCleanupBlock()
{
  CleanupEntry &CE = CleanupEntries.back();

  llvm::BasicBlock *CleanupBlock = CE.CleanupBlock;

  std::vector<llvm::BasicBlock *> Blocks;
  std::swap(Blocks, CE.Blocks);

  std::vector<llvm::BranchInst *> BranchFixups;
  std::swap(BranchFixups, CE.BranchFixups);

  CleanupEntries.pop_back();

  // Check if any branch fixups pointed to the scope we just popped. If so,
  // we can remove them.
  for (size_t i = 0, e = BranchFixups.size(); i != e; ++i) {
    llvm::BasicBlock *Dest = BranchFixups[i]->getSuccessor(0);
    BlockScopeMap::iterator I = BlockScopes.find(Dest);

    // Destination is outside all cleanup scopes; the fixup stays.
    if (I == BlockScopes.end())
      continue;

    assert(I->second <= CleanupEntries.size() && "Invalid branch fixup!");

    if (I->second == CleanupEntries.size()) {
      // We don't need to do this branch fixup.  Swap-with-last erase; back
      // up the index so the swapped-in element is examined too.
      BranchFixups[i] = BranchFixups.back();
      BranchFixups.pop_back();
      i--;
      e--;
      continue;
    }
  }

  llvm::BasicBlock *SwitchBlock = 0;
  llvm::BasicBlock *EndBlock = 0;
  if (!BranchFixups.empty()) {
    SwitchBlock = createBasicBlock("cleanup.switch");
    EndBlock = createBasicBlock("cleanup.end");

    llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

    Builder.SetInsertPoint(SwitchBlock);

    // "cleanup.dst" holds an integer code saying where to go after the
    // cleanup code has run; 0 (the default) means fall through to EndBlock.
    llvm::Value *DestCodePtr = CreateTempAlloca(llvm::Type::Int32Ty,
                                                "cleanup.dst");
    llvm::Value *DestCode = Builder.CreateLoad(DestCodePtr, "tmp");

    // Create a switch instruction to determine where to jump next.
    llvm::SwitchInst *SI = Builder.CreateSwitch(DestCode, EndBlock,
                                                BranchFixups.size());

    // Restore the current basic block (if any)
    if (CurBB) {
      Builder.SetInsertPoint(CurBB);

      // If we had a current basic block, we also need to emit an instruction
      // to initialize the cleanup destination.
      Builder.CreateStore(getLLVMContext().getNullValue(llvm::Type::Int32Ty),
                          DestCodePtr);
    } else
      Builder.ClearInsertionPoint();

    for (size_t i = 0, e = BranchFixups.size(); i != e; ++i) {
      llvm::BranchInst *BI = BranchFixups[i];
      llvm::BasicBlock *Dest = BI->getSuccessor(0);

      // Fixup the branch instruction to point to the cleanup block.
      BI->setSuccessor(0, CleanupBlock);

      if (CleanupEntries.empty()) {
        // This was the outermost cleanup scope: the switch can jump straight
        // to the real destination.
        llvm::ConstantInt *ID;

        // Check if we already have a destination for this block.
        if (Dest == SI->getDefaultDest())
          ID = VMContext.getConstantInt(llvm::Type::Int32Ty, 0);
        else {
          ID = SI->findCaseDest(Dest);
          if (!ID) {
            // No code found, get a new unique one by using the number of
            // switch successors.
            ID = VMContext.getConstantInt(llvm::Type::Int32Ty,
                                          SI->getNumSuccessors());
            SI->addCase(ID, Dest);
          }
        }

        // Store the jump destination before the branch instruction.
        new llvm::StoreInst(ID, DestCodePtr, BI);
      } else {
        // We need to jump through another cleanup block. Create a pad block
        // with a branch instruction that jumps to the final destination and
        // add it as a branch fixup to the current cleanup scope.

        // Create the pad block.
        llvm::BasicBlock *CleanupPad = createBasicBlock("cleanup.pad", CurFn);

        // Create a unique case ID.
        llvm::ConstantInt *ID = VMContext.getConstantInt(llvm::Type::Int32Ty,
                                                         SI->getNumSuccessors());

        // Store the jump destination before the branch instruction.
        new llvm::StoreInst(ID, DestCodePtr, BI);

        // Add it as the destination.
        SI->addCase(ID, CleanupPad);

        // Create the branch to the final destination.
        llvm::BranchInst *BI = llvm::BranchInst::Create(Dest);
        CleanupPad->getInstList().push_back(BI);

        // And add it as a branch fixup.
        CleanupEntries.back().BranchFixups.push_back(BI);
      }
    }
  }

  // Remove all blocks from the block scope map.
  for (size_t i = 0, e = Blocks.size(); i != e; ++i) {
    assert(BlockScopes.count(Blocks[i]) &&
           "Did not find block in scope map!");

    BlockScopes.erase(Blocks[i]);
  }

  return CleanupBlockInfo(CleanupBlock, SwitchBlock, EndBlock);
}

/// EmitCleanupBlock - Pop the innermost cleanup scope and emit its blocks,
/// splicing the cleanup code directly into the current block when it has no
/// other predecessors.
void CodeGenFunction::EmitCleanupBlock()
{
  CleanupBlockInfo Info = PopCleanupBlock();

  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
  if (CurBB && !CurBB->getTerminator() &&
      Info.CleanupBlock->getNumUses() == 0) {
    // Nothing branches to the cleanup block; inline its instructions here
    // and discard the (never-inserted) block.
    CurBB->getInstList().splice(CurBB->end(), Info.CleanupBlock->getInstList());
    delete Info.CleanupBlock;
  } else
    EmitBlock(Info.CleanupBlock);

  if (Info.SwitchBlock)
    EmitBlock(Info.SwitchBlock);
  if (Info.EndBlock)
    EmitBlock(Info.EndBlock);
}

/// AddBranchFixup - Record a branch whose destination lies outside the
/// current cleanup scope; PopCleanupBlock will reroute it.
void CodeGenFunction::AddBranchFixup(llvm::BranchInst *BI)
{
  assert(!CleanupEntries.empty() &&
         "Trying to add branch fixup without cleanup block!");

  // FIXME: We could be more clever here and check if there's already a branch
  // fixup for this destination and recycle it.
  CleanupEntries.back().BranchFixups.push_back(BI);
}

/// EmitBranchThroughCleanup - Emit a branch to Dest, registering it as a
/// fixup if the destination is outside the current cleanup scope (so the
/// intervening cleanup code will run first).
void CodeGenFunction::EmitBranchThroughCleanup(llvm::BasicBlock *Dest)
{
  if (!HaveInsertPoint())
    return;

  llvm::BranchInst* BI = Builder.CreateBr(Dest);

  Builder.ClearInsertionPoint();

  // The stack is empty, no need to do any cleanup.
  if (CleanupEntries.empty())
    return;

  if (!Dest->getParent()) {
    // We are trying to branch to a block that hasn't been inserted yet.
    AddBranchFixup(BI);
    return;
  }

  BlockScopeMap::iterator I = BlockScopes.find(Dest);
  if (I == BlockScopes.end()) {
    // We are trying to jump to a block that is outside of any cleanup scope.
    AddBranchFixup(BI);
    return;
  }

  assert(I->second < CleanupEntries.size() &&
         "Trying to branch into cleanup region");

  if (I->second == CleanupEntries.size() - 1) {
    // We have a branch to a block in the same scope.
    return;
  }

  AddBranchFixup(BI);
}