//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGDebugInfo.h"
#include "CGException.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
  : BlockFunction(cgm, *this, Builder), CGM(cgm),
    Target(CGM.getContext().Target),
    Builder(cgm.getModule().getContext()),
    ExceptionSlot(0), DebugInfo(0), IndirectBranch(0),
    SwitchInsn(0), CaseRangeBlock(0), InvokeDest(0),
    DidCallStackSave(false), UnreachableBlock(0),
    CXXThisDecl(0), CXXThisValue(0), CXXVTTDecl(0), CXXVTTValue(0),
    ConditionalBranchLevel(0), TerminateLandingPad(0), TerminateHandler(0),
    TrapBB(0), ThrowLengthErrorBB(0) {

  // Get some frequently used types.
  LLVMPointerWidth = Target.getPointerWidth(0);
  llvm::LLVMContext &LLVMContext = CGM.getLLVMContext();
  IntPtrTy = llvm::IntegerType::get(LLVMContext, LLVMPointerWidth);
  Int32Ty = llvm::Type::getInt32Ty(LLVMContext);
  Int64Ty = llvm::Type::getInt64Ty(LLVMContext);

  Exceptions = getContext().getLangOptions().Exceptions;
  CatchUndefined = getContext().getLangOptions().CatchUndefined;
  CGM.getMangleContext().startNewFunction();
}

ASTContext &CodeGenFunction::getContext() const {
  return CGM.getContext();
}

llvm::Value *CodeGenFunction::GetAddrOfLocalVar(const VarDecl *VD) {
  llvm::Value *Res = LocalDeclMap[VD];
  assert(Res && "Invalid argument to GetAddrOfLocalVar(), no decl!");
  return Res;
}

llvm::Constant *
CodeGenFunction::GetAddrOfStaticLocalVar(const VarDecl *BVD) {
  return cast<llvm::Constant>(GetAddrOfLocalVar(BVD));
}

const llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

const llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

bool CodeGenFunction::hasAggregateLLVMType(QualType T) {
  return T->isRecordType() || T->isArrayType() || T->isAnyComplexType() ||
         T->isMemberFunctionPointerType();
}

void CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point, reuse it if it is empty or there are no
    // explicit jumps to the return block.
    if (CurBB->empty() || ReturnBlock.Block->use_empty()) {
      ReturnBlock.Block->replaceAllUsesWith(CurBB);
      delete ReturnBlock.Block;
    } else
      EmitBlock(ReturnBlock.Block);
    return;
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock.Block->hasOneUse()) {
    llvm::BranchInst *BI =
      dyn_cast<llvm::BranchInst>(*ReturnBlock.Block->use_begin());
    if (BI && BI->isUnconditional() &&
        BI->getSuccessor(0) == ReturnBlock.Block) {
      // Reset insertion point and delete the branch.
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.Block;
      return;
    }
  }

  // FIXME: We are at an unreachable point; there is no reason to emit the
  // block unless it has uses. However, we still need a place to put the debug
  // region.end for now.

  EmitBlock(ReturnBlock.Block);
}

static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  if (!BB) return;
  if (!BB->use_empty())
    return CGF.CurFn->getBasicBlockList().push_back(BB);
  delete BB;
}

void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");

  // Emit function epilog (to return).
  EmitReturnBlock();

  EmitFunctionInstrumentation("__cyg_profile_func_exit");

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo()) {
    DI->setLocation(EndLoc);
    DI->EmitRegionEnd(CurFn, Builder);
  }

  EmitFunctionEpilog(*CurFnInfo);
  EmitEndEHSpec(CurCodeDecl);

  assert(EHStack.empty() &&
         "did not remove all scopes from cleanup stack!");

  // If someone did an indirect goto, emit the indirect goto block at the end
  // of the function.
  if (IndirectBranch) {
    EmitBlock(IndirectBranch->getParent());
    Builder.ClearInsertionPoint();
  }

  // If someone called operator new[] and needs a throw_length_error block,
  // emit it at the end of the function.
  if (ThrowLengthErrorBB) {
    EmitBlock(ThrowLengthErrorBB);
    Builder.ClearInsertionPoint();
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = 0;
  Ptr->eraseFromParent();

  // If someone took the address of a label but never did an indirect goto, we
  // made a zero-entry PHI node, which is illegal; zap it now.
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();
}

/// getThrowLengthErrorBB - Create a basic block that will call
/// std::__throw_length_error to throw a std::length_error exception.
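///
/// The emitted block is roughly equivalent to the IR for
///   std::__throw_length_error("length_error");
/// followed by an 'unreachable' terminator (illustrative sketch; the exact
/// value names are chosen by the builder).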
llvm::BasicBlock *CodeGenFunction::getThrowLengthErrorBB() {
  if (ThrowLengthErrorBB) return ThrowLengthErrorBB;

  llvm::IRBuilder<>::InsertPoint SavedIP = Builder.saveIP();

  ThrowLengthErrorBB = createBasicBlock("throw_length_error");
  Builder.SetInsertPoint(ThrowLengthErrorBB);

  // Call to void std::__throw_length_error("length_error");
  const llvm::Type *ResultType = Builder.getVoidTy();
  const llvm::Type *PtrToInt8Ty = Builder.getInt8PtrTy();
  std::vector<const llvm::Type*> ArgTys(1, PtrToInt8Ty);
  llvm::Constant *Fn =
    CGM.CreateRuntimeFunction(llvm::FunctionType::get(ResultType, ArgTys,
                                                      false),
                              "_ZSt20__throw_length_errorPKc");

  llvm::Value *C = CGM.GetAddrOfConstantCString("length_error");
  C = Builder.CreateStructGEP(C, 0, "arraydecay");
  llvm::CallInst *TheCall = Builder.CreateCall(Fn, C);
  TheCall->setDoesNotReturn();

  Builder.CreateUnreachable();

  Builder.restoreIP(SavedIP);
  return ThrowLengthErrorBB;
}

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls.
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions)
    return false;
  if (CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}

/// EmitFunctionInstrumentation - Emit LLVM code to call the specified
/// instrumentation function with the current function and the call site, if
/// function instrumentation is enabled.
void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
  if (!ShouldInstrumentFunction())
    return;

  const llvm::PointerType *PointerTy;
  const llvm::FunctionType *FunctionTy;
  std::vector<const llvm::Type*> ProfileFuncArgs;

  // void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site);
  PointerTy = llvm::Type::getInt8PtrTy(VMContext);
  ProfileFuncArgs.push_back(PointerTy);
  ProfileFuncArgs.push_back(PointerTy);
  FunctionTy = llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
                                       ProfileFuncArgs, false);

  llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn);
  llvm::CallInst *CallSite = Builder.CreateCall(
    CGM.getIntrinsic(llvm::Intrinsic::returnaddress, 0, 0),
    llvm::ConstantInt::get(Int32Ty, 0),
    "callsite");

  Builder.CreateCall2(F,
                      llvm::ConstantExpr::getBitCast(CurFn, PointerTy),
                      CallSite);
}

void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
                                    llvm::Function *Fn,
                                    const FunctionArgList &Args,
                                    SourceLocation StartLoc) {
  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = CurFuncDecl = D;
  FnRetTy = RetTy;
  CurFn = Fn;
  assert(CurFn->isDeclaration() && "Function already has body?");

  // Pass inline keyword to optimizer if it appears explicitly on any
  // declaration.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
    for (FunctionDecl::redecl_iterator RI = FD->redecls_begin(),
           RE = FD->redecls_end(); RI != RE; ++RI)
      if (RI->isInlineSpecified()) {
        Fn->addFnAttr(llvm::Attribute::InlineHint);
        break;
      }

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entry block
  // later.  Don't create this with the builder, because we don't want it
  // folded.
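  // The marker is just a dead bitcast of an undef value; it serves as a
  // stable insertion point for allocas and is erased again in FinishFunction.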
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "", EntryBB);
  if (Builder.isNamePreserving())
    AllocaInsertPt->setName("allocapt");

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  QualType FnType = getContext().getFunctionType(RetTy, 0, 0, false, 0,
                                                 false, false, 0, 0,
                                                 /*FIXME?*/
                                                 FunctionType::ExtInfo());

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    DI->setLocation(StartLoc);
    DI->EmitFunctionStart(GD, FnType, CurFn, Builder);
  }

  EmitFunctionInstrumentation("__cyg_profile_func_enter");

  // FIXME: Leaked.
  // CC info is ignored, hopefully?
  CurFnInfo = &CGM.getTypes().getFunctionInfo(FnRetTy, Args,
                                              FunctionType::ExtInfo());

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = 0;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
             hasAggregateLLVMType(CurFnInfo->getReturnType())) {
    // Indirect aggregate return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
    ReturnValue = CurFn->arg_begin();
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");
  }

  EmitStartEHSpec(CurCodeDecl);
  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (CXXThisDecl)
    CXXThisValue = Builder.CreateLoad(LocalDeclMap[CXXThisDecl], "this");
  if (CXXVTTDecl)
    CXXVTTValue = Builder.CreateLoad(LocalDeclMap[CXXVTTDecl], "vtt");

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size.
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i) {
    QualType Ty = i->second;

    if (Ty->isVariablyModifiedType())
      EmitVLASize(Ty);
  }
}

void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args) {
  const FunctionDecl *FD = cast<FunctionDecl>(CurGD.getDecl());
  assert(FD->getBody());
  EmitStmt(FD->getBody());
}

void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  // Check if we should generate debug info for this function.
  if (CGM.getDebugInfo() && !FD->hasAttr<NoDebugAttr>())
    DebugInfo = CGM.getDebugInfo();

  FunctionArgList Args;

  CurGD = GD;
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
    if (MD->isInstance()) {
      // Create the implicit 'this' decl.
      // FIXME: I'm not entirely sure I like using a fake decl just for code
      // generation. Maybe we can come up with a better way?
      CXXThisDecl = ImplicitParamDecl::Create(getContext(), 0,
                                              FD->getLocation(),
                                              &getContext().Idents.get("this"),
                                              MD->getThisType(getContext()));
      Args.push_back(std::make_pair(CXXThisDecl, CXXThisDecl->getType()));

      // Check if we need a VTT parameter as well.
      if (CodeGenVTables::needsVTTParameter(GD)) {
        // FIXME: The comment about using a fake decl above applies here too.
        QualType T = getContext().getPointerType(getContext().VoidPtrTy);
        CXXVTTDecl =
          ImplicitParamDecl::Create(getContext(), 0, FD->getLocation(),
                                    &getContext().Idents.get("vtt"), T);
        Args.push_back(std::make_pair(CXXVTTDecl, CXXVTTDecl->getType()));
      }
    }
  }

  if (FD->getNumParams()) {
    const FunctionProtoType* FProto = FD->getType()->getAs<FunctionProtoType>();
    assert(FProto && "Function def must have prototype!");

    for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i)
      Args.push_back(std::make_pair(FD->getParamDecl(i),
                                    FProto->getArgType(i)));
  }

  SourceRange BodyRange;
  if (Stmt *Body = FD->getBody()) BodyRange = Body->getSourceRange();

  // Emit the standard function prologue.
  StartFunction(GD, FD->getResultType(), Fn, Args, BodyRange.getBegin());

  // Generate the body of the function.
  if (isa<CXXDestructorDecl>(FD))
    EmitDestructorBody(Args);
  else if (isa<CXXConstructorDecl>(FD))
    EmitConstructorBody(Args);
  else
    EmitFunctionBody(Args);

  // Emit the standard function epilogue.
  FinishFunction(BodyRange.getEnd());

  // Destroy the 'this' declaration.
  if (CXXThisDecl)
    CXXThisDecl->Destroy(getContext());

  // Destroy the VTT declaration.
  if (CXXVTTDecl)
    CXXVTTDecl->Destroy(getContext());
}

/// ContainsLabel - Return true if the statement contains a label in it.  If
/// this statement is not executed normally, then as long as it contains no
/// labels we can just remove the code.
bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
  // Null statement, not a label!
  if (S == 0) return false;

  // If this is a label, we have to emit the code, consider something like:
  // if (0) { ... foo: bar(); } goto foo;
  if (isa<LabelStmt>(S))
    return true;

  // If this is a case/default statement, and we haven't seen a switch, we have
  // to emit the code.
  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
    return true;

  // If this is a switch statement, we want to ignore cases below it.
  if (isa<SwitchStmt>(S))
    IgnoreCaseStmts = true;

  // Scan subexpressions for verboten labels.
  for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
       I != E; ++I)
    if (ContainsLabel(*I, IgnoreCaseStmts))
      return true;

  return false;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold to
/// a constant, or if it does but contains a label, return 0.  If it constant
/// folds to 'true' and does not contain a label, return 1; if it constant
/// folds to 'false' and does not contain a label, return -1.
int CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond) {
  // FIXME: Rename and handle conversion of other evaluatable things
  // to bool.
  Expr::EvalResult Result;
  if (!Cond->Evaluate(Result, getContext()) || !Result.Val.isInt() ||
      Result.HasSideEffects)
    return 0;  // Not foldable, not integer or not fully evaluatable.

  if (CodeGenFunction::ContainsLabel(Cond))
    return 0;  // Contains a label.

  return Result.Val.getInt().getBoolValue() ? 1 : -1;
}

/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
/// statement) to the specified blocks.  Based on the condition, this might try
/// to simplify the codegen of the conditional based on the branch.
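/// For example (illustrative sketch): a branch on 'X && Y' is lowered as a
/// branch on X into an intermediate "land.lhs.true" block that then branches
/// on Y, rather than first materializing the '&&' result as an i1 value.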
///
void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
                                           llvm::BasicBlock *TrueBlock,
                                           llvm::BasicBlock *FalseBlock) {
  if (const ParenExpr *PE = dyn_cast<ParenExpr>(Cond))
    return EmitBranchOnBoolExpr(PE->getSubExpr(), TrueBlock, FalseBlock);

  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
    // Handle X && Y in a condition.
    if (CondBOp->getOpcode() == BinaryOperator::LAnd) {
      // If we have "1 && X", simplify the code.  "0 && X" would have constant
      // folded if the case was simple enough.
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS()) == 1) {
        // br(1 && X) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      }

      // If we have "X && 1", simplify the code to use an uncond branch.
      // "X && 0" would have been constant folded to 0.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS()) == 1) {
        // br(X && 1) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
      }

      // Emit the LHS as a conditional.  If the LHS conditional is false, we
      // want to jump to the FalseBlock.
      llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
      EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock);
      EmitBlock(LHSTrue);

      // Any temporaries created here are conditional.
      BeginConditionalBranch();
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      EndConditionalBranch();

      return;
    } else if (CondBOp->getOpcode() == BinaryOperator::LOr) {
      // If we have "0 || X", simplify the code.  "1 || X" would have constant
      // folded if the case was simple enough.
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS()) == -1) {
        // br(0 || X) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      }

      // If we have "X || 0", simplify the code to use an uncond branch.
      // "X || 1" would have been constant folded to 1.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS()) == -1) {
        // br(X || 0) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
      }

      // Emit the LHS as a conditional.  If the LHS conditional is true, we
      // want to jump to the TrueBlock.
      llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
      EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse);
      EmitBlock(LHSFalse);

      // Any temporaries created here are conditional.
      BeginConditionalBranch();
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      EndConditionalBranch();

      return;
    }
  }

  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
    // br(!x, t, f) -> br(x, f, t)
    if (CondUOp->getOpcode() == UnaryOperator::LNot)
      return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock);
  }

  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
    // Handle ?: operator.

    // Just ignore GNU ?: extension.
    if (CondOp->getLHS()) {
      // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
      llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
      llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
      EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock);
      EmitBlock(LHSBlock);
      EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock);
      EmitBlock(RHSBlock);
      EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock);
      return;
    }
  }

  // Emit the code with the fully general case.
  llvm::Value *CondV = EvaluateExprAsBool(Cond);
  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock);
}

/// ErrorUnsupported - Print out an error that codegen doesn't support the
/// specified stmt yet.
void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type,
                                       bool OmitOnError) {
  CGM.ErrorUnsupported(S, Type, OmitOnError);
}

void
CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  if (CGM.getTypes().ContainsPointerToDataMember(Ty)) {
    llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);

    llvm::GlobalVariable *NullVariable =
      new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
                               /*isConstant=*/true,
                               llvm::GlobalVariable::PrivateLinkage,
                               NullConstant, llvm::Twine());
    EmitAggregateCopy(DestPtr, NullVariable, Ty, /*isVolatile=*/false);
    return;
  }

  // Ignore empty classes in C++.
  if (getContext().getLangOptions().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
        return;
    }
  }

  // Otherwise, just memset the whole thing to zero.  This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
  if (DestPtr->getType() != BP)
    DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");

  // Get size and alignment info for this aggregate.
  std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty);

  // Don't bother emitting a zero-byte memset.
  if (TypeInfo.first == 0)
    return;

  // FIXME: Handle variable sized types.
  Builder.CreateCall5(CGM.getMemSetFn(BP, IntPtrTy), DestPtr,
              llvm::Constant::getNullValue(llvm::Type::getInt8Ty(VMContext)),
                      // TypeInfo.first describes size in bits.
                      llvm::ConstantInt::get(IntPtrTy, TypeInfo.first/8),
                      llvm::ConstantInt::get(Int32Ty, TypeInfo.second/8),
                      llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext),
                                             0));
}

llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelStmt *L) {
  // Make sure that there is a block for the indirect goto.
  if (IndirectBranch == 0)
    GetIndirectGotoBlock();

  llvm::BasicBlock *BB = getJumpDestForLabel(L).Block;

  // Make sure the indirect branch includes all of the address-taken blocks.
  IndirectBranch->addDestination(BB);
  return llvm::BlockAddress::get(CurFn, BB);
}

llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
  // If we already made the indirect branch for indirect goto, return its block.
  if (IndirectBranch) return IndirectBranch->getParent();

  CGBuilderTy TmpBuilder(createBasicBlock("indirectgoto"));

  const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);

  // Create the PHI node that indirect gotos will add entries to.
  llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, "indirect.goto.dest");

  // Create the indirect branch instruction.
  IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
  return IndirectBranch->getParent();
}

llvm::Value *CodeGenFunction::GetVLASize(const VariableArrayType *VAT) {
  llvm::Value *&SizeEntry = VLASizeMap[VAT->getSizeExpr()];

  assert(SizeEntry && "Did not emit size for type");
  return SizeEntry;
}

llvm::Value *CodeGenFunction::EmitVLASize(QualType Ty) {
  assert(Ty->isVariablyModifiedType() &&
         "Must pass variably modified type to EmitVLASizes!");

  EnsureInsertPoint();

  if (const VariableArrayType *VAT = getContext().getAsVariableArrayType(Ty)) {
    llvm::Value *&SizeEntry = VLASizeMap[VAT->getSizeExpr()];

    if (!SizeEntry) {
      const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());

      // Get the element size.
      QualType ElemTy = VAT->getElementType();
      llvm::Value *ElemSize;
      if (ElemTy->isVariableArrayType())
        ElemSize = EmitVLASize(ElemTy);
      else
        ElemSize = llvm::ConstantInt::get(SizeTy,
            getContext().getTypeSizeInChars(ElemTy).getQuantity());

      llvm::Value *NumElements = EmitScalarExpr(VAT->getSizeExpr());
      NumElements = Builder.CreateIntCast(NumElements, SizeTy, false, "tmp");

      SizeEntry = Builder.CreateMul(ElemSize, NumElements);
    }

    return SizeEntry;
  }

  if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
    EmitVLASize(AT->getElementType());
    return 0;
  }

  const PointerType *PT = Ty->getAs<PointerType>();
  assert(PT && "unknown VM type!");
  EmitVLASize(PT->getPointeeType());
  return 0;
}

llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) {
  if (CGM.getContext().getBuiltinVaListType()->isArrayType())
    return EmitScalarExpr(E);
  return EmitLValue(E).getAddress();
}

/// Pops cleanup blocks until the given savepoint is reached.
void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old) {
  assert(Old.isValid());

  EHScopeStack::iterator E = EHStack.find(Old);
  while (EHStack.begin() != E)
    PopCleanupBlock();
}

/// Destroys a cleanup if it was unused.
static void DestroyCleanup(CodeGenFunction &CGF,
                           llvm::BasicBlock *Entry,
                           llvm::BasicBlock *Exit) {
  assert(Entry->use_empty() && "destroying cleanup with uses!");
  assert(Exit->getTerminator() == 0 &&
         "exit has terminator but entry has no predecessors!");

  // This doesn't always remove the entire cleanup, but it's much
  // safer as long as we don't know what blocks belong to the cleanup.
  // A *much* better approach if we care about this inefficiency would
  // be to lazily emit the cleanup.

  // If the exit block is distinct from the entry, give it a branch to
  // an unreachable destination.  This preserves the well-formedness
  // of the IR.
  if (Entry != Exit)
    llvm::BranchInst::Create(CGF.getUnreachableBlock(), Exit);

  assert(!Entry->getParent() && "cleanup entry already positioned?");
  // We can't just delete the entry; we have to kill any references to
  // its instructions in other blocks.
  for (llvm::BasicBlock::iterator I = Entry->begin(), E = Entry->end();
       I != E; ++I)
    if (!I->use_empty())
      I->replaceAllUsesWith(llvm::UndefValue::get(I->getType()));
  delete Entry;
}

/// Creates a switch instruction to thread branches out of the given
/// block (which is the exit block of a cleanup).
static void CreateCleanupSwitch(CodeGenFunction &CGF,
                                llvm::BasicBlock *Block) {
  if (Block->getTerminator()) {
    assert(isa<llvm::SwitchInst>(Block->getTerminator()) &&
           "cleanup block already has a terminator, but it isn't a switch");
    return;
  }

  llvm::Value *DestCodePtr
    = CGF.CreateTempAlloca(CGF.Builder.getInt32Ty(), "cleanup.dst");
  CGBuilderTy Builder(Block);
  llvm::Value *DestCode = Builder.CreateLoad(DestCodePtr, "tmp");

  // Create a switch instruction to determine where to jump next.
  Builder.CreateSwitch(DestCode, CGF.getUnreachableBlock());
}

/// Attempts to reduce a cleanup's entry block to a fallthrough.  This
/// is basically llvm::MergeBlockIntoPredecessor, except
/// simplified/optimized for the tighter constraints on cleanup
/// blocks.
static void SimplifyCleanupEntry(CodeGenFunction &CGF,
                                 llvm::BasicBlock *Entry) {
  llvm::BasicBlock *Pred = Entry->getSinglePredecessor();
  if (!Pred) return;

  llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Pred->getTerminator());
  if (!Br || Br->isConditional()) return;
  assert(Br->getSuccessor(0) == Entry);

  // If we were previously inserting at the end of the cleanup entry
  // block, we'll need to continue inserting at the end of the
  // predecessor.
  bool WasInsertBlock = CGF.Builder.GetInsertBlock() == Entry;
  assert(!WasInsertBlock || CGF.Builder.GetInsertPoint() == Entry->end());

  // Kill the branch.
  Br->eraseFromParent();

  // Merge the blocks.
  Pred->getInstList().splice(Pred->end(), Entry->getInstList());

  // Kill the entry block.
  Entry->eraseFromParent();

  if (WasInsertBlock)
    CGF.Builder.SetInsertPoint(Pred);
}

/// Attempts to reduce a cleanup's exit switch to an unconditional
/// branch.
static void SimplifyCleanupExit(llvm::BasicBlock *Exit) {
  llvm::TerminatorInst *Terminator = Exit->getTerminator();
  assert(Terminator && "completed cleanup exit has no terminator");

  llvm::SwitchInst *Switch = dyn_cast<llvm::SwitchInst>(Terminator);
  if (!Switch) return;
  if (Switch->getNumCases() != 2) return; // default + 1

  llvm::LoadInst *Cond = cast<llvm::LoadInst>(Switch->getCondition());
  llvm::AllocaInst *CondVar = cast<llvm::AllocaInst>(Cond->getPointerOperand());

  // Replace the switch instruction with an unconditional branch.
  llvm::BasicBlock *Dest = Switch->getSuccessor(1); // default is 0
  Switch->eraseFromParent();
  llvm::BranchInst::Create(Dest, Exit);

  // Delete all uses of the condition variable.
  Cond->eraseFromParent();
  while (!CondVar->use_empty())
    cast<llvm::StoreInst>(*CondVar->use_begin())->eraseFromParent();

  // Delete the condition variable itself.
  CondVar->eraseFromParent();
}

/// Threads a branch fixup through a cleanup block.
static void ThreadFixupThroughCleanup(CodeGenFunction &CGF,
                                      BranchFixup &Fixup,
                                      llvm::BasicBlock *Entry,
                                      llvm::BasicBlock *Exit) {
  if (!Exit->getTerminator())
    CreateCleanupSwitch(CGF, Exit);

  // Find the switch and its destination index alloca.
  llvm::SwitchInst *Switch = cast<llvm::SwitchInst>(Exit->getTerminator());
  llvm::Value *DestCodePtr =
    cast<llvm::LoadInst>(Switch->getCondition())->getPointerOperand();

  // Compute the index of the new case we're adding to the switch.
  unsigned Index = Switch->getNumCases();

  const llvm::IntegerType *i32 = llvm::Type::getInt32Ty(CGF.getLLVMContext());
  llvm::ConstantInt *IndexV = llvm::ConstantInt::get(i32, Index);

  // Set the index in the origin block.
  new llvm::StoreInst(IndexV, DestCodePtr, Fixup.Origin);

  // Add a case to the switch.
  Switch->addCase(IndexV, Fixup.Destination);

  // Change the last branch to point to the cleanup entry block.
  Fixup.LatestBranch->setSuccessor(Fixup.LatestBranchIndex, Entry);

  // And finally, update the fixup.
  Fixup.LatestBranch = Switch;
  Fixup.LatestBranchIndex = Index;
}

/// Try to simplify both the entry and exit edges of a cleanup.
static void SimplifyCleanupEdges(CodeGenFunction &CGF,
                                 llvm::BasicBlock *Entry,
                                 llvm::BasicBlock *Exit) {

  // Given their current implementations, it's important to run these
  // in this order: SimplifyCleanupEntry will delete Entry if it can
  // be merged into its predecessor, which will then break
  // SimplifyCleanupExit if (as is common) Entry == Exit.

  SimplifyCleanupExit(Exit);
  SimplifyCleanupEntry(CGF, Entry);
}

static void EmitLazyCleanup(CodeGenFunction &CGF,
                            EHScopeStack::LazyCleanup *Fn,
                            bool ForEH) {
  if (ForEH) CGF.EHStack.pushTerminate();
  Fn->Emit(CGF, ForEH);
  if (ForEH) CGF.EHStack.popTerminate();
  assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?");
}

static void SplitAndEmitLazyCleanup(CodeGenFunction &CGF,
                                    EHScopeStack::LazyCleanup *Fn,
                                    bool ForEH,
                                    llvm::BasicBlock *Entry) {
  assert(Entry && "no entry block for cleanup");

  // Remove the switch and load from the end of the entry block.
  llvm::Instruction *Switch = &Entry->getInstList().back();
  Entry->getInstList().remove(Switch);
  assert(isa<llvm::SwitchInst>(Switch));
  llvm::Instruction *Load = &Entry->getInstList().back();
  Entry->getInstList().remove(Load);
  assert(isa<llvm::LoadInst>(Load));

  assert(Entry->getInstList().empty() &&
         "lazy cleanup block not empty after removing load/switch pair?");

  // Emit the actual cleanup at the end of the entry block.
  CGF.Builder.SetInsertPoint(Entry);
  EmitLazyCleanup(CGF, Fn, ForEH);

  // Put the load and switch at the end of the exit block.
  llvm::BasicBlock *Exit = CGF.Builder.GetInsertBlock();
  Exit->getInstList().push_back(Load);
  Exit->getInstList().push_back(Switch);

  // Clean up the edges if possible.
  SimplifyCleanupEdges(CGF, Entry, Exit);

  CGF.Builder.ClearInsertionPoint();
}

static void PopLazyCleanupBlock(CodeGenFunction &CGF) {
  assert(isa<EHLazyCleanupScope>(*CGF.EHStack.begin()) && "top not a cleanup!");
  EHLazyCleanupScope &Scope = cast<EHLazyCleanupScope>(*CGF.EHStack.begin());
  assert(Scope.getFixupDepth() <= CGF.EHStack.getNumBranchFixups());

  // Check whether we need an EH cleanup.  This is only true if we've
  // generated a lazy EH cleanup block.
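  // (The lazy EH block is only created on demand, by EmitBranchThroughEHCleanup
  // below, so its presence means an EH edge has already been threaded through
  // this scope.)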
  llvm::BasicBlock *EHEntry = Scope.getEHBlock();
  bool RequiresEHCleanup = (EHEntry != 0);

  // Check the three conditions which might require a normal cleanup:

  // - whether there are branch fix-ups through this cleanup
  unsigned FixupDepth = Scope.getFixupDepth();
  bool HasFixups = CGF.EHStack.getNumBranchFixups() != FixupDepth;

  // - whether control has already been threaded through this cleanup
  llvm::BasicBlock *NormalEntry = Scope.getNormalBlock();
  bool HasExistingBranches = (NormalEntry != 0);

  // - whether there's a fallthrough
  llvm::BasicBlock *FallthroughSource = CGF.Builder.GetInsertBlock();
  bool HasFallthrough = (FallthroughSource != 0);

  bool RequiresNormalCleanup = false;
  if (Scope.isNormalCleanup() &&
      (HasFixups || HasExistingBranches || HasFallthrough)) {
    RequiresNormalCleanup = true;
  }

  // If we don't need the cleanup at all, we're done.
  if (!RequiresNormalCleanup && !RequiresEHCleanup) {
    CGF.EHStack.popCleanup();
    assert(CGF.EHStack.getNumBranchFixups() == 0 ||
           CGF.EHStack.hasNormalCleanups());
    return;
  }

  // Copy the cleanup emission data out.  Note that SmallVector
  // guarantees maximal alignment for its buffer regardless of its
  // type parameter.
  llvm::SmallVector<char, 8*sizeof(void*)> CleanupBuffer;
  CleanupBuffer.reserve(Scope.getCleanupSize());
  memcpy(CleanupBuffer.data(),
         Scope.getCleanupBuffer(), Scope.getCleanupSize());
  CleanupBuffer.set_size(Scope.getCleanupSize());
  EHScopeStack::LazyCleanup *Fn =
    reinterpret_cast<EHScopeStack::LazyCleanup*>(CleanupBuffer.data());

  // We're done with the scope; pop it off so we can emit the cleanups.
  CGF.EHStack.popCleanup();

  if (RequiresNormalCleanup) {
    // If we have a fallthrough and no other need for the cleanup,
    // emit it directly.
    if (HasFallthrough && !HasFixups && !HasExistingBranches) {
      EmitLazyCleanup(CGF, Fn, /*ForEH*/ false);

    // Otherwise, the best approach is to thread everything through
    // the cleanup block and then try to clean up after ourselves.
    } else {
      // Force the entry block to exist.
      if (!HasExistingBranches) {
        NormalEntry = CGF.createBasicBlock("cleanup");
        CreateCleanupSwitch(CGF, NormalEntry);
      }

      CGF.EmitBlock(NormalEntry);

      // Thread the fallthrough edge through the (momentarily trivial)
      // cleanup.
      llvm::BasicBlock *FallthroughDestination = 0;
      if (HasFallthrough) {
        assert(isa<llvm::BranchInst>(FallthroughSource->getTerminator()));
        FallthroughDestination = CGF.createBasicBlock("cleanup.cont");

        BranchFixup Fix;
        Fix.Destination = FallthroughDestination;
        Fix.LatestBranch = FallthroughSource->getTerminator();
        Fix.LatestBranchIndex = 0;
        Fix.Origin = Fix.LatestBranch;

        // Restore fixup invariant.  EmitBlock added a branch to the
        // cleanup which we need to redirect to the destination.
        cast<llvm::BranchInst>(Fix.LatestBranch)
          ->setSuccessor(0, Fix.Destination);

        ThreadFixupThroughCleanup(CGF, Fix, NormalEntry, NormalEntry);
      }

      // Thread any "real" fixups we need to thread.
      for (unsigned I = FixupDepth, E = CGF.EHStack.getNumBranchFixups();
           I != E; ++I)
        if (CGF.EHStack.getBranchFixup(I).Destination)
          ThreadFixupThroughCleanup(CGF, CGF.EHStack.getBranchFixup(I),
                                    NormalEntry, NormalEntry);

      SplitAndEmitLazyCleanup(CGF, Fn, /*ForEH*/ false, NormalEntry);

      if (HasFallthrough)
        CGF.EmitBlock(FallthroughDestination);
    }
  }

  // Emit the EH cleanup if required.
  if (RequiresEHCleanup) {
    CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();
    CGF.EmitBlock(EHEntry);
    SplitAndEmitLazyCleanup(CGF, Fn, /*ForEH*/ true, EHEntry);
    CGF.Builder.restoreIP(SavedIP);
  }
}

/// Pops a cleanup block.  If the block includes a normal cleanup, the
/// current insertion point is threaded through the cleanup, as are
/// any branch fixups on the cleanup.
void CodeGenFunction::PopCleanupBlock() {
  assert(!EHStack.empty() && "cleanup stack is empty!");
  if (isa<EHLazyCleanupScope>(*EHStack.begin()))
    return PopLazyCleanupBlock(*this);

  assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!");
  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
  assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups());

  // Handle the EH cleanup if (1) there is one and (2) it's different
  // from the normal cleanup.
  if (Scope.isEHCleanup() &&
      Scope.getEHEntry() != Scope.getNormalEntry()) {
    llvm::BasicBlock *EHEntry = Scope.getEHEntry();
    llvm::BasicBlock *EHExit = Scope.getEHExit();

    if (EHEntry->use_empty()) {
      DestroyCleanup(*this, EHEntry, EHExit);
    } else {
      // TODO: this isn't really the ideal location to put this EH
      // cleanup, but lazy emission is a better solution than trying
      // to pick a better spot.
      CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
      EmitBlock(EHEntry);
      Builder.restoreIP(SavedIP);

      SimplifyCleanupEdges(*this, EHEntry, EHExit);
    }
  }

  // If we only have an EH cleanup, we don't really need to do much
  // here.  Branch fixups just naturally drop down to the enclosing
  // cleanup scope.
  if (!Scope.isNormalCleanup()) {
    EHStack.popCleanup();
    assert(EHStack.getNumBranchFixups() == 0 || EHStack.hasNormalCleanups());
    return;
  }

  // Check whether the scope has any fixups that need to be threaded.
  unsigned FixupDepth = Scope.getFixupDepth();
  bool HasFixups = EHStack.getNumBranchFixups() != FixupDepth;

  // Grab the entry and exit blocks.
  llvm::BasicBlock *Entry = Scope.getNormalEntry();
  llvm::BasicBlock *Exit = Scope.getNormalExit();

  // Check whether anything's been threaded through the cleanup already.
  assert((Exit->getTerminator() == 0) == Entry->use_empty() &&
         "cleanup entry/exit mismatch");
  bool HasExistingBranches = !Entry->use_empty();

  // Check whether we need to emit a "fallthrough" branch through the
  // cleanup for the current insertion point.
  llvm::BasicBlock *FallThrough = Builder.GetInsertBlock();
  if (FallThrough && FallThrough->getTerminator())
    FallThrough = 0;

  // If *nothing* is using the cleanup, kill it.
  if (!FallThrough && !HasFixups && !HasExistingBranches) {
    EHStack.popCleanup();
    DestroyCleanup(*this, Entry, Exit);
    return;
  }

  // Otherwise, add the block to the function.
  EmitBlock(Entry);

  if (FallThrough)
    Builder.SetInsertPoint(Exit);
  else
    Builder.ClearInsertionPoint();

  // Fast case: if we don't have to add any fixups, and either
  // we don't have a fallthrough or the cleanup wasn't previously
  // used, then the setup above is sufficient.
  if (!HasFixups) {
    if (!FallThrough) {
      assert(HasExistingBranches &&
             "no reason for cleanup but didn't kill before");
      EHStack.popCleanup();
      SimplifyCleanupEdges(*this, Entry, Exit);
      return;
    } else if (!HasExistingBranches) {
      assert(FallThrough && "no reason for cleanup but didn't kill before");
      // We can't simplify the exit edge in this case because we're
      // already inserting at the end of the exit block.
      EHStack.popCleanup();
      SimplifyCleanupEntry(*this, Entry);
      return;
    }
  }

  // Otherwise we're going to have to thread things through the cleanup.
  llvm::SmallVector<BranchFixup*, 8> Fixups;

  // Synthesize a fixup for the current insertion point.
  BranchFixup Cur;
  if (FallThrough) {
    Cur.Destination = createBasicBlock("cleanup.cont");
    Cur.LatestBranch = FallThrough->getTerminator();
    Cur.LatestBranchIndex = 0;
    Cur.Origin = Cur.LatestBranch;

    // Restore fixup invariant.  EmitBlock added a branch to the cleanup
    // which we need to redirect to the destination.
    cast<llvm::BranchInst>(Cur.LatestBranch)->setSuccessor(0, Cur.Destination);

    Fixups.push_back(&Cur);
  } else {
    Cur.Destination = 0;
  }

  // Collect any "real" fixups we need to thread.
  for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
       I != E; ++I)
    if (EHStack.getBranchFixup(I).Destination)
      Fixups.push_back(&EHStack.getBranchFixup(I));

  assert(!Fixups.empty() && "no fixups, invariants broken!");

  // If there's only a single fixup to thread through, do so with
  // unconditional branches.  This only happens if there's a single
  // branch and no fallthrough.
  if (Fixups.size() == 1 && !HasExistingBranches) {
    Fixups[0]->LatestBranch->setSuccessor(Fixups[0]->LatestBranchIndex, Entry);
    llvm::BranchInst *Br =
      llvm::BranchInst::Create(Fixups[0]->Destination, Exit);
    Fixups[0]->LatestBranch = Br;
    Fixups[0]->LatestBranchIndex = 0;

  // Otherwise, force a switch statement and thread everything through
  // the switch.
  } else {
    CreateCleanupSwitch(*this, Exit);
    for (unsigned I = 0, E = Fixups.size(); I != E; ++I)
      ThreadFixupThroughCleanup(*this, *Fixups[I], Entry, Exit);
  }

  // Emit the fallthrough destination block if necessary.
  if (Cur.Destination)
    EmitBlock(Cur.Destination);

  // We're finally done with the cleanup.
  EHStack.popCleanup();
}

void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
  if (!HaveInsertPoint())
    return;

  // Create the branch.
  llvm::BranchInst *BI = Builder.CreateBr(Dest.Block);

  // If we're not in a cleanup scope, we don't need to worry about
  // fixups.
  if (!EHStack.hasNormalCleanups()) {
    Builder.ClearInsertionPoint();
    return;
  }

  // Initialize a fixup.
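  // A BranchFixup records the original branch (Origin), its ultimate
  // Destination, and the latest branch on the partially threaded path
  // (LatestBranch/LatestBranchIndex); ThreadFixupThroughCleanup rewrites the
  // latter as the branch is routed through each enclosing cleanup.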
  BranchFixup Fixup;
  Fixup.Destination = Dest.Block;
  Fixup.Origin = BI;
  Fixup.LatestBranch = BI;
  Fixup.LatestBranchIndex = 0;

  // If we can't resolve the destination cleanup scope, just add this
  // to the current cleanup scope.
  if (!Dest.ScopeDepth.isValid()) {
    EHStack.addBranchFixup() = Fixup;
    Builder.ClearInsertionPoint();
    return;
  }

  for (EHScopeStack::iterator I = EHStack.begin(),
         E = EHStack.find(Dest.ScopeDepth); I != E; ++I) {
    if (isa<EHCleanupScope>(*I)) {
      EHCleanupScope &Scope = cast<EHCleanupScope>(*I);
      if (Scope.isNormalCleanup())
        ThreadFixupThroughCleanup(*this, Fixup, Scope.getNormalEntry(),
                                  Scope.getNormalExit());
    } else if (isa<EHLazyCleanupScope>(*I)) {
      EHLazyCleanupScope &Scope = cast<EHLazyCleanupScope>(*I);
      if (Scope.isNormalCleanup()) {
        llvm::BasicBlock *Block = Scope.getNormalBlock();
        if (!Block) {
          Block = createBasicBlock("cleanup");
          Scope.setNormalBlock(Block);
        }
        ThreadFixupThroughCleanup(*this, Fixup, Block, Block);
      }
    }
  }

  Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitBranchThroughEHCleanup(JumpDest Dest) {
  if (!HaveInsertPoint())
    return;

  // Create the branch.
  llvm::BranchInst *BI = Builder.CreateBr(Dest.Block);

  // If we're not in a cleanup scope, we don't need to worry about
  // fixups.
  if (!EHStack.hasEHCleanups()) {
    Builder.ClearInsertionPoint();
    return;
  }

  // Initialize a fixup.
  BranchFixup Fixup;
  Fixup.Destination = Dest.Block;
  Fixup.Origin = BI;
  Fixup.LatestBranch = BI;
  Fixup.LatestBranchIndex = 0;

  // We should never get invalid scope depths for these: invalid scope
  // depths only arise for as-yet-unemitted labels, and we can't do an
  // EH-unwind to one of those.
  assert(Dest.ScopeDepth.isValid() && "invalid scope depth on EH dest?");

  for (EHScopeStack::iterator I = EHStack.begin(),
         E = EHStack.find(Dest.ScopeDepth); I != E; ++I) {
    if (isa<EHCleanupScope>(*I)) {
      EHCleanupScope &Scope = cast<EHCleanupScope>(*I);
      if (Scope.isEHCleanup())
        ThreadFixupThroughCleanup(*this, Fixup, Scope.getEHEntry(),
                                  Scope.getEHExit());
    } else if (isa<EHLazyCleanupScope>(*I)) {
      EHLazyCleanupScope &Scope = cast<EHLazyCleanupScope>(*I);
      if (Scope.isEHCleanup()) {
        llvm::BasicBlock *Block = Scope.getEHBlock();
        if (!Block) {
          Block = createBasicBlock("eh.cleanup");
          Scope.setEHBlock(Block);
        }
        ThreadFixupThroughCleanup(*this, Fixup, Block, Block);
      }
    }
  }

  Builder.ClearInsertionPoint();
}