//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGDebugInfo.h"
#include "CGException.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
  : BlockFunction(cgm, *this, Builder), CGM(cgm),
    Target(CGM.getContext().Target),
    Builder(cgm.getModule().getContext()),
    ExceptionSlot(0), DebugInfo(0), IndirectBranch(0),
    SwitchInsn(0), CaseRangeBlock(0), InvokeDest(0),
    DidCallStackSave(false), UnreachableBlock(0),
    CXXThisDecl(0), CXXThisValue(0), CXXVTTDecl(0), CXXVTTValue(0),
    ConditionalBranchLevel(0), TerminateLandingPad(0), TerminateHandler(0),
    TrapBB(0) {

  // Get some frequently used types.
  LLVMPointerWidth = Target.getPointerWidth(0);
  llvm::LLVMContext &LLVMContext = CGM.getLLVMContext();
  IntPtrTy = llvm::IntegerType::get(LLVMContext, LLVMPointerWidth);
  Int32Ty = llvm::Type::getInt32Ty(LLVMContext);
  Int64Ty = llvm::Type::getInt64Ty(LLVMContext);

  Exceptions = getContext().getLangOptions().Exceptions;
  CatchUndefined = getContext().getLangOptions().CatchUndefined;
  CGM.getMangleContext().startNewFunction();
}

ASTContext &CodeGenFunction::getContext() const {
  return CGM.getContext();
}


llvm::Value *CodeGenFunction::GetAddrOfLocalVar(const VarDecl *VD) {
  llvm::Value *Res = LocalDeclMap[VD];
  assert(Res && "Invalid argument to GetAddrOfLocalVar(), no decl!");
  return Res;
}

llvm::Constant *
CodeGenFunction::GetAddrOfStaticLocalVar(const VarDecl *BVD) {
  return cast<llvm::Constant>(GetAddrOfLocalVar(BVD));
}

const llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

const llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

bool CodeGenFunction::hasAggregateLLVMType(QualType T) {
  return T->isRecordType() || T->isArrayType() || T->isAnyComplexType() ||
    T->isMemberFunctionPointerType();
}

void CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point, reuse it if it is empty or there are no
    // explicit jumps to the return block.
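    // (For example, a function whose body simply falls off the end never
    // branches to the return block; it has no uses, so the epilog can be
    // emitted directly into the current block and the return block deleted.)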
    if (CurBB->empty() || ReturnBlock.Block->use_empty()) {
      ReturnBlock.Block->replaceAllUsesWith(CurBB);
      delete ReturnBlock.Block;
    } else
      EmitBlock(ReturnBlock.Block);
    return;
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock.Block->hasOneUse()) {
    llvm::BranchInst *BI =
      dyn_cast<llvm::BranchInst>(*ReturnBlock.Block->use_begin());
    if (BI && BI->isUnconditional() &&
        BI->getSuccessor(0) == ReturnBlock.Block) {
      // Reset insertion point and delete the branch.
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.Block;
      return;
    }
  }

  // FIXME: We are at an unreachable point, there is no reason to emit the block
  // unless it has uses. However, we still need a place to put the debug
  // region.end for now.

  EmitBlock(ReturnBlock.Block);
}

static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  if (!BB) return;
  if (!BB->use_empty())
    return CGF.CurFn->getBasicBlockList().push_back(BB);
  delete BB;
}

void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");

  // Emit function epilog (to return).
  EmitReturnBlock();

  EmitFunctionInstrumentation("__cyg_profile_func_exit");

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo()) {
    DI->setLocation(EndLoc);
    DI->EmitRegionEnd(CurFn, Builder);
  }

  EmitFunctionEpilog(*CurFnInfo);
  EmitEndEHSpec(CurCodeDecl);

  assert(EHStack.empty() &&
         "did not remove all scopes from cleanup stack!");

  // If someone did an indirect goto, emit the indirect goto block at the end
  // of the function.
  if (IndirectBranch) {
    EmitBlock(IndirectBranch->getParent());
    Builder.ClearInsertionPoint();
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = 0;
  Ptr->eraseFromParent();

  // If someone took the address of a label but never did an indirect goto, we
  // made a zero entry PHI node, which is illegal; zap it now.
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();
}

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls.
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions)
    return false;
  if (CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}

/// EmitFunctionInstrumentation - Emit LLVM code to call the specified
/// instrumentation function with the current function and the call site, if
/// function instrumentation is enabled.
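///
/// For example, under -finstrument-functions the entry instrumentation is,
/// roughly:
///   %callsite = call i8* @llvm.returnaddress(i32 0)
///   call void @__cyg_profile_func_enter(i8* %fn, i8* %callsite)
/// with the matching __cyg_profile_func_exit call emitted from
/// FinishFunction.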
void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
  if (!ShouldInstrumentFunction())
    return;

  const llvm::PointerType *PointerTy;
  const llvm::FunctionType *FunctionTy;
  std::vector<const llvm::Type*> ProfileFuncArgs;

  // void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site);
  PointerTy = llvm::Type::getInt8PtrTy(VMContext);
  ProfileFuncArgs.push_back(PointerTy);
  ProfileFuncArgs.push_back(PointerTy);
  FunctionTy = llvm::FunctionType::get(
    llvm::Type::getVoidTy(VMContext),
    ProfileFuncArgs, false);

  llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn);
  llvm::CallInst *CallSite = Builder.CreateCall(
    CGM.getIntrinsic(llvm::Intrinsic::returnaddress, 0, 0),
    llvm::ConstantInt::get(Int32Ty, 0),
    "callsite");

  Builder.CreateCall2(F,
                      llvm::ConstantExpr::getBitCast(CurFn, PointerTy),
                      CallSite);
}

void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
                                    llvm::Function *Fn,
                                    const FunctionArgList &Args,
                                    SourceLocation StartLoc) {
  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = CurFuncDecl = D;
  FnRetTy = RetTy;
  CurFn = Fn;
  assert(CurFn->isDeclaration() && "Function already has body?");

  // Pass inline keyword to optimizer if it appears explicitly on any
  // declaration.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
    for (FunctionDecl::redecl_iterator RI = FD->redecls_begin(),
           RE = FD->redecls_end(); RI != RE; ++RI)
      if (RI->isInlineSpecified()) {
        Fn->addFnAttr(llvm::Attribute::InlineHint);
        break;
      }

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entry block
  // later. Don't create this with the builder, because we don't want it
  // folded.
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "", EntryBB);
  if (Builder.isNamePreserving())
    AllocaInsertPt->setName("allocapt");

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  QualType FnType = getContext().getFunctionType(RetTy, 0, 0, false, 0,
                                                 false, false, 0, 0,
                                                 /*FIXME?*/
                                                 FunctionType::ExtInfo());

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    DI->setLocation(StartLoc);
    DI->EmitFunctionStart(GD, FnType, CurFn, Builder);
  }

  EmitFunctionInstrumentation("__cyg_profile_func_enter");

  // FIXME: Leaked.
  // CC info is ignored, hopefully?
  CurFnInfo = &CGM.getTypes().getFunctionInfo(FnRetTy, Args,
                                              FunctionType::ExtInfo());

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = 0;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
             hasAggregateLLVMType(CurFnInfo->getReturnType())) {
    // Indirect aggregate return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
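    // (Under an sret ABI the caller passes the result slot as a hidden first
    // argument, roughly "define void @f(%struct.S* sret %agg.result, ...)";
    // writing through it directly also preserves C++ object identity, since
    // no extra copy into the caller's slot is made.)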
    ReturnValue = CurFn->arg_begin();
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");
  }

  EmitStartEHSpec(CurCodeDecl);
  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (CXXThisDecl)
    CXXThisValue = Builder.CreateLoad(LocalDeclMap[CXXThisDecl], "this");
  if (CXXVTTDecl)
    CXXVTTValue = Builder.CreateLoad(LocalDeclMap[CXXVTTDecl], "vtt");

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size.
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i) {
    QualType Ty = i->second;

    if (Ty->isVariablyModifiedType())
      EmitVLASize(Ty);
  }
}

void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args) {
  const FunctionDecl *FD = cast<FunctionDecl>(CurGD.getDecl());
  assert(FD->getBody());
  EmitStmt(FD->getBody());
}

void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  // Check if we should generate debug info for this function.
  if (CGM.getDebugInfo() && !FD->hasAttr<NoDebugAttr>())
    DebugInfo = CGM.getDebugInfo();

  FunctionArgList Args;

  CurGD = GD;
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
    if (MD->isInstance()) {
      // Create the implicit 'this' decl.
      // FIXME: I'm not entirely sure I like using a fake decl just for code
      // generation. Maybe we can come up with a better way?
      CXXThisDecl = ImplicitParamDecl::Create(getContext(), 0,
                                              FD->getLocation(),
                                              &getContext().Idents.get("this"),
                                              MD->getThisType(getContext()));
      Args.push_back(std::make_pair(CXXThisDecl, CXXThisDecl->getType()));

      // Check if we need a VTT parameter as well.
      if (CodeGenVTables::needsVTTParameter(GD)) {
        // FIXME: The comment about using a fake decl above applies here too.
        QualType T = getContext().getPointerType(getContext().VoidPtrTy);
        CXXVTTDecl =
          ImplicitParamDecl::Create(getContext(), 0, FD->getLocation(),
                                    &getContext().Idents.get("vtt"), T);
        Args.push_back(std::make_pair(CXXVTTDecl, CXXVTTDecl->getType()));
      }
    }
  }

  if (FD->getNumParams()) {
    const FunctionProtoType* FProto = FD->getType()->getAs<FunctionProtoType>();
    assert(FProto && "Function def must have prototype!");

    for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i)
      Args.push_back(std::make_pair(FD->getParamDecl(i),
                                    FProto->getArgType(i)));
  }

  SourceRange BodyRange;
  if (Stmt *Body = FD->getBody()) BodyRange = Body->getSourceRange();

  // Emit the standard function prologue.
  StartFunction(GD, FD->getResultType(), Fn, Args, BodyRange.getBegin());

  // Generate the body of the function.
  if (isa<CXXDestructorDecl>(FD))
    EmitDestructorBody(Args);
  else if (isa<CXXConstructorDecl>(FD))
    EmitConstructorBody(Args);
  else
    EmitFunctionBody(Args);

  // Emit the standard function epilogue.
  FinishFunction(BodyRange.getEnd());

  // Destroy the 'this' declaration.
  if (CXXThisDecl)
    CXXThisDecl->Destroy(getContext());

  // Destroy the VTT declaration.
  if (CXXVTTDecl)
    CXXVTTDecl->Destroy(getContext());
}

/// ContainsLabel - Return true if the statement contains a label in it. If
/// this statement is not executed normally, then not containing a label means
/// that we can just remove the code.
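///
/// For example, in:
///   switch (n) { if (0) { case 1: f(); } }
/// the dead 'if (0)' body cannot be dropped, because the enclosing switch
/// can still jump into it via 'case 1'.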
bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
  // Null statement, not a label!
  if (S == 0) return false;

  // If this is a label, we have to emit the code, consider something like:
  // if (0) { ... foo: bar(); } goto foo;
  if (isa<LabelStmt>(S))
    return true;

  // If this is a case/default statement, and we haven't seen a switch, we have
  // to emit the code.
  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
    return true;

  // If this is a switch statement, we want to ignore cases below it.
  if (isa<SwitchStmt>(S))
    IgnoreCaseStmts = true;

  // Scan subexpressions for verboten labels.
  for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
       I != E; ++I)
    if (ContainsLabel(*I, IgnoreCaseStmts))
      return true;

  return false;
}


/// ConstantFoldsToSimpleInteger - If the specified expression does not fold to
/// a constant, or if it does but contains a label, return 0. If it constant
/// folds to 'true' and does not contain a label, return 1; if it constant
/// folds to 'false' and does not contain a label, return -1.
int CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond) {
  // FIXME: Rename and handle conversion of other evaluatable things
  // to bool.
  Expr::EvalResult Result;
  if (!Cond->Evaluate(Result, getContext()) || !Result.Val.isInt() ||
      Result.HasSideEffects)
    return 0;  // Not foldable, not integer or not fully evaluatable.

  if (CodeGenFunction::ContainsLabel(Cond))
    return 0;  // Contains a label.

  return Result.Val.getInt().getBoolValue() ? 1 : -1;
}


/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
/// statement) to the specified blocks. Based on the condition, this might try
/// to simplify the codegen of the conditional based on the branch.
///
void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
                                           llvm::BasicBlock *TrueBlock,
                                           llvm::BasicBlock *FalseBlock) {
  if (const ParenExpr *PE = dyn_cast<ParenExpr>(Cond))
    return EmitBranchOnBoolExpr(PE->getSubExpr(), TrueBlock, FalseBlock);

  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
    // Handle X && Y in a condition.
    if (CondBOp->getOpcode() == BinaryOperator::LAnd) {
      // If we have "1 && X", simplify the code. "0 && X" would have constant
      // folded if the case was simple enough.
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS()) == 1) {
        // br(1 && X) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      }

      // If we have "X && 1", simplify the code to use an uncond branch.
      // "X && 0" would have been constant folded to 0.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS()) == 1) {
        // br(X && 1) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
      }

      // Emit the LHS as a conditional. If the LHS conditional is false, we
      // want to jump to the FalseBlock.
      llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
      EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock);
      EmitBlock(LHSTrue);

      // Any temporaries created here are conditional.
      BeginConditionalBranch();
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      EndConditionalBranch();

      return;
    } else if (CondBOp->getOpcode() == BinaryOperator::LOr) {
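      // Lowering mirrors the && case above with the branch polarity flipped;
      // for "a || b" the emitted control flow is, roughly:
      //   br %a, label %TrueBlock, label %lor.lhs.false
      // lor.lhs.false:
      //   br %b, label %TrueBlock, label %FalseBlock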
"1 || X" would have constant 471 // folded if the case was simple enough. 472 if (ConstantFoldsToSimpleInteger(CondBOp->getLHS()) == -1) { 473 // br(0 || X) -> br(X). 474 return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock); 475 } 476 477 // If we have "X || 0", simplify the code to use an uncond branch. 478 // "X || 1" would have been constant folded to 1. 479 if (ConstantFoldsToSimpleInteger(CondBOp->getRHS()) == -1) { 480 // br(X || 0) -> br(X). 481 return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock); 482 } 483 484 // Emit the LHS as a conditional. If the LHS conditional is true, we 485 // want to jump to the TrueBlock. 486 llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false"); 487 EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse); 488 EmitBlock(LHSFalse); 489 490 // Any temporaries created here are conditional. 491 BeginConditionalBranch(); 492 EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock); 493 EndConditionalBranch(); 494 495 return; 496 } 497 } 498 499 if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) { 500 // br(!x, t, f) -> br(x, f, t) 501 if (CondUOp->getOpcode() == UnaryOperator::LNot) 502 return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock); 503 } 504 505 if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) { 506 // Handle ?: operator. 507 508 // Just ignore GNU ?: extension. 509 if (CondOp->getLHS()) { 510 // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f)) 511 llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true"); 512 llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false"); 513 EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock); 514 EmitBlock(LHSBlock); 515 EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock); 516 EmitBlock(RHSBlock); 517 EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock); 518 return; 519 } 520 } 521 522 // Emit the code with the fully general case. 523 llvm::Value *CondV = EvaluateExprAsBool(Cond); 524 Builder.CreateCondBr(CondV, TrueBlock, FalseBlock); 525 } 526 527 /// ErrorUnsupported - Print out an error that codegen doesn't support the 528 /// specified stmt yet. 529 void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type, 530 bool OmitOnError) { 531 CGM.ErrorUnsupported(S, Type, OmitOnError); 532 } 533 534 void 535 CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) { 536 // If the type contains a pointer to data member we can't memset it to zero. 537 // Instead, create a null constant and copy it to the destination. 538 if (CGM.getTypes().ContainsPointerToDataMember(Ty)) { 539 llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty); 540 541 llvm::GlobalVariable *NullVariable = 542 new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(), 543 /*isConstant=*/true, 544 llvm::GlobalVariable::PrivateLinkage, 545 NullConstant, llvm::Twine()); 546 EmitAggregateCopy(DestPtr, NullVariable, Ty, /*isVolatile=*/false); 547 return; 548 } 549 550 551 // Ignore empty classes in C++. 552 if (getContext().getLangOptions().CPlusPlus) { 553 if (const RecordType *RT = Ty->getAs<RecordType>()) { 554 if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty()) 555 return; 556 } 557 } 558 559 // Otherwise, just memset the whole thing to zero. This is legal 560 // because in LLVM, all default initializers (other than the ones we just 561 // handled above) are guaranteed to have a bit pattern of all zeros. 
  const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
  if (DestPtr->getType() != BP)
    DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");

  // Get size and alignment info for this aggregate.
  std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty);

  // Don't bother emitting a zero-byte memset.
  if (TypeInfo.first == 0)
    return;

  // FIXME: Handle variable sized types.
  Builder.CreateCall5(CGM.getMemSetFn(BP, IntPtrTy), DestPtr,
                llvm::Constant::getNullValue(llvm::Type::getInt8Ty(VMContext)),
                      // TypeInfo.first describes size in bits.
                      llvm::ConstantInt::get(IntPtrTy, TypeInfo.first/8),
                      llvm::ConstantInt::get(Int32Ty, TypeInfo.second/8),
                      llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext),
                                             0));
}

llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelStmt *L) {
  // Make sure that there is a block for the indirect goto.
  if (IndirectBranch == 0)
    GetIndirectGotoBlock();

  llvm::BasicBlock *BB = getJumpDestForLabel(L).Block;

  // Make sure the indirect branch includes all of the address-taken blocks.
  IndirectBranch->addDestination(BB);
  return llvm::BlockAddress::get(CurFn, BB);
}

llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
  // If we already made the indirect branch for indirect goto, return its block.
  if (IndirectBranch) return IndirectBranch->getParent();

  CGBuilderTy TmpBuilder(createBasicBlock("indirectgoto"));

  const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);

  // Create the PHI node that indirect gotos will add entries to.
  llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, "indirect.goto.dest");

  // Create the indirect branch instruction.
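  // (Each address-taken label contributes a blockaddress constant to the PHI
  // above and is registered as a destination via GetAddrOfLabel, yielding
  // roughly: indirectbr i8* %indirect.goto.dest, [label %l1, label %l2].)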
  IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
  return IndirectBranch->getParent();
}

llvm::Value *CodeGenFunction::GetVLASize(const VariableArrayType *VAT) {
  llvm::Value *&SizeEntry = VLASizeMap[VAT->getSizeExpr()];

  assert(SizeEntry && "Did not emit size for type");
  return SizeEntry;
}

llvm::Value *CodeGenFunction::EmitVLASize(QualType Ty) {
  assert(Ty->isVariablyModifiedType() &&
         "Must pass variably modified type to EmitVLASizes!");

  EnsureInsertPoint();

  if (const VariableArrayType *VAT = getContext().getAsVariableArrayType(Ty)) {
    llvm::Value *&SizeEntry = VLASizeMap[VAT->getSizeExpr()];

    if (!SizeEntry) {
      const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());

      // Get the element size.
      QualType ElemTy = VAT->getElementType();
      llvm::Value *ElemSize;
      if (ElemTy->isVariableArrayType())
        ElemSize = EmitVLASize(ElemTy);
      else
        ElemSize = llvm::ConstantInt::get(SizeTy,
            getContext().getTypeSizeInChars(ElemTy).getQuantity());

      llvm::Value *NumElements = EmitScalarExpr(VAT->getSizeExpr());
      NumElements = Builder.CreateIntCast(NumElements, SizeTy, false, "tmp");

      SizeEntry = Builder.CreateMul(ElemSize, NumElements);
    }

    return SizeEntry;
  }

  if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
    EmitVLASize(AT->getElementType());
    return 0;
  }

  const PointerType *PT = Ty->getAs<PointerType>();
  assert(PT && "unknown VM type!");
  EmitVLASize(PT->getPointeeType());
  return 0;
}

llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) {
  if (CGM.getContext().getBuiltinVaListType()->isArrayType())
    return EmitScalarExpr(E);
  return EmitLValue(E).getAddress();
}

/// Pops cleanup blocks until the given savepoint is reached.
void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old) {
  assert(Old.isValid());

  EHScopeStack::iterator E = EHStack.find(Old);
  while (EHStack.begin() != E)
    PopCleanupBlock();
}

/// Destroys a cleanup if it was unused.
static void DestroyCleanup(CodeGenFunction &CGF,
                           llvm::BasicBlock *Entry,
                           llvm::BasicBlock *Exit) {
  assert(Entry->use_empty() && "destroying cleanup with uses!");
  assert(Exit->getTerminator() == 0 &&
         "exit has terminator but entry has no predecessors!");

  // This doesn't always remove the entire cleanup, but it's much
  // safer as long as we don't know what blocks belong to the cleanup.
  // A *much* better approach if we care about this inefficiency would
  // be to lazily emit the cleanup.

  // If the exit block is distinct from the entry, give it a branch to
  // an unreachable destination. This preserves the well-formedness
  // of the IR.
  if (Entry != Exit)
    llvm::BranchInst::Create(CGF.getUnreachableBlock(), Exit);

  assert(!Entry->getParent() && "cleanup entry already positioned?");
  // We can't just delete the entry; we have to kill any references to
  // its instructions in other blocks.
  for (llvm::BasicBlock::iterator I = Entry->begin(), E = Entry->end();
       I != E; ++I)
    if (!I->use_empty())
      I->replaceAllUsesWith(llvm::UndefValue::get(I->getType()));
  delete Entry;
}

/// Creates a switch instruction to thread branches out of the given
/// block (which is the exit block of a cleanup).
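///
/// The switch dispatches on an i32 slot (the "cleanup.dst" alloca below);
/// each branch threaded through the cleanup stores its case index into the
/// slot before entering, yielding roughly:
///   %tmp = load i32* %cleanup.dst
///   switch i32 %tmp, label %unreachable [ i32 1, label %dest1, ... ]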
static void CreateCleanupSwitch(CodeGenFunction &CGF,
                                llvm::BasicBlock *Block) {
  if (Block->getTerminator()) {
    assert(isa<llvm::SwitchInst>(Block->getTerminator()) &&
           "cleanup block already has a terminator, but it isn't a switch");
    return;
  }

  llvm::Value *DestCodePtr
    = CGF.CreateTempAlloca(CGF.Builder.getInt32Ty(), "cleanup.dst");
  CGBuilderTy Builder(Block);
  llvm::Value *DestCode = Builder.CreateLoad(DestCodePtr, "tmp");

  // Create a switch instruction to determine where to jump next.
  Builder.CreateSwitch(DestCode, CGF.getUnreachableBlock());
}

/// Attempts to reduce a cleanup's entry block to a fallthrough. This
/// is basically llvm::MergeBlockIntoPredecessor, except
/// simplified/optimized for the tighter constraints on cleanup
/// blocks.
static void SimplifyCleanupEntry(CodeGenFunction &CGF,
                                 llvm::BasicBlock *Entry) {
  llvm::BasicBlock *Pred = Entry->getSinglePredecessor();
  if (!Pred) return;

  llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Pred->getTerminator());
  if (!Br || Br->isConditional()) return;
  assert(Br->getSuccessor(0) == Entry);

  // If we were previously inserting at the end of the cleanup entry
  // block, we'll need to continue inserting at the end of the
  // predecessor.
  bool WasInsertBlock = CGF.Builder.GetInsertBlock() == Entry;
  assert(!WasInsertBlock || CGF.Builder.GetInsertPoint() == Entry->end());

  // Kill the branch.
  Br->eraseFromParent();

  // Merge the blocks.
  Pred->getInstList().splice(Pred->end(), Entry->getInstList());

  // Kill the entry block.
  Entry->eraseFromParent();

  if (WasInsertBlock)
    CGF.Builder.SetInsertPoint(Pred);
}

/// Attempts to reduce a cleanup's exit switch to an unconditional
/// branch.
static void SimplifyCleanupExit(llvm::BasicBlock *Exit) {
  llvm::TerminatorInst *Terminator = Exit->getTerminator();
  assert(Terminator && "completed cleanup exit has no terminator");

  llvm::SwitchInst *Switch = dyn_cast<llvm::SwitchInst>(Terminator);
  if (!Switch) return;
  if (Switch->getNumCases() != 2) return; // default + 1

  llvm::LoadInst *Cond = cast<llvm::LoadInst>(Switch->getCondition());
  llvm::AllocaInst *CondVar = cast<llvm::AllocaInst>(Cond->getPointerOperand());

  // Replace the switch instruction with an unconditional branch.
  llvm::BasicBlock *Dest = Switch->getSuccessor(1); // default is 0
  Switch->eraseFromParent();
  llvm::BranchInst::Create(Dest, Exit);

  // Delete all uses of the condition variable.
  Cond->eraseFromParent();
  while (!CondVar->use_empty())
    cast<llvm::StoreInst>(*CondVar->use_begin())->eraseFromParent();

  // Delete the condition variable itself.
  CondVar->eraseFromParent();
}

/// Threads a branch fixup through a cleanup block.
static void ThreadFixupThroughCleanup(CodeGenFunction &CGF,
                                      BranchFixup &Fixup,
                                      llvm::BasicBlock *Entry,
                                      llvm::BasicBlock *Exit) {
  if (!Exit->getTerminator())
    CreateCleanupSwitch(CGF, Exit);

  // Find the switch and its destination index alloca.
  llvm::SwitchInst *Switch = cast<llvm::SwitchInst>(Exit->getTerminator());
  llvm::Value *DestCodePtr =
    cast<llvm::LoadInst>(Switch->getCondition())->getPointerOperand();

  // Compute the index of the new case we're adding to the switch.
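  // (Note that getNumCases() here counts the default case, so the first
  // fixup threaded through a cleanup gets index 1; this matches the
  // "default + 1" check in SimplifyCleanupExit above.)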
  unsigned Index = Switch->getNumCases();

  const llvm::IntegerType *i32 = llvm::Type::getInt32Ty(CGF.getLLVMContext());
  llvm::ConstantInt *IndexV = llvm::ConstantInt::get(i32, Index);

  // Set the index in the origin block.
  new llvm::StoreInst(IndexV, DestCodePtr, Fixup.Origin);

  // Add a case to the switch.
  Switch->addCase(IndexV, Fixup.Destination);

  // Change the last branch to point to the cleanup entry block.
  Fixup.LatestBranch->setSuccessor(Fixup.LatestBranchIndex, Entry);

  // And finally, update the fixup.
  Fixup.LatestBranch = Switch;
  Fixup.LatestBranchIndex = Index;
}

/// Try to simplify both the entry and exit edges of a cleanup.
static void SimplifyCleanupEdges(CodeGenFunction &CGF,
                                 llvm::BasicBlock *Entry,
                                 llvm::BasicBlock *Exit) {

  // Given their current implementations, it's important to run these
  // in this order: SimplifyCleanupEntry will delete Entry if it can
  // be merged into its predecessor, which will then break
  // SimplifyCleanupExit if (as is common) Entry == Exit.

  SimplifyCleanupExit(Exit);
  SimplifyCleanupEntry(CGF, Entry);
}

/// Pops a cleanup block. If the block includes a normal cleanup, the
/// current insertion point is threaded through the cleanup, as are
/// any branch fixups on the cleanup.
void CodeGenFunction::PopCleanupBlock() {
  assert(!EHStack.empty() && "cleanup stack is empty!");
  assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!");
  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
  assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups());

  // Handle the EH cleanup if (1) there is one and (2) it's different
  // from the normal cleanup.
  if (Scope.isEHCleanup() &&
      Scope.getEHEntry() != Scope.getNormalEntry()) {
    llvm::BasicBlock *EHEntry = Scope.getEHEntry();
    llvm::BasicBlock *EHExit = Scope.getEHExit();

    if (EHEntry->use_empty()) {
      DestroyCleanup(*this, EHEntry, EHExit);
    } else {
      // TODO: this isn't really the ideal location to put this EH
      // cleanup, but lazy emission is a better solution than trying
      // to pick a better spot.
      CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
      EmitBlock(EHEntry);
      Builder.restoreIP(SavedIP);

      SimplifyCleanupEdges(*this, EHEntry, EHExit);
    }
  }

  // If we only have an EH cleanup, we don't really need to do much
  // here. Branch fixups just naturally drop down to the enclosing
  // cleanup scope.
  if (!Scope.isNormalCleanup()) {
    EHStack.popCleanup();
    assert(EHStack.getNumBranchFixups() == 0 || EHStack.hasNormalCleanups());
    return;
  }

  // Check whether the scope has any fixups that need to be threaded.
  unsigned FixupDepth = Scope.getFixupDepth();
  bool HasFixups = EHStack.getNumBranchFixups() != FixupDepth;

  // Grab the entry and exit blocks.
  llvm::BasicBlock *Entry = Scope.getNormalEntry();
  llvm::BasicBlock *Exit = Scope.getNormalExit();

  // Check whether anything's been threaded through the cleanup already.
  assert((Exit->getTerminator() == 0) == Entry->use_empty() &&
         "cleanup entry/exit mismatch");
  bool HasExistingBranches = !Entry->use_empty();

  // Check whether we need to emit a "fallthrough" branch through the
  // cleanup for the current insertion point.
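  // (This is the common case of simply reaching the end of a scope, e.g.
  // "{ X x; f(); } g();": the code after f() falls through the destructor
  // cleanup on its way to g().)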
  llvm::BasicBlock *FallThrough = Builder.GetInsertBlock();
  if (FallThrough && FallThrough->getTerminator())
    FallThrough = 0;

  // If *nothing* is using the cleanup, kill it.
  if (!FallThrough && !HasFixups && !HasExistingBranches) {
    EHStack.popCleanup();
    DestroyCleanup(*this, Entry, Exit);
    return;
  }

  // Otherwise, add the block to the function.
  EmitBlock(Entry);

  if (FallThrough)
    Builder.SetInsertPoint(Exit);
  else
    Builder.ClearInsertionPoint();

  // Fast case: if we don't have to add any fixups, and either
  // we don't have a fallthrough or the cleanup wasn't previously
  // used, then the setup above is sufficient.
  if (!HasFixups) {
    if (!FallThrough) {
      assert(HasExistingBranches && "no reason for cleanup but didn't kill before");
      EHStack.popCleanup();
      SimplifyCleanupEdges(*this, Entry, Exit);
      return;
    } else if (!HasExistingBranches) {
      assert(FallThrough && "no reason for cleanup but didn't kill before");
      // We can't simplify the exit edge in this case because we're
      // already inserting at the end of the exit block.
      EHStack.popCleanup();
      SimplifyCleanupEntry(*this, Entry);
      return;
    }
  }

  // Otherwise we're going to have to thread things through the cleanup.
  llvm::SmallVector<BranchFixup*, 8> Fixups;

  // Synthesize a fixup for the current insertion point.
  BranchFixup Cur;
  if (FallThrough) {
    Cur.Destination = createBasicBlock("cleanup.cont");
    Cur.LatestBranch = FallThrough->getTerminator();
    Cur.LatestBranchIndex = 0;
    Cur.Origin = Cur.LatestBranch;

    // Restore fixup invariant. EmitBlock added a branch to the cleanup
    // which we need to redirect to the destination.
    cast<llvm::BranchInst>(Cur.LatestBranch)->setSuccessor(0, Cur.Destination);

    Fixups.push_back(&Cur);
  } else {
    Cur.Destination = 0;
  }

  // Collect any "real" fixups we need to thread.
  for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
       I != E; ++I)
    if (EHStack.getBranchFixup(I).Destination)
      Fixups.push_back(&EHStack.getBranchFixup(I));

  assert(!Fixups.empty() && "no fixups, invariants broken!");

  // If there's only a single fixup to thread through, do so with
  // unconditional branches. This only happens if there's a single
  // branch and no fallthrough.
  if (Fixups.size() == 1 && !HasExistingBranches) {
    Fixups[0]->LatestBranch->setSuccessor(Fixups[0]->LatestBranchIndex, Entry);
    llvm::BranchInst *Br =
      llvm::BranchInst::Create(Fixups[0]->Destination, Exit);
    Fixups[0]->LatestBranch = Br;
    Fixups[0]->LatestBranchIndex = 0;

  // Otherwise, force a switch statement and thread everything through
  // the switch.
  } else {
    CreateCleanupSwitch(*this, Exit);
    for (unsigned I = 0, E = Fixups.size(); I != E; ++I)
      ThreadFixupThroughCleanup(*this, *Fixups[I], Entry, Exit);
  }

  // Emit the fallthrough destination block if necessary.
  if (Cur.Destination)
    EmitBlock(Cur.Destination);

  // We're finally done with the cleanup.
  EHStack.popCleanup();
}

void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
  if (!HaveInsertPoint())
    return;

  // Create the branch.
  llvm::BranchInst *BI = Builder.CreateBr(Dest.Block);

  // If we're not in a cleanup scope, we don't need to worry about
  // fixups.
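  // (Fixups matter for branches like "goto out;" that leave a scope with
  // pending destructors: the branch is recorded so PopCleanupBlock can later
  // reroute it through the cleanup before it reaches its real target.)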
  if (!EHStack.hasNormalCleanups()) {
    Builder.ClearInsertionPoint();
    return;
  }

  // Initialize a fixup.
  BranchFixup Fixup;
  Fixup.Destination = Dest.Block;
  Fixup.Origin = BI;
  Fixup.LatestBranch = BI;
  Fixup.LatestBranchIndex = 0;

  // If we can't resolve the destination cleanup scope, just add this
  // to the current cleanup scope.
  if (!Dest.ScopeDepth.isValid()) {
    EHStack.addBranchFixup() = Fixup;
    Builder.ClearInsertionPoint();
    return;
  }

  for (EHScopeStack::iterator I = EHStack.begin(),
         E = EHStack.find(Dest.ScopeDepth); I != E; ++I) {
    if (isa<EHCleanupScope>(*I)) {
      EHCleanupScope &Scope = cast<EHCleanupScope>(*I);
      if (Scope.isNormalCleanup())
        ThreadFixupThroughCleanup(*this, Fixup, Scope.getNormalEntry(),
                                  Scope.getNormalExit());
    }
  }

  Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitBranchThroughEHCleanup(JumpDest Dest) {
  if (!HaveInsertPoint())
    return;

  // Create the branch.
  llvm::BranchInst *BI = Builder.CreateBr(Dest.Block);

  // If we're not in a cleanup scope, we don't need to worry about
  // fixups.
  if (!EHStack.hasEHCleanups()) {
    Builder.ClearInsertionPoint();
    return;
  }

  // Initialize a fixup.
  BranchFixup Fixup;
  Fixup.Destination = Dest.Block;
  Fixup.Origin = BI;
  Fixup.LatestBranch = BI;
  Fixup.LatestBranchIndex = 0;

  // We should never get invalid scope depths for these: invalid scope
  // depths only arise for as-yet-unemitted labels, and we can't do an
  // EH-unwind to one of those.
  assert(Dest.ScopeDepth.isValid() && "invalid scope depth on EH dest?");

  for (EHScopeStack::iterator I = EHStack.begin(),
         E = EHStack.find(Dest.ScopeDepth); I != E; ++I) {
    if (isa<EHCleanupScope>(*I)) {
      EHCleanupScope &Scope = cast<EHCleanupScope>(*I);
      if (Scope.isEHCleanup())
        ThreadFixupThroughCleanup(*this, Fixup, Scope.getEHEntry(),
                                  Scope.getEHExit());
    }
  }

  Builder.ClearInsertionPoint();
}