//===- InlineFunction.cpp - Code to perform function inlining ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Intrinsics.h"
#include "llvm/Attributes.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/CallSite.h"
using namespace llvm;

bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI) {
  return InlineFunction(CallSite(CI), IFI);
}
bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI) {
  return InlineFunction(CallSite(II), IFI);
}


/// HandleCallsInBlockInlinedThroughInvoke - When we inline a basic block into
/// an invoke, we have to turn all of the calls that can throw into invokes.
/// This function analyzes BB to see if there are any calls, and if so, it
/// rewrites them to be invokes that jump to InvokeDest and fills in the PHI
/// nodes in that block with the values specified in InvokeDestPHIValues.
///
static void HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
                                                   BasicBlock *InvokeDest,
                           const SmallVectorImpl<Value*> &InvokeDestPHIValues) {
  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = BBI++;

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);
    if (CI == 0) continue;

    // If this call cannot unwind, don't convert it to an invoke.
    if (CI->doesNotThrow())
      continue;

    // Convert this function call into an invoke instruction.
    // First, split the basic block.
    BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");

    // Next, create the new invoke instruction, inserting it at the end
    // of the old basic block.
    SmallVector<Value*, 8> InvokeArgs(CI->op_begin()+1, CI->op_end());
    InvokeInst *II =
      InvokeInst::Create(CI->getCalledValue(), Split, InvokeDest,
                         InvokeArgs.begin(), InvokeArgs.end(),
                         CI->getName(), BB->getTerminator());
    II->setCallingConv(CI->getCallingConv());
    II->setAttributes(CI->getAttributes());

    // Make sure that anything using the call now uses the invoke!  This also
    // updates the CallGraph if present, because it uses a WeakVH.
    CI->replaceAllUsesWith(II);

    // Delete the unconditional branch inserted by splitBasicBlock.
    BB->getInstList().pop_back();
    Split->getInstList().pop_front();  // Delete the original call.

    // Update any PHI nodes in the exceptional block to indicate that
    // there is now a new entry in them.
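    // The incoming values were gathered from the original invoke's edge and
    // passed in as InvokeDestPHIValues, so each new edge from BB supplies
    // the same values, in PHI-node order.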
    unsigned i = 0;
    for (BasicBlock::iterator I = InvokeDest->begin();
         isa<PHINode>(I); ++I, ++i)
      cast<PHINode>(I)->addIncoming(InvokeDestPHIValues[i], BB);

    // This basic block is now complete, the caller will continue scanning the
    // next one.
    return;
  }
}


/// HandleInlinedInvoke - If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes and turn unwind
/// instructions into branches to the invoke unwind dest.
///
/// II is the invoke instruction being inlined.  FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlinedCodeInfo is information about the code that got inlined.
static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock,
                                ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();
  SmallVector<Value*, 8> InvokeDestPHIValues;

  // If there are PHI nodes in the unwind destination block, we need to
  // keep track of which values came into them from this invoke, then remove
  // the entry for this block.
  BasicBlock *InvokeBlock = II->getParent();
  for (BasicBlock::iterator I = InvokeDest->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);
    // Save the value to use for this edge.
    InvokeDestPHIValues.push_back(PN->getIncomingValueForBlock(InvokeBlock));
  }

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.  If the code doesn't have calls or unwinds, we know there is
  // nothing to rewrite.
  if (!InlinedCodeInfo.ContainsCalls && !InlinedCodeInfo.ContainsUnwinds) {
    // Now that everything is happy, we have one final detail.  The PHI nodes
    // in the exception destination block still have entries due to the
    // original invoke instruction.  Eliminate these entries (which might even
    // delete the PHI node) now.
    InvokeDest->removePredecessor(II->getParent());
    return;
  }

  for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E; ++BB) {
    if (InlinedCodeInfo.ContainsCalls)
      HandleCallsInBlockInlinedThroughInvoke(BB, InvokeDest,
                                             InvokeDestPHIValues);

    if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
      // An UnwindInst requires special handling when it gets inlined into an
      // invoke site.  Once this happens, we know that the unwind would cause
      // a control transfer to the invoke exception destination, so we can
      // transform it into a direct branch to the exception destination.
      BranchInst::Create(InvokeDest, UI);

      // Delete the unwind instruction!
      UI->eraseFromParent();

      // Update any PHI nodes in the exceptional block to indicate that
      // there is now a new entry in them.
      unsigned i = 0;
      for (BasicBlock::iterator I = InvokeDest->begin();
           isa<PHINode>(I); ++I, ++i) {
        PHINode *PN = cast<PHINode>(I);
        PN->addIncoming(InvokeDestPHIValues[i], BB);
      }
    }
  }

  // Now that everything is happy, we have one final detail.  The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction.  Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}

/// UpdateCallGraphAfterInlining - Once we have cloned code over from a callee
/// into the caller, update the specified callgraph to reflect the changes we
/// made.  Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
static void UpdateCallGraphAfterInlining(CallSite CS,
                                         Function::iterator FirstNewBlock,
                                       DenseMap<const Value*, Value*> &ValueMap,
                                         InlineFunctionInfo &IFI) {
  CallGraph &CG = *IFI.CG;
  const Function *Caller = CS.getInstruction()->getParent()->getParent();
  const Function *Callee = CS.getCalledFunction();
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();

  // Consider the case where CalleeNode == CallerNode.
  CallGraphNode::CalledFunctionsVector CallCache;
  if (CalleeNode == CallerNode) {
    CallCache.assign(I, E);
    I = CallCache.begin();
    E = CallCache.end();
  }

  for (; I != E; ++I) {
    const Value *OrigCall = I->first;

    DenseMap<const Value*, Value*>::iterator VMI = ValueMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI == ValueMap.end() || VMI->second == 0)
      continue;

    // If the call was inlined, but then constant folded, there is no edge to
    // add.  Check for this case.
    Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
    if (NewCall == 0) continue;

    // Remember that this call site got inlined for the client of
    // InlineFunction.
    IFI.InlinedCalls.push_back(NewCall);

    // It's possible that inlining the callsite will cause it to go from an
    // indirect to a direct call by resolving a function pointer.  If this
    // happens, set the callee of the new call site to a more precise
    // destination.  This can also happen if the call graph node of the caller
    // was just unnecessarily imprecise.
    if (I->second->getFunction() == 0)
      if (Function *F = CallSite(NewCall).getCalledFunction()) {
        // Indirect call site resolved to direct call.
        CallerNode->addCalledFunction(CallSite::get(NewCall), CG[F]);

        continue;
      }

    CallerNode->addCalledFunction(CallSite::get(NewCall), I->second);
  }

  // Update the call graph by deleting the edge from Callee to Caller.  We must
  // do this after the loop above in case Caller and Callee are the same.
  CallerNode->removeCallEdgeFor(CS);
}

// InlineFunction - This function inlines the called function into the basic
// block of the caller.  This returns false if it is not possible to inline
// this call.  The program is still in a well-defined state if this occurs
// though.
//
// Note that this only does one level of inlining.  For example, if the
// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
// exists in the instruction stream.  Similarly this will inline a recursive
// function by one level.
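//
// If IFI.CG is non-null, the call graph is updated to reflect the inlining;
// IFI.InlinedCalls and IFI.StaticAllocas are filled in for the client either
// way.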
//
bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI) {
  Instruction *TheCall = CS.getInstruction();
  LLVMContext &Context = TheCall->getContext();
  assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
         "Instruction not in function!");

  // If IFI has any state in it, zap it before we fill it in.
  IFI.reset();

  const Function *CalledFunc = CS.getCalledFunction();
  if (CalledFunc == 0 ||          // Can't inline external function or indirect
      CalledFunc->isDeclaration() || // call, or call to a vararg function!
      CalledFunc->getFunctionType()->isVarArg()) return false;


  // If the call to the callee is not a tail call, we must clear the 'tail'
  // flags on any calls that we inline.
  bool MustClearTailCallFlags =
    !(isa<CallInst>(TheCall) && cast<CallInst>(TheCall)->isTailCall());

  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CS.doesNotThrow();

  BasicBlock *OrigBB = TheCall->getParent();
  Function *Caller = OrigBB->getParent();

  // GC poses two hazards to inlining, which only occur when the callee has GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to
  //     the caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasGC()) {
    if (!Caller->hasGC())
      Caller->setGC(CalledFunc->getGC());
    else if (CalledFunc->getGC() != Caller->getGC())
      return false;
  }

  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  //
  Function::iterator LastBlock = &Caller->back();

  // Make sure to capture all of the return instructions from the cloned
  // function.
  SmallVector<ReturnInst*, 8> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;

  { // Scope to destroy ValueMap after cloning.
    DenseMap<const Value*, Value*> ValueMap;

    assert(CalledFunc->arg_size() == CS.arg_size() &&
           "No varargs calls can be inlined!");

    // Calculate the vector of arguments to pass into the function cloner,
    // which matches up the formal to the actual argument values.
    CallSite::arg_iterator AI = CS.arg_begin();
    unsigned ArgNo = 0;
    for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
         E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;

      // When byval arguments are actually inlined, we need to make the copy
      // implied by them explicit.  However, we don't do this if the callee is
      // readonly or readnone, because the copy would be unneeded: the callee
      // doesn't modify the struct.
      if (CalledFunc->paramHasAttr(ArgNo+1, Attribute::ByVal) &&
          !CalledFunc->onlyReadsMemory()) {
        const Type *AggTy = cast<PointerType>(I->getType())->getElementType();
        const Type *VoidPtrTy = Type::getInt8PtrTy(Context);

        // Create the alloca.  If we have TargetData, use nice alignment.
        unsigned Align = 1;
        if (IFI.TD) Align = IFI.TD->getPrefTypeAlignment(AggTy);
        Value *NewAlloca = new AllocaInst(AggTy, 0, Align,
                                          I->getName(),
                                          &*Caller->begin()->begin());
        // Emit a memcpy.
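        // The explicit copy gives the inlined body a private aggregate to
        // mutate, which is what byval guaranteed at the original call site.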
        const Type *Tys[3] = {VoidPtrTy, VoidPtrTy, Type::getInt64Ty(Context)};
        Function *MemCpyFn = Intrinsic::getDeclaration(Caller->getParent(),
                                                       Intrinsic::memcpy,
                                                       Tys, 3);
        Value *DestCast = new BitCastInst(NewAlloca, VoidPtrTy, "tmp", TheCall);
        Value *SrcCast = new BitCastInst(*AI, VoidPtrTy, "tmp", TheCall);

        Value *Size;
        if (IFI.TD == 0)
          Size = ConstantExpr::getSizeOf(AggTy);
        else
          Size = ConstantInt::get(Type::getInt64Ty(Context),
                                  IFI.TD->getTypeStoreSize(AggTy));

        // Always generate a memcpy of alignment 1 here because we don't know
        // the alignment of the src pointer.  Other optimizations can infer
        // better alignment.
        Value *CallArgs[] = {
          DestCast, SrcCast, Size,
          ConstantInt::get(Type::getInt32Ty(Context), 1),
          ConstantInt::get(Type::getInt1Ty(Context), 0)
        };
        CallInst *TheMemCpy =
          CallInst::Create(MemCpyFn, CallArgs, CallArgs+5, "", TheCall);

        // If we have a call graph, update it.
        if (CallGraph *CG = IFI.CG) {
          CallGraphNode *MemCpyCGN = CG->getOrInsertFunction(MemCpyFn);
          CallGraphNode *CallerNode = (*CG)[Caller];
          CallerNode->addCalledFunction(TheMemCpy, MemCpyCGN);
        }

        // Uses of the argument in the function should use our new alloca
        // instead.
        ActualArg = NewAlloca;

        // Calls that we inline may use the new alloca, so we need to clear
        // their 'tail' flags.
        MustClearTailCallFlags = true;
      }

      ValueMap[I] = ActualArg;
    }

    // We want the inliner to prune the code as it copies.  We would LOVE to
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll be
    // happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, ValueMap, Returns, ".i",
                              &InlinedFunctionInfo, IFI.TD, TheCall);

    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    // Update the callgraph if requested.
    if (IFI.CG)
      UpdateCallGraphAfterInlining(CS, FirstNewBlock, ValueMap, IFI);
  }

  // If there are any alloca instructions in the block that used to be the
  // entry block for the callee, move them to the entry block of the caller.
  // First calculate which instruction they should be inserted before.  We
  // insert the instructions at the end of the current alloca list.
  //
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
         E = FirstNewBlock->end(); I != E; ) {
      AllocaInst *AI = dyn_cast<AllocaInst>(I++);
      if (AI == 0) continue;

      // If the alloca is now dead, remove it.  This often occurs due to code
      // specialization.
      if (AI->use_empty()) {
        AI->eraseFromParent();
        continue;
      }

      if (!isa<Constant>(AI->getArraySize()))
        continue;

      // Keep track of the static allocas that we inline into the caller for
      // the client of InlineFunction.
      IFI.StaticAllocas.push_back(AI);

      // Scan for the block of allocas that we can move over, and move them
      // all at once.
      while (isa<AllocaInst>(I) &&
             isa<Constant>(cast<AllocaInst>(I)->getArraySize())) {
        IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
        ++I;
      }

      // Transfer all of the allocas over in a block.  Using splice means
      // that the instructions aren't removed from the symbol table, then
      // reinserted.
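      // The half-open range [AI, I) is exactly the run of static allocas
      // collected above.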
      Caller->getEntryBlock().getInstList().splice(InsertPoint,
                                                   FirstNewBlock->getInstList(),
                                                   AI, I);
    }
  }

  // If the inlined code contained dynamic alloca instructions, wrap the
  // inlined code with llvm.stacksave/llvm.stackrestore intrinsics.
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    // Get the two intrinsics we care about.
    Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    Function *StackRestore =
      Intrinsic::getDeclaration(M, Intrinsic::stackrestore);

    // If we are preserving the callgraph, add edges to the stacksave/restore
    // functions for the calls we insert.
    CallGraphNode *StackSaveCGN = 0, *StackRestoreCGN = 0, *CallerNode = 0;
    if (CallGraph *CG = IFI.CG) {
      StackSaveCGN = CG->getOrInsertFunction(StackSave);
      StackRestoreCGN = CG->getOrInsertFunction(StackRestore);
      CallerNode = (*CG)[Caller];
    }

    // Insert the llvm.stacksave.
    CallInst *SavedPtr = CallInst::Create(StackSave, "savedstack",
                                          FirstNewBlock->begin());
    if (IFI.CG) CallerNode->addCalledFunction(SavedPtr, StackSaveCGN);

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      CallInst *CI = CallInst::Create(StackRestore, SavedPtr, "", Returns[i]);
      if (IFI.CG) CallerNode->addCalledFunction(CI, StackRestoreCGN);
    }

    // Count the number of StackRestore calls we insert.
    unsigned NumStackRestores = Returns.size();

    // If we are inlining an invoke instruction, insert restores before each
    // unwind.  These unwinds will be rewritten into branches later.
    if (InlinedFunctionInfo.ContainsUnwinds && isa<InvokeInst>(TheCall)) {
      for (Function::iterator BB = FirstNewBlock, E = Caller->end();
           BB != E; ++BB)
        if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
          CallInst *CI = CallInst::Create(StackRestore, SavedPtr, "", UI);
          if (IFI.CG) CallerNode->addCalledFunction(CI, StackRestoreCGN);
          ++NumStackRestores;
        }
    }
  }

  // If we are inlining a tail call instruction through a call site that isn't
  // marked 'tail', we must remove the tail marker for any calls in the inlined
  // code.  Also, calls inlined through a 'nounwind' call site should be marked
  // 'nounwind'.
  if (InlinedFunctionInfo.ContainsCalls &&
      (MustClearTailCallFlags || MarkNoUnwind)) {
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB)
      for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
        if (CallInst *CI = dyn_cast<CallInst>(I)) {
          if (MustClearTailCallFlags)
            CI->setTailCall(false);
          if (MarkNoUnwind)
            CI->setDoesNotThrow();
        }
  }

  // If we are inlining through a 'nounwind' call site then any inlined
  // 'unwind' instructions are unreachable.
  if (InlinedFunctionInfo.ContainsUnwinds && MarkNoUnwind)
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB) {
      TerminatorInst *Term = BB->getTerminator();
      if (isa<UnwindInst>(Term)) {
        new UnreachableInst(Context, Term);
        BB->getInstList().erase(Term);
      }
    }

  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any inlined 'unwind' instructions into branches to the invoke exception
  // destination, and call instructions into invoke instructions.
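  // HandleInlinedInvoke also removes the original invoke's entries from the
  // PHI nodes in the unwind destination.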
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
    HandleInlinedInvoke(II, FirstNewBlock, InlinedFunctionInfo);

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->getInstList().splice(TheCall, FirstNewBlock->getInstList(),
                                 FirstNewBlock->begin(), FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->getBasicBlockList().pop_back();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
      BranchInst::Create(II->getNormalDest(), TheCall);

    // If the return instruction returned a value, replace uses of the call
    // with uses of the returned value.
    if (!TheCall->use_empty()) {
      ReturnInst *R = Returns[0];
      if (TheCall == R->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(R->getReturnValue());
    }
    // Since we are now done with the Call/Invoke, we can delete it.
    TheCall->eraseFromParent();

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->eraseFromParent();

    // We are now done with the inlining.
    return true;
  }

  // Otherwise, we have the normal case, of more than one block to inline or
  // multiple return sites.

  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks.  How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
  BasicBlock *AfterCallBB;
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {

    // Add an unconditional branch to make this look like the CallInst case...
    BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);

    // Split the basic block.  This guarantees that no PHI nodes will have to
    // be updated due to new incoming edges, and makes the invoke case more
    // symmetric to the call case.
    AfterCallBB = OrigBB->splitBasicBlock(NewBr,
                                          CalledFunc->getName()+".exit");

  } else {  // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    //
    AfterCallBB = OrigBB->splitBasicBlock(TheCall,
                                          CalledFunc->getName()+".exit");
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  //
  TerminatorInst *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
  Br->setOperand(0, FirstNewBlock);


  // Now that the function is correct, make it a little bit nicer.  In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->getBasicBlockList().splice(AfterCallBB, Caller->getBasicBlockList(),
                                     FirstNewBlock, Caller->end());

  // Handle all of the return instructions that we just cloned in, and
  // eliminate any users of the original call/invoke instruction.
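  // Multiple returns are merged through a PHI node placed at the front of
  // AfterCallBB; a single return is spliced in directly.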
  const Type *RTy = CalledFunc->getReturnType();

  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    PHINode *PHI = 0;
    if (!TheCall->use_empty()) {
      PHI = PHINode::Create(RTy, TheCall->getName(),
                            AfterCallBB->begin());
      // Anything that used the result of the function call should now use the
      // PHI node as its operand.
      TheCall->replaceAllUsesWith(PHI);
    }

    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
    if (PHI) {
      for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
        ReturnInst *RI = Returns[i];
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }

      // Now that we inserted the PHI, check to see if it has a single value
      // (e.g. all the entries are the same or undef).  If so, remove the PHI
      // so it doesn't block other optimizations.
      if (Value *V = PHI->hasConstantValue()) {
        PHI->replaceAllUsesWith(V);
        PHI->eraseFromParent();
      }
    }


    // Add a branch to the merge points and remove return instructions.
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      BranchInst::Create(AfterCallBB, RI);
      RI->eraseFromParent();
    }
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!TheCall->use_empty()) {
      if (TheCall == Returns[0]->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
    }

    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Delete the return instruction and the now-empty ReturnBB.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!TheCall->use_empty()) {
    // No returns, but something is using the return value of the call.  Just
    // nuke the result.
    TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  TheCall->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the code entry block into calling block, right before the
  // unconditional branch.
  OrigBB->getInstList().splice(Br, CalleeEntry->getInstList());
  CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes.

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);

  return true;
}