//===- InlineFunction.cpp - Code to perform function inlining ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  return InlineFunction(CallSite(CI), IFI, InsertLifetime);
}
bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  return InlineFunction(CallSite(II), IFI, InsertLifetime);
}

namespace {
  /// A class for recording information about inlining through an invoke.
  class InvokeInliningInfo {
    BasicBlock *OuterResumeDest; ///< Destination of the invoke's unwind.
    BasicBlock *InnerResumeDest; ///< Destination for the callee's resume.
    LandingPadInst *CallerLPad;  ///< LandingPadInst associated with the invoke.
    PHINode *InnerEHValuesPHI;   ///< PHI for EH values from landingpad insts.
    SmallVector<Value*, 8> UnwindDestPHIValues;

  public:
    InvokeInliningInfo(InvokeInst *II)
      : OuterResumeDest(II->getUnwindDest()), InnerResumeDest(nullptr),
        CallerLPad(nullptr), InnerEHValuesPHI(nullptr) {
      // If there are PHI nodes in the unwind destination block, we need to keep
      // track of which values came into them from the invoke before removing
      // the edge from this block.
      llvm::BasicBlock *InvokeBB = II->getParent();
      BasicBlock::iterator I = OuterResumeDest->begin();
      for (; isa<PHINode>(I); ++I) {
        // Save the value to use for this edge.
        PHINode *PHI = cast<PHINode>(I);
        UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
      }

      CallerLPad = cast<LandingPadInst>(I);
    }

    /// getOuterResumeDest - The outer unwind destination is the target of
    /// unwind edges introduced for calls within the inlined function.
    BasicBlock *getOuterResumeDest() const {
      return OuterResumeDest;
    }

    BasicBlock *getInnerResumeDest();

    LandingPadInst *getLandingPadInst() const { return CallerLPad; }

    /// forwardResume - Forward the 'resume' instruction to the caller's landing
    /// pad block. When the landing pad block has only one predecessor, this is
    /// a simple branch. When there is more than one predecessor, we need to
    /// split the landing pad block after the landingpad instruction and jump
    /// to there.
    void forwardResume(ResumeInst *RI,
                       SmallPtrSet<LandingPadInst*, 16> &InlinedLPads);

    /// addIncomingPHIValuesFor - Add incoming-PHI values to the unwind
    /// destination block for the given basic block, using the values for the
    /// original invoke's source block.
    void addIncomingPHIValuesFor(BasicBlock *BB) const {
      addIncomingPHIValuesForInto(BB, OuterResumeDest);
    }

    void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
      BasicBlock::iterator I = dest->begin();
      for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
        PHINode *phi = cast<PHINode>(I);
        phi->addIncoming(UnwindDestPHIValues[i], src);
      }
    }
  };
}

/// getInnerResumeDest - Get or create a target for the branch from ResumeInsts.
BasicBlock *InvokeInliningInfo::getInnerResumeDest() {
  if (InnerResumeDest) return InnerResumeDest;

  // Split the landing pad.
  BasicBlock::iterator SplitPoint = CallerLPad; ++SplitPoint;
  InnerResumeDest =
    OuterResumeDest->splitBasicBlock(SplitPoint,
                                     OuterResumeDest->getName() + ".body");

  // The number of incoming edges we expect to the inner landing pad.
  const unsigned PHICapacity = 2;

  // Create corresponding new PHIs for all the PHIs in the outer landing pad.
  BasicBlock::iterator InsertPoint = InnerResumeDest->begin();
  BasicBlock::iterator I = OuterResumeDest->begin();
  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
    PHINode *OuterPHI = cast<PHINode>(I);
    PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
                                        OuterPHI->getName() + ".lpad-body",
                                        InsertPoint);
    OuterPHI->replaceAllUsesWith(InnerPHI);
    InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
  }

  // Create a PHI for the exception values.
  InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
                                     "eh.lpad-body", InsertPoint);
  CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
  InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);

  // All done.
  return InnerResumeDest;
}

/// forwardResume - Forward the 'resume' instruction to the caller's landing pad
/// block. When the landing pad block has only one predecessor, this is a simple
/// branch. When there is more than one predecessor, we need to split the
/// landing pad block after the landingpad instruction and jump to there.
void InvokeInliningInfo::forwardResume(ResumeInst *RI,
                       SmallPtrSet<LandingPadInst*, 16> &InlinedLPads) {
  BasicBlock *Dest = getInnerResumeDest();
  BasicBlock *Src = RI->getParent();

  BranchInst::Create(Dest, Src);

  // Update the PHIs in the destination. They were inserted in an order which
  // makes this work.
  addIncomingPHIValuesForInto(Src, Dest);

  InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
  RI->eraseFromParent();
}

/// HandleCallsInBlockInlinedThroughInvoke - When we inline a basic block into
/// an invoke, we have to turn all of the calls that can throw into invokes.
/// This function analyzes BB to see if there are any such calls, and if so, it
/// rewrites them to be invokes that jump to the outer resume destination
/// recorded in Invoke, and fills in the PHI nodes of that destination with the
/// saved incoming values.
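///
/// An illustrative sketch of the rewrite (the value and block names here are
/// hypothetical, not taken from any real test case): a cloned block containing
///
///   %r = call i32 @may_throw(i32 %x)
///
/// is split at the call and the call becomes, roughly,
///
///   %r = invoke i32 @may_throw(i32 %x)
///           to label %bb.noexc unwind label %outer.lpad
///
/// where %outer.lpad is the caller's original unwind destination and
/// %bb.noexc holds the instructions that followed the call.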
static void HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
                                                   InvokeInliningInfo &Invoke) {
  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = BBI++;

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);

    // If this call cannot unwind, don't convert it to an invoke.
    // Inline asm calls cannot throw.
    if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
      continue;

    // Convert this function call into an invoke instruction. First, split the
    // basic block.
    BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");

    // Delete the unconditional branch inserted by splitBasicBlock.
    BB->getInstList().pop_back();

    // Create the new invoke instruction.
    ImmutableCallSite CS(CI);
    SmallVector<Value*, 8> InvokeArgs(CS.arg_begin(), CS.arg_end());
    InvokeInst *II = InvokeInst::Create(CI->getCalledValue(), Split,
                                        Invoke.getOuterResumeDest(),
                                        InvokeArgs, CI->getName(), BB);
    II->setCallingConv(CI->getCallingConv());
    II->setAttributes(CI->getAttributes());

    // Make sure that anything using the call now uses the invoke! This also
    // updates the CallGraph if present, because it uses a WeakVH.
    CI->replaceAllUsesWith(II);

    // Delete the original call.
    Split->getInstList().pop_front();

    // Update any PHI nodes in the exceptional block to indicate that there is
    // now a new entry in them.
    Invoke.addIncomingPHIValuesFor(BB);
    return;
  }
}

/// HandleInlinedInvoke - If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlinedCodeInfo is information about the code that got inlined.
static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock,
                                ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function; scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.
  InvokeInliningInfo Invoke(II);

  // Get all of the inlined landing pad instructions.
  SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
  for (Function::iterator I = FirstNewBlock, E = Caller->end(); I != E; ++I)
    if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
      InlinedLPads.insert(II->getLandingPadInst());

  // Append the clauses from the outer landing pad instruction into the inlined
  // landing pad instructions.
  LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
  for (SmallPtrSet<LandingPadInst*, 16>::iterator I = InlinedLPads.begin(),
         E = InlinedLPads.end(); I != E; ++I) {
    LandingPadInst *InlinedLPad = *I;
    unsigned OuterNum = OuterLPad->getNumClauses();
    InlinedLPad->reserveClauses(OuterNum);
    for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
      InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
    if (OuterLPad->isCleanup())
      InlinedLPad->setCleanup(true);
  }

  for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E; ++BB) {
    if (InlinedCodeInfo.ContainsCalls)
      HandleCallsInBlockInlinedThroughInvoke(BB, Invoke);

    // Forward any resumes that are remaining here.
    if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
      Invoke.forwardResume(RI, InlinedLPads);
  }

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}

/// UpdateCallGraphAfterInlining - Once we have cloned code over from a callee
/// into the caller, update the specified callgraph to reflect the changes we
/// made. Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
static void UpdateCallGraphAfterInlining(CallSite CS,
                                         Function::iterator FirstNewBlock,
                                         ValueToValueMapTy &VMap,
                                         InlineFunctionInfo &IFI) {
  CallGraph &CG = *IFI.CG;
  const Function *Caller = CS.getInstruction()->getParent()->getParent();
  const Function *Callee = CS.getCalledFunction();
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();

  // Consider the case where CalleeNode == CallerNode.
  CallGraphNode::CalledFunctionsVector CallCache;
  if (CalleeNode == CallerNode) {
    CallCache.assign(I, E);
    I = CallCache.begin();
    E = CallCache.end();
  }

  for (; I != E; ++I) {
    const Value *OrigCall = I->first;

    ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI == VMap.end() || VMI->second == nullptr)
      continue;

    // If the call was inlined, but then constant folded, there is no edge to
    // add. Check for this case.
    Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
    if (!NewCall) continue;

    // Remember that this call site got inlined for the client of
    // InlineFunction.
    IFI.InlinedCalls.push_back(NewCall);

    // It's possible that inlining the callsite will cause it to go from an
    // indirect to a direct call by resolving a function pointer. If this
    // happens, set the callee of the new call site to a more precise
    // destination. This can also happen if the call graph node of the caller
    // was just unnecessarily imprecise.
    if (!I->second->getFunction())
      if (Function *F = CallSite(NewCall).getCalledFunction()) {
        // Indirect call site resolved to direct call.
        CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);

        continue;
      }

    CallerNode->addCalledFunction(CallSite(NewCall), I->second);
  }

  // Update the call graph by deleting the edge from Callee to Caller. We must
  // do this after the loop above in case Caller and Callee are the same.
  CallerNode->removeCallEdgeFor(CS);
}

static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
                                    BasicBlock *InsertBlock,
                                    InlineFunctionInfo &IFI) {
  LLVMContext &Context = Src->getContext();
  Type *VoidPtrTy = Type::getInt8PtrTy(Context);
  Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
  Type *Tys[3] = { VoidPtrTy, VoidPtrTy, Type::getInt64Ty(Context) };
  Function *MemCpyFn = Intrinsic::getDeclaration(M, Intrinsic::memcpy, Tys);
  IRBuilder<> builder(InsertBlock->begin());
  Value *DstCast = builder.CreateBitCast(Dst, VoidPtrTy, "tmp");
  Value *SrcCast = builder.CreateBitCast(Src, VoidPtrTy, "tmp");

  Value *Size;
  if (IFI.DL == nullptr)
    Size = ConstantExpr::getSizeOf(AggTy);
  else
    Size = ConstantInt::get(Type::getInt64Ty(Context),
                            IFI.DL->getTypeStoreSize(AggTy));

  // Always generate a memcpy of alignment 1 here because we don't know
  // the alignment of the src pointer. Other optimizations can infer
  // better alignment.
  Value *CallArgs[] = {
    DstCast, SrcCast, Size,
    ConstantInt::get(Type::getInt32Ty(Context), 1),
    ConstantInt::getFalse(Context) // isVolatile
  };
  builder.CreateCall(MemCpyFn, CallArgs);
}

/// HandleByValArgument - When inlining a call site that has a byval argument,
/// we have to make the implicit memcpy explicit by adding it.
static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
                                  const Function *CalledFunc,
                                  InlineFunctionInfo &IFI,
                                  unsigned ByValAlignment) {
  PointerType *ArgTy = cast<PointerType>(Arg->getType());
  Type *AggTy = ArgTy->getElementType();

  // If the called function is readonly, then it could not mutate the caller's
  // copy of the byval'd memory. In this case, it is safe to elide the copy and
  // temporary.
  if (CalledFunc->onlyReadsMemory()) {
    // If the byval argument has a specified alignment that is greater than the
    // passed in pointer, then we either have to round up the input pointer or
    // give up on this transformation.
    if (ByValAlignment <= 1)  // 0 = unspecified, 1 = no particular alignment.
      return Arg;

    // If the pointer is already known to be sufficiently aligned, or if we can
    // round it up to a larger alignment, then we don't need a temporary.
    if (getOrEnforceKnownAlignment(Arg, ByValAlignment,
                                   IFI.DL) >= ByValAlignment)
      return Arg;

    // Otherwise, we have to make a memcpy to get a safe alignment. This is bad
    // for code quality, but rarely happens and is required for correctness.
  }

  // Create the alloca. If we have DataLayout, use nice alignment.
  unsigned Align = 1;
  if (IFI.DL)
    Align = IFI.DL->getPrefTypeAlignment(AggTy);

  // If the byval had an alignment specified, we *must* use at least that
  // alignment, as it is required by the byval argument (and uses of the
  // pointer inside the callee).
  Align = std::max(Align, ByValAlignment);

  Function *Caller = TheCall->getParent()->getParent();

  Value *NewAlloca = new AllocaInst(AggTy, nullptr, Align, Arg->getName(),
                                    &*Caller->begin()->begin());
  IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));

  // Uses of the argument in the function should use our new alloca
  // instead.
  return NewAlloca;
}

// isUsedByLifetimeMarker - Check whether this Value is used by a lifetime
// intrinsic.
static bool isUsedByLifetimeMarker(Value *V) {
  for (User *U : V->users()) {
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
        return true;
      }
    }
  }
  return false;
}

// hasLifetimeMarkers - Check whether the given alloca already has
// lifetime.start or lifetime.end intrinsics.
static bool hasLifetimeMarkers(AllocaInst *AI) {
  Type *Ty = AI->getType();
  Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
                                       Ty->getPointerAddressSpace());
  if (Ty == Int8PtrTy)
    return isUsedByLifetimeMarker(AI);

  // Do a scan to find all the casts to i8*.
  for (User *U : AI->users()) {
    if (U->getType() != Int8PtrTy) continue;
    if (U->stripPointerCasts() != AI) continue;
    if (isUsedByLifetimeMarker(U))
      return true;
  }
  return false;
}

/// updateInlinedAtInfo - Helper function used by fixupLineNumbers to
/// recursively update InlinedAtEntry of a DebugLoc.
static DebugLoc updateInlinedAtInfo(const DebugLoc &DL,
                                    const DebugLoc &InlinedAtDL,
                                    LLVMContext &Ctx) {
  if (MDNode *IA = DL.getInlinedAt(Ctx)) {
    DebugLoc NewInlinedAtDL
      = updateInlinedAtInfo(DebugLoc::getFromDILocation(IA), InlinedAtDL, Ctx);
    return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(Ctx),
                         NewInlinedAtDL.getAsMDNode(Ctx));
  }

  return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(Ctx),
                       InlinedAtDL.getAsMDNode(Ctx));
}

/// fixupLineNumbers - Update inlined instructions' line numbers to
/// encode the location where these instructions are inlined.
static void fixupLineNumbers(Function *Fn, Function::iterator FI,
                             Instruction *TheCall) {
  DebugLoc TheCallDL = TheCall->getDebugLoc();
  if (TheCallDL.isUnknown())
    return;

  for (; FI != Fn->end(); ++FI) {
    for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
         BI != BE; ++BI) {
      DebugLoc DL = BI->getDebugLoc();
      if (DL.isUnknown()) {
        // If the inlined instruction has no line number, make it look as if it
        // originates from the call location. This is important for
        // ((__always_inline__, __nodebug__)) functions which must use caller
        // location for all instructions in their function body.
        BI->setDebugLoc(TheCallDL);
      } else {
        BI->setDebugLoc(updateInlinedAtInfo(DL, TheCallDL, BI->getContext()));
        if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(BI)) {
          LLVMContext &Ctx = BI->getContext();
          MDNode *InlinedAt = BI->getDebugLoc().getInlinedAt(Ctx);
          DVI->setOperand(2, createInlinedVariable(DVI->getVariable(),
                                                   InlinedAt, Ctx));
        }
      }
    }
  }
}

/// Returns a musttail call instruction if one immediately precedes the given
/// return instruction with an optional bitcast instruction between them.
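///
/// The pattern being matched looks roughly like the following illustrative IR
/// sketch (the value names and types are hypothetical, not from a real test):
///
///   %v = musttail call i8* @f(i8* %p)
///   %c = bitcast i8* %v to i32*        ; optional bitcast of the result
///   ret i32* %c
///
/// Any other instruction between the call and the return means no musttail
/// call is found and nullptr is returned.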
static CallInst *getPrecedingMustTailCall(ReturnInst *RI) {
  Instruction *Prev = RI->getPrevNode();
  if (!Prev)
    return nullptr;

  if (Value *RV = RI->getReturnValue()) {
    if (RV != Prev)
      return nullptr;

    // Look through the optional bitcast.
    if (auto *BI = dyn_cast<BitCastInst>(Prev)) {
      RV = BI->getOperand(0);
      Prev = BI->getPrevNode();
      if (!Prev || RV != Prev)
        return nullptr;
    }
  }

  if (auto *CI = dyn_cast<CallInst>(Prev)) {
    if (CI->isMustTailCall())
      return CI;
  }
  return nullptr;
}

/// InlineFunction - This function inlines the called function into the basic
/// block of the caller. This returns false if it is not possible to inline
/// this call. The program is still in a well defined state if this occurs
/// though.
///
/// Note that this only does one level of inlining. For example, if the
/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
/// exists in the instruction stream. Similarly this will inline a recursive
/// function by one level.
bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  Instruction *TheCall = CS.getInstruction();
  assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
         "Instruction not in function!");

  // If IFI has any state in it, zap it before we fill it in.
  IFI.reset();

  const Function *CalledFunc = CS.getCalledFunction();
  if (!CalledFunc ||                 // Can't inline external function or
      CalledFunc->isDeclaration() || // indirect call, or call to a vararg
      CalledFunc->getFunctionType()->isVarArg()) return false; // function!

  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CS.doesNotThrow();

  BasicBlock *OrigBB = TheCall->getParent();
  Function *Caller = OrigBB->getParent();

  // GC poses two hazards to inlining, which only occur when the callee has GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to the
  //     caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasGC()) {
    if (!Caller->hasGC())
      Caller->setGC(CalledFunc->getGC());
    else if (CalledFunc->getGC() != Caller->getGC())
      return false;
  }

  // Get the personality function from the callee if it contains a landing pad.
  Value *CalleePersonality = nullptr;
  for (Function::const_iterator I = CalledFunc->begin(), E = CalledFunc->end();
       I != E; ++I)
    if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
      const BasicBlock *BB = II->getUnwindDest();
      const LandingPadInst *LP = BB->getLandingPadInst();
      CalleePersonality = LP->getPersonalityFn();
      break;
    }

  // Find the personality function used by the landing pads of the caller. If it
  // exists, then check to see that it matches the personality function used in
  // the callee.
  if (CalleePersonality) {
    for (Function::const_iterator I = Caller->begin(), E = Caller->end();
         I != E; ++I)
      if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
        const BasicBlock *BB = II->getUnwindDest();
        const LandingPadInst *LP = BB->getLandingPadInst();

        // If the personality functions match, then we can perform the
        // inlining. Otherwise, we can't inline.
        // TODO: This isn't 100% true. Some personality functions are proper
        // supersets of others and can be used in place of the other.
        if (LP->getPersonalityFn() != CalleePersonality)
          return false;

        break;
      }
  }

  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  Function::iterator LastBlock = &Caller->back();

  // Make sure to capture all of the return instructions from the cloned
  // function.
  SmallVector<ReturnInst*, 8> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;

  { // Scope to destroy VMap after cloning.
    ValueToValueMapTy VMap;
    // Keep a list of pair (dst, src) to emit byval initializations.
    SmallVector<std::pair<Value*, Value*>, 4> ByValInit;

    assert(CalledFunc->arg_size() == CS.arg_size() &&
           "No varargs calls can be inlined!");

    // Calculate the vector of arguments to pass into the function cloner, which
    // matches up the formal to the actual argument values.
    CallSite::arg_iterator AI = CS.arg_begin();
    unsigned ArgNo = 0;
    for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
         E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;

      // When byval arguments are actually inlined, we need to make the copy
      // implied by them explicit. However, we don't do this if the callee is
      // readonly or readnone, because the copy would be unneeded: the callee
      // doesn't modify the struct.
      if (CS.isByValArgument(ArgNo)) {
        ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
                                        CalledFunc->getParamAlignment(ArgNo+1));
        if (ActualArg != *AI)
          ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI));
      }

      VMap[I] = ActualArg;
    }

    // We want the inliner to prune the code as it copies. We would LOVE to
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll be
    // happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
                              /*ModuleLevelChanges=*/false, Returns, ".i",
                              &InlinedFunctionInfo, IFI.DL, TheCall);

    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    // Inject byval arguments initialization.
    for (std::pair<Value*, Value*> &Init : ByValInit)
      HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(),
                              FirstNewBlock, IFI);

    // Update the callgraph if requested.
    if (IFI.CG)
      UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);

    // Update inlined instructions' line number information.
    fixupLineNumbers(Caller, FirstNewBlock, TheCall);
  }

  // If there are any alloca instructions in the block that used to be the entry
  // block for the callee, move them to the entry block of the caller. First
  // calculate which instruction they should be inserted before. We insert the
  // instructions at the end of the current alloca list.
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
         E = FirstNewBlock->end(); I != E; ) {
      AllocaInst *AI = dyn_cast<AllocaInst>(I++);
      if (!AI) continue;

      // If the alloca is now dead, remove it. This often occurs due to code
      // specialization.
      if (AI->use_empty()) {
        AI->eraseFromParent();
        continue;
      }

      if (!isa<Constant>(AI->getArraySize()))
        continue;

      // Keep track of the static allocas that we inline into the caller.
      IFI.StaticAllocas.push_back(AI);

      // Scan for the block of allocas that we can move over, and move them
      // all at once.
      while (isa<AllocaInst>(I) &&
             isa<Constant>(cast<AllocaInst>(I)->getArraySize())) {
        IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
        ++I;
      }

      // Transfer all of the allocas over in a block. Using splice means
      // that the instructions aren't removed from the symbol table, then
      // reinserted.
      Caller->getEntryBlock().getInstList().splice(InsertPoint,
                                                   FirstNewBlock->getInstList(),
                                                   AI, I);
    }
  }

  bool InlinedMustTailCalls = false;
  if (InlinedFunctionInfo.ContainsCalls) {
    CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
    if (CallInst *CI = dyn_cast<CallInst>(TheCall))
      CallSiteTailKind = CI->getTailCallKind();

    for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
         ++BB) {
      for (Instruction &I : *BB) {
        CallInst *CI = dyn_cast<CallInst>(&I);
        if (!CI)
          continue;

        // We need to reduce the strength of any inlined tail calls. For
        // musttail, we have to avoid introducing potential unbounded stack
        // growth. For example, if functions 'f' and 'g' are mutually recursive
        // with musttail, we can inline 'g' into 'f' so long as we preserve
        // musttail on the cloned call to 'f'. If either the inlined call site
        // or the cloned call site is *not* musttail, the program already has
        // one frame of stack growth, so it's safe to remove musttail. Here is
        // a table of example transformations:
        //
        //    f -> musttail g -> musttail f  ==>  f -> musttail f
        //    f -> musttail g ->     tail f  ==>  f ->     tail f
        //    f ->          g -> musttail f  ==>  f ->          f
        //    f ->          g ->     tail f  ==>  f ->          f
        CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
        ChildTCK = std::min(CallSiteTailKind, ChildTCK);
        CI->setTailCallKind(ChildTCK);
        InlinedMustTailCalls |= CI->isMustTailCall();

        // Calls inlined through a 'nounwind' call site should be marked
        // 'nounwind'.
        if (MarkNoUnwind)
          CI->setDoesNotThrow();
      }
    }
  }

  // Leave lifetime markers for the static allocas, scoping them to the
  // function we just inlined.
  if (InsertLifetime && !IFI.StaticAllocas.empty()) {
    IRBuilder<> builder(FirstNewBlock->begin());
    for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
      AllocaInst *AI = IFI.StaticAllocas[ai];

      // If the alloca is already scoped to something smaller than the whole
      // function then there's no need to add redundant, less accurate markers.
      if (hasLifetimeMarkers(AI))
        continue;

      // Try to determine the size of the allocation.
      ConstantInt *AllocaSize = nullptr;
      if (ConstantInt *AIArraySize =
          dyn_cast<ConstantInt>(AI->getArraySize())) {
        if (IFI.DL) {
          Type *AllocaType = AI->getAllocatedType();
          uint64_t AllocaTypeSize = IFI.DL->getTypeAllocSize(AllocaType);
          uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
          assert(AllocaArraySize > 0 && "array size of AllocaInst is zero");
          // Check that array size doesn't saturate uint64_t and doesn't
          // overflow when it's multiplied by type size.
          if (AllocaArraySize != ~0ULL &&
              UINT64_MAX / AllocaArraySize >= AllocaTypeSize) {
            AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
                                          AllocaArraySize * AllocaTypeSize);
          }
        }
      }

      builder.CreateLifetimeStart(AI, AllocaSize);
      for (ReturnInst *RI : Returns) {
        // Don't insert llvm.lifetime.end calls between a musttail call and a
        // return. The return kills all local allocas.
        if (InlinedMustTailCalls && getPrecedingMustTailCall(RI))
          continue;
        IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
      }
    }
  }

  // If the inlined code contained dynamic alloca instructions, wrap the inlined
  // code with llvm.stacksave/llvm.stackrestore intrinsics.
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    // Get the two intrinsics we care about.
    Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    Function *StackRestore =
      Intrinsic::getDeclaration(M, Intrinsic::stackrestore);

    // Insert the llvm.stacksave.
    CallInst *SavedPtr = IRBuilder<>(FirstNewBlock, FirstNewBlock->begin())
      .CreateCall(StackSave, "savedstack");

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
    for (ReturnInst *RI : Returns) {
      // Don't insert llvm.stackrestore calls between a musttail call and a
      // return. The return will restore the stack pointer.
      if (InlinedMustTailCalls && getPrecedingMustTailCall(RI))
        continue;
      IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
    }
  }

  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any call instructions into invoke instructions.
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
    HandleInlinedInvoke(II, FirstNewBlock, InlinedFunctionInfo);

  // Handle any inlined musttail call sites. In order for a new call site to be
  // musttail, the source of the clone and the inlined call site must have been
  // musttail. Therefore it's safe to return without merging control into the
  // phi below.
  if (InlinedMustTailCalls) {
    // Check if we need to bitcast the result of any musttail calls.
    Type *NewRetTy = Caller->getReturnType();
    bool NeedBitCast = !TheCall->use_empty() && TheCall->getType() != NewRetTy;

    // Handle the returns preceded by musttail calls separately.
    SmallVector<ReturnInst *, 8> NormalReturns;
    for (ReturnInst *RI : Returns) {
      CallInst *ReturnedMustTail = getPrecedingMustTailCall(RI);
      if (!ReturnedMustTail) {
        NormalReturns.push_back(RI);
        continue;
      }
      if (!NeedBitCast)
        continue;

      // Delete the old return and any preceding bitcast.
      BasicBlock *CurBB = RI->getParent();
      auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
      RI->eraseFromParent();
      if (OldCast)
        OldCast->eraseFromParent();

      // Insert a new bitcast and return with the right type.
      IRBuilder<> Builder(CurBB);
      Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
    }

    // Leave behind the normal returns so we can merge control flow.
    std::swap(Returns, NormalReturns);
  }

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
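  //
  // Illustrative sketch of this fast path (block and value names are
  // hypothetical, not taken from a real test case): given
  //
  //   entry:
  //     %r = call i32 @callee(i32 %x)
  //
  // where @callee is a single block ending in "ret i32 %sum", the callee's
  // instructions are spliced in place of the call and uses of %r are rewritten
  // to use %sum, with no new blocks or PHI nodes created.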
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->getInstList().splice(TheCall, FirstNewBlock->getInstList(),
                                 FirstNewBlock->begin(), FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->getBasicBlockList().pop_back();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
      BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);
      NewBr->setDebugLoc(Returns[0]->getDebugLoc());
    }

    // If the return instruction returned a value, replace uses of the call with
    // uses of the returned value.
    if (!TheCall->use_empty()) {
      ReturnInst *R = Returns[0];
      if (TheCall == R->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(R->getReturnValue());
    }
    // Since we are now done with the Call/Invoke, we can delete it.
    TheCall->eraseFromParent();

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->eraseFromParent();

    // We are now done with the inlining.
    return true;
  }

  // Otherwise, we have the normal case of more than one block to inline or
  // multiple return sites.

  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks. How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
  BasicBlock *AfterCallBB;
  BranchInst *CreatedBranchToNormalDest = nullptr;
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {

    // Add an unconditional branch to make this look like the CallInst case...
    CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), TheCall);

    // Split the basic block. This guarantees that no PHI nodes will have to be
    // updated due to new incoming edges, and makes the invoke case more
    // symmetric to the call case.
    AfterCallBB = OrigBB->splitBasicBlock(CreatedBranchToNormalDest,
                                          CalledFunc->getName()+".exit");

  } else {  // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    //
    AfterCallBB = OrigBB->splitBasicBlock(TheCall,
                                          CalledFunc->getName()+".exit");
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  //
  TerminatorInst *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
  Br->setOperand(0, FirstNewBlock);

  // Now that the function is correct, make it a little bit nicer. In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->getBasicBlockList().splice(AfterCallBB, Caller->getBasicBlockList(),
                                     FirstNewBlock, Caller->end());

  // Handle all of the return instructions that we just cloned in, and eliminate
  // any users of the original call/invoke instruction.
  Type *RTy = CalledFunc->getReturnType();

  PHINode *PHI = nullptr;
  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    if (!TheCall->use_empty()) {
      PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(),
                            AfterCallBB->begin());
      // Anything that used the result of the function call should now use the
      // PHI node as their operand.
      TheCall->replaceAllUsesWith(PHI);
    }

    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
    if (PHI) {
      for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
        ReturnInst *RI = Returns[i];
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }
    }

    // Add a branch to the merge points and remove return instructions.
    DebugLoc Loc;
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      BranchInst *BI = BranchInst::Create(AfterCallBB, RI);
      Loc = RI->getDebugLoc();
      BI->setDebugLoc(Loc);
      RI->eraseFromParent();
    }
    // We need to set the debug location to *somewhere* inside the
    // inlined function. The line number may be nonsensical, but the
    // instruction will at least be associated with the right
    // function.
    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Loc);
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!TheCall->use_empty()) {
      if (TheCall == Returns[0]->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
    }

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());

    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());

    // Delete the return instruction and the now-empty ReturnBB.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!TheCall->use_empty()) {
    // No returns, but something is using the return value of the call. Just
    // nuke the result.
    TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  TheCall->eraseFromParent();

  // If we inlined any musttail calls and the original return is now
  // unreachable, delete it. It can only contain a bitcast and ret.
  if (InlinedMustTailCalls && pred_begin(AfterCallBB) == pred_end(AfterCallBB))
    AfterCallBB->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the code of the callee's entry block into the calling block, right
  // before the unconditional branch.
  CalleeEntry->replaceAllUsesWith(OrigBB); // Update PHI nodes
  OrigBB->getInstList().splice(Br, CalleeEntry->getInstList());

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);

  // If we inserted a phi node, check to see if it has a single value (e.g. all
  // the entries are the same or undef). If so, remove the PHI so it doesn't
  // block other optimizations.
  if (PHI) {
    if (Value *V = SimplifyInstruction(PHI, IFI.DL)) {
      PHI->replaceAllUsesWith(V);
      PHI->eraseFromParent();
    }
  }

  return true;
}
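
// A minimal usage sketch for clients of this utility, assuming a CallSite CS
// whose callee is a defined, non-varargs function (CG and DL may be left
// null, as IFI.CG and IFI.DL are optional in the code above); this is only an
// illustration, not code that belongs in this file:
//
//   InlineFunctionInfo IFI(/*CG=*/nullptr, /*DL=*/nullptr);
//   if (InlineFunction(CS, IFI))
//     ; // The call site has been replaced by the callee's body;
//       // IFI.InlinedCalls and IFI.StaticAllocas describe what was inlined.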