//===- InlineFunction.cpp - Code to perform function inlining ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionTracker.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/CommandLine.h"
#include <algorithm>
using namespace llvm;

static cl::opt<bool>
EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
  cl::Hidden,
  cl::desc("Convert noalias attributes to metadata during inlining."));

static cl::opt<bool>
PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining",
  cl::init(true), cl::Hidden,
  cl::desc("Convert align attributes to assumptions during inlining."));

bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  return InlineFunction(CallSite(CI), IFI, InsertLifetime);
}
bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  return InlineFunction(CallSite(II), IFI, InsertLifetime);
}

namespace {
  /// A class for recording information about inlining through an invoke.
  class InvokeInliningInfo {
    BasicBlock *OuterResumeDest; ///< Destination of the invoke's unwind.
    BasicBlock *InnerResumeDest; ///< Destination for the callee's resume.
    LandingPadInst *CallerLPad;  ///< LandingPadInst associated with the invoke.
    PHINode *InnerEHValuesPHI;   ///< PHI for EH values from landingpad insts.
    SmallVector<Value*, 8> UnwindDestPHIValues;

  public:
    InvokeInliningInfo(InvokeInst *II)
      : OuterResumeDest(II->getUnwindDest()), InnerResumeDest(nullptr),
        CallerLPad(nullptr), InnerEHValuesPHI(nullptr) {
      // If there are PHI nodes in the unwind destination block, we need to keep
      // track of which values came into them from the invoke before removing
      // the edge from this block.
      llvm::BasicBlock *InvokeBB = II->getParent();
      BasicBlock::iterator I = OuterResumeDest->begin();
      for (; isa<PHINode>(I); ++I) {
        // Save the value to use for this edge.
        PHINode *PHI = cast<PHINode>(I);
        UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
      }

      CallerLPad = cast<LandingPadInst>(I);
    }

    /// getOuterResumeDest - The outer unwind destination is the target of
    /// unwind edges introduced for calls within the inlined function.
    BasicBlock *getOuterResumeDest() const {
      return OuterResumeDest;
    }

    BasicBlock *getInnerResumeDest();

    LandingPadInst *getLandingPadInst() const { return CallerLPad; }

    /// forwardResume - Forward the 'resume' instruction to the caller's
    /// landing pad block. When the landing pad block has only one predecessor,
    /// this is a simple branch. When there is more than one predecessor, we
    /// need to split the landing pad block after the landingpad instruction
    /// and jump to there.
    void forwardResume(ResumeInst *RI,
                       SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);

    /// addIncomingPHIValuesFor - Add incoming-PHI values to the unwind
    /// destination block for the given basic block, using the values for the
    /// original invoke's source block.
    void addIncomingPHIValuesFor(BasicBlock *BB) const {
      addIncomingPHIValuesForInto(BB, OuterResumeDest);
    }

    void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
      BasicBlock::iterator I = dest->begin();
      for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
        PHINode *phi = cast<PHINode>(I);
        phi->addIncoming(UnwindDestPHIValues[i], src);
      }
    }
  };
}

/// getInnerResumeDest - Get or create a target for the branch from ResumeInsts.
BasicBlock *InvokeInliningInfo::getInnerResumeDest() {
  if (InnerResumeDest) return InnerResumeDest;

  // Split the landing pad.
  BasicBlock::iterator SplitPoint = CallerLPad; ++SplitPoint;
  InnerResumeDest =
    OuterResumeDest->splitBasicBlock(SplitPoint,
                                     OuterResumeDest->getName() + ".body");

  // The number of incoming edges we expect to the inner landing pad.
  const unsigned PHICapacity = 2;

  // Create corresponding new PHIs for all the PHIs in the outer landing pad.
  BasicBlock::iterator InsertPoint = InnerResumeDest->begin();
  BasicBlock::iterator I = OuterResumeDest->begin();
  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
    PHINode *OuterPHI = cast<PHINode>(I);
    PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
                                        OuterPHI->getName() + ".lpad-body",
                                        InsertPoint);
    OuterPHI->replaceAllUsesWith(InnerPHI);
    InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
  }

  // Create a PHI for the exception values.
  InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
                                     "eh.lpad-body", InsertPoint);
  CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
  InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);

  // All done.
  return InnerResumeDest;
}

/// forwardResume - Forward the 'resume' instruction to the caller's landing
/// pad block. When the landing pad block has only one predecessor, this is a
/// simple branch. When there is more than one predecessor, we need to split
/// the landing pad block after the landingpad instruction and jump to there.
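///
/// Illustratively (a sketch, not verbatim IR): after splitting, inlined
/// 'resume %val' instructions become branches to the inner block, and the
/// exception value is merged through a PHI:
///   lpad:
///     %lp = landingpad ...
///     br label %lpad.body
///   lpad.body:
///     %eh.lpad-body = phi ... [ %lp, %lpad ], [ %val, %inlined.resume ]
///     ; original landing pad body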
void InvokeInliningInfo::forwardResume(ResumeInst *RI,
                               SmallPtrSetImpl<LandingPadInst*> &InlinedLPads) {
  BasicBlock *Dest = getInnerResumeDest();
  BasicBlock *Src = RI->getParent();

  BranchInst::Create(Dest, Src);

  // Update the PHIs in the destination. They were inserted in an order which
  // makes this work.
  addIncomingPHIValuesForInto(Src, Dest);

  InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
  RI->eraseFromParent();
}

/// HandleCallsInBlockInlinedThroughInvoke - When we inline a basic block into
/// an invoke, we have to turn all of the calls that can throw into
/// invokes. This function analyzes BB to see if there are any calls, and if
/// so, it rewrites them to be invokes that jump to InvokeDest and fills in
/// the PHI nodes in that block with the values specified in
/// InvokeDestPHIValues.
static void HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
                                                   InvokeInliningInfo &Invoke) {
  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = BBI++;

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);

    // If this call cannot unwind, don't convert it to an invoke.
    // Inline asm calls cannot throw.
    if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
      continue;

    // Convert this function call into an invoke instruction. First, split the
    // basic block.
    BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");

    // Delete the unconditional branch inserted by splitBasicBlock.
    BB->getInstList().pop_back();

    // Create the new invoke instruction.
    ImmutableCallSite CS(CI);
    SmallVector<Value*, 8> InvokeArgs(CS.arg_begin(), CS.arg_end());
    InvokeInst *II = InvokeInst::Create(CI->getCalledValue(), Split,
                                        Invoke.getOuterResumeDest(),
                                        InvokeArgs, CI->getName(), BB);
    II->setDebugLoc(CI->getDebugLoc());
    II->setCallingConv(CI->getCallingConv());
    II->setAttributes(CI->getAttributes());

    // Make sure that anything using the call now uses the invoke! This also
    // updates the CallGraph if present, because it uses a WeakVH.
    CI->replaceAllUsesWith(II);

    // Delete the original call.
    Split->getInstList().pop_front();

    // Update any PHI nodes in the exceptional block to indicate that there is
    // now a new entry in them.
    Invoke.addIncomingPHIValuesFor(BB);
    return;
  }
}

/// HandleInlinedInvoke - If we inlined an invoke site, we need to convert
/// calls in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock,
                                ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.
  InvokeInliningInfo Invoke(II);

  // Get all of the inlined landing pad instructions.
  SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
  for (Function::iterator I = FirstNewBlock, E = Caller->end(); I != E; ++I)
    if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
      InlinedLPads.insert(II->getLandingPadInst());

  // Append the clauses from the outer landing pad instruction into the inlined
  // landing pad instructions.
  LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
  for (LandingPadInst *InlinedLPad : InlinedLPads) {
    unsigned OuterNum = OuterLPad->getNumClauses();
    InlinedLPad->reserveClauses(OuterNum);
    for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
      InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
    if (OuterLPad->isCleanup())
      InlinedLPad->setCleanup(true);
  }

  for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E; ++BB) {
    if (InlinedCodeInfo.ContainsCalls)
      HandleCallsInBlockInlinedThroughInvoke(BB, Invoke);

    // Forward any resumes that are remaining here.
    if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
      Invoke.forwardResume(RI, InlinedLPads);
  }

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}

/// CloneAliasScopeMetadata - When inlining a function that contains noalias
/// scope metadata, this metadata needs to be cloned so that the inlined blocks
/// have different "unique scopes" at every call site. Were this not done, then
/// aliasing scopes from a function inlined into a caller multiple times could
/// not be differentiated (and this would lead to miscompiles because the
/// non-aliasing property communicated by the metadata could have
/// call-site-specific control dependencies).
static void CloneAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap) {
  const Function *CalledFunc = CS.getCalledFunction();
  SetVector<const MDNode *> MD;

  // Note: We could only clone the metadata if it is already used in the
  // caller. I'm omitting that check here because it might confuse
  // inter-procedural alias analysis passes. We can revisit this if it becomes
  // an efficiency or overhead problem.

  for (Function::const_iterator I = CalledFunc->begin(), IE = CalledFunc->end();
       I != IE; ++I)
    for (BasicBlock::const_iterator J = I->begin(), JE = I->end(); J != JE; ++J) {
      if (const MDNode *M = J->getMetadata(LLVMContext::MD_alias_scope))
        MD.insert(M);
      if (const MDNode *M = J->getMetadata(LLVMContext::MD_noalias))
        MD.insert(M);
    }

  if (MD.empty())
    return;

  // Walk the existing metadata, adding the complete (perhaps cyclic) chain to
  // the set.
  SmallVector<const Value *, 16> Queue(MD.begin(), MD.end());
  while (!Queue.empty()) {
    const MDNode *M = cast<MDNode>(Queue.pop_back_val());
    for (unsigned i = 0, ie = M->getNumOperands(); i != ie; ++i)
      if (const MDNode *M1 = dyn_cast<MDNode>(M->getOperand(i)))
        if (MD.insert(M1))
          Queue.push_back(M1);
  }

  // Now we have a complete set of all metadata in the chains used to specify
  // the noalias scopes and the lists of those scopes.
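  // Because these chains can be cyclic (a scope's operand list may, directly
  // or indirectly, refer back to the scope itself), we clone in two phases:
  // first create a temporary placeholder node for every node in the set, then
  // build each real replacement with its operands resolved through the map
  // and RAUW the placeholder. Conceptually (a sketch), a self-referential
  // scope node whose first operand is the node itself must map to a fresh,
  // distinct node of the same shape rather than being shared across call
  // sites.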
  SmallVector<MDNode *, 16> DummyNodes;
  DenseMap<const MDNode *, TrackingVH<MDNode> > MDMap;
  for (SetVector<const MDNode *>::iterator I = MD.begin(), IE = MD.end();
       I != IE; ++I) {
    MDNode *Dummy = MDNode::getTemporary(CalledFunc->getContext(), None);
    DummyNodes.push_back(Dummy);
    MDMap[*I] = Dummy;
  }

  // Create new metadata nodes to replace the dummy nodes, replacing old
  // metadata references with either a dummy node or an already-created new
  // node.
  for (SetVector<const MDNode *>::iterator I = MD.begin(), IE = MD.end();
       I != IE; ++I) {
    SmallVector<Value *, 4> NewOps;
    for (unsigned i = 0, ie = (*I)->getNumOperands(); i != ie; ++i) {
      const Value *V = (*I)->getOperand(i);
      if (const MDNode *M = dyn_cast<MDNode>(V))
        NewOps.push_back(MDMap[M]);
      else
        NewOps.push_back(const_cast<Value *>(V));
    }

    MDNode *NewM = MDNode::get(CalledFunc->getContext(), NewOps),
           *TempM = MDMap[*I];

    TempM->replaceAllUsesWith(NewM);
  }

  // Now replace the metadata in the new inlined instructions with the
  // replacements from the map.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (!VMI->second)
      continue;

    Instruction *NI = dyn_cast<Instruction>(VMI->second);
    if (!NI)
      continue;

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_alias_scope)) {
      MDNode *NewMD = MDMap[M];
      // If the call site also had alias scope metadata (a list of scopes to
      // which instructions inside it might belong), propagate those scopes to
      // the inlined instructions.
      if (MDNode *CSM =
            CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
        NewMD = MDNode::concatenate(NewMD, CSM);
      NI->setMetadata(LLVMContext::MD_alias_scope, NewMD);
    } else if (NI->mayReadOrWriteMemory()) {
      if (MDNode *M =
            CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
        NI->setMetadata(LLVMContext::MD_alias_scope, M);
    }

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_noalias)) {
      MDNode *NewMD = MDMap[M];
      // If the call site also had noalias metadata (a list of scopes with
      // which instructions inside it don't alias), propagate those scopes to
      // the inlined instructions.
      if (MDNode *CSM =
            CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
        NewMD = MDNode::concatenate(NewMD, CSM);
      NI->setMetadata(LLVMContext::MD_noalias, NewMD);
    } else if (NI->mayReadOrWriteMemory()) {
      if (MDNode *M =
            CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
        NI->setMetadata(LLVMContext::MD_noalias, M);
    }
  }

  // Now that everything has been replaced, delete the dummy nodes.
  for (unsigned i = 0, ie = DummyNodes.size(); i != ie; ++i)
    MDNode::deleteTemporary(DummyNodes[i]);
}

/// AddAliasScopeMetadata - If the inlined function has noalias arguments, then
/// add new alias scopes for each noalias argument, tag the mapped noalias
/// parameters with noalias metadata specifying the new scope, and tag all
/// non-derived loads, stores and memory intrinsics with the new alias scopes.
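///
/// Illustratively (a sketch, not verbatim IR): after inlining
///   define void @f(float* noalias %a, float* %b)
/// the cloned accesses end up tagged roughly like
///   %t = load float* %b, !noalias !5           ; known not to alias %a
///   store float %t, float* %a, !alias.scope !5 ; belongs to %a's new scope
/// where !5 names the scope list created for %a at this call site.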
static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap,
                                  const DataLayout *DL, AliasAnalysis *AA) {
  if (!EnableNoAliasConversion)
    return;

  const Function *CalledFunc = CS.getCalledFunction();
  SmallVector<const Argument *, 4> NoAliasArgs;

  for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
       E = CalledFunc->arg_end(); I != E; ++I) {
    if (I->hasNoAliasAttr() && !I->hasNUses(0))
      NoAliasArgs.push_back(I);
  }

  if (NoAliasArgs.empty())
    return;

  // To do a good job, if a noalias variable is captured, we need to know if
  // the capture point dominates the particular use we're considering.
  DominatorTree DT;
  DT.recalculate(const_cast<Function&>(*CalledFunc));

  // noalias indicates that pointer values based on the argument do not alias
  // pointer values which are not based on it. So we add a new "scope" for each
  // noalias function argument. Accesses using pointers based on that argument
  // become part of that alias scope, accesses using pointers not based on that
  // argument are tagged as noalias with that scope.

  DenseMap<const Argument *, MDNode *> NewScopes;
  MDBuilder MDB(CalledFunc->getContext());

  // Create a new scope domain for this function.
  MDNode *NewDomain =
    MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
  for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
    const Argument *A = NoAliasArgs[i];

    std::string Name = CalledFunc->getName();
    if (A->hasName()) {
      Name += ": %";
      Name += A->getName();
    } else {
      Name += ": argument ";
      Name += utostr(i);
    }

    // Note: We always create a new anonymous root here. This is true
    // regardless of the linkage of the callee because the aliasing "scope" is
    // not just a property of the callee, but also all control dependencies in
    // the caller.
    MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
    NewScopes.insert(std::make_pair(A, NewScope));
  }

  // Iterate over all new instructions in the map; for all memory-access
  // instructions, add the alias scope metadata.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
      if (!VMI->second)
        continue;

      Instruction *NI = dyn_cast<Instruction>(VMI->second);
      if (!NI)
        continue;

      bool IsArgMemOnlyCall = false, IsFuncCall = false;
      SmallVector<const Value *, 2> PtrArgs;

      if (const LoadInst *LI = dyn_cast<LoadInst>(I))
        PtrArgs.push_back(LI->getPointerOperand());
      else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
        PtrArgs.push_back(SI->getPointerOperand());
      else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
        PtrArgs.push_back(VAAI->getPointerOperand());
      else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
        PtrArgs.push_back(CXI->getPointerOperand());
      else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
        PtrArgs.push_back(RMWI->getPointerOperand());
      else if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
        // If we know that the call does not access memory, then we'll still
        // know that about the inlined clone of this call site, and we don't
        // need to add metadata.
        if (ICS.doesNotAccessMemory())
          continue;

        IsFuncCall = true;
        if (AA) {
          AliasAnalysis::ModRefBehavior MRB = AA->getModRefBehavior(ICS);
          if (MRB == AliasAnalysis::OnlyAccessesArgumentPointees ||
              MRB == AliasAnalysis::OnlyReadsArgumentPointees)
            IsArgMemOnlyCall = true;
        }

        for (ImmutableCallSite::arg_iterator AI = ICS.arg_begin(),
             AE = ICS.arg_end(); AI != AE; ++AI) {
          // We need to check the underlying objects of all arguments, not just
          // the pointer arguments, because we might be passing pointers as
          // integers, etc.
          // However, if we know that the call only accesses pointer arguments,
          // then we only need to check the pointer arguments.
          if (IsArgMemOnlyCall && !(*AI)->getType()->isPointerTy())
            continue;

          PtrArgs.push_back(*AI);
        }
      }

      // If we found no pointers, then this instruction is not suitable for
      // pairing with an instruction to receive aliasing metadata.
      // However, if this is a call, then we might just alias with none of the
      // noalias arguments.
      if (PtrArgs.empty() && !IsFuncCall)
        continue;

      // It is possible that there is only one underlying object, but you
      // need to go through several PHIs to see it, and thus could be
      // repeated in the Objects list.
      SmallPtrSet<const Value *, 4> ObjSet;
      SmallVector<Value *, 4> Scopes, NoAliases;

      SmallSetVector<const Argument *, 4> NAPtrArgs;
      for (unsigned i = 0, ie = PtrArgs.size(); i != ie; ++i) {
        SmallVector<Value *, 4> Objects;
        GetUnderlyingObjects(const_cast<Value*>(PtrArgs[i]),
                             Objects, DL, /* MaxLookup = */ 0);

        for (Value *O : Objects)
          ObjSet.insert(O);
      }

      // Figure out if we're derived from anything that is not a noalias
      // argument.
      bool CanDeriveViaCapture = false, UsesAliasingPtr = false;
      for (const Value *V : ObjSet) {
        // Is this value a constant that cannot be derived from any pointer
        // value? (We need to exclude constant expressions, for example, that
        // are formed from arithmetic on global symbols.)
        bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
                             isa<ConstantPointerNull>(V) ||
                             isa<ConstantDataVector>(V) || isa<UndefValue>(V);
        if (IsNonPtrConst)
          continue;

        // If this is anything other than a noalias argument, then we cannot
        // completely describe the aliasing properties using alias.scope
        // metadata (and, thus, won't add any).
        if (const Argument *A = dyn_cast<Argument>(V)) {
          if (!A->hasNoAliasAttr())
            UsesAliasingPtr = true;
        } else {
          UsesAliasingPtr = true;
        }

        // If this is not some identified function-local object (which cannot
        // directly alias a noalias argument), or some other argument (which,
        // by definition, also cannot alias a noalias argument), then we could
        // alias a noalias argument that has been captured.
        if (!isa<Argument>(V) &&
            !isIdentifiedFunctionLocal(const_cast<Value*>(V)))
          CanDeriveViaCapture = true;
      }

      // A function call can always get captured noalias pointers (via other
      // parameters, globals, etc.).
      if (IsFuncCall && !IsArgMemOnlyCall)
        CanDeriveViaCapture = true;

      // First, we want to figure out all of the sets with which we definitely
      // don't alias. Iterate over all noalias sets, and add those for which:
      //   1. The noalias argument is not in the set of objects from which we
      //      definitely derive.
      //   2. The noalias argument has not yet been captured.
      // An arbitrary function that might load pointers could see captured
      // noalias arguments via other noalias arguments or globals, and so we
      // must always check for prior capture.
      for (const Argument *A : NoAliasArgs) {
        if (!ObjSet.count(A) && (!CanDeriveViaCapture ||
                                 // It might be tempting to skip the
                                 // PointerMayBeCapturedBefore check if
                                 // A->hasNoCaptureAttr() is true, but this is
                                 // incorrect because nocapture only guarantees
                                 // that no copies outlive the function, not
                                 // that the value cannot be locally captured.
                                 !PointerMayBeCapturedBefore(A,
                                   /* ReturnCaptures */ false,
                                   /* StoreCaptures */ false, I, &DT)))
          NoAliases.push_back(NewScopes[A]);
      }

      if (!NoAliases.empty())
        NI->setMetadata(LLVMContext::MD_noalias, MDNode::concatenate(
          NI->getMetadata(LLVMContext::MD_noalias),
          MDNode::get(CalledFunc->getContext(), NoAliases)));

      // Next, we want to figure out all of the sets to which we might belong.
      // We might belong to a set if the noalias argument is in the set of
      // underlying objects. If there is some non-noalias argument in our list
      // of underlying objects, then we cannot add a scope because the fact
      // that some access does not alias with any set of our noalias arguments
      // cannot itself guarantee that it does not alias with this access
      // (because there is some pointer of unknown origin involved and the
      // other access might also depend on this pointer). We also cannot add
      // scopes to arbitrary functions unless we know they don't access any
      // non-parameter pointer-values.
      bool CanAddScopes = !UsesAliasingPtr;
      if (CanAddScopes && IsFuncCall)
        CanAddScopes = IsArgMemOnlyCall;

      if (CanAddScopes)
        for (const Argument *A : NoAliasArgs) {
          if (ObjSet.count(A))
            Scopes.push_back(NewScopes[A]);
        }

      if (!Scopes.empty())
        NI->setMetadata(LLVMContext::MD_alias_scope, MDNode::concatenate(
          NI->getMetadata(LLVMContext::MD_alias_scope),
          MDNode::get(CalledFunc->getContext(), Scopes)));
    }
  }
}

/// If the inlined function has non-byval align arguments, then
/// add @llvm.assume-based alignment assumptions to preserve this information.
static void AddAlignmentAssumptions(CallSite CS, InlineFunctionInfo &IFI) {
  if (!PreserveAlignmentAssumptions || !IFI.DL)
    return;

  // To avoid inserting redundant assumptions, we should check for assumptions
  // already in the caller. To do this, we might need a DT of the caller.
  DominatorTree DT;
  bool DTCalculated = false;

  const Function *CalledFunc = CS.getCalledFunction();
  for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
       E = CalledFunc->arg_end(); I != E; ++I) {
    unsigned Align = I->getType()->isPointerTy() ? I->getParamAlignment() : 0;
    if (Align && !I->hasByValOrInAllocaAttr() && !I->hasNUses(0)) {
      if (!DTCalculated) {
        DT.recalculate(const_cast<Function&>(*CS.getInstruction()->getParent()
                                               ->getParent()));
        DTCalculated = true;
      }

      // If we can already prove the asserted alignment in the context of the
      // caller, then don't bother inserting the assumption.
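      // Illustratively (a sketch of what CreateAlignmentAssumption expands
      // to; the exact IR may differ): for a pointer argument %ptr with
      // 'align 32', this emits something like
      //   %ptrint = ptrtoint i8* %ptr to i64
      //   %maskedptr = and i64 %ptrint, 31
      //   %maskcond = icmp eq i64 %maskedptr, 0
      //   call void @llvm.assume(i1 %maskcond)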
      Value *Arg = CS.getArgument(I->getArgNo());
      if (getKnownAlignment(Arg, IFI.DL, IFI.AT, CS.getInstruction(),
                            &DT) >= Align)
        continue;

      IRBuilder<>(CS.getInstruction()).CreateAlignmentAssumption(*IFI.DL, Arg,
                                                                 Align);
    }
  }
}

/// UpdateCallGraphAfterInlining - Once we have cloned code over from a callee
/// into the caller, update the specified callgraph to reflect the changes we
/// made. Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
static void UpdateCallGraphAfterInlining(CallSite CS,
                                         Function::iterator FirstNewBlock,
                                         ValueToValueMapTy &VMap,
                                         InlineFunctionInfo &IFI) {
  CallGraph &CG = *IFI.CG;
  const Function *Caller = CS.getInstruction()->getParent()->getParent();
  const Function *Callee = CS.getCalledFunction();
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();

  // Consider the case where CalleeNode == CallerNode.
  CallGraphNode::CalledFunctionsVector CallCache;
  if (CalleeNode == CallerNode) {
    CallCache.assign(I, E);
    I = CallCache.begin();
    E = CallCache.end();
  }

  for (; I != E; ++I) {
    const Value *OrigCall = I->first;

    ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI == VMap.end() || VMI->second == nullptr)
      continue;

    // If the call was inlined, but then constant folded, there is no edge to
    // add. Check for this case.
    Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
    if (!NewCall) continue;

    // Remember that this call site got inlined for the client of
    // InlineFunction.
    IFI.InlinedCalls.push_back(NewCall);

    // It's possible that inlining the callsite will cause it to go from an
    // indirect to a direct call by resolving a function pointer. If this
    // happens, set the callee of the new call site to a more precise
    // destination. This can also happen if the call graph node of the caller
    // was just unnecessarily imprecise.
    if (!I->second->getFunction())
      if (Function *F = CallSite(NewCall).getCalledFunction()) {
        // Indirect call site resolved to direct call.
        CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);

        continue;
      }

    CallerNode->addCalledFunction(CallSite(NewCall), I->second);
  }

  // Update the call graph by deleting the edge from Callee to Caller. We must
  // do this after the loop above in case Caller and Callee are the same.
  CallerNode->removeCallEdgeFor(CS);
}

static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
                                    BasicBlock *InsertBlock,
                                    InlineFunctionInfo &IFI) {
  Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
  IRBuilder<> Builder(InsertBlock->begin());

  Value *Size;
  if (IFI.DL == nullptr)
    Size = ConstantExpr::getSizeOf(AggTy);
  else
    Size = Builder.getInt64(IFI.DL->getTypeStoreSize(AggTy));

  // Always generate a memcpy of alignment 1 here because we don't know
  // the alignment of the src pointer. Other optimizations can infer
  // better alignment.
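  // (Illustratively, a sketch of the emitted call for an aggregate of <size>
  // bytes:
  //   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src,
  //                                        i64 <size>, i32 1, i1 false) )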
  Builder.CreateMemCpy(Dst, Src, Size, /*Align=*/1);
}

/// HandleByValArgument - When inlining a call site that has a byval argument,
/// we have to make the implicit memcpy explicit by adding it.
static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
                                  const Function *CalledFunc,
                                  InlineFunctionInfo &IFI,
                                  unsigned ByValAlignment) {
  PointerType *ArgTy = cast<PointerType>(Arg->getType());
  Type *AggTy = ArgTy->getElementType();

  // If the called function is readonly, then it could not mutate the caller's
  // copy of the byval'd memory. In this case, it is safe to elide the copy and
  // temporary.
  if (CalledFunc->onlyReadsMemory()) {
    // If the byval argument has a specified alignment that is greater than the
    // passed in pointer, then we either have to round up the input pointer or
    // give up on this transformation.
    if (ByValAlignment <= 1)  // 0 = unspecified, 1 = no particular alignment.
      return Arg;

    // If the pointer is already known to be sufficiently aligned, or if we can
    // round it up to a larger alignment, then we don't need a temporary.
    if (getOrEnforceKnownAlignment(Arg, ByValAlignment,
                                   IFI.DL, IFI.AT, TheCall) >= ByValAlignment)
      return Arg;

    // Otherwise, we have to make a memcpy to get a safe alignment. This is bad
    // for code quality, but rarely happens and is required for correctness.
  }

  // Create the alloca. If we have DataLayout, use nice alignment.
  unsigned Align = 1;
  if (IFI.DL)
    Align = IFI.DL->getPrefTypeAlignment(AggTy);

  // If the byval had an alignment specified, we *must* use at least that
  // alignment, as it is required by the byval argument (and uses of the
  // pointer inside the callee).
  Align = std::max(Align, ByValAlignment);

  Function *Caller = TheCall->getParent()->getParent();

  Value *NewAlloca = new AllocaInst(AggTy, nullptr, Align, Arg->getName(),
                                    &*Caller->begin()->begin());
  IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));

  // Uses of the argument in the function should use our new alloca
  // instead.
  return NewAlloca;
}

// isUsedByLifetimeMarker - Check whether this Value is used by a lifetime
// intrinsic.
static bool isUsedByLifetimeMarker(Value *V) {
  for (User *U : V->users()) {
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
        return true;
      }
    }
  }
  return false;
}

// hasLifetimeMarkers - Check whether the given alloca already has
// lifetime.start or lifetime.end intrinsics.
static bool hasLifetimeMarkers(AllocaInst *AI) {
  Type *Ty = AI->getType();
  Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
                                       Ty->getPointerAddressSpace());
  if (Ty == Int8PtrTy)
    return isUsedByLifetimeMarker(AI);

  // Do a scan to find all the casts to i8*.
  for (User *U : AI->users()) {
    if (U->getType() != Int8PtrTy) continue;
    if (U->stripPointerCasts() != AI) continue;
    if (isUsedByLifetimeMarker(U))
      return true;
  }
  return false;
}

/// updateInlinedAtInfo - Helper function used by fixupLineNumbers to
/// recursively update InlinedAtEntry of a DebugLoc.
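///
/// For example (a sketch): suppose 'bar' was previously inlined into 'foo',
/// so an instruction's DebugLoc L already carries inlinedAt = C1 (the call
/// site in 'foo'). When 'foo' is now inlined into 'main' at call location C2,
/// the chain is rebuilt bottom-up so that L's inlinedAt becomes C1', where
/// C1' is C1 with its inlinedAt set to C2.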
static DebugLoc updateInlinedAtInfo(const DebugLoc &DL,
                                    const DebugLoc &InlinedAtDL,
                                    LLVMContext &Ctx) {
  if (MDNode *IA = DL.getInlinedAt(Ctx)) {
    DebugLoc NewInlinedAtDL
      = updateInlinedAtInfo(DebugLoc::getFromDILocation(IA), InlinedAtDL, Ctx);
    return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(Ctx),
                         NewInlinedAtDL.getAsMDNode(Ctx));
  }

  return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(Ctx),
                       InlinedAtDL.getAsMDNode(Ctx));
}

/// fixupLineNumbers - Update inlined instructions' line numbers to
/// encode the location where these instructions are inlined.
static void fixupLineNumbers(Function *Fn, Function::iterator FI,
                             Instruction *TheCall) {
  DebugLoc TheCallDL = TheCall->getDebugLoc();
  if (TheCallDL.isUnknown())
    return;

  for (; FI != Fn->end(); ++FI) {
    for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
         BI != BE; ++BI) {
      DebugLoc DL = BI->getDebugLoc();
      if (DL.isUnknown()) {
        // If the inlined instruction has no line number, make it look as if it
        // originates from the call location. This is important for
        // ((__always_inline__, __nodebug__)) functions which must use caller
        // location for all instructions in their function body.
        BI->setDebugLoc(TheCallDL);
      } else {
        BI->setDebugLoc(updateInlinedAtInfo(DL, TheCallDL, BI->getContext()));
        if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(BI)) {
          LLVMContext &Ctx = BI->getContext();
          MDNode *InlinedAt = BI->getDebugLoc().getInlinedAt(Ctx);
          DVI->setOperand(2, createInlinedVariable(DVI->getVariable(),
                                                   InlinedAt, Ctx));
        }
      }
    }
  }
}

/// InlineFunction - This function inlines the called function into the basic
/// block of the caller. This returns false if it is not possible to inline
/// this call. The program is still in a well defined state if this occurs
/// though.
///
/// Note that this only does one level of inlining. For example, if the
/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C'
/// now exists in the instruction stream. Similarly this will inline a
/// recursive function by one level.
bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  Instruction *TheCall = CS.getInstruction();
  assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
         "Instruction not in function!");

  // If IFI has any state in it, zap it before we fill it in.
  IFI.reset();

  const Function *CalledFunc = CS.getCalledFunction();
  if (!CalledFunc ||                 // Can't inline external function or
      CalledFunc->isDeclaration() || // indirect call, or call to a vararg
      CalledFunc->getFunctionType()->isVarArg()) return false; // function!

  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CS.doesNotThrow();

  BasicBlock *OrigBB = TheCall->getParent();
  Function *Caller = OrigBB->getParent();

  // GC poses two hazards to inlining, which only occur when the callee has GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to
  //     the caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasGC()) {
    if (!Caller->hasGC())
      Caller->setGC(CalledFunc->getGC());
    else if (CalledFunc->getGC() != Caller->getGC())
      return false;
  }

  // Get the personality function from the callee if it contains a landing pad.
  Value *CalleePersonality = nullptr;
  for (Function::const_iterator I = CalledFunc->begin(), E = CalledFunc->end();
       I != E; ++I)
    if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
      const BasicBlock *BB = II->getUnwindDest();
      const LandingPadInst *LP = BB->getLandingPadInst();
      CalleePersonality = LP->getPersonalityFn();
      break;
    }

  // Find the personality function used by the landing pads of the caller. If
  // it exists, then check to see that it matches the personality function used
  // in the callee.
  if (CalleePersonality) {
    for (Function::const_iterator I = Caller->begin(), E = Caller->end();
         I != E; ++I)
      if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
        const BasicBlock *BB = II->getUnwindDest();
        const LandingPadInst *LP = BB->getLandingPadInst();

        // If the personality functions match, then we can perform the
        // inlining. Otherwise, we can't inline.
        // TODO: This isn't 100% true. Some personality functions are proper
        // supersets of others and can be used in place of the other.
        if (LP->getPersonalityFn() != CalleePersonality)
          return false;

        break;
      }
  }

  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  Function::iterator LastBlock = &Caller->back();

  // Make sure to capture all of the return instructions from the cloned
  // function.
  SmallVector<ReturnInst*, 8> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;

  { // Scope to destroy VMap after cloning.
    ValueToValueMapTy VMap;
    // Keep a list of pair (dst, src) to emit byval initializations.
    SmallVector<std::pair<Value*, Value*>, 4> ByValInit;

    assert(CalledFunc->arg_size() == CS.arg_size() &&
           "No varargs calls can be inlined!");

    // Calculate the vector of arguments to pass into the function cloner,
    // which matches up the formal to the actual argument values.
    CallSite::arg_iterator AI = CS.arg_begin();
    unsigned ArgNo = 0;
    for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
         E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;

      // When byval arguments are actually inlined, we need to make the copy
      // implied by them explicit. However, we don't do this if the callee is
      // readonly or readnone, because the copy would be unneeded: the callee
      // doesn't modify the struct.
      if (CS.isByValArgument(ArgNo)) {
        ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
                                        CalledFunc->getParamAlignment(ArgNo+1));
        if (ActualArg != *AI)
          ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI));
      }

      VMap[I] = ActualArg;
    }

    // Add alignment assumptions if necessary. We do this before the inlined
    // instructions are actually cloned into the caller so that we can easily
    // check what will be known at the start of the inlined code.
    AddAlignmentAssumptions(CS, IFI);

    // We want the inliner to prune the code as it copies.
    // We would LOVE to have no dead or constant instructions leftover after
    // inlining occurs (which can happen, e.g., because an argument was
    // constant), but we'll be happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
                              /*ModuleLevelChanges=*/false, Returns, ".i",
                              &InlinedFunctionInfo, IFI.DL, TheCall);

    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    // Inject byval arguments initialization.
    for (std::pair<Value*, Value*> &Init : ByValInit)
      HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(),
                              FirstNewBlock, IFI);

    // Update the callgraph if requested.
    if (IFI.CG)
      UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);

    // Update inlined instructions' line number information.
    fixupLineNumbers(Caller, FirstNewBlock, TheCall);

    // Clone existing noalias metadata if necessary.
    CloneAliasScopeMetadata(CS, VMap);

    // Add noalias metadata if necessary.
    AddAliasScopeMetadata(CS, VMap, IFI.DL, IFI.AA);

    // FIXME: We could register any cloned assumptions instead of clearing the
    // whole function's cache.
    if (IFI.AT)
      IFI.AT->forgetCachedAssumptions(Caller);
  }

  // If there are any alloca instructions in the block that used to be the
  // entry block for the callee, move them to the entry block of the caller.
  // First calculate which instruction they should be inserted before. We
  // insert the instructions at the end of the current alloca list.
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
         E = FirstNewBlock->end(); I != E; ) {
      AllocaInst *AI = dyn_cast<AllocaInst>(I++);
      if (!AI) continue;

      // If the alloca is now dead, remove it. This often occurs due to code
      // specialization.
      if (AI->use_empty()) {
        AI->eraseFromParent();
        continue;
      }

      if (!isa<Constant>(AI->getArraySize()))
        continue;

      // Keep track of the static allocas that we inline into the caller.
      IFI.StaticAllocas.push_back(AI);

      // Scan for the block of allocas that we can move over, and move them
      // all at once.
      while (isa<AllocaInst>(I) &&
             isa<Constant>(cast<AllocaInst>(I)->getArraySize())) {
        IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
        ++I;
      }

      // Transfer all of the allocas over in a block. Using splice means
      // that the instructions aren't removed from the symbol table, then
      // reinserted.
      Caller->getEntryBlock().getInstList().splice(InsertPoint,
                                                   FirstNewBlock->getInstList(),
                                                   AI, I);
    }
  }

  bool InlinedMustTailCalls = false;
  if (InlinedFunctionInfo.ContainsCalls) {
    CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
    if (CallInst *CI = dyn_cast<CallInst>(TheCall))
      CallSiteTailKind = CI->getTailCallKind();

    for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
         ++BB) {
      for (Instruction &I : *BB) {
        CallInst *CI = dyn_cast<CallInst>(&I);
        if (!CI)
          continue;

        // We need to reduce the strength of any inlined tail calls. For
        // musttail, we have to avoid introducing potential unbounded stack
        // growth.
        // For example, if functions 'f' and 'g' are mutually recursive
        // with musttail, we can inline 'g' into 'f' so long as we preserve
        // musttail on the cloned call to 'f'. If either the inlined call site
        // or the cloned call site is *not* musttail, the program already has
        // one frame of stack growth, so it's safe to remove musttail. Here is
        // a table of example transformations:
        //
        //    f -> musttail g -> musttail f  ==>  f -> musttail f
        //    f -> musttail g ->     tail f  ==>  f ->     tail f
        //    f ->          g -> musttail f  ==>  f ->          f
        //    f ->          g ->     tail f  ==>  f ->          f
        CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
        ChildTCK = std::min(CallSiteTailKind, ChildTCK);
        CI->setTailCallKind(ChildTCK);
        InlinedMustTailCalls |= CI->isMustTailCall();

        // Calls inlined through a 'nounwind' call site should be marked
        // 'nounwind'.
        if (MarkNoUnwind)
          CI->setDoesNotThrow();
      }
    }
  }

  // Leave lifetime markers for the static allocas, scoping them to the
  // function we just inlined.
  if (InsertLifetime && !IFI.StaticAllocas.empty()) {
    IRBuilder<> builder(FirstNewBlock->begin());
    for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
      AllocaInst *AI = IFI.StaticAllocas[ai];

      // If the alloca is already scoped to something smaller than the whole
      // function then there's no need to add redundant, less accurate markers.
      if (hasLifetimeMarkers(AI))
        continue;

      // Try to determine the size of the allocation.
      ConstantInt *AllocaSize = nullptr;
      if (ConstantInt *AIArraySize =
          dyn_cast<ConstantInt>(AI->getArraySize())) {
        if (IFI.DL) {
          Type *AllocaType = AI->getAllocatedType();
          uint64_t AllocaTypeSize = IFI.DL->getTypeAllocSize(AllocaType);
          uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
          assert(AllocaArraySize > 0 && "array size of AllocaInst is zero");
          // Check that array size doesn't saturate uint64_t and doesn't
          // overflow when it's multiplied by type size.
          if (AllocaArraySize != ~0ULL &&
              UINT64_MAX / AllocaArraySize >= AllocaTypeSize) {
            AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
                                          AllocaArraySize * AllocaTypeSize);
          }
        }
      }

      builder.CreateLifetimeStart(AI, AllocaSize);
      for (ReturnInst *RI : Returns) {
        // Don't insert llvm.lifetime.end calls between a musttail call and a
        // return. The return kills all local allocas.
        if (InlinedMustTailCalls &&
            RI->getParent()->getTerminatingMustTailCall())
          continue;
        IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
      }
    }
  }

  // If the inlined code contained dynamic alloca instructions, wrap the
  // inlined code with llvm.stacksave/llvm.stackrestore intrinsics.
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    // Get the two intrinsics we care about.
    Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    Function *StackRestore =
      Intrinsic::getDeclaration(M, Intrinsic::stackrestore);

    // Insert the llvm.stacksave.
    CallInst *SavedPtr = IRBuilder<>(FirstNewBlock, FirstNewBlock->begin())
                             .CreateCall(StackSave, "savedstack");

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
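    // (Sketch of the resulting shape, illustrative only:
    //    %savedstack = call i8* @llvm.stacksave()
    //    ... inlined body with dynamic allocas ...
    //    call void @llvm.stackrestore(i8* %savedstack)
    //  so the callee's dynamic stack is released on every return path.)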
    for (ReturnInst *RI : Returns) {
      // Don't insert llvm.stackrestore calls between a musttail call and a
      // return. The return will restore the stack pointer.
      if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
        continue;
      IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
    }
  }

  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any call instructions into invoke instructions.
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
    HandleInlinedInvoke(II, FirstNewBlock, InlinedFunctionInfo);

  // Handle any inlined musttail call sites. In order for a new call site to be
  // musttail, the source of the clone and the inlined call site must have been
  // musttail. Therefore it's safe to return without merging control into the
  // phi below.
  if (InlinedMustTailCalls) {
    // Check if we need to bitcast the result of any musttail calls.
    Type *NewRetTy = Caller->getReturnType();
    bool NeedBitCast = !TheCall->use_empty() && TheCall->getType() != NewRetTy;

    // Handle the returns preceded by musttail calls separately.
    SmallVector<ReturnInst *, 8> NormalReturns;
    for (ReturnInst *RI : Returns) {
      CallInst *ReturnedMustTail =
          RI->getParent()->getTerminatingMustTailCall();
      if (!ReturnedMustTail) {
        NormalReturns.push_back(RI);
        continue;
      }
      if (!NeedBitCast)
        continue;

      // Delete the old return and any preceding bitcast.
      BasicBlock *CurBB = RI->getParent();
      auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
      RI->eraseFromParent();
      if (OldCast)
        OldCast->eraseFromParent();

      // Insert a new bitcast and return with the right type.
      IRBuilder<> Builder(CurBB);
      Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
    }

    // Leave behind the normal returns so we can merge control flow.
    std::swap(Returns, NormalReturns);
  }

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->getInstList().splice(TheCall, FirstNewBlock->getInstList(),
                                 FirstNewBlock->begin(), FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->getBasicBlockList().pop_back();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
      BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);
      NewBr->setDebugLoc(Returns[0]->getDebugLoc());
    }

    // If the return instruction returned a value, replace uses of the call
    // with uses of the returned value.
    if (!TheCall->use_empty()) {
      ReturnInst *R = Returns[0];
      if (TheCall == R->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(R->getReturnValue());
    }
    // Since we are now done with the Call/Invoke, we can delete it.
    TheCall->eraseFromParent();

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->eraseFromParent();

    // We are now done with the inlining.
    return true;
  }

  // Otherwise, we have the normal case, of more than one block to inline or
  // multiple return sites.

  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks. How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
  BasicBlock *AfterCallBB;
  BranchInst *CreatedBranchToNormalDest = nullptr;
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {

    // Add an unconditional branch to make this look like the CallInst case...
    CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), TheCall);

    // Split the basic block. This guarantees that no PHI nodes will have to be
    // updated due to new incoming edges, and make the invoke case more
    // symmetric to the call case.
    AfterCallBB = OrigBB->splitBasicBlock(CreatedBranchToNormalDest,
                                          CalledFunc->getName()+".exit");

  } else {  // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    //
    AfterCallBB = OrigBB->splitBasicBlock(TheCall,
                                          CalledFunc->getName()+".exit");
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  //
  TerminatorInst *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
  Br->setOperand(0, FirstNewBlock);


  // Now that the function is correct, make it a little bit nicer. In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->getBasicBlockList().splice(AfterCallBB, Caller->getBasicBlockList(),
                                     FirstNewBlock, Caller->end());

  // Handle all of the return instructions that we just cloned in, and
  // eliminate any users of the original call/invoke instruction.
  Type *RTy = CalledFunc->getReturnType();

  PHINode *PHI = nullptr;
  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    if (!TheCall->use_empty()) {
      PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(),
                            AfterCallBB->begin());
      // Anything that used the result of the function call should now use the
      // PHI node as their operand.
      TheCall->replaceAllUsesWith(PHI);
    }

    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
    if (PHI) {
      for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
        ReturnInst *RI = Returns[i];
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }
    }


    // Add a branch to the merge points and remove return instructions.
    DebugLoc Loc;
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      BranchInst* BI = BranchInst::Create(AfterCallBB, RI);
      Loc = RI->getDebugLoc();
      BI->setDebugLoc(Loc);
      RI->eraseFromParent();
    }
    // We need to set the debug location to *somewhere* inside the
    // inlined function. The line number may be nonsensical, but the
    // instruction will at least be associated with the right
    // function.
    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Loc);
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!TheCall->use_empty()) {
      if (TheCall == Returns[0]->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
    }

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());

    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());

    // Now delete the return instruction and the now-empty ReturnBB.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!TheCall->use_empty()) {
    // No returns, but something is using the return value of the call. Just
    // nuke the result.
    TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  TheCall->eraseFromParent();

  // If we inlined any musttail calls and the original return is now
  // unreachable, delete it. It can only contain a bitcast and ret.
  if (InlinedMustTailCalls && pred_begin(AfterCallBB) == pred_end(AfterCallBB))
    AfterCallBB->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the code entry block into calling block, right before the
  // unconditional branch.
  CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes
  OrigBB->getInstList().splice(Br, CalleeEntry->getInstList());

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);

  // If we inserted a phi node, check to see if it has a single value (e.g. all
  // the entries are the same or undef). If so, remove the PHI so it doesn't
  // block other optimizations.
  if (PHI) {
    if (Value *V = SimplifyInstruction(PHI, IFI.DL, nullptr, nullptr, IFI.AT)) {
      PHI->replaceAllUsesWith(V);
      PHI->eraseFromParent();
    }
  }

  return true;
}