//===- InlineFunction.cpp - Code to perform function inlining ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionTracker.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/CommandLine.h"
#include <algorithm>
using namespace llvm;

static cl::opt<bool>
EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
  cl::Hidden,
  cl::desc("Convert noalias attributes to metadata during inlining."));

bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  return InlineFunction(CallSite(CI), IFI, InsertLifetime);
}
bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  return InlineFunction(CallSite(II), IFI, InsertLifetime);
}

namespace {
  /// A class for recording information about inlining through an invoke.
  class InvokeInliningInfo {
    BasicBlock *OuterResumeDest; ///< Destination of the invoke's unwind.
    BasicBlock *InnerResumeDest; ///< Destination for the callee's resume.
    LandingPadInst *CallerLPad;  ///< LandingPadInst associated with the invoke.
    PHINode *InnerEHValuesPHI;   ///< PHI for EH values from landingpad insts.
    SmallVector<Value*, 8> UnwindDestPHIValues;

  public:
    InvokeInliningInfo(InvokeInst *II)
      : OuterResumeDest(II->getUnwindDest()), InnerResumeDest(nullptr),
        CallerLPad(nullptr), InnerEHValuesPHI(nullptr) {
      // If there are PHI nodes in the unwind destination block, we need to keep
      // track of which values came into them from the invoke before removing
      // the edge from this block.
      llvm::BasicBlock *InvokeBB = II->getParent();
      BasicBlock::iterator I = OuterResumeDest->begin();
      for (; isa<PHINode>(I); ++I) {
        // Save the value to use for this edge.
        PHINode *PHI = cast<PHINode>(I);
        UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
      }

      CallerLPad = cast<LandingPadInst>(I);
    }

    /// getOuterResumeDest - The outer unwind destination is the target of
    /// unwind edges introduced for calls within the inlined function.
    BasicBlock *getOuterResumeDest() const {
      return OuterResumeDest;
    }

    BasicBlock *getInnerResumeDest();

    LandingPadInst *getLandingPadInst() const { return CallerLPad; }

    /// forwardResume - Forward the 'resume' instruction to the caller's landing
    /// pad block. When the landing pad block has only one predecessor, this is
    /// a simple branch. When there is more than one predecessor, we need to
    /// split the landing pad block after the landingpad instruction and jump
    /// there.
    void forwardResume(ResumeInst *RI,
                       SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);

    /// addIncomingPHIValuesFor - Add incoming-PHI values to the unwind
    /// destination block for the given basic block, using the values for the
    /// original invoke's source block.
    void addIncomingPHIValuesFor(BasicBlock *BB) const {
      addIncomingPHIValuesForInto(BB, OuterResumeDest);
    }

    void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
      BasicBlock::iterator I = dest->begin();
      for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
        PHINode *phi = cast<PHINode>(I);
        phi->addIncoming(UnwindDestPHIValues[i], src);
      }
    }
  };
}

/// getInnerResumeDest - Get or create a target for the branch from ResumeInsts.
BasicBlock *InvokeInliningInfo::getInnerResumeDest() {
  if (InnerResumeDest) return InnerResumeDest;

  // Split the landing pad.
  BasicBlock::iterator SplitPoint = CallerLPad; ++SplitPoint;
  InnerResumeDest =
    OuterResumeDest->splitBasicBlock(SplitPoint,
                                     OuterResumeDest->getName() + ".body");

  // The number of incoming edges we expect to the inner landing pad.
  const unsigned PHICapacity = 2;

  // Create corresponding new PHIs for all the PHIs in the outer landing pad.
  BasicBlock::iterator InsertPoint = InnerResumeDest->begin();
  BasicBlock::iterator I = OuterResumeDest->begin();
  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
    PHINode *OuterPHI = cast<PHINode>(I);
    PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
                                        OuterPHI->getName() + ".lpad-body",
                                        InsertPoint);
    OuterPHI->replaceAllUsesWith(InnerPHI);
    InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
  }

  // Create a PHI for the exception values.
  InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
                                     "eh.lpad-body", InsertPoint);
  CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
  InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);

  // All done.
  return InnerResumeDest;
}

/// forwardResume - Forward the 'resume' instruction to the caller's landing pad
/// block. When the landing pad block has only one predecessor, this is a simple
/// branch. When there is more than one predecessor, we need to split the
/// landing pad block after the landingpad instruction and jump there.
void InvokeInliningInfo::forwardResume(ResumeInst *RI,
                               SmallPtrSetImpl<LandingPadInst*> &InlinedLPads) {
  BasicBlock *Dest = getInnerResumeDest();
  BasicBlock *Src = RI->getParent();

  BranchInst::Create(Dest, Src);

  // Update the PHIs in the destination. They were inserted in an order which
  // makes this work.
  addIncomingPHIValuesForInto(Src, Dest);

  InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
  RI->eraseFromParent();
}

/// HandleCallsInBlockInlinedThroughInvoke - When we inline a basic block into
/// an invoke, we have to turn all of the calls that can throw into
/// invokes. This function analyzes BB to see if there are any calls, and if so,
/// it rewrites them to be invokes that jump to the caller's unwind destination
/// and fills in the PHI nodes of that block with the appropriate values.
static void HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
                                                   InvokeInliningInfo &Invoke) {
  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = BBI++;

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);

    // If this call cannot unwind, don't convert it to an invoke.
    // Inline asm calls cannot throw.
    if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
      continue;

    // Convert this function call into an invoke instruction. First, split the
    // basic block.
    BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");

    // Delete the unconditional branch inserted by splitBasicBlock.
    BB->getInstList().pop_back();

    // Create the new invoke instruction.
    ImmutableCallSite CS(CI);
    SmallVector<Value*, 8> InvokeArgs(CS.arg_begin(), CS.arg_end());
    InvokeInst *II = InvokeInst::Create(CI->getCalledValue(), Split,
                                        Invoke.getOuterResumeDest(),
                                        InvokeArgs, CI->getName(), BB);
    II->setDebugLoc(CI->getDebugLoc());
    II->setCallingConv(CI->getCallingConv());
    II->setAttributes(CI->getAttributes());

    // Make sure that anything using the call now uses the invoke! This also
    // updates the CallGraph if present, because it uses a WeakVH.
    CI->replaceAllUsesWith(II);

    // Delete the original call.
    Split->getInstList().pop_front();

    // Update any PHI nodes in the exceptional block to indicate that there is
    // now a new entry in them.
    Invoke.addIncomingPHIValuesFor(BB);
    return;
  }
}

/// HandleInlinedInvoke - If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock,
                                ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.
  InvokeInliningInfo Invoke(II);

  // Get all of the inlined landing pad instructions.
  SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
  for (Function::iterator I = FirstNewBlock, E = Caller->end(); I != E; ++I)
    if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
      InlinedLPads.insert(II->getLandingPadInst());

  // Append the clauses from the outer landing pad instruction into the inlined
  // landing pad instructions.
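  // For example (illustrative IR only; the clause names are hypothetical):
  // if the outer landingpad carries
  //   catch i8* @OuterExc
  // and an inlined landingpad carries
  //   catch i8* @InnerExc
  // then after this loop the inlined pad carries both clauses, with the
  // inlined pad's own clauses first:
  //   catch i8* @InnerExc, catch i8* @OuterExc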
  LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
  for (LandingPadInst *InlinedLPad : InlinedLPads) {
    unsigned OuterNum = OuterLPad->getNumClauses();
    InlinedLPad->reserveClauses(OuterNum);
    for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
      InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
    if (OuterLPad->isCleanup())
      InlinedLPad->setCleanup(true);
  }

  for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E; ++BB){
    if (InlinedCodeInfo.ContainsCalls)
      HandleCallsInBlockInlinedThroughInvoke(BB, Invoke);

    // Forward any resumes that are remaining here.
    if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
      Invoke.forwardResume(RI, InlinedLPads);
  }

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}

/// CloneAliasScopeMetadata - When inlining a function that contains noalias
/// scope metadata, this metadata needs to be cloned so that the inlined blocks
/// have different "unique scopes" at every call site. Were this not done, then
/// aliasing scopes from a function inlined into a caller multiple times could
/// not be differentiated (and this would lead to miscompiles because the
/// non-aliasing property communicated by the metadata could have
/// call-site-specific control dependencies).
static void CloneAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap) {
  const Function *CalledFunc = CS.getCalledFunction();
  SetVector<const MDNode *> MD;

  // Note: We could only clone the metadata if it is already used in the
  // caller. I'm omitting that check here because it might confuse
  // inter-procedural alias analysis passes. We can revisit this if it becomes
  // an efficiency or overhead problem.

  for (Function::const_iterator I = CalledFunc->begin(), IE = CalledFunc->end();
       I != IE; ++I)
    for (BasicBlock::const_iterator J = I->begin(), JE = I->end(); J != JE; ++J) {
      if (const MDNode *M = J->getMetadata(LLVMContext::MD_alias_scope))
        MD.insert(M);
      if (const MDNode *M = J->getMetadata(LLVMContext::MD_noalias))
        MD.insert(M);
    }

  if (MD.empty())
    return;

  // Walk the existing metadata, adding the complete (perhaps cyclic) chain to
  // the set.
  SmallVector<const Value *, 16> Queue(MD.begin(), MD.end());
  while (!Queue.empty()) {
    const MDNode *M = cast<MDNode>(Queue.pop_back_val());
    for (unsigned i = 0, ie = M->getNumOperands(); i != ie; ++i)
      if (const MDNode *M1 = dyn_cast<MDNode>(M->getOperand(i)))
        if (MD.insert(M1))
          Queue.push_back(M1);
  }

  // Now we have a complete set of all metadata in the chains used to specify
  // the noalias scopes and the lists of those scopes.
  SmallVector<MDNode *, 16> DummyNodes;
  DenseMap<const MDNode *, TrackingVH<MDNode> > MDMap;
  for (SetVector<const MDNode *>::iterator I = MD.begin(), IE = MD.end();
       I != IE; ++I) {
    MDNode *Dummy = MDNode::getTemporary(CalledFunc->getContext(), None);
    DummyNodes.push_back(Dummy);
    MDMap[*I] = Dummy;
  }

  // Create new metadata nodes to replace the dummy nodes, replacing old
  // metadata references with either a dummy node or an already-created new
  // node.
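  // For example (illustrative, old-style metadata syntax), a self-referential
  // scope node such as
  //   !0 = metadata !{metadata !0, metadata !"some scope"}
  // is handled by first mapping !0 to a temporary node, remapping its operands
  // through MDMap, and then RAUW'ing the temporary with the finished clone.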
  for (SetVector<const MDNode *>::iterator I = MD.begin(), IE = MD.end();
       I != IE; ++I) {
    SmallVector<Value *, 4> NewOps;
    for (unsigned i = 0, ie = (*I)->getNumOperands(); i != ie; ++i) {
      const Value *V = (*I)->getOperand(i);
      if (const MDNode *M = dyn_cast<MDNode>(V))
        NewOps.push_back(MDMap[M]);
      else
        NewOps.push_back(const_cast<Value *>(V));
    }

    MDNode *NewM = MDNode::get(CalledFunc->getContext(), NewOps),
           *TempM = MDMap[*I];

    TempM->replaceAllUsesWith(NewM);
  }

  // Now replace the metadata in the new inlined instructions with the
  // replacements from the map.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (!VMI->second)
      continue;

    Instruction *NI = dyn_cast<Instruction>(VMI->second);
    if (!NI)
      continue;

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_alias_scope)) {
      MDNode *NewMD = MDMap[M];
      // If the call site also had alias scope metadata (a list of scopes to
      // which instructions inside it might belong), propagate those scopes to
      // the inlined instructions.
      if (MDNode *CSM =
          CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
        NewMD = MDNode::concatenate(NewMD, CSM);
      NI->setMetadata(LLVMContext::MD_alias_scope, NewMD);
    } else if (NI->mayReadOrWriteMemory()) {
      if (MDNode *M =
          CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
        NI->setMetadata(LLVMContext::MD_alias_scope, M);
    }

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_noalias)) {
      MDNode *NewMD = MDMap[M];
      // If the call site also had noalias metadata (a list of scopes with
      // which instructions inside it don't alias), propagate those scopes to
      // the inlined instructions.
      if (MDNode *CSM =
          CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
        NewMD = MDNode::concatenate(NewMD, CSM);
      NI->setMetadata(LLVMContext::MD_noalias, NewMD);
    } else if (NI->mayReadOrWriteMemory()) {
      if (MDNode *M =
          CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
        NI->setMetadata(LLVMContext::MD_noalias, M);
    }
  }

  // Now that everything has been replaced, delete the dummy nodes.
  for (unsigned i = 0, ie = DummyNodes.size(); i != ie; ++i)
    MDNode::deleteTemporary(DummyNodes[i]);
}

/// AddAliasScopeMetadata - If the inlined function has noalias arguments, then
/// add new alias scopes for each noalias argument, tag the mapped noalias
/// parameters with noalias metadata specifying the new scope, and tag all
/// non-derived loads, stores and memory intrinsics with the new alias scopes.
static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap,
                                  const DataLayout *DL, AliasAnalysis *AA) {
  if (!EnableNoAliasConversion)
    return;

  const Function *CalledFunc = CS.getCalledFunction();
  SmallVector<const Argument *, 4> NoAliasArgs;

  for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
       E = CalledFunc->arg_end(); I != E; ++I) {
    if (I->hasNoAliasAttr() && !I->hasNUses(0))
      NoAliasArgs.push_back(I);
  }

  if (NoAliasArgs.empty())
    return;

  // To do a good job, if a noalias variable is captured, we need to know if
  // the capture point dominates the particular use we're considering.
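  // For example (illustrative IR; names are hypothetical), a noalias argument
  // can be captured partway through the callee:
  //   define void @f(i8* noalias %p, i8** %q) {
  //     store i8* %p, i8** %q      ; %p is captured here
  //     %r = load i8** %q          ; loads after the capture may yield %p
  //     ...
  //   }
  // Accesses dominated by the capture point cannot safely be tagged noalias
  // with respect to %p's scope, because they may reach %p through the
  // captured copy.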
  DominatorTree DT;
  DT.recalculate(const_cast<Function&>(*CalledFunc));

  // noalias indicates that pointer values based on the argument do not alias
  // pointer values which are not based on it. So we add a new "scope" for each
  // noalias function argument. Accesses using pointers based on that argument
  // become part of that alias scope, accesses using pointers not based on that
  // argument are tagged as noalias with that scope.

  DenseMap<const Argument *, MDNode *> NewScopes;
  MDBuilder MDB(CalledFunc->getContext());

  // Create a new scope domain for this function.
  MDNode *NewDomain =
    MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
  for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
    const Argument *A = NoAliasArgs[i];

    std::string Name = CalledFunc->getName();
    if (A->hasName()) {
      Name += ": %";
      Name += A->getName();
    } else {
      Name += ": argument ";
      Name += utostr(i);
    }

    // Note: We always create a new anonymous root here. This is true regardless
    // of the linkage of the callee because the aliasing "scope" is not just a
    // property of the callee, but also all control dependencies in the caller.
    MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
    NewScopes.insert(std::make_pair(A, NewScope));
  }

  // Iterate over all new instructions in the map; for all memory-access
  // instructions, add the alias scope metadata.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
      if (!VMI->second)
        continue;

      Instruction *NI = dyn_cast<Instruction>(VMI->second);
      if (!NI)
        continue;

      bool IsArgMemOnlyCall = false, IsFuncCall = false;
      SmallVector<const Value *, 2> PtrArgs;

      if (const LoadInst *LI = dyn_cast<LoadInst>(I))
        PtrArgs.push_back(LI->getPointerOperand());
      else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
        PtrArgs.push_back(SI->getPointerOperand());
      else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
        PtrArgs.push_back(VAAI->getPointerOperand());
      else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
        PtrArgs.push_back(CXI->getPointerOperand());
      else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
        PtrArgs.push_back(RMWI->getPointerOperand());
      else if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
        // If we know that the call does not access memory, then we'll still
        // know that about the inlined clone of this call site, and we don't
        // need to add metadata.
        if (ICS.doesNotAccessMemory())
          continue;

        IsFuncCall = true;
        if (AA) {
          AliasAnalysis::ModRefBehavior MRB = AA->getModRefBehavior(ICS);
          if (MRB == AliasAnalysis::OnlyAccessesArgumentPointees ||
              MRB == AliasAnalysis::OnlyReadsArgumentPointees)
            IsArgMemOnlyCall = true;
        }

        for (ImmutableCallSite::arg_iterator AI = ICS.arg_begin(),
             AE = ICS.arg_end(); AI != AE; ++AI) {
          // We need to check the underlying objects of all arguments, not just
          // the pointer arguments, because we might be passing pointers as
          // integers, etc.
          // However, if we know that the call only accesses pointer arguments,
          // then we only need to check the pointer arguments.
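          // For example (illustrative; @g is a hypothetical callee), a pointer
          // may be smuggled through an integer argument:
          //   %pi = ptrtoint i8* %p to i64
          //   call void @g(i64 %pi)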
          if (IsArgMemOnlyCall && !(*AI)->getType()->isPointerTy())
            continue;

          PtrArgs.push_back(*AI);
        }
      }

      // If we found no pointers, then this instruction is not suitable for
      // pairing with an instruction to receive aliasing metadata.
      // However, if this is a call, we might just alias with none of the
      // noalias arguments.
      if (PtrArgs.empty() && !IsFuncCall)
        continue;

      // It is possible that there is only one underlying object, but you
      // need to go through several PHIs to see it, and thus could be
      // repeated in the Objects list.
      SmallPtrSet<const Value *, 4> ObjSet;
      SmallVector<Value *, 4> Scopes, NoAliases;

      SmallSetVector<const Argument *, 4> NAPtrArgs;
      for (unsigned i = 0, ie = PtrArgs.size(); i != ie; ++i) {
        SmallVector<Value *, 4> Objects;
        GetUnderlyingObjects(const_cast<Value*>(PtrArgs[i]),
                             Objects, DL, /* MaxLookup = */ 0);

        for (Value *O : Objects)
          ObjSet.insert(O);
      }

      // Figure out if we're derived from anything that is not a noalias
      // argument.
      bool CanDeriveViaCapture = false, UsesAliasingPtr = false;
      for (const Value *V : ObjSet) {
        // Is this value a constant that cannot be derived from any pointer
        // value (we need to exclude constant expressions, for example, that
        // are formed from arithmetic on global symbols).
        bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
                             isa<ConstantPointerNull>(V) ||
                             isa<ConstantDataVector>(V) || isa<UndefValue>(V);
        if (IsNonPtrConst)
          continue;

        // If this is anything other than a noalias argument, then we cannot
        // completely describe the aliasing properties using alias.scope
        // metadata (and, thus, won't add any).
        if (const Argument *A = dyn_cast<Argument>(V)) {
          if (!A->hasNoAliasAttr())
            UsesAliasingPtr = true;
        } else {
          UsesAliasingPtr = true;
        }

        // If this is not some identified function-local object (which cannot
        // directly alias a noalias argument), or some other argument (which,
        // by definition, also cannot alias a noalias argument), then we could
        // alias a noalias argument that has been captured.
        if (!isa<Argument>(V) &&
            !isIdentifiedFunctionLocal(const_cast<Value*>(V)))
          CanDeriveViaCapture = true;
      }

      // A function call can always get captured noalias pointers (via other
      // parameters, globals, etc.).
      if (IsFuncCall && !IsArgMemOnlyCall)
        CanDeriveViaCapture = true;

      // First, we want to figure out all of the sets with which we definitely
      // don't alias. Iterate over all noalias sets, and add those for which:
      //   1. The noalias argument is not in the set of objects from which we
      //      definitely derive.
      //   2. The noalias argument has not yet been captured.
      // An arbitrary function that might load pointers could see captured
      // noalias arguments via other noalias arguments or globals, and so we
      // must always check for prior capture.
      for (const Argument *A : NoAliasArgs) {
        if (!ObjSet.count(A) && (!CanDeriveViaCapture ||
                                 // It might be tempting to skip the
                                 // PointerMayBeCapturedBefore check if
                                 // A->hasNoCaptureAttr() is true, but this is
                                 // incorrect because nocapture only guarantees
                                 // that no copies outlive the function, not
                                 // that the value cannot be locally captured.
                                 !PointerMayBeCapturedBefore(A,
                                     /* ReturnCaptures */ false,
                                     /* StoreCaptures */ false, I, &DT)))
          NoAliases.push_back(NewScopes[A]);
      }

      if (!NoAliases.empty())
        NI->setMetadata(LLVMContext::MD_noalias, MDNode::concatenate(
          NI->getMetadata(LLVMContext::MD_noalias),
          MDNode::get(CalledFunc->getContext(), NoAliases)));

      // Next, we want to figure out all of the sets to which we might belong.
      // We might belong to a set if the noalias argument is in the set of
      // underlying objects. If there is some non-noalias argument in our list
      // of underlying objects, then we cannot add a scope because the fact
      // that some access does not alias with any set of our noalias arguments
      // cannot itself guarantee that it does not alias with this access
      // (because there is some pointer of unknown origin involved and the
      // other access might also depend on this pointer). We also cannot add
      // scopes to arbitrary functions unless we know they don't access any
      // non-parameter pointer-values.
      bool CanAddScopes = !UsesAliasingPtr;
      if (CanAddScopes && IsFuncCall)
        CanAddScopes = IsArgMemOnlyCall;

      if (CanAddScopes)
        for (const Argument *A : NoAliasArgs) {
          if (ObjSet.count(A))
            Scopes.push_back(NewScopes[A]);
        }

      if (!Scopes.empty())
        NI->setMetadata(LLVMContext::MD_alias_scope, MDNode::concatenate(
          NI->getMetadata(LLVMContext::MD_alias_scope),
          MDNode::get(CalledFunc->getContext(), Scopes)));
    }
  }
}

/// UpdateCallGraphAfterInlining - Once we have cloned code over from a callee
/// into the caller, update the specified callgraph to reflect the changes we
/// made. Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
static void UpdateCallGraphAfterInlining(CallSite CS,
                                         Function::iterator FirstNewBlock,
                                         ValueToValueMapTy &VMap,
                                         InlineFunctionInfo &IFI) {
  CallGraph &CG = *IFI.CG;
  const Function *Caller = CS.getInstruction()->getParent()->getParent();
  const Function *Callee = CS.getCalledFunction();
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();

  // Consider the case where CalleeNode == CallerNode.
  CallGraphNode::CalledFunctionsVector CallCache;
  if (CalleeNode == CallerNode) {
    CallCache.assign(I, E);
    I = CallCache.begin();
    E = CallCache.end();
  }

  for (; I != E; ++I) {
    const Value *OrigCall = I->first;

    ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI == VMap.end() || VMI->second == nullptr)
      continue;

    // If the call was inlined, but then constant folded, there is no edge to
    // add. Check for this case.
    Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
    if (!NewCall) continue;

    // Remember that this call site got inlined for the client of
    // InlineFunction.
    IFI.InlinedCalls.push_back(NewCall);

    // It's possible that inlining the callsite will cause it to go from an
    // indirect to a direct call by resolving a function pointer. If this
    // happens, set the callee of the new call site to a more precise
    // destination.
    // This can also happen if the call graph node of the caller
    // was just unnecessarily imprecise.
    if (!I->second->getFunction())
      if (Function *F = CallSite(NewCall).getCalledFunction()) {
        // Indirect call site resolved to direct call.
        CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);

        continue;
      }

    CallerNode->addCalledFunction(CallSite(NewCall), I->second);
  }

  // Update the call graph by deleting the edge from Callee to Caller. We must
  // do this after the loop above in case Caller and Callee are the same.
  CallerNode->removeCallEdgeFor(CS);
}

static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
                                    BasicBlock *InsertBlock,
                                    InlineFunctionInfo &IFI) {
  Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
  IRBuilder<> Builder(InsertBlock->begin());

  Value *Size;
  if (IFI.DL == nullptr)
    Size = ConstantExpr::getSizeOf(AggTy);
  else
    Size = Builder.getInt64(IFI.DL->getTypeStoreSize(AggTy));

  // Always generate a memcpy of alignment 1 here because we don't know
  // the alignment of the src pointer. Other optimizations can infer
  // better alignment.
  Builder.CreateMemCpy(Dst, Src, Size, /*Align=*/1);
}

/// HandleByValArgument - When inlining a call site that has a byval argument,
/// we have to make the implicit memcpy explicit by adding it.
static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
                                  const Function *CalledFunc,
                                  InlineFunctionInfo &IFI,
                                  unsigned ByValAlignment) {
  PointerType *ArgTy = cast<PointerType>(Arg->getType());
  Type *AggTy = ArgTy->getElementType();

  // If the called function is readonly, then it could not mutate the caller's
  // copy of the byval'd memory. In this case, it is safe to elide the copy and
  // temporary.
  if (CalledFunc->onlyReadsMemory()) {
    // If the byval argument has a specified alignment that is greater than the
    // passed in pointer, then we either have to round up the input pointer or
    // give up on this transformation.
    if (ByValAlignment <= 1)  // 0 = unspecified, 1 = no particular alignment.
      return Arg;

    // If the pointer is already known to be sufficiently aligned, or if we can
    // round it up to a larger alignment, then we don't need a temporary.
    if (getOrEnforceKnownAlignment(Arg, ByValAlignment,
                                   IFI.DL, IFI.AT, TheCall) >= ByValAlignment)
      return Arg;

    // Otherwise, we have to make a memcpy to get a safe alignment. This is bad
    // for code quality, but rarely happens and is required for correctness.
  }

  // Create the alloca. If we have DataLayout, use nice alignment.
  unsigned Align = 1;
  if (IFI.DL)
    Align = IFI.DL->getPrefTypeAlignment(AggTy);

  // If the byval had an alignment specified, we *must* use at least that
  // alignment, as it is required by the byval argument (and uses of the
  // pointer inside the callee).
  Align = std::max(Align, ByValAlignment);

  Function *Caller = TheCall->getParent()->getParent();

  Value *NewAlloca = new AllocaInst(AggTy, nullptr, Align, Arg->getName(),
                                    &*Caller->begin()->begin());
  IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));

  // Uses of the argument in the function should use our new alloca
  // instead.
  return NewAlloca;
}

// isUsedByLifetimeMarker - Check whether this Value is used by a lifetime
// intrinsic.
static bool isUsedByLifetimeMarker(Value *V) {
  for (User *U : V->users()) {
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
        return true;
      }
    }
  }
  return false;
}

// hasLifetimeMarkers - Check whether the given alloca already has
// lifetime.start or lifetime.end intrinsics.
static bool hasLifetimeMarkers(AllocaInst *AI) {
  Type *Ty = AI->getType();
  Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
                                       Ty->getPointerAddressSpace());
  if (Ty == Int8PtrTy)
    return isUsedByLifetimeMarker(AI);

  // Do a scan to find all the casts to i8*.
  for (User *U : AI->users()) {
    if (U->getType() != Int8PtrTy) continue;
    if (U->stripPointerCasts() != AI) continue;
    if (isUsedByLifetimeMarker(U))
      return true;
  }
  return false;
}

/// updateInlinedAtInfo - Helper function used by fixupLineNumbers to
/// recursively update InlinedAtEntry of a DebugLoc.
static DebugLoc updateInlinedAtInfo(const DebugLoc &DL,
                                    const DebugLoc &InlinedAtDL,
                                    LLVMContext &Ctx) {
  if (MDNode *IA = DL.getInlinedAt(Ctx)) {
    DebugLoc NewInlinedAtDL
      = updateInlinedAtInfo(DebugLoc::getFromDILocation(IA), InlinedAtDL, Ctx);
    return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(Ctx),
                         NewInlinedAtDL.getAsMDNode(Ctx));
  }

  return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(Ctx),
                       InlinedAtDL.getAsMDNode(Ctx));
}

/// fixupLineNumbers - Update inlined instructions' line numbers to
/// encode the location where these instructions are inlined.
static void fixupLineNumbers(Function *Fn, Function::iterator FI,
                             Instruction *TheCall) {
  DebugLoc TheCallDL = TheCall->getDebugLoc();
  if (TheCallDL.isUnknown())
    return;

  for (; FI != Fn->end(); ++FI) {
    for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
         BI != BE; ++BI) {
      DebugLoc DL = BI->getDebugLoc();
      if (DL.isUnknown()) {
        // If the inlined instruction has no line number, make it look as if it
        // originates from the call location. This is important for
        // ((__always_inline__, __nodebug__)) functions which must use caller
        // location for all instructions in their function body.
        BI->setDebugLoc(TheCallDL);
      } else {
        BI->setDebugLoc(updateInlinedAtInfo(DL, TheCallDL, BI->getContext()));
        if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(BI)) {
          LLVMContext &Ctx = BI->getContext();
          MDNode *InlinedAt = BI->getDebugLoc().getInlinedAt(Ctx);
          DVI->setOperand(2, createInlinedVariable(DVI->getVariable(),
                                                   InlinedAt, Ctx));
        }
      }
    }
  }
}

/// InlineFunction - This function inlines the called function into the basic
/// block of the caller. This returns false if it is not possible to inline
/// this call. The program is still in a well defined state if this occurs
/// though.
///
/// Note that this only does one level of inlining. For example, if the
/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
/// exists in the instruction stream. Similarly this will inline a recursive
/// function by one level.
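///
/// For example (illustrative): inlining 'call void @B()' into @A splices a
/// clone of @B's body into @A; any 'call void @C()' cloned from @B remains an
/// ordinary call instruction and is not recursively inlined by this routine.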
bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  Instruction *TheCall = CS.getInstruction();
  assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
         "Instruction not in function!");

  // If IFI has any state in it, zap it before we fill it in.
  IFI.reset();

  const Function *CalledFunc = CS.getCalledFunction();
  if (!CalledFunc ||                 // Can't inline external function or indirect
      CalledFunc->isDeclaration() || // call, or call to a vararg function!
      CalledFunc->getFunctionType()->isVarArg()) return false;

  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CS.doesNotThrow();

  BasicBlock *OrigBB = TheCall->getParent();
  Function *Caller = OrigBB->getParent();

  // GC poses two hazards to inlining, which only occur when the callee has GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to the
  //     caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasGC()) {
    if (!Caller->hasGC())
      Caller->setGC(CalledFunc->getGC());
    else if (CalledFunc->getGC() != Caller->getGC())
      return false;
  }

  // Get the personality function from the callee if it contains a landing pad.
  Value *CalleePersonality = nullptr;
  for (Function::const_iterator I = CalledFunc->begin(), E = CalledFunc->end();
       I != E; ++I)
    if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
      const BasicBlock *BB = II->getUnwindDest();
      const LandingPadInst *LP = BB->getLandingPadInst();
      CalleePersonality = LP->getPersonalityFn();
      break;
    }

  // Find the personality function used by the landing pads of the caller. If it
  // exists, then check to see that it matches the personality function used in
  // the callee.
  if (CalleePersonality) {
    for (Function::const_iterator I = Caller->begin(), E = Caller->end();
         I != E; ++I)
      if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
        const BasicBlock *BB = II->getUnwindDest();
        const LandingPadInst *LP = BB->getLandingPadInst();

        // If the personality functions match, then we can perform the
        // inlining. Otherwise, we can't inline.
        // TODO: This isn't 100% true. Some personality functions are proper
        // supersets of others and can be used in place of the other.
        if (LP->getPersonalityFn() != CalleePersonality)
          return false;

        break;
      }
  }

  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  Function::iterator LastBlock = &Caller->back();

  // Make sure to capture all of the return instructions from the cloned
  // function.
  SmallVector<ReturnInst*, 8> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;

  { // Scope to destroy VMap after cloning.
    ValueToValueMapTy VMap;
    // Keep a list of pair (dst, src) to emit byval initializations.
    SmallVector<std::pair<Value*, Value*>, 4> ByValInit;

    assert(CalledFunc->arg_size() == CS.arg_size() &&
           "No varargs calls can be inlined!");

    // Calculate the vector of arguments to pass into the function cloner, which
    // matches up the formal to the actual argument values.
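    // E.g. (illustrative names): for a call 'call void @f(i32 %x)' into a
    // callee with formal parameter 'i32 %a', this records VMap[%a] = %x so the
    // cloner rewrites uses of %a into uses of %x.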
    CallSite::arg_iterator AI = CS.arg_begin();
    unsigned ArgNo = 0;
    for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
         E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;

      // When byval arguments are actually inlined, we need to make the copy
      // implied by them explicit. However, we don't do this if the callee is
      // readonly or readnone, because the copy would be unneeded: the callee
      // doesn't modify the struct.
      if (CS.isByValArgument(ArgNo)) {
        ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
                                        CalledFunc->getParamAlignment(ArgNo+1));
        if (ActualArg != *AI)
          ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI));
      }

      VMap[I] = ActualArg;
    }

    // We want the inliner to prune the code as it copies. We would LOVE to
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll be
    // happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
                              /*ModuleLevelChanges=*/false, Returns, ".i",
                              &InlinedFunctionInfo, IFI.DL, TheCall);

    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    // Inject byval arguments initialization.
    for (std::pair<Value*, Value*> &Init : ByValInit)
      HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(),
                              FirstNewBlock, IFI);

    // Update the callgraph if requested.
    if (IFI.CG)
      UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);

    // Update inlined instructions' line number information.
    fixupLineNumbers(Caller, FirstNewBlock, TheCall);

    // Clone existing noalias metadata if necessary.
    CloneAliasScopeMetadata(CS, VMap);

    // Add noalias metadata if necessary.
    AddAliasScopeMetadata(CS, VMap, IFI.DL, IFI.AA);

    // FIXME: We could register any cloned assumptions instead of clearing the
    // whole function's cache.
    if (IFI.AT)
      IFI.AT->forgetCachedAssumptions(Caller);
  }

  // If there are any alloca instructions in the block that used to be the entry
  // block for the callee, move them to the entry block of the caller. First
  // calculate which instruction they should be inserted before. We insert the
  // instructions at the end of the current alloca list.
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
         E = FirstNewBlock->end(); I != E; ) {
      AllocaInst *AI = dyn_cast<AllocaInst>(I++);
      if (!AI) continue;

      // If the alloca is now dead, remove it. This often occurs due to code
      // specialization.
      if (AI->use_empty()) {
        AI->eraseFromParent();
        continue;
      }

      if (!isa<Constant>(AI->getArraySize()))
        continue;

      // Keep track of the static allocas that we inline into the caller.
      IFI.StaticAllocas.push_back(AI);

      // Scan for the block of allocas that we can move over, and move them
      // all at once.
      while (isa<AllocaInst>(I) &&
             isa<Constant>(cast<AllocaInst>(I)->getArraySize())) {
        IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
        ++I;
      }

      // Transfer all of the allocas over in a block. Using splice means
      // that the instructions aren't removed from the symbol table, then
      // reinserted.
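      // For example (illustrative IR; names hypothetical), after this splice a
      // static alloca cloned from the callee's entry block
      //   %buf.i = alloca [16 x i8]
      // now sits in the caller's entry block alongside the caller's own
      // static allocas.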
      Caller->getEntryBlock().getInstList().splice(InsertPoint,
                                                   FirstNewBlock->getInstList(),
                                                   AI, I);
    }
  }

  bool InlinedMustTailCalls = false;
  if (InlinedFunctionInfo.ContainsCalls) {
    CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
    if (CallInst *CI = dyn_cast<CallInst>(TheCall))
      CallSiteTailKind = CI->getTailCallKind();

    for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
         ++BB) {
      for (Instruction &I : *BB) {
        CallInst *CI = dyn_cast<CallInst>(&I);
        if (!CI)
          continue;

        // We need to reduce the strength of any inlined tail calls. For
        // musttail, we have to avoid introducing potential unbounded stack
        // growth. For example, if functions 'f' and 'g' are mutually recursive
        // with musttail, we can inline 'g' into 'f' so long as we preserve
        // musttail on the cloned call to 'f'. If either the inlined call site
        // or the cloned call site is *not* musttail, the program already has
        // one frame of stack growth, so it's safe to remove musttail. Here is
        // a table of example transformations:
        //
        //   f -> musttail g -> musttail f  ==>  f -> musttail f
        //   f -> musttail g ->     tail f  ==>  f ->     tail f
        //   f ->          g -> musttail f  ==>  f ->          f
        //   f ->          g ->     tail f  ==>  f ->          f
        CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
        ChildTCK = std::min(CallSiteTailKind, ChildTCK);
        CI->setTailCallKind(ChildTCK);
        InlinedMustTailCalls |= CI->isMustTailCall();

        // Calls inlined through a 'nounwind' call site should be marked
        // 'nounwind'.
        if (MarkNoUnwind)
          CI->setDoesNotThrow();
      }
    }
  }

  // Leave lifetime markers for the static allocas, scoping them to the
  // function we just inlined.
  if (InsertLifetime && !IFI.StaticAllocas.empty()) {
    IRBuilder<> builder(FirstNewBlock->begin());
    for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
      AllocaInst *AI = IFI.StaticAllocas[ai];

      // If the alloca is already scoped to something smaller than the whole
      // function then there's no need to add redundant, less accurate markers.
      if (hasLifetimeMarkers(AI))
        continue;

      // Try to determine the size of the allocation.
      ConstantInt *AllocaSize = nullptr;
      if (ConstantInt *AIArraySize =
          dyn_cast<ConstantInt>(AI->getArraySize())) {
        if (IFI.DL) {
          Type *AllocaType = AI->getAllocatedType();
          uint64_t AllocaTypeSize = IFI.DL->getTypeAllocSize(AllocaType);
          uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
          assert(AllocaArraySize > 0 && "array size of AllocaInst is zero");
          // Check that array size doesn't saturate uint64_t and doesn't
          // overflow when it's multiplied by type size.
          if (AllocaArraySize != ~0ULL &&
              UINT64_MAX / AllocaArraySize >= AllocaTypeSize) {
            AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
                                          AllocaArraySize * AllocaTypeSize);
          }
        }
      }

      builder.CreateLifetimeStart(AI, AllocaSize);
      for (ReturnInst *RI : Returns) {
        // Don't insert llvm.lifetime.end calls between a musttail call and a
        // return. The return kills all local allocas.
        if (InlinedMustTailCalls &&
            RI->getParent()->getTerminatingMustTailCall())
          continue;
        IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
      }
    }
  }

  // If the inlined code contained dynamic alloca instructions, wrap the inlined
  // code with llvm.stacksave/llvm.stackrestore intrinsics.
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    // Get the two intrinsics we care about.
    Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    Function *StackRestore =
        Intrinsic::getDeclaration(M, Intrinsic::stackrestore);

    // Insert the llvm.stacksave.
    CallInst *SavedPtr = IRBuilder<>(FirstNewBlock, FirstNewBlock->begin())
      .CreateCall(StackSave, "savedstack");

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
    for (ReturnInst *RI : Returns) {
      // Don't insert llvm.stackrestore calls between a musttail call and a
      // return. The return will restore the stack pointer.
      if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
        continue;
      IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
    }
  }

  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any call instructions into invoke instructions.
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
    HandleInlinedInvoke(II, FirstNewBlock, InlinedFunctionInfo);

  // Handle any inlined musttail call sites. In order for a new call site to be
  // musttail, the source of the clone and the inlined call site must have been
  // musttail. Therefore it's safe to return without merging control into the
  // phi below.
  if (InlinedMustTailCalls) {
    // Check if we need to bitcast the result of any musttail calls.
    Type *NewRetTy = Caller->getReturnType();
    bool NeedBitCast = !TheCall->use_empty() && TheCall->getType() != NewRetTy;

    // Handle the returns preceded by musttail calls separately.
    SmallVector<ReturnInst *, 8> NormalReturns;
    for (ReturnInst *RI : Returns) {
      CallInst *ReturnedMustTail =
          RI->getParent()->getTerminatingMustTailCall();
      if (!ReturnedMustTail) {
        NormalReturns.push_back(RI);
        continue;
      }
      if (!NeedBitCast)
        continue;

      // Delete the old return and any preceding bitcast.
      BasicBlock *CurBB = RI->getParent();
      auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
      RI->eraseFromParent();
      if (OldCast)
        OldCast->eraseFromParent();

      // Insert a new bitcast and return with the right type.
      IRBuilder<> Builder(CurBB);
      Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
    }

    // Leave behind the normal returns so we can merge control flow.
    std::swap(Returns, NormalReturns);
  }

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->getInstList().splice(TheCall, FirstNewBlock->getInstList(),
                                 FirstNewBlock->begin(), FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->getBasicBlockList().pop_back();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
      BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);
      NewBr->setDebugLoc(Returns[0]->getDebugLoc());
    }

    // If the return instruction returned a value, replace uses of the call with
    // uses of the returned value.
    if (!TheCall->use_empty()) {
      ReturnInst *R = Returns[0];
      if (TheCall == R->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(R->getReturnValue());
    }
    // Since we are now done with the Call/Invoke, we can delete it.
    TheCall->eraseFromParent();

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->eraseFromParent();

    // We are now done with the inlining.
    return true;
  }

  // Otherwise, we have the normal case, of more than one block to inline or
  // multiple return sites.

  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks. How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
  BasicBlock *AfterCallBB;
  BranchInst *CreatedBranchToNormalDest = nullptr;
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {

    // Add an unconditional branch to make this look like the CallInst case...
    CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), TheCall);

    // Split the basic block. This guarantees that no PHI nodes will have to be
    // updated due to new incoming edges, and makes the invoke case more
    // symmetric to the call case.
    AfterCallBB = OrigBB->splitBasicBlock(CreatedBranchToNormalDest,
                                          CalledFunc->getName()+".exit");

  } else {  // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    //
    AfterCallBB = OrigBB->splitBasicBlock(TheCall,
                                          CalledFunc->getName()+".exit");
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  //
  TerminatorInst *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
  Br->setOperand(0, FirstNewBlock);

  // Now that the function is correct, make it a little bit nicer. In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->getBasicBlockList().splice(AfterCallBB, Caller->getBasicBlockList(),
                                     FirstNewBlock, Caller->end());

  // Handle all of the return instructions that we just cloned in, and eliminate
  // any users of the original call/invoke instruction.
  Type *RTy = CalledFunc->getReturnType();

  PHINode *PHI = nullptr;
  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    if (!TheCall->use_empty()) {
      PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(),
                            AfterCallBB->begin());
      // Anything that used the result of the function call should now use the
      // PHI node as their operand.
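      // E.g. (illustrative): a use of '%r = call i32 @f(...)' is rewritten to
      // use a phi that merges the values of all cloned returns:
      //   %r = phi i32 [ %v1, %ret.bb1 ], [ %v2, %ret.bb2 ]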
      TheCall->replaceAllUsesWith(PHI);
    }

    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
    if (PHI) {
      for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
        ReturnInst *RI = Returns[i];
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }
    }

    // Add a branch to the merge points and remove return instructions.
    DebugLoc Loc;
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      BranchInst* BI = BranchInst::Create(AfterCallBB, RI);
      Loc = RI->getDebugLoc();
      BI->setDebugLoc(Loc);
      RI->eraseFromParent();
    }
    // We need to set the debug location to *somewhere* inside the
    // inlined function. The line number may be nonsensical, but the
    // instruction will at least be associated with the right
    // function.
    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Loc);
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!TheCall->use_empty()) {
      if (TheCall == Returns[0]->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
    }

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());

    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());

    // Delete the return instruction and remove the now-empty ReturnBB.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!TheCall->use_empty()) {
    // No returns, but something is using the return value of the call. Just
    // nuke the result.
    TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  TheCall->eraseFromParent();

  // If we inlined any musttail calls and the original return is now
  // unreachable, delete it. It can only contain a bitcast and ret.
  if (InlinedMustTailCalls && pred_begin(AfterCallBB) == pred_end(AfterCallBB))
    AfterCallBB->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the code entry block into calling block, right before the
  // unconditional branch.
  CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes
  OrigBB->getInstList().splice(Br, CalleeEntry->getInstList());

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);

  // If we inserted a phi node, check to see if it has a single value (e.g. all
  // the entries are the same or undef). If so, remove the PHI so it doesn't
  // block other optimizations.
  if (PHI) {
    if (Value *V = SimplifyInstruction(PHI, IFI.DL, nullptr, nullptr, IFI.AT)) {
      PHI->replaceAllUsesWith(V);
      PHI->eraseFromParent();
    }
  }

  return true;
}