//===- InlineFunction.cpp - Code to perform function inlining ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>

using namespace llvm;

static cl::opt<bool>
EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
  cl::Hidden,
  cl::desc("Convert noalias attributes to metadata during inlining."));

static cl::opt<bool>
PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining",
  cl::init(true), cl::Hidden,
  cl::desc("Convert align attributes to assumptions during inlining."));

bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI,
                          AAResults *CalleeAAR, bool InsertLifetime) {
  return InlineFunction(CallSite(CI), IFI, CalleeAAR, InsertLifetime);
}
bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI,
                          AAResults *CalleeAAR, bool InsertLifetime) {
  return InlineFunction(CallSite(II), IFI, CalleeAAR, InsertLifetime);
}

namespace {
  /// A class for recording information about inlining a landing pad.
  class LandingPadInliningInfo {
    BasicBlock *OuterResumeDest; ///< Destination of the invoke's unwind.
    BasicBlock *InnerResumeDest; ///< Destination for the callee's resume.
    LandingPadInst *CallerLPad;  ///< LandingPadInst associated with the invoke.
    PHINode *InnerEHValuesPHI;   ///< PHI for EH values from landingpad insts.
    SmallVector<Value*, 8> UnwindDestPHIValues;

  public:
    LandingPadInliningInfo(InvokeInst *II)
      : OuterResumeDest(II->getUnwindDest()), InnerResumeDest(nullptr),
        CallerLPad(nullptr), InnerEHValuesPHI(nullptr) {
      // If there are PHI nodes in the unwind destination block, we need to keep
      // track of which values came into them from the invoke before removing
      // the edge from this block.
      llvm::BasicBlock *InvokeBB = II->getParent();
      BasicBlock::iterator I = OuterResumeDest->begin();
      for (; isa<PHINode>(I); ++I) {
        // Save the value to use for this edge.
        PHINode *PHI = cast<PHINode>(I);
        UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
      }

      CallerLPad = cast<LandingPadInst>(I);
    }

    /// The outer unwind destination is the target of
    /// unwind edges introduced for calls within the inlined function.
    BasicBlock *getOuterResumeDest() const {
      return OuterResumeDest;
    }

    BasicBlock *getInnerResumeDest();

    LandingPadInst *getLandingPadInst() const { return CallerLPad; }

    /// Forward the 'resume' instruction to the caller's landing pad block.
    /// When the landing pad block has only one predecessor, this is
    /// a simple branch. When there is more than one predecessor, we need to
    /// split the landing pad block after the landingpad instruction and jump
    /// to there.
    void forwardResume(ResumeInst *RI,
                       SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);

    /// Add incoming-PHI values to the unwind destination block for the given
    /// basic block, using the values for the original invoke's source block.
    void addIncomingPHIValuesFor(BasicBlock *BB) const {
      addIncomingPHIValuesForInto(BB, OuterResumeDest);
    }

    void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
      BasicBlock::iterator I = dest->begin();
      for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
        PHINode *phi = cast<PHINode>(I);
        phi->addIncoming(UnwindDestPHIValues[i], src);
      }
    }
  };
} // anonymous namespace

/// Get or create a target for the branch from ResumeInsts.
BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
  if (InnerResumeDest) return InnerResumeDest;

  // Split the landing pad.
  BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator();
  InnerResumeDest =
    OuterResumeDest->splitBasicBlock(SplitPoint,
                                     OuterResumeDest->getName() + ".body");

  // The number of incoming edges we expect to the inner landing pad.
  const unsigned PHICapacity = 2;

  // Create corresponding new PHIs for all the PHIs in the outer landing pad.
  Instruction *InsertPoint = &InnerResumeDest->front();
  BasicBlock::iterator I = OuterResumeDest->begin();
  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
    PHINode *OuterPHI = cast<PHINode>(I);
    PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
                                        OuterPHI->getName() + ".lpad-body",
                                        InsertPoint);
    OuterPHI->replaceAllUsesWith(InnerPHI);
    InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
  }

  // Create a PHI for the exception values.
  InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
                                     "eh.lpad-body", InsertPoint);
  CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
  InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);

  // All done.
  return InnerResumeDest;
}

/// Forward the 'resume' instruction to the caller's landing pad block.
/// When the landing pad block has only one predecessor, this is a simple
/// branch. When there is more than one predecessor, we need to split the
/// landing pad block after the landingpad instruction and jump to there.
void LandingPadInliningInfo::forwardResume(
    ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) {
  BasicBlock *Dest = getInnerResumeDest();
  BasicBlock *Src = RI->getParent();

  BranchInst::Create(Dest, Src);

  // Update the PHIs in the destination. They were inserted in an order which
  // makes this work.
  addIncomingPHIValuesForInto(Src, Dest);

  InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
  RI->eraseFromParent();
}
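
// For illustration only (schematic; the value names are invented): after
// forwardResume, a cloned 'resume { i8*, i32 } %lpad.val' in the inlined body
// is replaced by an unconditional branch to the caller's (split) landing pad
// body, with %lpad.val fed into the PHI that merges the exception values from
// all forwarded resumes.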

/// When we inline a basic block into an invoke,
/// we have to turn all of the calls that can throw into invokes.
/// This function analyzes BB to see if there are any such calls, and if so,
/// it rewrites them to be invokes that unwind to UnwindEdge. It returns the
/// block that was rewritten (so the caller can update any PHI nodes in the
/// unwind destination), or null if no rewrite was needed.
static BasicBlock *
HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB, BasicBlock *UnwindEdge) {
  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = &*BBI++;

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);

    if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
      continue;

    // Convert this function call into an invoke instruction. First, split the
    // basic block.
    BasicBlock *Split =
        BB->splitBasicBlock(CI->getIterator(), CI->getName() + ".noexc");

    // Delete the unconditional branch inserted by splitBasicBlock.
    BB->getInstList().pop_back();

    // Create the new invoke instruction.
    SmallVector<Value*, 8> InvokeArgs(CI->arg_begin(), CI->arg_end());
    SmallVector<OperandBundleDef, 1> OpBundles;

    CI->getOperandBundlesAsDefs(OpBundles);

    // Note: we're round-tripping operand bundles through memory here, and that
    // can potentially be avoided with a cleverer API design that we do not
    // have as of this time.

    InvokeInst *II =
        InvokeInst::Create(CI->getCalledValue(), Split, UnwindEdge, InvokeArgs,
                           OpBundles, CI->getName(), BB);
    II->setDebugLoc(CI->getDebugLoc());
    II->setCallingConv(CI->getCallingConv());
    II->setAttributes(CI->getAttributes());

    // Make sure that anything using the call now uses the invoke! This also
    // updates the CallGraph if present, because it uses a WeakVH.
    CI->replaceAllUsesWith(II);

    // Delete the original call.
    Split->getInstList().pop_front();
    return BB;
  }
  return nullptr;
}
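
// For illustration only (schematic; names are invented): the rewrite turns
//
//   %x = call i32 @may_throw(i32 %a)
//
// into
//
//   %x = invoke i32 @may_throw(i32 %a)
//           to label %x.noexc unwind label %unwind.edge
//
// where %unwind.edge is the UnwindEdge block passed in and %x.noexc is the
// split-off remainder of the original block.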

/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                                    ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function; scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.
  LandingPadInliningInfo Invoke(II);

  // Get all of the inlined landing pad instructions.
  SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
  for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end();
       I != E; ++I)
    if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
      InlinedLPads.insert(II->getLandingPadInst());

  // Append the clauses from the outer landing pad instruction into the inlined
  // landing pad instructions.
  LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
  for (LandingPadInst *InlinedLPad : InlinedLPads) {
    unsigned OuterNum = OuterLPad->getNumClauses();
    InlinedLPad->reserveClauses(OuterNum);
    for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
      InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
    if (OuterLPad->isCleanup())
      InlinedLPad->setCleanup(true);
  }

  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    if (InlinedCodeInfo.ContainsCalls)
      if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
              &*BB, Invoke.getOuterResumeDest()))
        // Update any PHI nodes in the exceptional block to indicate that there
        // is now a new entry in them.
        Invoke.addIncomingPHIValuesFor(NewBB);

    // Forward any resumes that are remaining here.
    if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
      Invoke.forwardResume(RI, InlinedLPads);
  }

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}
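
// For illustration only (schematic; types are invented): if the caller's
// landingpad catches @TypeA and an inlined landingpad caught @TypeB, the
// clause-append loop above leaves the inlined landingpad as
//
//   %lp.i = landingpad { i8*, i32 } catch i8* @TypeB catch i8* @TypeA
//
// so exceptions the caller's pad would have handled are still handled after
// the unwind edge is redirected into the inlined code.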

/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                               ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *UnwindDest = II->getUnwindDest();
  Function *Caller = FirstNewBlock->getParent();

  assert(UnwindDest->getFirstNonPHI()->isEHPad() && "unexpected BasicBlock!");

  // If there are PHI nodes in the unwind destination block, we need to keep
  // track of which values came into them from the invoke before removing the
  // edge from this block.
  SmallVector<Value *, 8> UnwindDestPHIValues;
  llvm::BasicBlock *InvokeBB = II->getParent();
  for (Instruction &I : *UnwindDest) {
    // Save the value to use for this edge.
    PHINode *PHI = dyn_cast<PHINode>(&I);
    if (!PHI)
      break;
    UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
  }

  // Add incoming-PHI values to the unwind destination block for the given
  // basic block, using the values for the original invoke's source block.
  auto UpdatePHINodes = [&](BasicBlock *Src) {
    BasicBlock::iterator I = UnwindDest->begin();
    for (Value *V : UnwindDestPHIValues) {
      PHINode *PHI = cast<PHINode>(I);
      PHI->addIncoming(V, Src);
      ++I;
    }
  };

  // This connects all the instructions which 'unwind to caller' to the invoke
  // destination.
  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
      if (CRI->unwindsToCaller()) {
        CleanupReturnInst::Create(CRI->getCleanupPad(), UnwindDest, CRI);
        CRI->eraseFromParent();
        UpdatePHINodes(&*BB);
      }
    }

    Instruction *I = BB->getFirstNonPHI();
    if (!I->isEHPad())
      continue;

    Instruction *Replacement = nullptr;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
      if (CatchSwitch->unwindsToCaller()) {
        auto *NewCatchSwitch = CatchSwitchInst::Create(
            CatchSwitch->getParentPad(), UnwindDest,
            CatchSwitch->getNumHandlers(), CatchSwitch->getName(),
            CatchSwitch);
        for (BasicBlock *PadBB : CatchSwitch->handlers())
          NewCatchSwitch->addHandler(PadBB);
        Replacement = NewCatchSwitch;
      }
    } else if (!isa<FuncletPadInst>(I)) {
      llvm_unreachable("unexpected EHPad!");
    }

    if (Replacement) {
      Replacement->takeName(I);
      I->replaceAllUsesWith(Replacement);
      I->eraseFromParent();
      UpdatePHINodes(&*BB);
    }
  }

  if (InlinedCodeInfo.ContainsCalls)
    for (Function::iterator BB = FirstNewBlock->getIterator(),
                            E = Caller->end();
         BB != E; ++BB)
      if (BasicBlock *NewBB =
              HandleCallsInBlockInlinedThroughInvoke(&*BB, UnwindDest))
        // Update any PHI nodes in the exceptional block to indicate that there
        // is now a new entry in them.
        UpdatePHINodes(NewBB);

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  UnwindDest->removePredecessor(InvokeBB);
}
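
// For illustration only (schematic): an inlined
//
//   cleanupret from %pad unwind to caller
//
// is rewritten above to unwind to the invoke's destination instead:
//
//   cleanupret from %pad unwind label %invoke.unwind
//
// and an inlined catchswitch that unwound to caller is recreated the same way.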

/// When inlining a function that contains noalias scope metadata,
/// this metadata needs to be cloned so that the inlined blocks
/// have different "unique scopes" at every call site. Were this not done, then
/// aliasing scopes from a function inlined into a caller multiple times could
/// not be differentiated (and this would lead to miscompiles because the
/// non-aliasing property communicated by the metadata could have
/// call-site-specific control dependencies).
static void CloneAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap) {
  const Function *CalledFunc = CS.getCalledFunction();
  SetVector<const MDNode *> MD;

  // Note: We could only clone the metadata if it is already used in the
  // caller. I'm omitting that check here because it might confuse
  // inter-procedural alias analysis passes. We can revisit this if it becomes
  // an efficiency or overhead problem.

  for (Function::const_iterator I = CalledFunc->begin(), IE = CalledFunc->end();
       I != IE; ++I)
    for (BasicBlock::const_iterator J = I->begin(), JE = I->end(); J != JE; ++J) {
      if (const MDNode *M = J->getMetadata(LLVMContext::MD_alias_scope))
        MD.insert(M);
      if (const MDNode *M = J->getMetadata(LLVMContext::MD_noalias))
        MD.insert(M);
    }

  if (MD.empty())
    return;

  // Walk the existing metadata, adding the complete (perhaps cyclic) chain to
  // the set.
  SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
  while (!Queue.empty()) {
    const MDNode *M = cast<MDNode>(Queue.pop_back_val());
    for (unsigned i = 0, ie = M->getNumOperands(); i != ie; ++i)
      if (const MDNode *M1 = dyn_cast<MDNode>(M->getOperand(i)))
        if (MD.insert(M1))
          Queue.push_back(M1);
  }

  // Now we have a complete set of all metadata in the chains used to specify
  // the noalias scopes and the lists of those scopes.
  SmallVector<TempMDTuple, 16> DummyNodes;
  DenseMap<const MDNode *, TrackingMDNodeRef> MDMap;
  for (SetVector<const MDNode *>::iterator I = MD.begin(), IE = MD.end();
       I != IE; ++I) {
    DummyNodes.push_back(MDTuple::getTemporary(CalledFunc->getContext(), None));
    MDMap[*I].reset(DummyNodes.back().get());
  }

  // Create new metadata nodes to replace the dummy nodes, replacing old
  // metadata references with either a dummy node or an already-created new
  // node.
  for (SetVector<const MDNode *>::iterator I = MD.begin(), IE = MD.end();
       I != IE; ++I) {
    SmallVector<Metadata *, 4> NewOps;
    for (unsigned i = 0, ie = (*I)->getNumOperands(); i != ie; ++i) {
      const Metadata *V = (*I)->getOperand(i);
      if (const MDNode *M = dyn_cast<MDNode>(V))
        NewOps.push_back(MDMap[M]);
      else
        NewOps.push_back(const_cast<Metadata *>(V));
    }

    MDNode *NewM = MDNode::get(CalledFunc->getContext(), NewOps);
    MDTuple *TempM = cast<MDTuple>(MDMap[*I]);
    assert(TempM->isTemporary() && "Expected temporary node");

    TempM->replaceAllUsesWith(NewM);
  }

  // Now replace the metadata in the new inlined instructions with the
  // replacements from the map.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (!VMI->second)
      continue;

    Instruction *NI = dyn_cast<Instruction>(VMI->second);
    if (!NI)
      continue;

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_alias_scope)) {
      MDNode *NewMD = MDMap[M];
      // If the call site also had alias scope metadata (a list of scopes to
      // which instructions inside it might belong), propagate those scopes to
      // the inlined instructions.
      if (MDNode *CSM =
              CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
        NewMD = MDNode::concatenate(NewMD, CSM);
      NI->setMetadata(LLVMContext::MD_alias_scope, NewMD);
    } else if (NI->mayReadOrWriteMemory()) {
      if (MDNode *M =
              CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
        NI->setMetadata(LLVMContext::MD_alias_scope, M);
    }

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_noalias)) {
      MDNode *NewMD = MDMap[M];
      // If the call site also had noalias metadata (a list of scopes with
      // which instructions inside it don't alias), propagate those scopes to
      // the inlined instructions.
      if (MDNode *CSM =
              CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
        NewMD = MDNode::concatenate(NewMD, CSM);
      NI->setMetadata(LLVMContext::MD_noalias, NewMD);
    } else if (NI->mayReadOrWriteMemory()) {
      if (MDNode *M = CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
        NI->setMetadata(LLVMContext::MD_noalias, M);
    }
  }
}
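
// For illustration only (schematic; the node numbers are invented), the
// metadata being cloned has this shape:
//
//   !0 = distinct !{!0, !1, "scope"}  ; a self-referential scope in domain !1
//   !1 = distinct !{!1, "domain"}     ; a domain
//   !2 = !{!0}                        ; a scope list used by !alias.scope /
//                                     ; !noalias attachments
//
// Each inlining maps every such node to a fresh distinct node, so two inlined
// copies of the same callee end up with unrelated scopes.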

/// If the inlined function has noalias arguments,
/// then add new alias scopes for each noalias argument, tag the mapped noalias
/// parameters with noalias metadata specifying the new scope, and tag all
/// non-derived loads, stores and memory intrinsics with the new alias scopes.
static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap,
                                  const DataLayout &DL, AAResults *CalleeAAR) {
  if (!EnableNoAliasConversion)
    return;

  const Function *CalledFunc = CS.getCalledFunction();
  SmallVector<const Argument *, 4> NoAliasArgs;

  for (const Argument &I : CalledFunc->args()) {
    if (I.hasNoAliasAttr() && !I.hasNUses(0))
      NoAliasArgs.push_back(&I);
  }

  if (NoAliasArgs.empty())
    return;

  // To do a good job, if a noalias variable is captured, we need to know if
  // the capture point dominates the particular use we're considering.
  DominatorTree DT;
  DT.recalculate(const_cast<Function&>(*CalledFunc));

  // noalias indicates that pointer values based on the argument do not alias
  // pointer values which are not based on it. So we add a new "scope" for each
  // noalias function argument. Accesses using pointers based on that argument
  // become part of that alias scope, accesses using pointers not based on that
  // argument are tagged as noalias with that scope.

  DenseMap<const Argument *, MDNode *> NewScopes;
  MDBuilder MDB(CalledFunc->getContext());

  // Create a new scope domain for this function.
  MDNode *NewDomain =
      MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
  for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
    const Argument *A = NoAliasArgs[i];

    std::string Name = CalledFunc->getName();
    if (A->hasName()) {
      Name += ": %";
      Name += A->getName();
    } else {
      Name += ": argument ";
      Name += utostr(i);
    }

    // Note: We always create a new anonymous root here. This is true regardless
    // of the linkage of the callee because the aliasing "scope" is not just a
    // property of the callee, but also all control dependencies in the caller.
    MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
    NewScopes.insert(std::make_pair(A, NewScope));
  }

  // Iterate over all new instructions in the map; for all memory-access
  // instructions, add the alias scope metadata.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
      if (!VMI->second)
        continue;

      Instruction *NI = dyn_cast<Instruction>(VMI->second);
      if (!NI)
        continue;

      bool IsArgMemOnlyCall = false, IsFuncCall = false;
      SmallVector<const Value *, 2> PtrArgs;

      if (const LoadInst *LI = dyn_cast<LoadInst>(I))
        PtrArgs.push_back(LI->getPointerOperand());
      else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
        PtrArgs.push_back(SI->getPointerOperand());
      else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
        PtrArgs.push_back(VAAI->getPointerOperand());
      else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
        PtrArgs.push_back(CXI->getPointerOperand());
      else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
        PtrArgs.push_back(RMWI->getPointerOperand());
      else if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
        // If we know that the call does not access memory, then we'll still
        // know that about the inlined clone of this call site, and we don't
        // need to add metadata.
        if (ICS.doesNotAccessMemory())
          continue;

        IsFuncCall = true;
        if (CalleeAAR) {
          FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(ICS);
          if (MRB == FMRB_OnlyAccessesArgumentPointees ||
              MRB == FMRB_OnlyReadsArgumentPointees)
            IsArgMemOnlyCall = true;
        }

        for (ImmutableCallSite::arg_iterator AI = ICS.arg_begin(),
             AE = ICS.arg_end(); AI != AE; ++AI) {
          // We need to check the underlying objects of all arguments, not just
          // the pointer arguments, because we might be passing pointers as
          // integers, etc.
          // However, if we know that the call only accesses pointer arguments,
          // then we only need to check the pointer arguments.
          if (IsArgMemOnlyCall && !(*AI)->getType()->isPointerTy())
            continue;

          PtrArgs.push_back(*AI);
        }
      }

      // If we found no pointers, then this instruction is not suitable for
      // pairing with an instruction to receive aliasing metadata.
      // However, if this is a call, we might just alias with none of the
      // noalias arguments.
      if (PtrArgs.empty() && !IsFuncCall)
        continue;

      // It is possible that there is only one underlying object, but you
      // need to go through several PHIs to see it, and thus could be
      // repeated in the Objects list.
      SmallPtrSet<const Value *, 4> ObjSet;
      SmallVector<Metadata *, 4> Scopes, NoAliases;

      SmallSetVector<const Argument *, 4> NAPtrArgs;
      for (unsigned i = 0, ie = PtrArgs.size(); i != ie; ++i) {
        SmallVector<Value *, 4> Objects;
        GetUnderlyingObjects(const_cast<Value*>(PtrArgs[i]),
                             Objects, DL, /* LI = */ nullptr);

        for (Value *O : Objects)
          ObjSet.insert(O);
      }

      // Figure out if we're derived from anything that is not a noalias
      // argument.
      bool CanDeriveViaCapture = false, UsesAliasingPtr = false;
      for (const Value *V : ObjSet) {
        // Is this value a constant that cannot be derived from any pointer
        // value (we need to exclude constant expressions, for example, that
        // are formed from arithmetic on global symbols)?
        bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
                             isa<ConstantPointerNull>(V) ||
                             isa<ConstantDataVector>(V) || isa<UndefValue>(V);
        if (IsNonPtrConst)
          continue;

        // If this is anything other than a noalias argument, then we cannot
        // completely describe the aliasing properties using alias.scope
        // metadata (and, thus, won't add any).
        if (const Argument *A = dyn_cast<Argument>(V)) {
          if (!A->hasNoAliasAttr())
            UsesAliasingPtr = true;
        } else {
          UsesAliasingPtr = true;
        }

        // If this is not some identified function-local object (which cannot
        // directly alias a noalias argument), or some other argument (which,
        // by definition, also cannot alias a noalias argument), then we could
        // alias a noalias argument that has been captured.
        if (!isa<Argument>(V) &&
            !isIdentifiedFunctionLocal(const_cast<Value*>(V)))
          CanDeriveViaCapture = true;
      }

      // A function call can always get captured noalias pointers (via other
      // parameters, globals, etc.).
      if (IsFuncCall && !IsArgMemOnlyCall)
        CanDeriveViaCapture = true;

      // First, we want to figure out all of the sets with which we definitely
      // don't alias. Iterate over all noalias sets, and add those for which:
      //   1. The noalias argument is not in the set of objects from which we
      //      definitely derive.
      //   2. The noalias argument has not yet been captured.
      // An arbitrary function that might load pointers could see captured
      // noalias arguments via other noalias arguments or globals, and so we
      // must always check for prior capture.
      for (const Argument *A : NoAliasArgs) {
        if (!ObjSet.count(A) && (!CanDeriveViaCapture ||
                                 // It might be tempting to skip the
                                 // PointerMayBeCapturedBefore check if
                                 // A->hasNoCaptureAttr() is true, but this is
                                 // incorrect because nocapture only guarantees
                                 // that no copies outlive the function, not
                                 // that the value cannot be locally captured.
                                 !PointerMayBeCapturedBefore(A,
                                     /* ReturnCaptures */ false,
                                     /* StoreCaptures */ false, I, &DT)))
          NoAliases.push_back(NewScopes[A]);
      }

      if (!NoAliases.empty())
        NI->setMetadata(LLVMContext::MD_noalias,
                        MDNode::concatenate(
                            NI->getMetadata(LLVMContext::MD_noalias),
                            MDNode::get(CalledFunc->getContext(), NoAliases)));

      // Next, we want to figure out all of the sets to which we might belong.
      // We might belong to a set if the noalias argument is in the set of
      // underlying objects. If there is some non-noalias argument in our list
      // of underlying objects, then we cannot add a scope because the fact
      // that some access does not alias with any set of our noalias arguments
      // cannot itself guarantee that it does not alias with this access
      // (because there is some pointer of unknown origin involved and the
      // other access might also depend on this pointer). We also cannot add
      // scopes to arbitrary functions unless we know they don't access any
      // non-parameter pointer-values.
      bool CanAddScopes = !UsesAliasingPtr;
      if (CanAddScopes && IsFuncCall)
        CanAddScopes = IsArgMemOnlyCall;

      if (CanAddScopes)
        for (const Argument *A : NoAliasArgs) {
          if (ObjSet.count(A))
            Scopes.push_back(NewScopes[A]);
        }

      if (!Scopes.empty())
        NI->setMetadata(
            LLVMContext::MD_alias_scope,
            MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
                                MDNode::get(CalledFunc->getContext(), Scopes)));
    }
  }
}
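
// For illustration only (schematic; the metadata numbers are invented): after
// the tagging above, an inlined load based on a noalias argument might read
//
//   %v = load i32, i32* %p, !alias.scope !2, !noalias !3
//
// where !2 lists the scope of the noalias argument %p derives from, and !3
// lists the scopes of the noalias arguments %p is known not to alias.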

/// If the inlined function has non-byval align arguments, then
/// add @llvm.assume-based alignment assumptions to preserve this information.
static void AddAlignmentAssumptions(CallSite CS, InlineFunctionInfo &IFI) {
  if (!PreserveAlignmentAssumptions)
    return;
  auto &DL = CS.getCaller()->getParent()->getDataLayout();

  // To avoid inserting redundant assumptions, we should check for assumptions
  // already in the caller. To do this, we might need a DT of the caller.
  DominatorTree DT;
  bool DTCalculated = false;

  Function *CalledFunc = CS.getCalledFunction();
  for (Function::arg_iterator I = CalledFunc->arg_begin(),
                              E = CalledFunc->arg_end();
       I != E; ++I) {
    unsigned Align = I->getType()->isPointerTy() ? I->getParamAlignment() : 0;
    if (Align && !I->hasByValOrInAllocaAttr() && !I->hasNUses(0)) {
      if (!DTCalculated) {
        DT.recalculate(const_cast<Function&>(*CS.getInstruction()->getParent()
                                                 ->getParent()));
        DTCalculated = true;
      }

      // If we can already prove the asserted alignment in the context of the
      // caller, then don't bother inserting the assumption.
      Value *Arg = CS.getArgument(I->getArgNo());
      if (getKnownAlignment(Arg, DL, CS.getInstruction(),
                            &IFI.ACT->getAssumptionCache(*CS.getCaller()),
                            &DT) >= Align)
        continue;

      IRBuilder<>(CS.getInstruction())
          .CreateAlignmentAssumption(DL, Arg, Align);
    }
  }
}
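
// For illustration only, the assumption sequence CreateAlignmentAssumption
// emits for a hypothetical pointer argument %p with 'align 16' looks roughly
// like:
//
//   %ptrint = ptrtoint i32* %p to i64
//   %maskedptr = and i64 %ptrint, 15
//   %maskcond = icmp eq i64 %maskedptr, 0
//   call void @llvm.assume(i1 %maskcond)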

/// Once we have cloned code over from a callee into the caller,
/// update the specified callgraph to reflect the changes we made.
/// Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
static void UpdateCallGraphAfterInlining(CallSite CS,
                                         Function::iterator FirstNewBlock,
                                         ValueToValueMapTy &VMap,
                                         InlineFunctionInfo &IFI) {
  CallGraph &CG = *IFI.CG;
  const Function *Caller = CS.getInstruction()->getParent()->getParent();
  const Function *Callee = CS.getCalledFunction();
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();

  // Consider the case where CalleeNode == CallerNode.
  CallGraphNode::CalledFunctionsVector CallCache;
  if (CalleeNode == CallerNode) {
    CallCache.assign(I, E);
    I = CallCache.begin();
    E = CallCache.end();
  }

  for (; I != E; ++I) {
    const Value *OrigCall = I->first;

    ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI == VMap.end() || VMI->second == nullptr)
      continue;

    // If the call was inlined, but then constant folded, there is no edge to
    // add. Check for this case.
    Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
    if (!NewCall)
      continue;

    // We do not treat intrinsic calls like real function calls because we
    // expect them to become inline code; do not add an edge for an intrinsic.
    CallSite CS = CallSite(NewCall);
    if (CS && CS.getCalledFunction() && CS.getCalledFunction()->isIntrinsic())
      continue;

    // Remember that this call site got inlined for the client of
    // InlineFunction.
    IFI.InlinedCalls.push_back(NewCall);

    // It's possible that inlining the callsite will cause it to go from an
    // indirect to a direct call by resolving a function pointer. If this
    // happens, set the callee of the new call site to a more precise
    // destination. This can also happen if the call graph node of the caller
    // was just unnecessarily imprecise.
    if (!I->second->getFunction())
      if (Function *F = CallSite(NewCall).getCalledFunction()) {
        // Indirect call site resolved to direct call.
        CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);

        continue;
      }

    CallerNode->addCalledFunction(CallSite(NewCall), I->second);
  }

  // Update the call graph by deleting the edge from Callee to Caller. We must
  // do this after the loop above in case Caller and Callee are the same.
  CallerNode->removeCallEdgeFor(CS);
}

static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
                                    BasicBlock *InsertBlock,
                                    InlineFunctionInfo &IFI) {
  Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
  IRBuilder<> Builder(InsertBlock, InsertBlock->begin());

  Value *Size = Builder.getInt64(M->getDataLayout().getTypeStoreSize(AggTy));

  // Always generate a memcpy of alignment 1 here because we don't know
  // the alignment of the src pointer. Other optimizations can infer
  // better alignment.
  Builder.CreateMemCpy(Dst, Src, Size, /*Align=*/1);
}
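
// For illustration only (schematic; the types and names are invented):
// inlining a call with a byval argument conceptually turns
//
//   call void @f(%struct.S* byval align 4 %s)
//
// into a fresh alloca in the caller plus an explicit copy of the aggregate,
// along the lines of
//
//   %s.copy = alloca %struct.S
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %s.copy.i8, i8* %s.i8,
//                                        i64 %size, i32 1, i1 false)
//
// after which the inlined body uses %s.copy in place of %s.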

/// When inlining a call site that has a byval argument,
/// we have to make the implicit memcpy explicit by adding it.
static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
                                  const Function *CalledFunc,
                                  InlineFunctionInfo &IFI,
                                  unsigned ByValAlignment) {
  PointerType *ArgTy = cast<PointerType>(Arg->getType());
  Type *AggTy = ArgTy->getElementType();

  Function *Caller = TheCall->getParent()->getParent();

  // If the called function is readonly, then it could not mutate the caller's
  // copy of the byval'd memory. In this case, it is safe to elide the copy and
  // temporary.
  if (CalledFunc->onlyReadsMemory()) {
    // If the byval argument has a specified alignment that is greater than the
    // passed in pointer, then we either have to round up the input pointer or
    // give up on this transformation.
    if (ByValAlignment <= 1)  // 0 = unspecified, 1 = no particular alignment.
      return Arg;

    const DataLayout &DL = Caller->getParent()->getDataLayout();

    // If the pointer is already known to be sufficiently aligned, or if we can
    // round it up to a larger alignment, then we don't need a temporary.
    if (getOrEnforceKnownAlignment(Arg, ByValAlignment, DL, TheCall,
                                   &IFI.ACT->getAssumptionCache(*Caller)) >=
        ByValAlignment)
      return Arg;

    // Otherwise, we have to make a memcpy to get a safe alignment. This is bad
    // for code quality, but rarely happens and is required for correctness.
  }

  // Create the alloca. If we have DataLayout, use nice alignment.
  unsigned Align =
      Caller->getParent()->getDataLayout().getPrefTypeAlignment(AggTy);

  // If the byval had an alignment specified, we *must* use at least that
  // alignment, as it is required by the byval argument (and uses of the
  // pointer inside the callee).
  Align = std::max(Align, ByValAlignment);

  Value *NewAlloca = new AllocaInst(AggTy, nullptr, Align, Arg->getName(),
                                    &*Caller->begin()->begin());
  IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));

  // Uses of the argument in the function should use our new alloca
  // instead.
  return NewAlloca;
}

// Check whether this Value is used by a lifetime intrinsic.
static bool isUsedByLifetimeMarker(Value *V) {
  for (User *U : V->users()) {
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
        return true;
      }
    }
  }
  return false;
}

// Check whether the given alloca already has
// lifetime.start or lifetime.end intrinsics.
static bool hasLifetimeMarkers(AllocaInst *AI) {
  Type *Ty = AI->getType();
  Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
                                       Ty->getPointerAddressSpace());
  if (Ty == Int8PtrTy)
    return isUsedByLifetimeMarker(AI);

  // Do a scan to find all the casts to i8*.
  for (User *U : AI->users()) {
    if (U->getType() != Int8PtrTy) continue;
    if (U->stripPointerCasts() != AI) continue;
    if (isUsedByLifetimeMarker(U))
      return true;
  }
  return false;
}

/// Rebuild the entire inlined-at chain for this instruction so that the top of
/// the chain now is inlined-at the new call site.
static DebugLoc
updateInlinedAtInfo(DebugLoc DL, DILocation *InlinedAtNode, LLVMContext &Ctx,
                    DenseMap<const DILocation *, DILocation *> &IANodes) {
  SmallVector<DILocation *, 3> InlinedAtLocations;
  DILocation *Last = InlinedAtNode;
  DILocation *CurInlinedAt = DL;

  // Gather all the inlined-at nodes.
  while (DILocation *IA = CurInlinedAt->getInlinedAt()) {
    // Skip any we've already built nodes for.
    if (DILocation *Found = IANodes[IA]) {
      Last = Found;
      break;
    }

    InlinedAtLocations.push_back(IA);
    CurInlinedAt = IA;
  }

  // Starting from the top, rebuild the nodes to point to the new inlined-at
  // location (then rebuilding the rest of the chain behind it) and update the
  // map of already-constructed inlined-at nodes.
  for (const DILocation *MD : make_range(InlinedAtLocations.rbegin(),
                                         InlinedAtLocations.rend())) {
    Last = IANodes[MD] = DILocation::getDistinct(
        Ctx, MD->getLine(), MD->getColumn(), MD->getScope(), Last);
  }

  // And finally create the normal location for this instruction, referring to
  // the new inlined-at chain.
  return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(), Last);
}
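
// For illustration only (schematic; the line numbers and scope names are
// invented): if an instruction in 'bar' carried
//
//   !DILocation(line: 7, scope: !bar.scope)
//
// and 'bar' is inlined at line 42 of 'foo', its rebuilt location becomes
//
//   !DILocation(line: 7, scope: !bar.scope,
//               inlinedAt: !DILocation(line: 42, scope: !foo.scope))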

/// Update inlined instructions' line numbers to
/// encode the location where these instructions are inlined.
static void fixupLineNumbers(Function *Fn, Function::iterator FI,
                             Instruction *TheCall) {
  DebugLoc TheCallDL = TheCall->getDebugLoc();
  if (!TheCallDL)
    return;

  auto &Ctx = Fn->getContext();
  DILocation *InlinedAtNode = TheCallDL;

  // Create a unique call site, not to be confused with any other call from the
  // same location.
  InlinedAtNode = DILocation::getDistinct(
      Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
      InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());

  // Cache the inlined-at nodes as they're built so they are reused; without
  // this, every instruction's inlined-at chain would become distinct from
  // each other.
  DenseMap<const DILocation *, DILocation *> IANodes;

  for (; FI != Fn->end(); ++FI) {
    for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
         BI != BE; ++BI) {
      DebugLoc DL = BI->getDebugLoc();
      if (!DL) {
        // If the inlined instruction has no line number, make it look as if it
        // originates from the call location. This is important for
        // ((__always_inline__, __nodebug__)) functions which must use caller
        // location for all instructions in their function body.

        // Don't update static allocas, as they may get moved later.
        if (auto *AI = dyn_cast<AllocaInst>(BI))
          if (isa<Constant>(AI->getArraySize()))
            continue;

        BI->setDebugLoc(TheCallDL);
      } else {
        BI->setDebugLoc(updateInlinedAtInfo(DL, InlinedAtNode,
                                            BI->getContext(), IANodes));
      }
    }
  }
}

/// This function inlines the called function into the basic block of the
/// caller. This returns false if it is not possible to inline this call.
/// The program is still in a well defined state if this occurs though.
///
/// Note that this only does one level of inlining. For example, if the
/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
/// exists in the instruction stream. Similarly this will inline a recursive
/// function by one level.
bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
                          AAResults *CalleeAAR, bool InsertLifetime) {
  Instruction *TheCall = CS.getInstruction();
  assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
         "Instruction not in function!");

  // If IFI has any state in it, zap it before we fill it in.
  IFI.reset();

  const Function *CalledFunc = CS.getCalledFunction();
  if (!CalledFunc ||                 // Can't inline external function or
      CalledFunc->isDeclaration() || // indirect call, or call to a vararg
      CalledFunc->getFunctionType()->isVarArg()) return false; // function!

  // The inliner does not know how to inline through calls with operand bundles
  // in general ...
  if (CS.hasOperandBundles()) {
    for (int i = 0, e = CS.getNumOperandBundles(); i != e; ++i) {
      uint32_t Tag = CS.getOperandBundleAt(i).getTagID();
      // ... but it knows how to inline through "deopt" operand bundles ...
      if (Tag == LLVMContext::OB_deopt)
        continue;
      // ... and "funclet" operand bundles.
      if (Tag == LLVMContext::OB_funclet)
        continue;

      return false;
    }
  }

  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CS.doesNotThrow();

  BasicBlock *OrigBB = TheCall->getParent();
  Function *Caller = OrigBB->getParent();

  // GC poses two hazards to inlining, which only occur when the callee has GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to the
  //     caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasGC()) {
    if (!Caller->hasGC())
      Caller->setGC(CalledFunc->getGC());
    else if (CalledFunc->getGC() != Caller->getGC())
      return false;
  }

  // Get the personality function from the callee if it contains a landing pad.
  Constant *CalledPersonality =
      CalledFunc->hasPersonalityFn()
          ? CalledFunc->getPersonalityFn()->stripPointerCasts()
          : nullptr;

  // Find the personality function used by the landing pads of the caller. If it
  // exists, then check to see that it matches the personality function used in
  // the callee.
  Constant *CallerPersonality =
      Caller->hasPersonalityFn()
          ? Caller->getPersonalityFn()->stripPointerCasts()
          : nullptr;
  if (CalledPersonality) {
    if (!CallerPersonality)
      Caller->setPersonalityFn(CalledPersonality);
    // If the personality functions match, then we can perform the
    // inlining. Otherwise, we can't inline.
    // TODO: This isn't 100% true. Some personality functions are proper
    // supersets of others and can be used in place of the other.
    else if (CalledPersonality != CallerPersonality)
      return false;
  }

  // We need to figure out which funclet the callsite was in so that we may
  // properly nest the callee.
  Instruction *CallSiteEHPad = nullptr;
  if (CallerPersonality) {
    EHPersonality Personality = classifyEHPersonality(CallerPersonality);
    if (isFuncletEHPersonality(Personality)) {
      Optional<OperandBundleUse> ParentFunclet =
          CS.getOperandBundle(LLVMContext::OB_funclet);
      if (ParentFunclet)
        CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());

      // OK, the inlining site is legal. What about the target function?

      if (CallSiteEHPad) {
        if (Personality == EHPersonality::MSVC_CXX) {
          // The MSVC personality cannot tolerate catches getting inlined into
          // cleanup funclets.
          if (isa<CleanupPadInst>(CallSiteEHPad)) {
            // Ok, the call site is within a cleanuppad. Let's check the callee
            // for catchpads.
            for (const BasicBlock &CalledBB : *CalledFunc) {
              if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))
                return false;
            }
          }
        } else if (isAsynchronousEHPersonality(Personality)) {
          // SEH is even less tolerant; there may not be any sort of exceptional
          // funclet in the callee.
          for (const BasicBlock &CalledBB : *CalledFunc) {
            if (CalledBB.isEHPad())
              return false;
          }
        }
      }
    }
  }
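
  // For illustration only (schematic): a call site nested in a cleanup funclet
  // carries an operand bundle naming its parent pad, e.g.
  //
  //   %cp = cleanuppad within none []
  //   call void @g() [ "funclet"(token %cp) ]
  //
  // which is what the OB_funclet lookup above inspects.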

  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  Function::iterator LastBlock = --Caller->end();

  // Make sure to capture all of the return instructions from the cloned
  // function.
  SmallVector<ReturnInst*, 8> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;

  { // Scope to destroy VMap after cloning.
    ValueToValueMapTy VMap;
    // Keep a list of pairs (dst, src) to emit byval initializations.
    SmallVector<std::pair<Value*, Value*>, 4> ByValInit;

    auto &DL = Caller->getParent()->getDataLayout();

    assert(CalledFunc->arg_size() == CS.arg_size() &&
           "No varargs calls can be inlined!");

    // Calculate the vector of arguments to pass into the function cloner,
    // which matches up the formal to the actual argument values.
    CallSite::arg_iterator AI = CS.arg_begin();
    unsigned ArgNo = 0;
    for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
         E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;

      // When byval arguments are actually inlined, we need to make the copy
      // implied by them explicit. However, we don't do this if the callee is
      // readonly or readnone, because the copy would be unneeded: the callee
      // doesn't modify the struct.
      if (CS.isByValArgument(ArgNo)) {
        ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
                                        CalledFunc->getParamAlignment(ArgNo+1));
        if (ActualArg != *AI)
          ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI));
      }

      VMap[&*I] = ActualArg;
    }

    // Add alignment assumptions if necessary. We do this before the inlined
    // instructions are actually cloned into the caller so that we can easily
    // check what will be known at the start of the inlined code.
    AddAlignmentAssumptions(CS, IFI);

    // We want the inliner to prune the code as it copies. We would LOVE to
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll be
    // happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
                              /*ModuleLevelChanges=*/false, Returns, ".i",
                              &InlinedFunctionInfo, TheCall);

    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    // Inject byval arguments initialization.
    for (std::pair<Value*, Value*> &Init : ByValInit)
      HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(),
                              &*FirstNewBlock, IFI);

    Optional<OperandBundleUse> ParentDeopt =
        CS.getOperandBundle(LLVMContext::OB_deopt);
    if (ParentDeopt) {
      SmallVector<OperandBundleDef, 2> OpDefs;

      for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) {
        Instruction *I = dyn_cast_or_null<Instruction>(VH);
        if (!I) continue;  // instruction was DCE'd or RAUW'ed to undef

        OpDefs.clear();

        CallSite ICS(I);
        OpDefs.reserve(ICS.getNumOperandBundles());

        for (unsigned i = 0, e = ICS.getNumOperandBundles(); i < e; ++i) {
          auto ChildOB = ICS.getOperandBundleAt(i);
          if (ChildOB.getTagID() != LLVMContext::OB_deopt) {
            // If the inlined call has other operand bundles, let them be.
            OpDefs.emplace_back(ChildOB);
            continue;
          }

          // It may be useful to separate this logic (of handling operand
          // bundles) out to a separate "policy" component if this gets
          // crowded. Prepend the parent's deoptimization continuation to the
          // newly inlined call's deoptimization continuation.
          std::vector<Value *> MergedDeoptArgs;
          MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
                                  ChildOB.Inputs.size());

          MergedDeoptArgs.insert(MergedDeoptArgs.end(),
                                 ParentDeopt->Inputs.begin(),
                                 ParentDeopt->Inputs.end());
          MergedDeoptArgs.insert(MergedDeoptArgs.end(), ChildOB.Inputs.begin(),
                                 ChildOB.Inputs.end());

          OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs));
        }

        Instruction *NewI = nullptr;
        if (isa<CallInst>(I))
          NewI = CallInst::Create(cast<CallInst>(I), OpDefs, I);
        else
          NewI = InvokeInst::Create(cast<InvokeInst>(I), OpDefs, I);

        // Note: the RAUW does the appropriate fixup in VMap, so we need to do
        // this even if the call returns void.
        I->replaceAllUsesWith(NewI);

        VH = nullptr;
        I->eraseFromParent();
      }
    }
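
    // For illustration only (schematic): if the parent call site had
    //   call void @f() [ "deopt"(i32 1, i32 2) ]
    // and the inlined body contained
    //   call void @g() [ "deopt"(i32 3) ]
    // then after the merge above the inner call becomes
    //   call void @g() [ "deopt"(i32 1, i32 2, i32 3) ]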

    // Update the callgraph if requested.
    if (IFI.CG)
      UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);

    // Update inlined instructions' line number information.
    fixupLineNumbers(Caller, FirstNewBlock, TheCall);

    // Clone existing noalias metadata if necessary.
    CloneAliasScopeMetadata(CS, VMap);

    // Add noalias metadata if necessary.
    AddAliasScopeMetadata(CS, VMap, DL, CalleeAAR);

    // FIXME: We could register any cloned assumptions instead of clearing the
    // whole function's cache.
    if (IFI.ACT)
      IFI.ACT->getAssumptionCache(*Caller).clear();
  }

  // If there are any alloca instructions in the block that used to be the
  // entry block for the callee, move them to the entry block of the caller.
  // First calculate which instruction they should be inserted before. We
  // insert the instructions at the end of the current alloca list.
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
         E = FirstNewBlock->end(); I != E; ) {
      AllocaInst *AI = dyn_cast<AllocaInst>(I++);
      if (!AI) continue;

      // If the alloca is now dead, remove it. This often occurs due to code
      // specialization.
      if (AI->use_empty()) {
        AI->eraseFromParent();
        continue;
      }

      if (!isa<Constant>(AI->getArraySize()))
        continue;

      // Keep track of the static allocas that we inline into the caller.
      IFI.StaticAllocas.push_back(AI);

      // Scan for the block of allocas that we can move over, and move them
      // all at once.
      while (isa<AllocaInst>(I) &&
             isa<Constant>(cast<AllocaInst>(I)->getArraySize())) {
        IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
        ++I;
      }

      // Transfer all of the allocas over in a block. Using splice means
      // that the instructions aren't removed from the symbol table, then
      // reinserted.
      Caller->getEntryBlock().getInstList().splice(
          InsertPoint, FirstNewBlock->getInstList(), AI->getIterator(), I);
    }
    // Move any dbg.declares describing the allocas into the entry basic block.
    DIBuilder DIB(*Caller->getParent());
    for (auto &AI : IFI.StaticAllocas)
      replaceDbgDeclareForAlloca(AI, AI, DIB, /*Deref=*/false);
  }
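
  // For illustration only (schematic; the name is invented): a constant-sized
  // alloca cloned from the callee's entry block, such as
  //
  //   %buf.i = alloca [64 x i8]
  //
  // is spliced into the caller's entry block above, so later passes continue
  // to treat it as a static stack slot rather than a dynamic allocation.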

  bool InlinedMustTailCalls = false;
  if (InlinedFunctionInfo.ContainsCalls) {
    CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
    if (CallInst *CI = dyn_cast<CallInst>(TheCall))
      CallSiteTailKind = CI->getTailCallKind();

    for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
         ++BB) {
      for (Instruction &I : *BB) {
        CallInst *CI = dyn_cast<CallInst>(&I);
        if (!CI)
          continue;

        // We need to reduce the strength of any inlined tail calls. For
        // musttail, we have to avoid introducing potential unbounded stack
        // growth. For example, if functions 'f' and 'g' are mutually recursive
        // with musttail, we can inline 'g' into 'f' so long as we preserve
        // musttail on the cloned call to 'f'. If either the inlined call site
        // or the cloned call site is *not* musttail, the program already has
        // one frame of stack growth, so it's safe to remove musttail. Here is
        // a table of example transformations:
        //
        //    f -> musttail g -> musttail f  ==>  f -> musttail f
        //    f -> musttail g ->     tail f  ==>  f ->     tail f
        //    f ->          g -> musttail f  ==>  f ->          f
        //    f ->          g ->     tail f  ==>  f ->          f
        CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
        ChildTCK = std::min(CallSiteTailKind, ChildTCK);
        CI->setTailCallKind(ChildTCK);
        InlinedMustTailCalls |= CI->isMustTailCall();

        // Calls inlined through a 'nounwind' call site should be marked
        // 'nounwind'.
        if (MarkNoUnwind)
          CI->setDoesNotThrow();
      }
    }
  }

  // Leave lifetime markers for the static allocas, scoping them to the
  // function we just inlined.
  if (InsertLifetime && !IFI.StaticAllocas.empty()) {
    IRBuilder<> builder(&FirstNewBlock->front());
    for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
      AllocaInst *AI = IFI.StaticAllocas[ai];

      // If the alloca is already scoped to something smaller than the whole
      // function then there's no need to add redundant, less accurate markers.
      if (hasLifetimeMarkers(AI))
        continue;

      // Try to determine the size of the allocation.
      ConstantInt *AllocaSize = nullptr;
      if (ConstantInt *AIArraySize =
              dyn_cast<ConstantInt>(AI->getArraySize())) {
        auto &DL = Caller->getParent()->getDataLayout();
        Type *AllocaType = AI->getAllocatedType();
        uint64_t AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
        uint64_t AllocaArraySize = AIArraySize->getLimitedValue();

        // Don't add markers for zero-sized allocas.
        if (AllocaArraySize == 0)
          continue;

        // Check that array size doesn't saturate uint64_t and doesn't
        // overflow when it's multiplied by type size.
        if (AllocaArraySize != ~0ULL &&
            UINT64_MAX / AllocaArraySize >= AllocaTypeSize) {
          AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
                                        AllocaArraySize * AllocaTypeSize);
        }
      }

      builder.CreateLifetimeStart(AI, AllocaSize);
      for (ReturnInst *RI : Returns) {
        // Don't insert llvm.lifetime.end calls between a musttail call and a
        // return. The return kills all local allocas.
        if (InlinedMustTailCalls &&
            RI->getParent()->getTerminatingMustTailCall())
          continue;
        IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
      }
    }
  }
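
  // For illustration only (schematic; the bitcast to i8* is elided): the
  // markers inserted above bracket the inlined body like
  //
  //   call void @llvm.lifetime.start(i64 64, i8* %buf.i8)
  //   ... inlined body ...
  //   call void @llvm.lifetime.end(i64 64, i8* %buf.i8)
  //
  // which lets later stack coloring reuse the slot outside that range.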

  // If the inlined code contained dynamic alloca instructions, wrap the inlined
  // code with llvm.stacksave/llvm.stackrestore intrinsics.
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    // Get the two intrinsics we care about.
    Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    Function *StackRestore =
        Intrinsic::getDeclaration(M, Intrinsic::stackrestore);

    // Insert the llvm.stacksave.
    CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
                             .CreateCall(StackSave, {}, "savedstack");

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
    for (ReturnInst *RI : Returns) {
      // Don't insert llvm.stackrestore calls between a musttail call and a
      // return. The return will restore the stack pointer.
      if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
        continue;
      IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
    }
  }
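
  // For illustration only (schematic):
  //
  //   %savedstack = call i8* @llvm.stacksave()
  //   ... inlined body with dynamic allocas ...
  //   call void @llvm.stackrestore(i8* %savedstack)
  //
  // so the callee's dynamic stack allocations are released on every exit path.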
  if (auto *II = dyn_cast<InvokeInst>(TheCall)) {
    BasicBlock *UnwindDest = II->getUnwindDest();
    Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI();
    if (isa<LandingPadInst>(FirstNonPHI)) {
      HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo);
    } else {
      HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo);
    }
  }

  // Handle any inlined musttail call sites. In order for a new call site to be
  // musttail, the source of the clone and the inlined call site must have been
  // musttail. Therefore it's safe to return without merging control into the
  // phi below.
  if (InlinedMustTailCalls) {
    // Check if we need to bitcast the result of any musttail calls.
    Type *NewRetTy = Caller->getReturnType();
    bool NeedBitCast = !TheCall->use_empty() && TheCall->getType() != NewRetTy;

    // Handle the returns preceded by musttail calls separately.
    SmallVector<ReturnInst *, 8> NormalReturns;
    for (ReturnInst *RI : Returns) {
      CallInst *ReturnedMustTail =
          RI->getParent()->getTerminatingMustTailCall();
      if (!ReturnedMustTail) {
        NormalReturns.push_back(RI);
        continue;
      }
      if (!NeedBitCast)
        continue;

      // Delete the old return and any preceding bitcast.
      BasicBlock *CurBB = RI->getParent();
      auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
      RI->eraseFromParent();
      if (OldCast)
        OldCast->eraseFromParent();

      // Insert a new bitcast and return with the right type.
      IRBuilder<> Builder(CurBB);
      Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
    }

    // Leave behind the normal returns so we can merge control flow.
    std::swap(Returns, NormalReturns);
  }

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->getInstList().splice(TheCall->getIterator(),
                                 FirstNewBlock->getInstList(),
                                 FirstNewBlock->begin(), FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->getBasicBlockList().pop_back();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
      BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);
      NewBr->setDebugLoc(Returns[0]->getDebugLoc());
    }

    // If the return instruction returned a value, replace uses of the call
    // with uses of the returned value.
    if (!TheCall->use_empty()) {
      ReturnInst *R = Returns[0];
      if (TheCall == R->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(R->getReturnValue());
    }
    // Since we are now done with the Call/Invoke, we can delete it.
    TheCall->eraseFromParent();

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->eraseFromParent();

    // We are now done with the inlining.
    return true;
  }

  // Otherwise, we have the normal case of more than one block to inline or
  // multiple return sites.
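
  // In this case the final CFG looks roughly like this (illustrative block
  // names only):
  //
  //   OrigBB:       <code before the call>
  //                 br label %FirstNewBlock   ; replaces the call/invoke
  //   <cloned callee blocks; each 'ret' becomes 'br label %AfterCallBB'>
  //   AfterCallBB:  %r = phi ...              ; merges return values, if used
  //                 <code after the call>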

  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks. How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
  BasicBlock *AfterCallBB;
  BranchInst *CreatedBranchToNormalDest = nullptr;
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
    // Add an unconditional branch to make this look like the CallInst case...
    CreatedBranchToNormalDest =
        BranchInst::Create(II->getNormalDest(), TheCall);

    // Split the basic block. This guarantees that no PHI nodes will have to be
    // updated due to new incoming edges, and makes the invoke case more
    // symmetric to the call case.
    AfterCallBB =
        OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(),
                                CalledFunc->getName() + ".exit");
  } else { // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    AfterCallBB = OrigBB->splitBasicBlock(TheCall->getIterator(),
                                          CalledFunc->getName() + ".exit");
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  TerminatorInst *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
  Br->setOperand(0, &*FirstNewBlock);

  // Now that the function is correct, make it a little bit nicer. In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->getBasicBlockList().splice(AfterCallBB->getIterator(),
                                     Caller->getBasicBlockList(), FirstNewBlock,
                                     Caller->end());

  // Handle all of the return instructions that we just cloned in, and
  // eliminate any users of the original call/invoke instruction.
  Type *RTy = CalledFunc->getReturnType();

  PHINode *PHI = nullptr;
  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    if (!TheCall->use_empty()) {
      PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(),
                            &AfterCallBB->front());
      // Anything that used the result of the function call should now use the
      // PHI node as its operand.
      TheCall->replaceAllUsesWith(PHI);
    }

    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
    if (PHI) {
      for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
        ReturnInst *RI = Returns[i];
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }
    }

    // Add a branch to the merge point and remove return instructions.
    DebugLoc Loc;
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      BranchInst *BI = BranchInst::Create(AfterCallBB, RI);
      Loc = RI->getDebugLoc();
      BI->setDebugLoc(Loc);
      RI->eraseFromParent();
    }
    // We need to set the debug location to *somewhere* inside the
    // inlined function. The line number may be nonsensical, but the
    // instruction will at least be associated with the right
    // function.
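    // (Loc still holds the debug location of the last return rewritten in the
    // loop above; any of the cloned returns' locations would do.)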
    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Loc);
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!TheCall->use_empty()) {
      if (TheCall == Returns[0]->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
    }

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());

    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());

    // Delete the return instruction and the now-empty ReturnBB.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!TheCall->use_empty()) {
    // No returns, but something is using the return value of the call. Just
    // nuke the result.
    TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  TheCall->eraseFromParent();

  // If we inlined any musttail calls and the original return is now
  // unreachable, delete it. It can only contain a bitcast and ret.
  if (InlinedMustTailCalls && pred_begin(AfterCallBB) == pred_end(AfterCallBB))
    AfterCallBB->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the callee's entry block into the calling block, right before the
  // unconditional branch.
  CalleeEntry->replaceAllUsesWith(OrigBB); // Update PHI nodes
  OrigBB->getInstList().splice(Br->getIterator(), CalleeEntry->getInstList());

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);

  // If we inserted a phi node, check to see if it has a single value (e.g. all
  // the entries are the same or undef). If so, remove the PHI so it doesn't
  // block other optimizations.
  if (PHI) {
    auto &DL = Caller->getParent()->getDataLayout();
    if (Value *V = SimplifyInstruction(PHI, DL, nullptr, nullptr,
                                       &IFI.ACT->getAssumptionCache(*Caller))) {
      PHI->replaceAllUsesWith(V);
      PHI->eraseFromParent();
    }
  }

  return true;
}
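
// A minimal usage sketch (hypothetical caller code, not part of this file):
// a pass that has decided to inline a specific call site CS would do roughly
//
//   InlineFunctionInfo IFI(/*cg=*/nullptr,
//                          &getAnalysis<AssumptionCacheTracker>());
//   bool Changed = InlineFunction(CS, IFI);
//
// On success, the call site has been erased and the caller's CFG rewritten as
// described above.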