//===- InlineFunction.cpp - Code to perform function inlining -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>

using namespace llvm;

static cl::opt<bool>
EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
  cl::Hidden,
  cl::desc("Convert noalias attributes to metadata during inlining."));

static cl::opt<bool>
PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining",
  cl::init(true), cl::Hidden,
  cl::desc("Convert align attributes to assumptions during inlining."));

bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI,
                          AAResults *CalleeAAR, bool InsertLifetime) {
  return InlineFunction(CallSite(CI), IFI, CalleeAAR, InsertLifetime);
}
bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI,
                          AAResults *CalleeAAR, bool InsertLifetime) {
  return InlineFunction(CallSite(II), IFI, CalleeAAR, InsertLifetime);
}

namespace {
  /// A class for recording information about inlining a landing pad.
  class LandingPadInliningInfo {
    BasicBlock *OuterResumeDest; ///< Destination of the invoke's unwind.
    BasicBlock *InnerResumeDest; ///< Destination for the callee's resume.
    LandingPadInst *CallerLPad;  ///< LandingPadInst associated with the invoke.
    PHINode *InnerEHValuesPHI;   ///< PHI for EH values from landingpad insts.
    SmallVector<Value*, 8> UnwindDestPHIValues;

  public:
    LandingPadInliningInfo(InvokeInst *II)
      : OuterResumeDest(II->getUnwindDest()), InnerResumeDest(nullptr),
        CallerLPad(nullptr), InnerEHValuesPHI(nullptr) {
      // If there are PHI nodes in the unwind destination block, we need to keep
      // track of which values came into them from the invoke before removing
      // the edge from this block.
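      //
      // Illustrative (hypothetical) IR for such an unwind destination:
      //   lpad:
      //     %val = phi i32 [ 0, %invoke.bb ], [ 1, %other.bb ]
      //     %lp = landingpad { i8*, i32 } cleanup
      // Here we would record the incoming value of %val for %invoke.bb so it
      // can be re-added along the new unwind edges created during inlining.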
      llvm::BasicBlock *InvokeBB = II->getParent();
      BasicBlock::iterator I = OuterResumeDest->begin();
      for (; isa<PHINode>(I); ++I) {
        // Save the value to use for this edge.
        PHINode *PHI = cast<PHINode>(I);
        UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
      }

      CallerLPad = cast<LandingPadInst>(I);
    }

    /// The outer unwind destination is the target of
    /// unwind edges introduced for calls within the inlined function.
    BasicBlock *getOuterResumeDest() const {
      return OuterResumeDest;
    }

    BasicBlock *getInnerResumeDest();

    LandingPadInst *getLandingPadInst() const { return CallerLPad; }

    /// Forward the 'resume' instruction to the caller's landing pad block.
    /// When the landing pad block has only one predecessor, this is
    /// a simple branch. When there is more than one predecessor, we need to
    /// split the landing pad block after the landingpad instruction and jump
    /// to there.
    void forwardResume(ResumeInst *RI,
                       SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);

    /// Add incoming-PHI values to the unwind destination block for the given
    /// basic block, using the values for the original invoke's source block.
    void addIncomingPHIValuesFor(BasicBlock *BB) const {
      addIncomingPHIValuesForInto(BB, OuterResumeDest);
    }

    void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
      BasicBlock::iterator I = dest->begin();
      for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
        PHINode *phi = cast<PHINode>(I);
        phi->addIncoming(UnwindDestPHIValues[i], src);
      }
    }
  };
} // anonymous namespace

/// Get or create a target for the branch from ResumeInsts.
BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
  if (InnerResumeDest) return InnerResumeDest;

  // Split the landing pad.
  BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator();
  InnerResumeDest =
    OuterResumeDest->splitBasicBlock(SplitPoint,
                                     OuterResumeDest->getName() + ".body");

  // The number of incoming edges we expect to the inner landing pad.
  const unsigned PHICapacity = 2;

  // Create corresponding new PHIs for all the PHIs in the outer landing pad.
  Instruction *InsertPoint = &InnerResumeDest->front();
  BasicBlock::iterator I = OuterResumeDest->begin();
  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
    PHINode *OuterPHI = cast<PHINode>(I);
    PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
                                        OuterPHI->getName() + ".lpad-body",
                                        InsertPoint);
    OuterPHI->replaceAllUsesWith(InnerPHI);
    InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
  }

  // Create a PHI for the exception values.
  InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
                                     "eh.lpad-body", InsertPoint);
  CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
  InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);

  // All done.
  return InnerResumeDest;
}

/// Forward the 'resume' instruction to the caller's landing pad block.
/// When the landing pad block has only one predecessor, this is a simple
/// branch. When there is more than one predecessor, we need to split the
/// landing pad block after the landingpad instruction and jump to there.
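///
/// As an illustration (hypothetical IR), a "resume { i8*, i32 } %exn" in the
/// inlined code becomes a branch to the (possibly split) landing-pad body in
/// the caller, with %exn fed into the PHI created for the exception values.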
void LandingPadInliningInfo::forwardResume(
    ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) {
  BasicBlock *Dest = getInnerResumeDest();
  BasicBlock *Src = RI->getParent();

  BranchInst::Create(Dest, Src);

  // Update the PHIs in the destination. They were inserted in an order which
  // makes this work.
  addIncomingPHIValuesForInto(Src, Dest);

  InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
  RI->eraseFromParent();
}

/// Helper for getUnwindDestToken/getUnwindDestTokenHelper.
static Value *getParentPad(Value *EHPad) {
  if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
    return FPI->getParentPad();
  return cast<CatchSwitchInst>(EHPad)->getParentPad();
}

typedef DenseMap<Instruction *, Value *> UnwindDestMemoTy;

/// Helper for getUnwindDestToken that does the descendant-ward part of
/// the search.
static Value *getUnwindDestTokenHelper(Instruction *EHPad,
                                       UnwindDestMemoTy &MemoMap) {
  SmallVector<Instruction *, 8> Worklist(1, EHPad);

  while (!Worklist.empty()) {
    Instruction *CurrentPad = Worklist.pop_back_val();
    // We only put pads on the worklist that aren't in the MemoMap. When
    // we find an unwind dest for a pad we may update its ancestors, but
    // the queue only ever contains uncles/great-uncles/etc. of CurrentPad,
    // so they should never get updated while queued on the worklist.
    assert(!MemoMap.count(CurrentPad));
    Value *UnwindDestToken = nullptr;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
      if (CatchSwitch->hasUnwindDest()) {
        UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI();
      } else {
        // Catchswitch doesn't have a 'nounwind' variant, and one might be
        // annotated as "unwinds to caller" when really it's nounwind (see
        // e.g. SimplifyCFGOpt::SimplifyUnreachable), so we can't infer the
        // parent's unwind dest from this. We can check its catchpads'
        // descendants, since they might include a cleanuppad with an
        // "unwinds to caller" cleanupret, which can be trusted.
        for (auto HI = CatchSwitch->handler_begin(),
                  HE = CatchSwitch->handler_end();
             HI != HE && !UnwindDestToken; ++HI) {
          BasicBlock *HandlerBlock = *HI;
          auto *CatchPad = cast<CatchPadInst>(HandlerBlock->getFirstNonPHI());
          for (User *Child : CatchPad->users()) {
            // Intentionally ignore invokes here -- since the catchswitch is
            // marked "unwind to caller", it would be a verifier error if it
            // contained an invoke which unwinds out of it, so any invoke we'd
            // encounter must unwind to some child of the catch.
            if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
              continue;

            Instruction *ChildPad = cast<Instruction>(Child);
            auto Memo = MemoMap.find(ChildPad);
            if (Memo == MemoMap.end()) {
              // Haven't figured out this child pad yet; queue it.
              Worklist.push_back(ChildPad);
              continue;
            }
            // We've already checked this child, but might have found that
            // it offers no proof either way.
            Value *ChildUnwindDestToken = Memo->second;
            if (!ChildUnwindDestToken)
              continue;
            // We already know the child's unwind dest, which can either
            // be ConstantTokenNone to indicate unwind to caller, or can
            // be another child of the catchpad. Only the former indicates
            // the unwind dest of the catchswitch.
            if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
              UnwindDestToken = ChildUnwindDestToken;
              break;
            }
            assert(getParentPad(ChildUnwindDestToken) == CatchPad);
          }
        }
      }
    } else {
      auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
      for (User *U : CleanupPad->users()) {
        if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
          if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
            UnwindDestToken = RetUnwindDest->getFirstNonPHI();
          else
            UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext());
          break;
        }
        Value *ChildUnwindDestToken;
        if (auto *Invoke = dyn_cast<InvokeInst>(U)) {
          ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI();
        } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
          Instruction *ChildPad = cast<Instruction>(U);
          auto Memo = MemoMap.find(ChildPad);
          if (Memo == MemoMap.end()) {
            // Haven't resolved this child yet; queue it and keep searching.
            Worklist.push_back(ChildPad);
            continue;
          }
          // We've checked this child, but still need to ignore it if it
          // had no proof either way.
          ChildUnwindDestToken = Memo->second;
          if (!ChildUnwindDestToken)
            continue;
        } else {
          // Not a relevant user of the cleanuppad
          continue;
        }
        // In a well-formed program, the child/invoke must either unwind to
        // an(other) child of the cleanup, or exit the cleanup. In the
        // first case, continue searching.
        if (isa<Instruction>(ChildUnwindDestToken) &&
            getParentPad(ChildUnwindDestToken) == CleanupPad)
          continue;
        UnwindDestToken = ChildUnwindDestToken;
        break;
      }
    }
    // If we haven't found an unwind dest for CurrentPad, we may have queued its
    // children, so move on to the next in the worklist.
    if (!UnwindDestToken)
      continue;

    // Now we know that CurrentPad unwinds to UnwindDestToken. It also exits
    // any ancestors of CurrentPad up to but not including UnwindDestToken's
    // parent pad. Record this in the memo map, and check to see if the
    // original EHPad being queried is one of the ones exited.
    Value *UnwindParent;
    if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
      UnwindParent = getParentPad(UnwindPad);
    else
      UnwindParent = nullptr;
    bool ExitedOriginalPad = false;
    for (Instruction *ExitedPad = CurrentPad;
         ExitedPad && ExitedPad != UnwindParent;
         ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) {
      // Skip over catchpads since they just follow their catchswitches.
      if (isa<CatchPadInst>(ExitedPad))
        continue;
      MemoMap[ExitedPad] = UnwindDestToken;
      ExitedOriginalPad |= (ExitedPad == EHPad);
    }

    if (ExitedOriginalPad)
      return UnwindDestToken;

    // Continue the search.
  }

  // No definitive information is contained within this funclet.
  return nullptr;
}

/// Given an EH pad, find where it unwinds. If it unwinds to an EH pad,
/// return that pad instruction. If it unwinds to caller, return
/// ConstantTokenNone. If it does not have a definitive unwind destination,
/// return nullptr.
///
/// This routine gets invoked for calls in funclets in inlinees when inlining
/// an invoke. Since many funclets don't have calls inside them, it's queried
/// on-demand rather than building a map of pads to unwind dests up front.
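/// (For illustration: a cleanuppad whose cleanupret unwinds to caller resolves
/// immediately to ConstantTokenNone, while a pad whose only users are nested
/// pads forces the search to descend into those descendants first.)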
/// Determining a funclet's unwind dest may require recursively searching its
/// descendants, and also ancestors and cousins if the descendants don't provide
/// an answer. Since most funclets will have their unwind dest immediately
/// available as the unwind dest of a catchswitch or cleanupret, this routine
/// searches top-down from the given pad and then up. To avoid worst-case
/// quadratic run-time given that approach, it uses a memo map to avoid
/// re-processing funclet trees. The callers that rewrite the IR as they go
/// take advantage of this, for correctness, by checking/forcing rewritten
/// pads' entries to match the original callee view.
static Value *getUnwindDestToken(Instruction *EHPad,
                                 UnwindDestMemoTy &MemoMap) {
  // Catchpads unwind to the same place as their catchswitch;
  // redirect any queries on catchpads so the code below can
  // deal with just catchswitches and cleanuppads.
  if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))
    EHPad = CPI->getCatchSwitch();

  // Check if we've already determined the unwind dest for this pad.
  auto Memo = MemoMap.find(EHPad);
  if (Memo != MemoMap.end())
    return Memo->second;

  // Search EHPad and, if necessary, its descendants.
  Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
  assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));
  if (UnwindDestToken)
    return UnwindDestToken;

  // No information is available for this EHPad from itself or any of its
  // descendants. An unwind all the way out to a pad in the caller would
  // need also to agree with the unwind dest of the parent funclet, so
  // search up the chain to try to find a funclet with information. Put
  // null entries in the memo map to avoid re-processing as we go up.
  MemoMap[EHPad] = nullptr;
#ifndef NDEBUG
  SmallPtrSet<Instruction *, 4> TempMemos;
  TempMemos.insert(EHPad);
#endif
  Instruction *LastUselessPad = EHPad;
  Value *AncestorToken;
  for (AncestorToken = getParentPad(EHPad);
       auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
       AncestorToken = getParentPad(AncestorToken)) {
    // Skip over catchpads since they just follow their catchswitches.
    if (isa<CatchPadInst>(AncestorPad))
      continue;
    // If the MemoMap had an entry mapping AncestorPad to nullptr, since we
    // haven't yet called getUnwindDestTokenHelper for AncestorPad in this
    // call to getUnwindDestToken, that would mean that AncestorPad had no
    // information in itself, its descendants, or its ancestors. If that
    // were the case, then we should also have recorded the lack of information
    // for the descendant that we're coming from. So assert that we don't
    // find a null entry in the MemoMap for AncestorPad.
    assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);
    auto AncestorMemo = MemoMap.find(AncestorPad);
    if (AncestorMemo == MemoMap.end()) {
      UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
    } else {
      UnwindDestToken = AncestorMemo->second;
    }
    if (UnwindDestToken)
      break;
    LastUselessPad = AncestorPad;
    MemoMap[LastUselessPad] = nullptr;
#ifndef NDEBUG
    TempMemos.insert(LastUselessPad);
#endif
  }

  // We know that getUnwindDestTokenHelper was called on LastUselessPad and
  // returned nullptr (and likewise for EHPad and any of its ancestors up to
  // LastUselessPad), so LastUselessPad has no information from below. Since
  // getUnwindDestTokenHelper must investigate all downward paths through
  // no-information nodes to prove that a node has no information like this,
  // and since any time it finds information it records it in the MemoMap for
  // not just the immediately-containing funclet but also any ancestors also
  // exited, it must be the case that, walking downward from LastUselessPad,
  // visiting just those nodes which have not been mapped to an unwind dest
  // by getUnwindDestTokenHelper (the nullptr TempMemos notwithstanding, since
  // they are just used to keep getUnwindDestTokenHelper from repeating work),
  // any node visited must have been exhaustively searched with no information
  // for it found.
  SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
  while (!Worklist.empty()) {
    Instruction *UselessPad = Worklist.pop_back_val();
    auto Memo = MemoMap.find(UselessPad);
    if (Memo != MemoMap.end() && Memo->second) {
      // Here the name 'UselessPad' is a bit of a misnomer, because we've found
      // that it is a funclet that does have information about unwinding to
      // a particular destination; its parent was a useless pad.
      // Since its parent has no information, the unwind edge must not escape
      // the parent, and must target a sibling of this pad. This local unwind
      // gives us no information about EHPad. Leave it and the subtree rooted
      // at it alone.
      assert(getParentPad(Memo->second) == getParentPad(UselessPad));
      continue;
    }
    // We know we don't have information for UselessPad. If it has an entry in
    // the MemoMap (mapping it to nullptr), it must be one of the TempMemos
    // added on this invocation of getUnwindDestToken; if a previous invocation
    // recorded nullptr, it would have had to prove that the ancestors of
    // UselessPad, which include LastUselessPad, had no information, and that
    // in turn would have required proving that the descendants of
    // LastUselessPad, which include EHPad, have no information about
    // LastUselessPad, which would imply that EHPad was mapped to nullptr in
    // the MemoMap on that invocation, which isn't the case if we got here.
    assert(!MemoMap.count(UselessPad) || TempMemos.count(UselessPad));
    // Assert as we enumerate users that 'UselessPad' doesn't have any unwind
    // information that we'd be contradicting by making a map entry for it
    // (which is something that getUnwindDestTokenHelper must have proved for
    // us to get here). Just assert on its direct users here; the checks in
    // this downward walk at its descendants will verify that they don't have
    // any unwind edges that exit 'UselessPad' either (i.e. they either have no
    // unwind edges or unwind to a sibling).
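    // Record the answer found (if any) for UselessPad so that later queries
    // through getUnwindDestToken short-circuit instead of re-walking this
    // subtree.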
    MemoMap[UselessPad] = UnwindDestToken;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
      assert(CatchSwitch->getUnwindDest() == nullptr && "Expected useless pad");
      for (BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
        auto *CatchPad = HandlerBlock->getFirstNonPHI();
        for (User *U : CatchPad->users()) {
          assert(
              (!isa<InvokeInst>(U) ||
               (getParentPad(
                    cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
                CatchPad)) &&
              "Expected useless pad");
          if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
            Worklist.push_back(cast<Instruction>(U));
        }
      }
    } else {
      assert(isa<CleanupPadInst>(UselessPad));
      for (User *U : UselessPad->users()) {
        assert(!isa<CleanupReturnInst>(U) && "Expected useless pad");
        assert((!isa<InvokeInst>(U) ||
                (getParentPad(
                     cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
                 UselessPad)) &&
               "Expected useless pad");
        if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
          Worklist.push_back(cast<Instruction>(U));
      }
    }
  }

  return UnwindDestToken;
}

/// When we inline a basic block into an invoke,
/// we have to turn all of the calls that can throw into invokes.
/// This function analyzes BB to see if there are any calls, and if so,
/// it rewrites them to be invokes that jump to InvokeDest and fills in the PHI
/// nodes in that block with the values specified in InvokeDestPHIValues.
static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
    BasicBlock *BB, BasicBlock *UnwindEdge,
    UnwindDestMemoTy *FuncletUnwindMap = nullptr) {
  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = &*BBI++;

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);

    if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
      continue;

    // We do not need to (and in fact, cannot) convert possibly throwing calls
    // to @llvm.experimental_deoptimize (resp. @llvm.experimental.guard) into
    // invokes. The caller's "segment" of the deoptimization continuation
    // attached to the newly inlined @llvm.experimental_deoptimize
    // (resp. @llvm.experimental.guard) call should contain the exception
    // handling logic, if any.
    if (auto *F = CI->getCalledFunction())
      if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
          F->getIntrinsicID() == Intrinsic::experimental_guard)
        continue;

    if (auto FuncletBundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
      // This call is nested inside a funclet. If that funclet has an unwind
      // destination within the inlinee, then unwinding out of this call would
      // be UB. Rewriting this call to an invoke which targets the inlined
      // invoke's unwind dest would give the call's parent funclet multiple
      // unwind destinations, which is something that subsequent EH table
      // generation can't handle and that the verifier rejects. So when we
      // see such a call, leave it as a call.
      auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
      Value *UnwindDestToken =
          getUnwindDestToken(FuncletPad, *FuncletUnwindMap);
      if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
        continue;
#ifndef NDEBUG
      Instruction *MemoKey;
      if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
        MemoKey = CatchPad->getCatchSwitch();
      else
        MemoKey = FuncletPad;
      assert(FuncletUnwindMap->count(MemoKey) &&
             (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
             "must get memoized to avoid confusing later searches");
#endif // NDEBUG
    }

    changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);
    return BB;
  }
  return nullptr;
}

/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                                    ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.
  LandingPadInliningInfo Invoke(II);

  // Get all of the inlined landing pad instructions.
  SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
  for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end();
       I != E; ++I)
    if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
      InlinedLPads.insert(II->getLandingPadInst());

  // Append the clauses from the outer landing pad instruction into the inlined
  // landing pad instructions.
  LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
  for (LandingPadInst *InlinedLPad : InlinedLPads) {
    unsigned OuterNum = OuterLPad->getNumClauses();
    InlinedLPad->reserveClauses(OuterNum);
    for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
      InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
    if (OuterLPad->isCleanup())
      InlinedLPad->setCleanup(true);
  }

  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    if (InlinedCodeInfo.ContainsCalls)
      if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
              &*BB, Invoke.getOuterResumeDest()))
        // Update any PHI nodes in the exceptional block to indicate that there
        // is now a new entry in them.
        Invoke.addIncomingPHIValuesFor(NewBB);

    // Forward any resumes that are remaining here.
    if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
      Invoke.forwardResume(RI, InlinedLPads);
  }

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}

/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                               ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *UnwindDest = II->getUnwindDest();
  Function *Caller = FirstNewBlock->getParent();

  assert(UnwindDest->getFirstNonPHI()->isEHPad() && "unexpected BasicBlock!");

  // If there are PHI nodes in the unwind destination block, we need to keep
  // track of which values came into them from the invoke before removing the
  // edge from this block.
  SmallVector<Value *, 8> UnwindDestPHIValues;
  llvm::BasicBlock *InvokeBB = II->getParent();
  for (Instruction &I : *UnwindDest) {
    // Save the value to use for this edge.
    PHINode *PHI = dyn_cast<PHINode>(&I);
    if (!PHI)
      break;
    UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
  }

  // Add incoming-PHI values to the unwind destination block for the given basic
  // block, using the values for the original invoke's source block.
  auto UpdatePHINodes = [&](BasicBlock *Src) {
    BasicBlock::iterator I = UnwindDest->begin();
    for (Value *V : UnwindDestPHIValues) {
      PHINode *PHI = cast<PHINode>(I);
      PHI->addIncoming(V, Src);
      ++I;
    }
  };

  // This connects all the instructions which 'unwind to caller' to the invoke
  // destination.
  UnwindDestMemoTy FuncletUnwindMap;
  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
      if (CRI->unwindsToCaller()) {
        auto *CleanupPad = CRI->getCleanupPad();
        CleanupReturnInst::Create(CleanupPad, UnwindDest, CRI);
        CRI->eraseFromParent();
        UpdatePHINodes(&*BB);
        // Finding a cleanupret with an unwind destination would confuse
        // subsequent calls to getUnwindDestToken, so map the cleanuppad
        // to short-circuit any such calls and recognize this as an "unwind
        // to caller" cleanup.
        assert(!FuncletUnwindMap.count(CleanupPad) ||
               isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
        FuncletUnwindMap[CleanupPad] =
            ConstantTokenNone::get(Caller->getContext());
      }
    }

    Instruction *I = BB->getFirstNonPHI();
    if (!I->isEHPad())
      continue;

    Instruction *Replacement = nullptr;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
      if (CatchSwitch->unwindsToCaller()) {
        Value *UnwindDestToken;
        if (auto *ParentPad =
                dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
          // This catchswitch is nested inside another funclet. If that
          // funclet has an unwind destination within the inlinee, then
          // unwinding out of this catchswitch would be UB. Rewriting this
          // catchswitch to unwind to the inlined invoke's unwind dest would
          // give the parent funclet multiple unwind destinations, which is
          // something that subsequent EH table generation can't handle and
          // that the verifier rejects. So when we see such a call, leave it
          // as "unwind to caller".
          UnwindDestToken = getUnwindDestToken(ParentPad, FuncletUnwindMap);
          if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
            continue;
        } else {
          // This catchswitch has no parent to inherit constraints from, and
          // none of its descendants can have an unwind edge that exits it and
          // targets another funclet in the inlinee. It may or may not have a
          // descendant that definitively has an unwind to caller. In either
          // case, we'll have to assume that any unwinds out of it may need to
          // be routed to the caller, so treat it as though it has a definitive
          // unwind to caller.
          UnwindDestToken = ConstantTokenNone::get(Caller->getContext());
        }
        auto *NewCatchSwitch = CatchSwitchInst::Create(
            CatchSwitch->getParentPad(), UnwindDest,
            CatchSwitch->getNumHandlers(), CatchSwitch->getName(),
            CatchSwitch);
        for (BasicBlock *PadBB : CatchSwitch->handlers())
          NewCatchSwitch->addHandler(PadBB);
        // Propagate info for the old catchswitch over to the new one in
        // the unwind map. This also serves to short-circuit any subsequent
        // checks for the unwind dest of this catchswitch, which would get
        // confused if they found the outer handler in the callee.
        FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
        Replacement = NewCatchSwitch;
      }
    } else if (!isa<FuncletPadInst>(I)) {
      llvm_unreachable("unexpected EHPad!");
    }

    if (Replacement) {
      Replacement->takeName(I);
      I->replaceAllUsesWith(Replacement);
      I->eraseFromParent();
      UpdatePHINodes(&*BB);
    }
  }

  if (InlinedCodeInfo.ContainsCalls)
    for (Function::iterator BB = FirstNewBlock->getIterator(),
                            E = Caller->end();
         BB != E; ++BB)
      if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
              &*BB, UnwindDest, &FuncletUnwindMap))
        // Update any PHI nodes in the exceptional block to indicate that there
        // is now a new entry in them.
        UpdatePHINodes(NewBB);

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  UnwindDest->removePredecessor(InvokeBB);
}

/// When inlining a call site that has !llvm.mem.parallel_loop_access metadata,
/// that metadata should be propagated to all memory-accessing cloned
/// instructions.
static void PropagateParallelLoopAccessMetadata(CallSite CS,
                                                ValueToValueMapTy &VMap) {
  MDNode *M =
      CS.getInstruction()->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
  if (!M)
    return;

  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (!VMI->second)
      continue;

    Instruction *NI = dyn_cast<Instruction>(VMI->second);
    if (!NI)
      continue;

    if (MDNode *PM = NI->getMetadata(LLVMContext::MD_mem_parallel_loop_access)) {
      M = MDNode::concatenate(PM, M);
      NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
    } else if (NI->mayReadOrWriteMemory()) {
      NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
    }
  }
}

/// When inlining a function that contains noalias scope metadata,
/// this metadata needs to be cloned so that the inlined blocks
/// have different "unique scopes" at every call site. Were this not done, then
/// aliasing scopes from a function inlined into a caller multiple times could
/// not be differentiated (and this would lead to miscompiles because the
/// non-aliasing property communicated by the metadata could have
/// call-site-specific control dependencies).
static void CloneAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap) {
  const Function *CalledFunc = CS.getCalledFunction();
  SetVector<const MDNode *> MD;

  // Note: We could only clone the metadata if it is already used in the
  // caller. I'm omitting that check here because it might confuse
  // inter-procedural alias analysis passes. We can revisit this if it becomes
  // an efficiency or overhead problem.

  for (const BasicBlock &I : *CalledFunc)
    for (const Instruction &J : I) {
      if (const MDNode *M = J.getMetadata(LLVMContext::MD_alias_scope))
        MD.insert(M);
      if (const MDNode *M = J.getMetadata(LLVMContext::MD_noalias))
        MD.insert(M);
    }

  if (MD.empty())
    return;

  // Walk the existing metadata, adding the complete (perhaps cyclic) chain to
  // the set.
  SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
  while (!Queue.empty()) {
    const MDNode *M = cast<MDNode>(Queue.pop_back_val());
    for (unsigned i = 0, ie = M->getNumOperands(); i != ie; ++i)
      if (const MDNode *M1 = dyn_cast<MDNode>(M->getOperand(i)))
        if (MD.insert(M1))
          Queue.push_back(M1);
  }

  // Now we have a complete set of all metadata in the chains used to specify
  // the noalias scopes and the lists of those scopes.
  SmallVector<TempMDTuple, 16> DummyNodes;
  DenseMap<const MDNode *, TrackingMDNodeRef> MDMap;
  for (const MDNode *I : MD) {
    DummyNodes.push_back(MDTuple::getTemporary(CalledFunc->getContext(), None));
    MDMap[I].reset(DummyNodes.back().get());
  }

  // Create new metadata nodes to replace the dummy nodes, replacing old
  // metadata references with either a dummy node or an already-created new
  // node.
  for (const MDNode *I : MD) {
    SmallVector<Metadata *, 4> NewOps;
    for (unsigned i = 0, ie = I->getNumOperands(); i != ie; ++i) {
      const Metadata *V = I->getOperand(i);
      if (const MDNode *M = dyn_cast<MDNode>(V))
        NewOps.push_back(MDMap[M]);
      else
        NewOps.push_back(const_cast<Metadata *>(V));
    }

    MDNode *NewM = MDNode::get(CalledFunc->getContext(), NewOps);
    MDTuple *TempM = cast<MDTuple>(MDMap[I]);
    assert(TempM->isTemporary() && "Expected temporary node");

    TempM->replaceAllUsesWith(NewM);
  }

  // Now replace the metadata in the new inlined instructions with the
  // replacements from the map.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (!VMI->second)
      continue;

    Instruction *NI = dyn_cast<Instruction>(VMI->second);
    if (!NI)
      continue;

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_alias_scope)) {
      MDNode *NewMD = MDMap[M];
      // If the call site also had alias scope metadata (a list of scopes to
      // which instructions inside it might belong), propagate those scopes to
      // the inlined instructions.
      if (MDNode *CSM =
              CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
        NewMD = MDNode::concatenate(NewMD, CSM);
      NI->setMetadata(LLVMContext::MD_alias_scope, NewMD);
    } else if (NI->mayReadOrWriteMemory()) {
      if (MDNode *M =
              CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
        NI->setMetadata(LLVMContext::MD_alias_scope, M);
    }

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_noalias)) {
      MDNode *NewMD = MDMap[M];
      // If the call site also had noalias metadata (a list of scopes with
      // which instructions inside it don't alias), propagate those scopes to
      // the inlined instructions.
      if (MDNode *CSM =
              CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
        NewMD = MDNode::concatenate(NewMD, CSM);
      NI->setMetadata(LLVMContext::MD_noalias, NewMD);
    } else if (NI->mayReadOrWriteMemory()) {
      if (MDNode *M = CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
        NI->setMetadata(LLVMContext::MD_noalias, M);
    }
  }
}

/// If the inlined function has noalias arguments,
/// then add new alias scopes for each noalias argument, tag the mapped noalias
/// parameters with noalias metadata specifying the new scope, and tag all
/// non-derived loads, stores and memory intrinsics with the new alias scopes.
static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap,
                                  const DataLayout &DL, AAResults *CalleeAAR) {
  if (!EnableNoAliasConversion)
    return;

  const Function *CalledFunc = CS.getCalledFunction();
  SmallVector<const Argument *, 4> NoAliasArgs;

  for (const Argument &Arg : CalledFunc->args())
    if (Arg.hasNoAliasAttr() && !Arg.use_empty())
      NoAliasArgs.push_back(&Arg);

  if (NoAliasArgs.empty())
    return;

  // To do a good job, if a noalias variable is captured, we need to know if
  // the capture point dominates the particular use we're considering.
  DominatorTree DT;
  DT.recalculate(const_cast<Function&>(*CalledFunc));

  // noalias indicates that pointer values based on the argument do not alias
  // pointer values which are not based on it. So we add a new "scope" for each
  // noalias function argument. Accesses using pointers based on that argument
  // become part of that alias scope, accesses using pointers not based on that
  // argument are tagged as noalias with that scope.

  DenseMap<const Argument *, MDNode *> NewScopes;
  MDBuilder MDB(CalledFunc->getContext());

  // Create a new scope domain for this function.
  MDNode *NewDomain =
      MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
  for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
    const Argument *A = NoAliasArgs[i];

    std::string Name = CalledFunc->getName();
    if (A->hasName()) {
      Name += ": %";
      Name += A->getName();
    } else {
      Name += ": argument ";
      Name += utostr(i);
    }

    // Note: We always create a new anonymous root here. This is true regardless
    // of the linkage of the callee because the aliasing "scope" is not just a
    // property of the callee, but also all control dependencies in the caller.
    MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
    NewScopes.insert(std::make_pair(A, NewScope));
  }

  // Iterate over all new instructions in the map; for all memory-access
  // instructions, add the alias scope metadata.
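  //
  // For illustration (hypothetical IR), given a callee with two arguments
  // "i32* noalias %A" and "i32* noalias %B", the cloned accesses may end up
  // tagged roughly as:
  //   store i32 0, i32* %A.i, !alias.scope !2, !noalias !3
  //   store i32 1, i32* %B.i, !alias.scope !3, !noalias !2
  // where !2 and !3 are scope lists built from the scopes created above.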
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
      if (!VMI->second)
        continue;

      Instruction *NI = dyn_cast<Instruction>(VMI->second);
      if (!NI)
        continue;

      bool IsArgMemOnlyCall = false, IsFuncCall = false;
      SmallVector<const Value *, 2> PtrArgs;

      if (const LoadInst *LI = dyn_cast<LoadInst>(I))
        PtrArgs.push_back(LI->getPointerOperand());
      else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
        PtrArgs.push_back(SI->getPointerOperand());
      else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
        PtrArgs.push_back(VAAI->getPointerOperand());
      else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
        PtrArgs.push_back(CXI->getPointerOperand());
      else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
        PtrArgs.push_back(RMWI->getPointerOperand());
      else if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
        // If we know that the call does not access memory, then we'll still
        // know that about the inlined clone of this call site, and we don't
        // need to add metadata.
        if (ICS.doesNotAccessMemory())
          continue;

        IsFuncCall = true;
        if (CalleeAAR) {
          FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(ICS);
          if (MRB == FMRB_OnlyAccessesArgumentPointees ||
              MRB == FMRB_OnlyReadsArgumentPointees)
            IsArgMemOnlyCall = true;
        }

        for (Value *Arg : ICS.args()) {
          // We need to check the underlying objects of all arguments, not just
          // the pointer arguments, because we might be passing pointers as
          // integers, etc.
          // However, if we know that the call only accesses pointer arguments,
          // then we only need to check the pointer arguments.
          if (IsArgMemOnlyCall && !Arg->getType()->isPointerTy())
            continue;

          PtrArgs.push_back(Arg);
        }
      }

      // If we found no pointers, then this instruction is not suitable for
      // pairing with an instruction to receive aliasing metadata.
      // However, if this is a call, then we might just alias with none of the
      // noalias arguments.
      if (PtrArgs.empty() && !IsFuncCall)
        continue;

      // It is possible that there is only one underlying object, but you
      // need to go through several PHIs to see it, and thus could be
      // repeated in the Objects list.
      SmallPtrSet<const Value *, 4> ObjSet;
      SmallVector<Metadata *, 4> Scopes, NoAliases;

      SmallSetVector<const Argument *, 4> NAPtrArgs;
      for (const Value *V : PtrArgs) {
        SmallVector<Value *, 4> Objects;
        GetUnderlyingObjects(const_cast<Value*>(V),
                             Objects, DL, /* LI = */ nullptr);

        for (Value *O : Objects)
          ObjSet.insert(O);
      }

      // Figure out if we're derived from anything that is not a noalias
      // argument.
      bool CanDeriveViaCapture = false, UsesAliasingPtr = false;
      for (const Value *V : ObjSet) {
        // Is this value a constant that cannot be derived from any pointer
        // value (we need to exclude constant expressions, for example, that
        // are formed from arithmetic on global symbols).
        bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
                             isa<ConstantPointerNull>(V) ||
                             isa<ConstantDataVector>(V) || isa<UndefValue>(V);
        if (IsNonPtrConst)
          continue;

        // If this is anything other than a noalias argument, then we cannot
        // completely describe the aliasing properties using alias.scope
        // metadata (and, thus, won't add any).
        if (const Argument *A = dyn_cast<Argument>(V)) {
          if (!A->hasNoAliasAttr())
            UsesAliasingPtr = true;
        } else {
          UsesAliasingPtr = true;
        }

        // If this is not some identified function-local object (which cannot
        // directly alias a noalias argument), or some other argument (which,
        // by definition, also cannot alias a noalias argument), then we could
        // alias a noalias argument that has been captured.
        if (!isa<Argument>(V) &&
            !isIdentifiedFunctionLocal(const_cast<Value*>(V)))
          CanDeriveViaCapture = true;
      }

      // A function call can always get captured noalias pointers (via other
      // parameters, globals, etc.).
      if (IsFuncCall && !IsArgMemOnlyCall)
        CanDeriveViaCapture = true;

      // First, we want to figure out all of the sets with which we definitely
      // don't alias. Iterate over all noalias sets, and add those for which:
      //   1. The noalias argument is not in the set of objects from which we
      //      definitely derive.
      //   2. The noalias argument has not yet been captured.
      // An arbitrary function that might load pointers could see captured
      // noalias arguments via other noalias arguments or globals, and so we
      // must always check for prior capture.
      for (const Argument *A : NoAliasArgs) {
        if (!ObjSet.count(A) && (!CanDeriveViaCapture ||
                                 // It might be tempting to skip the
                                 // PointerMayBeCapturedBefore check if
                                 // A->hasNoCaptureAttr() is true, but this is
                                 // incorrect because nocapture only guarantees
                                 // that no copies outlive the function, not
                                 // that the value cannot be locally captured.
                                 !PointerMayBeCapturedBefore(A,
                                     /* ReturnCaptures */ false,
                                     /* StoreCaptures */ false, I, &DT)))
          NoAliases.push_back(NewScopes[A]);
      }

      if (!NoAliases.empty())
        NI->setMetadata(LLVMContext::MD_noalias,
                        MDNode::concatenate(
                            NI->getMetadata(LLVMContext::MD_noalias),
                            MDNode::get(CalledFunc->getContext(), NoAliases)));

      // Next, we want to figure out all of the sets to which we might belong.
      // We might belong to a set if the noalias argument is in the set of
      // underlying objects. If there is some non-noalias argument in our list
      // of underlying objects, then we cannot add a scope because the fact
      // that some access does not alias with any set of our noalias arguments
      // cannot itself guarantee that it does not alias with this access
      // (because there is some pointer of unknown origin involved and the
      // other access might also depend on this pointer). We also cannot add
      // scopes to arbitrary functions unless we know they don't access any
      // non-parameter pointer-values.
      bool CanAddScopes = !UsesAliasingPtr;
      if (CanAddScopes && IsFuncCall)
        CanAddScopes = IsArgMemOnlyCall;

      if (CanAddScopes)
        for (const Argument *A : NoAliasArgs) {
          if (ObjSet.count(A))
            Scopes.push_back(NewScopes[A]);
        }

      if (!Scopes.empty())
        NI->setMetadata(
            LLVMContext::MD_alias_scope,
            MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
                                MDNode::get(CalledFunc->getContext(), Scopes)));
    }
  }
}

/// If the inlined function has non-byval align arguments, then
/// add @llvm.assume-based alignment assumptions to preserve this information.
static void AddAlignmentAssumptions(CallSite CS, InlineFunctionInfo &IFI) {
  if (!PreserveAlignmentAssumptions || !IFI.GetAssumptionCache)
    return;

  AssumptionCache *AC = &(*IFI.GetAssumptionCache)(*CS.getCaller());
  auto &DL = CS.getCaller()->getParent()->getDataLayout();

  // To avoid inserting redundant assumptions, we should check for assumptions
  // already in the caller. To do this, we might need a DT of the caller.
  DominatorTree DT;
  bool DTCalculated = false;

  Function *CalledFunc = CS.getCalledFunction();
  for (Function::arg_iterator I = CalledFunc->arg_begin(),
                              E = CalledFunc->arg_end();
       I != E; ++I) {
    unsigned Align = I->getType()->isPointerTy() ? I->getParamAlignment() : 0;
    if (Align && !I->hasByValOrInAllocaAttr() && !I->hasNUses(0)) {
      if (!DTCalculated) {
        DT.recalculate(const_cast<Function&>(*CS.getInstruction()->getParent()
                                                 ->getParent()));
        DTCalculated = true;
      }

      // If we can already prove the asserted alignment in the context of the
      // caller, then don't bother inserting the assumption.
      Value *Arg = CS.getArgument(I->getArgNo());
      if (getKnownAlignment(Arg, DL, CS.getInstruction(), AC, &DT) >= Align)
        continue;

      CallInst *NewAssumption = IRBuilder<>(CS.getInstruction())
                                    .CreateAlignmentAssumption(DL, Arg, Align);
      AC->registerAssumption(NewAssumption);
    }
  }
}

/// Once we have cloned code over from a callee into the caller,
/// update the specified callgraph to reflect the changes we made.
/// Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
static void UpdateCallGraphAfterInlining(CallSite CS,
                                         Function::iterator FirstNewBlock,
                                         ValueToValueMapTy &VMap,
                                         InlineFunctionInfo &IFI) {
  CallGraph &CG = *IFI.CG;
  const Function *Caller = CS.getInstruction()->getParent()->getParent();
  const Function *Callee = CS.getCalledFunction();
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();

  // Consider the case where CalleeNode == CallerNode.
  CallGraphNode::CalledFunctionsVector CallCache;
  if (CalleeNode == CallerNode) {
    CallCache.assign(I, E);
    I = CallCache.begin();
    E = CallCache.end();
  }

  for (; I != E; ++I) {
    const Value *OrigCall = I->first;

    ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI == VMap.end() || VMI->second == nullptr)
      continue;

    // If the call was inlined, but then constant folded, there is no edge to
    // add. Check for this case.
    Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
    if (!NewCall)
      continue;

    // We do not treat intrinsic calls like real function calls because we
    // expect them to become inline code; do not add an edge for an intrinsic.
    CallSite CS = CallSite(NewCall);
    if (CS && CS.getCalledFunction() && CS.getCalledFunction()->isIntrinsic())
      continue;

    // Remember that this call site got inlined for the client of
    // InlineFunction.
    IFI.InlinedCalls.push_back(NewCall);

    // It's possible that inlining the callsite will cause it to go from an
    // indirect to a direct call by resolving a function pointer. If this
    // happens, set the callee of the new call site to a more precise
    // destination. This can also happen if the call graph node of the caller
    // was just unnecessarily imprecise.
    if (!I->second->getFunction())
      if (Function *F = CallSite(NewCall).getCalledFunction()) {
        // Indirect call site resolved to direct call.
        CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);

        continue;
      }

    CallerNode->addCalledFunction(CallSite(NewCall), I->second);
  }

  // Update the call graph by deleting the edge from Callee to Caller. We must
  // do this after the loop above in case Caller and Callee are the same.
  CallerNode->removeCallEdgeFor(CS);
}

static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
                                    BasicBlock *InsertBlock,
                                    InlineFunctionInfo &IFI) {
  Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
  IRBuilder<> Builder(InsertBlock, InsertBlock->begin());

  Value *Size = Builder.getInt64(M->getDataLayout().getTypeStoreSize(AggTy));

  // Always generate a memcpy of alignment 1 here because we don't know
  // the alignment of the src pointer. Other optimizations can infer
  // better alignment.
  Builder.CreateMemCpy(Dst, Src, Size, /*Align=*/1);
}

/// When inlining a call site that has a byval argument,
/// we have to make the implicit memcpy explicit by adding it.
static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
                                  const Function *CalledFunc,
                                  InlineFunctionInfo &IFI,
                                  unsigned ByValAlignment) {
  PointerType *ArgTy = cast<PointerType>(Arg->getType());
  Type *AggTy = ArgTy->getElementType();

  Function *Caller = TheCall->getParent()->getParent();

  // If the called function is readonly, then it could not mutate the caller's
  // copy of the byval'd memory. In this case, it is safe to elide the copy and
  // temporary.
  if (CalledFunc->onlyReadsMemory()) {
    // If the byval argument has a specified alignment that is greater than the
    // passed in pointer, then we either have to round up the input pointer or
    // give up on this transformation.
    if (ByValAlignment <= 1)  // 0 = unspecified, 1 = no particular alignment.
      return Arg;

    AssumptionCache *AC =
        IFI.GetAssumptionCache ? &(*IFI.GetAssumptionCache)(*Caller) : nullptr;
    const DataLayout &DL = Caller->getParent()->getDataLayout();

    // If the pointer is already known to be sufficiently aligned, or if we can
    // round it up to a larger alignment, then we don't need a temporary.
    if (getOrEnforceKnownAlignment(Arg, ByValAlignment, DL, TheCall, AC) >=
        ByValAlignment)
      return Arg;

    // Otherwise, we have to make a memcpy to get a safe alignment. This is bad
    // for code quality, but rarely happens and is required for correctness.
  }

  // Create the alloca. If we have DataLayout, use nice alignment.
  unsigned Align =
      Caller->getParent()->getDataLayout().getPrefTypeAlignment(AggTy);

  // If the byval had an alignment specified, we *must* use at least that
  // alignment, as it is required by the byval argument (and uses of the
  // pointer inside the callee).
  Align = std::max(Align, ByValAlignment);

  Value *NewAlloca = new AllocaInst(AggTy, nullptr, Align, Arg->getName(),
                                    &*Caller->begin()->begin());
  IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));

  // Uses of the argument in the function should use our new alloca
  // instead.
  return NewAlloca;
}

// Check whether this Value is used by a lifetime intrinsic.
static bool isUsedByLifetimeMarker(Value *V) {
  for (User *U : V->users()) {
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
        return true;
      }
    }
  }
  return false;
}

// Check whether the given alloca already has
// lifetime.start or lifetime.end intrinsics.
static bool hasLifetimeMarkers(AllocaInst *AI) {
  Type *Ty = AI->getType();
  Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
                                       Ty->getPointerAddressSpace());
  if (Ty == Int8PtrTy)
    return isUsedByLifetimeMarker(AI);

  // Do a scan to find all the casts to i8*.
  for (User *U : AI->users()) {
    if (U->getType() != Int8PtrTy) continue;
    if (U->stripPointerCasts() != AI) continue;
    if (isUsedByLifetimeMarker(U))
      return true;
  }
  return false;
}

/// Rebuild the entire inlined-at chain for this instruction so that the top of
/// the chain now is inlined-at the new call site.
static DebugLoc
updateInlinedAtInfo(const DebugLoc &DL, DILocation *InlinedAtNode,
                    LLVMContext &Ctx,
                    DenseMap<const DILocation *, DILocation *> &IANodes) {
  SmallVector<DILocation *, 3> InlinedAtLocations;
  DILocation *Last = InlinedAtNode;
  DILocation *CurInlinedAt = DL;

  // Gather all the inlined-at nodes
  while (DILocation *IA = CurInlinedAt->getInlinedAt()) {
    // Skip any we've already built nodes for
    if (DILocation *Found = IANodes[IA]) {
      Last = Found;
      break;
    }

    InlinedAtLocations.push_back(IA);
    CurInlinedAt = IA;
  }

  // Starting from the top, rebuild the nodes to point to the new inlined-at
  // location (then rebuilding the rest of the chain behind it) and update the
  // map of already-constructed inlined-at nodes.
  for (const DILocation *MD : reverse(InlinedAtLocations)) {
    Last = IANodes[MD] = DILocation::getDistinct(
        Ctx, MD->getLine(), MD->getColumn(), MD->getScope(), Last);
  }

  // And finally create the normal location for this instruction, referring to
  // the new inlined-at chain.
  return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(), Last);
}

/// Return the result of AI->isStaticAlloca() if AI were moved to the entry
/// block. Allocas used in inalloca calls and allocas of dynamic array size
Allocas used in inalloca calls and allocas of dynamic array size 1344 /// cannot be static. 1345 static bool allocaWouldBeStaticInEntry(const AllocaInst *AI) { 1346 return isa<Constant>(AI->getArraySize()) && !AI->isUsedWithInAlloca(); 1347 } 1348 1349 /// Update inlined instructions' line numbers to 1350 /// encode the location where these instructions are inlined. 1351 static void fixupLineNumbers(Function *Fn, Function::iterator FI, 1352 Instruction *TheCall, bool CalleeHasDebugInfo) { 1353 const DebugLoc &TheCallDL = TheCall->getDebugLoc(); 1354 if (!TheCallDL) 1355 return; 1356 1357 auto &Ctx = Fn->getContext(); 1358 DILocation *InlinedAtNode = TheCallDL; 1359 1360 // Create a unique call site, not to be confused with any other call from the 1361 // same location. 1362 InlinedAtNode = DILocation::getDistinct( 1363 Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(), 1364 InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt()); 1365 1366 // Cache the inlined-at nodes as they're built so they are reused; without 1367 // this, every instruction's inlined-at chain would become distinct from each 1368 // other. 1369 DenseMap<const DILocation *, DILocation *> IANodes; 1370 1371 for (; FI != Fn->end(); ++FI) { 1372 for (BasicBlock::iterator BI = FI->begin(), BE = FI->end(); 1373 BI != BE; ++BI) { 1374 if (DebugLoc DL = BI->getDebugLoc()) { 1375 BI->setDebugLoc( 1376 updateInlinedAtInfo(DL, InlinedAtNode, BI->getContext(), IANodes)); 1377 continue; 1378 } 1379 1380 if (CalleeHasDebugInfo) 1381 continue; 1382 1383 // If the inlined instruction has no line number, make it look as if it 1384 // originates from the call location. This is important for 1385 // ((__always_inline__, __nodebug__)) functions which must use caller 1386 // location for all instructions in their function body. 1387 1388 // Don't update static allocas, as they may get moved later. 1389 if (auto *AI = dyn_cast<AllocaInst>(BI)) 1390 if (allocaWouldBeStaticInEntry(AI)) 1391 continue; 1392 1393 BI->setDebugLoc(TheCallDL); 1394 } 1395 } 1396 } 1397 /// Update the block frequencies of the caller after a callee has been inlined. 1398 /// 1399 /// Each block cloned into the caller has its block frequency scaled by the 1400 /// ratio of CallSiteFreq/CalleeEntryFreq. This ensures that the cloned copy of 1401 /// the callee's entry block gets the same frequency as the callsite block and 1402 /// the relative frequencies of all cloned blocks remain the same after cloning. 1403 static void updateCallerBFI(BasicBlock *CallSiteBlock, 1404 const ValueToValueMapTy &VMap, 1405 BlockFrequencyInfo *CallerBFI, 1406 BlockFrequencyInfo *CalleeBFI, 1407 const BasicBlock &CalleeEntryBlock) { 1408 SmallPtrSet<BasicBlock *, 16> ClonedBBs; 1409 for (auto const &Entry : VMap) { 1410 if (!isa<BasicBlock>(Entry.first) || !Entry.second) 1411 continue; 1412 auto *OrigBB = cast<BasicBlock>(Entry.first); 1413 auto *ClonedBB = cast<BasicBlock>(Entry.second); 1414 ClonedBBs.insert(ClonedBB); 1415 CallerBFI->setBlockFreq(ClonedBB, 1416 CalleeBFI->getBlockFreq(OrigBB).getFrequency()); 1417 } 1418 BasicBlock *EntryClone = cast<BasicBlock>(VMap.lookup(&CalleeEntryBlock)); 1419 CallerBFI->setBlockFreqAndScale( 1420 EntryClone, CallerBFI->getBlockFreq(CallSiteBlock).getFrequency(), 1421 ClonedBBs); 1422 } 1423 1424 /// Update the entry count of the callee after inlining. 1425 /// 1426 /// The callsite's block count is subtracted from the callee's function entry 1427 /// count.
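/// For example, a callee entry count of 100 and an estimated callsite count
/// of 40 leave the callee with an entry count of 60; if the estimate exceeds
/// the callee's count, the result is clamped to 0 (see the body below).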
1428 static void updateCalleeCount(BlockFrequencyInfo &CallerBFI, BasicBlock *CallBB, 1429 Function *Callee) { 1430 // If the callee has an original count of N, and the estimated count of the 1431 // callsite is M, the new callee count is set to N - M. M is estimated from 1432 // the caller's entry count, its entry block frequency and the block frequency 1433 // of the callsite. 1434 Optional<uint64_t> CalleeCount = Callee->getEntryCount(); 1435 if (!CalleeCount) 1436 return; 1437 Optional<uint64_t> CallSiteCount = CallerBFI.getBlockProfileCount(CallBB); 1438 if (!CallSiteCount) 1439 return; 1440 // Since CallSiteCount is an estimate, it could exceed the original callee 1441 // count; in that case the new count is clamped to 0. 1442 if (CallSiteCount.getValue() > CalleeCount.getValue()) 1443 Callee->setEntryCount(0); 1444 else 1445 Callee->setEntryCount(CalleeCount.getValue() - CallSiteCount.getValue()); 1446 } 1447 1448 /// This function inlines the called function into the basic block of the 1449 /// caller. This returns false if it is not possible to inline this call. 1450 /// The program is still in a well-defined state if this occurs though. 1451 /// 1452 /// Note that this only does one level of inlining. For example, if the 1453 /// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now 1454 /// exists in the instruction stream. Similarly this will inline a recursive 1455 /// function by one level. 1456 bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI, 1457 AAResults *CalleeAAR, bool InsertLifetime) { 1458 Instruction *TheCall = CS.getInstruction(); 1459 assert(TheCall->getParent() && TheCall->getParent()->getParent() && 1460 "Instruction not in function!"); 1461 1462 // If IFI has any state in it, zap it before we fill it in. 1463 IFI.reset(); 1464 1465 Function *CalledFunc = CS.getCalledFunction(); 1466 if (!CalledFunc || // Can't inline external function or indirect 1467 CalledFunc->isDeclaration() || // call, or call to a vararg function! 1468 CalledFunc->getFunctionType()->isVarArg()) return false; 1469 1470 // The inliner does not know how to inline through calls with operand bundles 1471 // in general ... 1472 if (CS.hasOperandBundles()) { 1473 for (int i = 0, e = CS.getNumOperandBundles(); i != e; ++i) { 1474 uint32_t Tag = CS.getOperandBundleAt(i).getTagID(); 1475 // ... but it knows how to inline through "deopt" operand bundles ... 1476 if (Tag == LLVMContext::OB_deopt) 1477 continue; 1478 // ... and "funclet" operand bundles. 1479 if (Tag == LLVMContext::OB_funclet) 1480 continue; 1481 1482 return false; 1483 } 1484 } 1485 1486 // If the call to the callee cannot throw, set the 'nounwind' flag on any 1487 // calls that we inline. 1488 bool MarkNoUnwind = CS.doesNotThrow(); 1489 1490 BasicBlock *OrigBB = TheCall->getParent(); 1491 Function *Caller = OrigBB->getParent(); 1492 1493 // GC poses two hazards to inlining, which only occur when the callee has GC: 1494 // 1. If the caller has no GC, then the callee's GC must be propagated to the 1495 // caller. 1496 // 2. If the caller has a differing GC, it is invalid to inline. 1497 if (CalledFunc->hasGC()) { 1498 if (!Caller->hasGC()) 1499 Caller->setGC(CalledFunc->getGC()); 1500 else if (CalledFunc->getGC() != Caller->getGC()) 1501 return false; 1502 } 1503 1504 // Get the personality function from the callee if it contains a landing pad. 1505 Constant *CalledPersonality = 1506 CalledFunc->hasPersonalityFn() 1507 ?
CalledFunc->getPersonalityFn()->stripPointerCasts() 1508 : nullptr; 1509 1510 // Find the personality function used by the landing pads of the caller. If it 1511 // exists, then check to see that it matches the personality function used in 1512 // the callee. 1513 Constant *CallerPersonality = 1514 Caller->hasPersonalityFn() 1515 ? Caller->getPersonalityFn()->stripPointerCasts() 1516 : nullptr; 1517 if (CalledPersonality) { 1518 if (!CallerPersonality) 1519 Caller->setPersonalityFn(CalledPersonality); 1520 // If the personality functions match, then we can perform the 1521 // inlining. Otherwise, we can't inline. 1522 // TODO: This isn't 100% true. Some personality functions are proper 1523 // supersets of others and can be used in place of the other. 1524 else if (CalledPersonality != CallerPersonality) 1525 return false; 1526 } 1527 1528 // We need to figure out which funclet the callsite was in so that we may 1529 // properly nest the callee. 1530 Instruction *CallSiteEHPad = nullptr; 1531 if (CallerPersonality) { 1532 EHPersonality Personality = classifyEHPersonality(CallerPersonality); 1533 if (isFuncletEHPersonality(Personality)) { 1534 Optional<OperandBundleUse> ParentFunclet = 1535 CS.getOperandBundle(LLVMContext::OB_funclet); 1536 if (ParentFunclet) 1537 CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front()); 1538 1539 // OK, the inlining site is legal. What about the target function? 1540 1541 if (CallSiteEHPad) { 1542 if (Personality == EHPersonality::MSVC_CXX) { 1543 // The MSVC personality cannot tolerate catches getting inlined into 1544 // cleanup funclets. 1545 if (isa<CleanupPadInst>(CallSiteEHPad)) { 1546 // Ok, the call site is within a cleanuppad. Let's check the callee 1547 // for catchpads. 1548 for (const BasicBlock &CalledBB : *CalledFunc) { 1549 if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI())) 1550 return false; 1551 } 1552 } 1553 } else if (isAsynchronousEHPersonality(Personality)) { 1554 // SEH is even less tolerant, there may not be any sort of exceptional 1555 // funclet in the callee. 1556 for (const BasicBlock &CalledBB : *CalledFunc) { 1557 if (CalledBB.isEHPad()) 1558 return false; 1559 } 1560 } 1561 } 1562 } 1563 } 1564 1565 // Determine if we are dealing with a call in an EHPad which does not unwind 1566 // to caller. 1567 bool EHPadForCallUnwindsLocally = false; 1568 if (CallSiteEHPad && CS.isCall()) { 1569 UnwindDestMemoTy FuncletUnwindMap; 1570 Value *CallSiteUnwindDestToken = 1571 getUnwindDestToken(CallSiteEHPad, FuncletUnwindMap); 1572 1573 EHPadForCallUnwindsLocally = 1574 CallSiteUnwindDestToken && 1575 !isa<ConstantTokenNone>(CallSiteUnwindDestToken); 1576 } 1577 1578 // Get an iterator to the last basic block in the function, which will have 1579 // the new function inlined after it. 1580 Function::iterator LastBlock = --Caller->end(); 1581 1582 // Make sure to capture all of the return instructions from the cloned 1583 // function. 1584 SmallVector<ReturnInst*, 8> Returns; 1585 ClonedCodeInfo InlinedFunctionInfo; 1586 Function::iterator FirstNewBlock; 1587 1588 { // Scope to destroy VMap after cloning. 1589 ValueToValueMapTy VMap; 1590 // Keep a list of pair (dst, src) to emit byval initializations. 
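// (Each recorded pair is turned into an explicit memcpy by
// HandleByValArgumentInit after the callee body has been cloned, so the
// copies land at the top of the first inlined block.)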
1591 SmallVector<std::pair<Value*, Value*>, 4> ByValInit; 1592 1593 auto &DL = Caller->getParent()->getDataLayout(); 1594 1595 assert(CalledFunc->arg_size() == CS.arg_size() && 1596 "No varargs calls can be inlined!"); 1597 1598 // Calculate the vector of arguments to pass into the function cloner, which 1599 // matches up the formal arguments to the actual argument values. 1600 CallSite::arg_iterator AI = CS.arg_begin(); 1601 unsigned ArgNo = 0; 1602 for (Function::const_arg_iterator I = CalledFunc->arg_begin(), 1603 E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) { 1604 Value *ActualArg = *AI; 1605 1606 // When byval arguments are actually inlined, we need to make the copy implied 1607 // by them explicit. However, we don't do this if the callee is readonly 1608 // or readnone, because the copy would be unneeded: the callee doesn't 1609 // modify the struct. 1610 if (CS.isByValArgument(ArgNo)) { 1611 ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI, 1612 CalledFunc->getParamAlignment(ArgNo+1)); 1613 if (ActualArg != *AI) 1614 ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI)); 1615 } 1616 1617 VMap[&*I] = ActualArg; 1618 } 1619 1620 // Add alignment assumptions if necessary. We do this before the inlined 1621 // instructions are actually cloned into the caller so that we can easily 1622 // check what will be known at the start of the inlined code. 1623 AddAlignmentAssumptions(CS, IFI); 1624 1625 // We want the inliner to prune the code as it copies. We would LOVE to 1626 // have no dead or constant instructions left over after inlining occurs 1627 // (which can happen, e.g., because an argument was constant), but we'll be 1628 // happy with whatever the cloner can do. 1629 CloneAndPruneFunctionInto(Caller, CalledFunc, VMap, 1630 /*ModuleLevelChanges=*/false, Returns, ".i", 1631 &InlinedFunctionInfo, TheCall); 1632 // Remember the first block that is newly cloned over. 1633 FirstNewBlock = LastBlock; ++FirstNewBlock; 1634 1635 if (IFI.CallerBFI != nullptr && IFI.CalleeBFI != nullptr) { 1636 // Update the BFI of blocks cloned into the caller. 1637 updateCallerBFI(OrigBB, VMap, IFI.CallerBFI, IFI.CalleeBFI, 1638 CalledFunc->front()); 1639 // Update the profile count of the callee. 1640 updateCalleeCount(*IFI.CallerBFI, OrigBB, CalledFunc); 1641 } 1642 1643 // Inject byval argument initializations. 1644 for (std::pair<Value*, Value*> &Init : ByValInit) 1645 HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(), 1646 &*FirstNewBlock, IFI); 1647 1648 Optional<OperandBundleUse> ParentDeopt = 1649 CS.getOperandBundle(LLVMContext::OB_deopt); 1650 if (ParentDeopt) { 1651 SmallVector<OperandBundleDef, 2> OpDefs; 1652 1653 for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) { 1654 Instruction *I = dyn_cast_or_null<Instruction>(VH); 1655 if (!I) continue; // instruction was DCE'd or RAUW'ed to undef 1656 1657 OpDefs.clear(); 1658 1659 CallSite ICS(I); 1660 OpDefs.reserve(ICS.getNumOperandBundles()); 1661 1662 for (unsigned i = 0, e = ICS.getNumOperandBundles(); i < e; ++i) { 1663 auto ChildOB = ICS.getOperandBundleAt(i); 1664 if (ChildOB.getTagID() != LLVMContext::OB_deopt) { 1665 // If the inlined call has other operand bundles, let them be. 1666 OpDefs.emplace_back(ChildOB); 1667 continue; 1668 } 1669 1670 // It may be useful to separate this logic (of handling operand 1671 // bundles) out to a separate "policy" component if this gets crowded.
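// For example, a parent bundle "deopt"(A, B) merged with an inlined call's
// "deopt"(C) yields "deopt"(A, B, C) on the rewritten call site.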
1672 // Prepend the parent's deoptimization continuation to the newly 1673 // inlined call's deoptimization continuation. 1674 std::vector<Value *> MergedDeoptArgs; 1675 MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() + 1676 ChildOB.Inputs.size()); 1677 1678 MergedDeoptArgs.insert(MergedDeoptArgs.end(), 1679 ParentDeopt->Inputs.begin(), 1680 ParentDeopt->Inputs.end()); 1681 MergedDeoptArgs.insert(MergedDeoptArgs.end(), ChildOB.Inputs.begin(), 1682 ChildOB.Inputs.end()); 1683 1684 OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs)); 1685 } 1686 1687 Instruction *NewI = nullptr; 1688 if (isa<CallInst>(I)) 1689 NewI = CallInst::Create(cast<CallInst>(I), OpDefs, I); 1690 else 1691 NewI = InvokeInst::Create(cast<InvokeInst>(I), OpDefs, I); 1692 1693 // Note: the RAUW does the appropriate fixup in VMap, so we need to do 1694 // this even if the call returns void. 1695 I->replaceAllUsesWith(NewI); 1696 1697 VH = nullptr; 1698 I->eraseFromParent(); 1699 } 1700 } 1701 1702 // Update the callgraph if requested. 1703 if (IFI.CG) 1704 UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI); 1705 1706 // For 'nodebug' functions, the associated DISubprogram is always null. 1707 // Conservatively avoid propagating the callsite debug location to 1708 // instructions inlined from a function whose DISubprogram is not null. 1709 fixupLineNumbers(Caller, FirstNewBlock, TheCall, 1710 CalledFunc->getSubprogram() != nullptr); 1711 1712 // Clone existing noalias metadata if necessary. 1713 CloneAliasScopeMetadata(CS, VMap); 1714 1715 // Add noalias metadata if necessary. 1716 AddAliasScopeMetadata(CS, VMap, DL, CalleeAAR); 1717 1718 // Propagate llvm.mem.parallel_loop_access if necessary. 1719 PropagateParallelLoopAccessMetadata(CS, VMap); 1720 1721 // Register any cloned assumptions. 1722 if (IFI.GetAssumptionCache) 1723 for (BasicBlock &NewBlock : 1724 make_range(FirstNewBlock->getIterator(), Caller->end())) 1725 for (Instruction &I : NewBlock) { 1726 if (auto *II = dyn_cast<IntrinsicInst>(&I)) 1727 if (II->getIntrinsicID() == Intrinsic::assume) 1728 (*IFI.GetAssumptionCache)(*Caller).registerAssumption(II); 1729 } 1730 } 1731 1732 // If there are any alloca instructions in the block that used to be the entry 1733 // block for the callee, move them to the entry block of the caller. First 1734 // calculate which instruction they should be inserted before. We insert the 1735 // instructions at the end of the current alloca list. 1736 { 1737 BasicBlock::iterator InsertPoint = Caller->begin()->begin(); 1738 for (BasicBlock::iterator I = FirstNewBlock->begin(), 1739 E = FirstNewBlock->end(); I != E; ) { 1740 AllocaInst *AI = dyn_cast<AllocaInst>(I++); 1741 if (!AI) continue; 1742 1743 // If the alloca is now dead, remove it. This often occurs due to code 1744 // specialization. 1745 if (AI->use_empty()) { 1746 AI->eraseFromParent(); 1747 continue; 1748 } 1749 1750 if (!allocaWouldBeStaticInEntry(AI)) 1751 continue; 1752 1753 // Keep track of the static allocas that we inline into the caller. 1754 IFI.StaticAllocas.push_back(AI); 1755 1756 // Scan for the block of allocas that we can move over, and move them 1757 // all at once. 1758 while (isa<AllocaInst>(I) && 1759 allocaWouldBeStaticInEntry(cast<AllocaInst>(I))) { 1760 IFI.StaticAllocas.push_back(cast<AllocaInst>(I)); 1761 ++I; 1762 } 1763 1764 // Transfer all of the allocas over in a block. Using splice means 1765 // that the instructions aren't removed from the symbol table, then 1766 // reinserted. 
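// The moved range is [AI, I): the alloca that started this scan plus the
// run of static allocas found immediately after it.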
1767 Caller->getEntryBlock().getInstList().splice( 1768 InsertPoint, FirstNewBlock->getInstList(), AI->getIterator(), I); 1769 } 1770 // Move any dbg.declares describing the allocas into the entry basic block. 1771 DIBuilder DIB(*Caller->getParent()); 1772 for (auto &AI : IFI.StaticAllocas) 1773 replaceDbgDeclareForAlloca(AI, AI, DIB, /*Deref=*/false); 1774 } 1775 1776 bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false; 1777 if (InlinedFunctionInfo.ContainsCalls) { 1778 CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None; 1779 if (CallInst *CI = dyn_cast<CallInst>(TheCall)) 1780 CallSiteTailKind = CI->getTailCallKind(); 1781 1782 for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E; 1783 ++BB) { 1784 for (Instruction &I : *BB) { 1785 CallInst *CI = dyn_cast<CallInst>(&I); 1786 if (!CI) 1787 continue; 1788 1789 if (Function *F = CI->getCalledFunction()) 1790 InlinedDeoptimizeCalls |= 1791 F->getIntrinsicID() == Intrinsic::experimental_deoptimize; 1792 1793 // We need to reduce the strength of any inlined tail calls. For 1794 // musttail, we have to avoid introducing potential unbounded stack 1795 // growth. For example, if functions 'f' and 'g' are mutually recursive 1796 // with musttail, we can inline 'g' into 'f' so long as we preserve 1797 // musttail on the cloned call to 'f'. If either the inlined call site 1798 // or the cloned call site is *not* musttail, the program already has 1799 // one frame of stack growth, so it's safe to remove musttail. Here is 1800 // a table of example transformations: 1801 // 1802 // f -> musttail g -> musttail f ==> f -> musttail f 1803 // f -> musttail g -> tail f ==> f -> tail f 1804 // f -> g -> musttail f ==> f -> f 1805 // f -> g -> tail f ==> f -> f 1806 CallInst::TailCallKind ChildTCK = CI->getTailCallKind(); 1807 ChildTCK = std::min(CallSiteTailKind, ChildTCK); 1808 CI->setTailCallKind(ChildTCK); 1809 InlinedMustTailCalls |= CI->isMustTailCall(); 1810 1811 // Calls inlined through a 'nounwind' call site should be marked 1812 // 'nounwind'. 1813 if (MarkNoUnwind) 1814 CI->setDoesNotThrow(); 1815 } 1816 } 1817 } 1818 1819 // Leave lifetime markers for the static allocas, scoping them to the 1820 // function we just inlined. 1821 if (InsertLifetime && !IFI.StaticAllocas.empty()) { 1822 IRBuilder<> builder(&FirstNewBlock->front()); 1823 for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) { 1824 AllocaInst *AI = IFI.StaticAllocas[ai]; 1825 // Don't mark swifterror allocas. They can't have bitcast uses. 1826 if (AI->isSwiftError()) 1827 continue; 1828 1829 // If the alloca is already scoped to something smaller than the whole 1830 // function, then there's no need to add redundant, less accurate markers. 1831 if (hasLifetimeMarkers(AI)) 1832 continue; 1833 1834 // Try to determine the size of the allocation. 1835 ConstantInt *AllocaSize = nullptr; 1836 if (ConstantInt *AIArraySize = 1837 dyn_cast<ConstantInt>(AI->getArraySize())) { 1838 auto &DL = Caller->getParent()->getDataLayout(); 1839 Type *AllocaType = AI->getAllocatedType(); 1840 uint64_t AllocaTypeSize = DL.getTypeAllocSize(AllocaType); 1841 uint64_t AllocaArraySize = AIArraySize->getLimitedValue(); 1842 1843 // Don't add markers for zero-sized allocas. 1844 if (AllocaArraySize == 0) 1845 continue; 1846 1847 // Check that the array size doesn't saturate uint64_t and doesn't 1848 // overflow when it's multiplied by the type size.
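// Only emit an explicit size when the product AllocaArraySize *
// AllocaTypeSize is known not to overflow a uint64_t; otherwise AllocaSize
// stays null and the lifetime markers below are created without a constant
// size.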
1849 if (AllocaArraySize != ~0ULL && 1850 UINT64_MAX / AllocaArraySize >= AllocaTypeSize) { 1851 AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()), 1852 AllocaArraySize * AllocaTypeSize); 1853 } 1854 } 1855 1856 builder.CreateLifetimeStart(AI, AllocaSize); 1857 for (ReturnInst *RI : Returns) { 1858 // Don't insert llvm.lifetime.end calls between a musttail or deoptimize 1859 // call and a return. The return kills all local allocas. 1860 if (InlinedMustTailCalls && 1861 RI->getParent()->getTerminatingMustTailCall()) 1862 continue; 1863 if (InlinedDeoptimizeCalls && 1864 RI->getParent()->getTerminatingDeoptimizeCall()) 1865 continue; 1866 IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize); 1867 } 1868 } 1869 } 1870 1871 // If the inlined code contained dynamic alloca instructions, wrap the inlined 1872 // code with llvm.stacksave/llvm.stackrestore intrinsics. 1873 if (InlinedFunctionInfo.ContainsDynamicAllocas) { 1874 Module *M = Caller->getParent(); 1875 // Get the two intrinsics we care about. 1876 Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave); 1877 Function *StackRestore=Intrinsic::getDeclaration(M,Intrinsic::stackrestore); 1878 1879 // Insert the llvm.stacksave. 1880 CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin()) 1881 .CreateCall(StackSave, {}, "savedstack"); 1882 1883 // Insert a call to llvm.stackrestore before any return instructions in the 1884 // inlined function. 1885 for (ReturnInst *RI : Returns) { 1886 // Don't insert llvm.stackrestore calls between a musttail or deoptimize 1887 // call and a return. The return will restore the stack pointer. 1888 if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall()) 1889 continue; 1890 if (InlinedDeoptimizeCalls && RI->getParent()->getTerminatingDeoptimizeCall()) 1891 continue; 1892 IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr); 1893 } 1894 } 1895 1896 // If we are inlining for an invoke instruction, we must make sure to rewrite 1897 // any call instructions into invoke instructions. This is sensitive to which 1898 // funclet pads were top-level in the inlinee, so must be done before 1899 // rewriting the "parent pad" links. 1900 if (auto *II = dyn_cast<InvokeInst>(TheCall)) { 1901 BasicBlock *UnwindDest = II->getUnwindDest(); 1902 Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI(); 1903 if (isa<LandingPadInst>(FirstNonPHI)) { 1904 HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo); 1905 } else { 1906 HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo); 1907 } 1908 } 1909 1910 // Update the lexical scopes of the new funclets and callsites. 1911 // Anything that had 'none' as its parent is now nested inside the callsite's 1912 // EHPad. 1913 1914 if (CallSiteEHPad) { 1915 for (Function::iterator BB = FirstNewBlock->getIterator(), 1916 E = Caller->end(); 1917 BB != E; ++BB) { 1918 // Add bundle operands to any top-level call sites. 1919 SmallVector<OperandBundleDef, 1> OpBundles; 1920 for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E;) { 1921 Instruction *I = &*BBI++; 1922 CallSite CS(I); 1923 if (!CS) 1924 continue; 1925 1926 // Skip call sites which are nounwind intrinsics. 1927 auto *CalledFn = 1928 dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts()); 1929 if (CalledFn && CalledFn->isIntrinsic() && CS.doesNotThrow()) 1930 continue; 1931 1932 // Skip call sites which already have a "funclet" bundle. 
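// (Such calls were already nested inside one of the inlinee's own funclets;
// only top-level call sites pick up the caller's funclet pad here.)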
1933 if (CS.getOperandBundle(LLVMContext::OB_funclet)) 1934 continue; 1935 1936 CS.getOperandBundlesAsDefs(OpBundles); 1937 OpBundles.emplace_back("funclet", CallSiteEHPad); 1938 1939 Instruction *NewInst; 1940 if (CS.isCall()) 1941 NewInst = CallInst::Create(cast<CallInst>(I), OpBundles, I); 1942 else 1943 NewInst = InvokeInst::Create(cast<InvokeInst>(I), OpBundles, I); 1944 NewInst->takeName(I); 1945 I->replaceAllUsesWith(NewInst); 1946 I->eraseFromParent(); 1947 1948 OpBundles.clear(); 1949 } 1950 1951 // It is problematic if the inlinee has a cleanupret which unwinds to 1952 // caller and we inline it into a call site which doesn't unwind but into 1953 // an EH pad that does. Such an edge must be dynamically unreachable. 1954 // As such, we replace the cleanupret with unreachable. 1955 if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator())) 1956 if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally) 1957 changeToUnreachable(CleanupRet, /*UseLLVMTrap=*/false); 1958 1959 Instruction *I = BB->getFirstNonPHI(); 1960 if (!I->isEHPad()) 1961 continue; 1962 1963 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) { 1964 if (isa<ConstantTokenNone>(CatchSwitch->getParentPad())) 1965 CatchSwitch->setParentPad(CallSiteEHPad); 1966 } else { 1967 auto *FPI = cast<FuncletPadInst>(I); 1968 if (isa<ConstantTokenNone>(FPI->getParentPad())) 1969 FPI->setParentPad(CallSiteEHPad); 1970 } 1971 } 1972 } 1973 1974 if (InlinedDeoptimizeCalls) { 1975 // We need to at least remove the deoptimizing returns from the Return set, 1976 // so that the control flow from those returns does not get merged into the 1977 // caller (but terminate it instead). If the caller's return type does not 1978 // match the callee's return type, we also need to change the return type of 1979 // the intrinsic. 1980 if (Caller->getReturnType() == TheCall->getType()) { 1981 auto NewEnd = remove_if(Returns, [](ReturnInst *RI) { 1982 return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr; 1983 }); 1984 Returns.erase(NewEnd, Returns.end()); 1985 } else { 1986 SmallVector<ReturnInst *, 8> NormalReturns; 1987 Function *NewDeoptIntrinsic = Intrinsic::getDeclaration( 1988 Caller->getParent(), Intrinsic::experimental_deoptimize, 1989 {Caller->getReturnType()}); 1990 1991 for (ReturnInst *RI : Returns) { 1992 CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall(); 1993 if (!DeoptCall) { 1994 NormalReturns.push_back(RI); 1995 continue; 1996 } 1997 1998 // The calling convention on the deoptimize call itself may be bogus, 1999 // since the code we're inlining may have undefined behavior (and may 2000 // never actually execute at runtime); but all 2001 // @llvm.experimental.deoptimize declarations have to have the same 2002 // calling convention in a well-formed module. 
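// Rewrite 'ret(llvm.experimental.deoptimize(...))' from the inlinee into a
// call to the caller-typed deoptimize declaration followed by a return of
// its result (or 'ret void' if the caller returns void).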
2003 auto CallingConv = DeoptCall->getCalledFunction()->getCallingConv(); 2004 NewDeoptIntrinsic->setCallingConv(CallingConv); 2005 auto *CurBB = RI->getParent(); 2006 RI->eraseFromParent(); 2007 2008 SmallVector<Value *, 4> CallArgs(DeoptCall->arg_begin(), 2009 DeoptCall->arg_end()); 2010 2011 SmallVector<OperandBundleDef, 1> OpBundles; 2012 DeoptCall->getOperandBundlesAsDefs(OpBundles); 2013 DeoptCall->eraseFromParent(); 2014 assert(!OpBundles.empty() && 2015 "Expected at least the deopt operand bundle"); 2016 2017 IRBuilder<> Builder(CurBB); 2018 CallInst *NewDeoptCall = 2019 Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles); 2020 NewDeoptCall->setCallingConv(CallingConv); 2021 if (NewDeoptCall->getType()->isVoidTy()) 2022 Builder.CreateRetVoid(); 2023 else 2024 Builder.CreateRet(NewDeoptCall); 2025 } 2026 2027 // Leave behind the normal returns so we can merge control flow. 2028 std::swap(Returns, NormalReturns); 2029 } 2030 } 2031 2032 // Handle any inlined musttail call sites. In order for a new call site to be 2033 // musttail, the source of the clone and the inlined call site must have been 2034 // musttail. Therefore it's safe to return without merging control into the 2035 // phi below. 2036 if (InlinedMustTailCalls) { 2037 // Check if we need to bitcast the result of any musttail calls. 2038 Type *NewRetTy = Caller->getReturnType(); 2039 bool NeedBitCast = !TheCall->use_empty() && TheCall->getType() != NewRetTy; 2040 2041 // Handle the returns preceded by musttail calls separately. 2042 SmallVector<ReturnInst *, 8> NormalReturns; 2043 for (ReturnInst *RI : Returns) { 2044 CallInst *ReturnedMustTail = 2045 RI->getParent()->getTerminatingMustTailCall(); 2046 if (!ReturnedMustTail) { 2047 NormalReturns.push_back(RI); 2048 continue; 2049 } 2050 if (!NeedBitCast) 2051 continue; 2052 2053 // Delete the old return and any preceding bitcast. 2054 BasicBlock *CurBB = RI->getParent(); 2055 auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue()); 2056 RI->eraseFromParent(); 2057 if (OldCast) 2058 OldCast->eraseFromParent(); 2059 2060 // Insert a new bitcast and return with the right type. 2061 IRBuilder<> Builder(CurBB); 2062 Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy)); 2063 } 2064 2065 // Leave behind the normal returns so we can merge control flow. 2066 std::swap(Returns, NormalReturns); 2067 } 2068 2069 // Now that all of the transforms on the inlined code have taken place but 2070 // before we splice the inlined code into the CFG and lose track of which 2071 // blocks were actually inlined, collect the call sites. We only do this if 2072 // call graph updates weren't requested, as those provide value handle based 2073 // tracking of inlined call sites instead. 2074 if (InlinedFunctionInfo.ContainsCalls && !IFI.CG) { 2075 // Otherwise just collect the raw call sites that were inlined. 2076 for (BasicBlock &NewBB : 2077 make_range(FirstNewBlock->getIterator(), Caller->end())) 2078 for (Instruction &I : NewBB) 2079 if (auto CS = CallSite(&I)) 2080 IFI.InlinedCallSites.push_back(CS); 2081 } 2082 2083 // If we cloned in _exactly one_ basic block, and if that block ends in a 2084 // return instruction, we splice the body of the inlined callee directly into 2085 // the calling basic block. 2086 if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) { 2087 // Move all of the instructions right before the call. 
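// (TheCall itself stays in place for now; it is erased further down once its
// uses have been rewired to the returned value.)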
2088 OrigBB->getInstList().splice(TheCall->getIterator(), 2089 FirstNewBlock->getInstList(), 2090 FirstNewBlock->begin(), FirstNewBlock->end()); 2091 // Remove the cloned basic block. 2092 Caller->getBasicBlockList().pop_back(); 2093 2094 // If the call site was an invoke instruction, add a branch to the normal 2095 // destination. 2096 if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) { 2097 BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall); 2098 NewBr->setDebugLoc(Returns[0]->getDebugLoc()); 2099 } 2100 2101 // If the return instruction returned a value, replace uses of the call with 2102 // uses of the returned value. 2103 if (!TheCall->use_empty()) { 2104 ReturnInst *R = Returns[0]; 2105 if (TheCall == R->getReturnValue()) 2106 TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType())); 2107 else 2108 TheCall->replaceAllUsesWith(R->getReturnValue()); 2109 } 2110 // Since we are now done with the Call/Invoke, we can delete it. 2111 TheCall->eraseFromParent(); 2112 2113 // Since we are now done with the return instruction, delete it also. 2114 Returns[0]->eraseFromParent(); 2115 2116 // We are now done with the inlining. 2117 return true; 2118 } 2119 2120 // Otherwise, we have the normal case, of more than one block to inline or 2121 // multiple return sites. 2122 2123 // We want to clone the entire callee function into the hole between the 2124 // "starter" and "ender" blocks. How we accomplish this depends on whether 2125 // this is an invoke instruction or a call instruction. 2126 BasicBlock *AfterCallBB; 2127 BranchInst *CreatedBranchToNormalDest = nullptr; 2128 if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) { 2129 2130 // Add an unconditional branch to make this look like the CallInst case... 2131 CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), TheCall); 2132 2133 // Split the basic block. This guarantees that no PHI nodes will have to be 2134 // updated due to new incoming edges, and make the invoke case more 2135 // symmetric to the call case. 2136 AfterCallBB = 2137 OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(), 2138 CalledFunc->getName() + ".exit"); 2139 2140 } else { // It's a call 2141 // If this is a call instruction, we need to split the basic block that 2142 // the call lives in. 2143 // 2144 AfterCallBB = OrigBB->splitBasicBlock(TheCall->getIterator(), 2145 CalledFunc->getName() + ".exit"); 2146 } 2147 2148 if (IFI.CallerBFI) { 2149 // Copy original BB's block frequency to AfterCallBB 2150 IFI.CallerBFI->setBlockFreq( 2151 AfterCallBB, IFI.CallerBFI->getBlockFreq(OrigBB).getFrequency()); 2152 } 2153 2154 // Change the branch that used to go to AfterCallBB to branch to the first 2155 // basic block of the inlined function. 2156 // 2157 TerminatorInst *Br = OrigBB->getTerminator(); 2158 assert(Br && Br->getOpcode() == Instruction::Br && 2159 "splitBasicBlock broken!"); 2160 Br->setOperand(0, &*FirstNewBlock); 2161 2162 // Now that the function is correct, make it a little bit nicer. In 2163 // particular, move the basic blocks inserted from the end of the function 2164 // into the space made by splitting the source basic block. 2165 Caller->getBasicBlockList().splice(AfterCallBB->getIterator(), 2166 Caller->getBasicBlockList(), FirstNewBlock, 2167 Caller->end()); 2168 2169 // Handle all of the return instructions that we just cloned in, and eliminate 2170 // any users of the original call/invoke instruction. 
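// Three cases follow: multiple returns are merged through a PHI node in
// AfterCallBB, a single return forwards its value directly to the call's
// users, and no returns at all leaves those users on undef.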
2171 Type *RTy = CalledFunc->getReturnType(); 2172 2173 PHINode *PHI = nullptr; 2174 if (Returns.size() > 1) { 2175 // The PHI node should go at the front of the new basic block to merge all 2176 // possible incoming values. 2177 if (!TheCall->use_empty()) { 2178 PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(), 2179 &AfterCallBB->front()); 2180 // Anything that used the result of the function call should now use the 2181 // PHI node as their operand. 2182 TheCall->replaceAllUsesWith(PHI); 2183 } 2184 2185 // Loop over all of the return instructions adding entries to the PHI node 2186 // as appropriate. 2187 if (PHI) { 2188 for (unsigned i = 0, e = Returns.size(); i != e; ++i) { 2189 ReturnInst *RI = Returns[i]; 2190 assert(RI->getReturnValue()->getType() == PHI->getType() && 2191 "Ret value not consistent in function!"); 2192 PHI->addIncoming(RI->getReturnValue(), RI->getParent()); 2193 } 2194 } 2195 2196 // Add a branch to the merge points and remove return instructions. 2197 DebugLoc Loc; 2198 for (unsigned i = 0, e = Returns.size(); i != e; ++i) { 2199 ReturnInst *RI = Returns[i]; 2200 BranchInst* BI = BranchInst::Create(AfterCallBB, RI); 2201 Loc = RI->getDebugLoc(); 2202 BI->setDebugLoc(Loc); 2203 RI->eraseFromParent(); 2204 } 2205 // We need to set the debug location to *somewhere* inside the 2206 // inlined function. The line number may be nonsensical, but the 2207 // instruction will at least be associated with the right 2208 // function. 2209 if (CreatedBranchToNormalDest) 2210 CreatedBranchToNormalDest->setDebugLoc(Loc); 2211 } else if (!Returns.empty()) { 2212 // Otherwise, if there is exactly one return value, just replace anything 2213 // using the return value of the call with the computed value. 2214 if (!TheCall->use_empty()) { 2215 if (TheCall == Returns[0]->getReturnValue()) 2216 TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType())); 2217 else 2218 TheCall->replaceAllUsesWith(Returns[0]->getReturnValue()); 2219 } 2220 2221 // Update PHI nodes that use the ReturnBB to use the AfterCallBB. 2222 BasicBlock *ReturnBB = Returns[0]->getParent(); 2223 ReturnBB->replaceAllUsesWith(AfterCallBB); 2224 2225 // Splice the code from the return block into the block that it will return 2226 // to, which contains the code that was after the call. 2227 AfterCallBB->getInstList().splice(AfterCallBB->begin(), 2228 ReturnBB->getInstList()); 2229 2230 if (CreatedBranchToNormalDest) 2231 CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc()); 2232 2233 // Delete the return instruction now and empty ReturnBB now. 2234 Returns[0]->eraseFromParent(); 2235 ReturnBB->eraseFromParent(); 2236 } else if (!TheCall->use_empty()) { 2237 // No returns, but something is using the return value of the call. Just 2238 // nuke the result. 2239 TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType())); 2240 } 2241 2242 // Since we are now done with the Call/Invoke, we can delete it. 2243 TheCall->eraseFromParent(); 2244 2245 // If we inlined any musttail calls and the original return is now 2246 // unreachable, delete it. It can only contain a bitcast and ret. 2247 if (InlinedMustTailCalls && pred_begin(AfterCallBB) == pred_end(AfterCallBB)) 2248 AfterCallBB->eraseFromParent(); 2249 2250 // We should always be able to fold the entry block of the function into the 2251 // single predecessor of the block... 
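// (Br is the unconditional branch created by splitBasicBlock and retargeted
// at the cloned entry block above, so that entry block has exactly one
// predecessor.)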
assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!"); 2253 BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0); 2254 2255 // Splice the code of the entry block into the calling block, right before the 2256 // unconditional branch. 2257 CalleeEntry->replaceAllUsesWith(OrigBB); // Update PHI nodes 2258 OrigBB->getInstList().splice(Br->getIterator(), CalleeEntry->getInstList()); 2259 2260 // Remove the unconditional branch. 2261 OrigBB->getInstList().erase(Br); 2262 2263 // Now we can remove the CalleeEntry block, which is now empty. 2264 Caller->getBasicBlockList().erase(CalleeEntry); 2265 2266 // If we inserted a PHI node, check to see if it has a single value (e.g. all 2267 // the entries are the same or undef). If so, remove the PHI so it doesn't 2268 // block other optimizations. 2269 if (PHI) { 2270 AssumptionCache *AC = 2271 IFI.GetAssumptionCache ? &(*IFI.GetAssumptionCache)(*Caller) : nullptr; 2272 auto &DL = Caller->getParent()->getDataLayout(); 2273 if (Value *V = SimplifyInstruction(PHI, DL, nullptr, nullptr, AC)) { 2274 PHI->replaceAllUsesWith(V); 2275 PHI->eraseFromParent(); 2276 } 2277 } 2278 2279 return true; 2280 } 2281