1 //===- InlineFunction.cpp - Code to perform function inlining -------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file implements inlining of a function into a call site, resolving 11 // parameters and the return value as appropriate. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "llvm/Transforms/Utils/Cloning.h" 16 #include "llvm/ADT/SetVector.h" 17 #include "llvm/ADT/SmallPtrSet.h" 18 #include "llvm/ADT/SmallSet.h" 19 #include "llvm/ADT/SmallVector.h" 20 #include "llvm/ADT/StringExtras.h" 21 #include "llvm/Analysis/AliasAnalysis.h" 22 #include "llvm/Analysis/AssumptionCache.h" 23 #include "llvm/Analysis/BlockFrequencyInfo.h" 24 #include "llvm/Analysis/CallGraph.h" 25 #include "llvm/Analysis/CaptureTracking.h" 26 #include "llvm/Analysis/EHPersonalities.h" 27 #include "llvm/Analysis/InstructionSimplify.h" 28 #include "llvm/Analysis/ValueTracking.h" 29 #include "llvm/IR/Attributes.h" 30 #include "llvm/IR/CallSite.h" 31 #include "llvm/IR/CFG.h" 32 #include "llvm/IR/Constants.h" 33 #include "llvm/IR/DataLayout.h" 34 #include "llvm/IR/DebugInfo.h" 35 #include "llvm/IR/DerivedTypes.h" 36 #include "llvm/IR/DIBuilder.h" 37 #include "llvm/IR/Dominators.h" 38 #include "llvm/IR/IRBuilder.h" 39 #include "llvm/IR/Instructions.h" 40 #include "llvm/IR/IntrinsicInst.h" 41 #include "llvm/IR/Intrinsics.h" 42 #include "llvm/IR/MDBuilder.h" 43 #include "llvm/IR/Module.h" 44 #include "llvm/Support/CommandLine.h" 45 #include "llvm/Transforms/Utils/Local.h" 46 #include <algorithm> 47 48 using namespace llvm; 49 50 static cl::opt<bool> 51 EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true), 52 cl::Hidden, 53 cl::desc("Convert noalias attributes to metadata during inlining.")); 54 55 static cl::opt<bool> 56 PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining", 57 cl::init(true), cl::Hidden, 58 cl::desc("Convert align attributes to assumptions during inlining.")); 59 60 bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI, 61 AAResults *CalleeAAR, bool InsertLifetime) { 62 return InlineFunction(CallSite(CI), IFI, CalleeAAR, InsertLifetime); 63 } 64 bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI, 65 AAResults *CalleeAAR, bool InsertLifetime) { 66 return InlineFunction(CallSite(II), IFI, CalleeAAR, InsertLifetime); 67 } 68 69 namespace { 70 /// A class for recording information about inlining a landing pad. 71 class LandingPadInliningInfo { 72 BasicBlock *OuterResumeDest; ///< Destination of the invoke's unwind. 73 BasicBlock *InnerResumeDest; ///< Destination for the callee's resume. 74 LandingPadInst *CallerLPad; ///< LandingPadInst associated with the invoke. 75 PHINode *InnerEHValuesPHI; ///< PHI for EH values from landingpad insts. 76 SmallVector<Value*, 8> UnwindDestPHIValues; 77 78 public: 79 LandingPadInliningInfo(InvokeInst *II) 80 : OuterResumeDest(II->getUnwindDest()), InnerResumeDest(nullptr), 81 CallerLPad(nullptr), InnerEHValuesPHI(nullptr) { 82 // If there are PHI nodes in the unwind destination block, we need to keep 83 // track of which values came into them from the invoke before removing 84 // the edge from this block. 
85 llvm::BasicBlock *InvokeBB = II->getParent(); 86 BasicBlock::iterator I = OuterResumeDest->begin(); 87 for (; isa<PHINode>(I); ++I) { 88 // Save the value to use for this edge. 89 PHINode *PHI = cast<PHINode>(I); 90 UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB)); 91 } 92 93 CallerLPad = cast<LandingPadInst>(I); 94 } 95 96 /// The outer unwind destination is the target of 97 /// unwind edges introduced for calls within the inlined function. 98 BasicBlock *getOuterResumeDest() const { 99 return OuterResumeDest; 100 } 101 102 BasicBlock *getInnerResumeDest(); 103 104 LandingPadInst *getLandingPadInst() const { return CallerLPad; } 105 106 /// Forward the 'resume' instruction to the caller's landing pad block. 107 /// When the landing pad block has only one predecessor, this is 108 /// a simple branch. When there is more than one predecessor, we need to 109 /// split the landing pad block after the landingpad instruction and jump 110 /// to there. 111 void forwardResume(ResumeInst *RI, 112 SmallPtrSetImpl<LandingPadInst*> &InlinedLPads); 113 114 /// Add incoming-PHI values to the unwind destination block for the given 115 /// basic block, using the values for the original invoke's source block. 116 void addIncomingPHIValuesFor(BasicBlock *BB) const { 117 addIncomingPHIValuesForInto(BB, OuterResumeDest); 118 } 119 120 void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const { 121 BasicBlock::iterator I = dest->begin(); 122 for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) { 123 PHINode *phi = cast<PHINode>(I); 124 phi->addIncoming(UnwindDestPHIValues[i], src); 125 } 126 } 127 }; 128 } // anonymous namespace 129 130 /// Get or create a target for the branch from ResumeInsts. 131 BasicBlock *LandingPadInliningInfo::getInnerResumeDest() { 132 if (InnerResumeDest) return InnerResumeDest; 133 134 // Split the landing pad. 135 BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator(); 136 InnerResumeDest = 137 OuterResumeDest->splitBasicBlock(SplitPoint, 138 OuterResumeDest->getName() + ".body"); 139 140 // The number of incoming edges we expect to the inner landing pad. 141 const unsigned PHICapacity = 2; 142 143 // Create corresponding new PHIs for all the PHIs in the outer landing pad. 144 Instruction *InsertPoint = &InnerResumeDest->front(); 145 BasicBlock::iterator I = OuterResumeDest->begin(); 146 for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) { 147 PHINode *OuterPHI = cast<PHINode>(I); 148 PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity, 149 OuterPHI->getName() + ".lpad-body", 150 InsertPoint); 151 OuterPHI->replaceAllUsesWith(InnerPHI); 152 InnerPHI->addIncoming(OuterPHI, OuterResumeDest); 153 } 154 155 // Create a PHI for the exception values. 156 InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity, 157 "eh.lpad-body", InsertPoint); 158 CallerLPad->replaceAllUsesWith(InnerEHValuesPHI); 159 InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest); 160 161 // All done. 162 return InnerResumeDest; 163 } 164 165 /// Forward the 'resume' instruction to the caller's landing pad block. 166 /// When the landing pad block has only one predecessor, this is a simple 167 /// branch. When there is more than one predecessor, we need to split the 168 /// landing pad block after the landingpad instruction and jump to there. 
169 void LandingPadInliningInfo::forwardResume( 170 ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) { 171 BasicBlock *Dest = getInnerResumeDest(); 172 BasicBlock *Src = RI->getParent(); 173 174 BranchInst::Create(Dest, Src); 175 176 // Update the PHIs in the destination. They were inserted in an order which 177 // makes this work. 178 addIncomingPHIValuesForInto(Src, Dest); 179 180 InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src); 181 RI->eraseFromParent(); 182 } 183 184 /// Helper for getUnwindDestToken/getUnwindDestTokenHelper. 185 static Value *getParentPad(Value *EHPad) { 186 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad)) 187 return FPI->getParentPad(); 188 return cast<CatchSwitchInst>(EHPad)->getParentPad(); 189 } 190 191 typedef DenseMap<Instruction *, Value *> UnwindDestMemoTy; 192 193 /// Helper for getUnwindDestToken that does the descendant-ward part of 194 /// the search. 195 static Value *getUnwindDestTokenHelper(Instruction *EHPad, 196 UnwindDestMemoTy &MemoMap) { 197 SmallVector<Instruction *, 8> Worklist(1, EHPad); 198 199 while (!Worklist.empty()) { 200 Instruction *CurrentPad = Worklist.pop_back_val(); 201 // We only put pads on the worklist that aren't in the MemoMap. When 202 // we find an unwind dest for a pad we may update its ancestors, but 203 // the queue only ever contains uncles/great-uncles/etc. of CurrentPad, 204 // so they should never get updated while queued on the worklist. 205 assert(!MemoMap.count(CurrentPad)); 206 Value *UnwindDestToken = nullptr; 207 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) { 208 if (CatchSwitch->hasUnwindDest()) { 209 UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI(); 210 } else { 211 // Catchswitch doesn't have a 'nounwind' variant, and one might be 212 // annotated as "unwinds to caller" when really it's nounwind (see 213 // e.g. SimplifyCFGOpt::SimplifyUnreachable), so we can't infer the 214 // parent's unwind dest from this. We can check its catchpads' 215 // descendants, since they might include a cleanuppad with an 216 // "unwinds to caller" cleanupret, which can be trusted. 217 for (auto HI = CatchSwitch->handler_begin(), 218 HE = CatchSwitch->handler_end(); 219 HI != HE && !UnwindDestToken; ++HI) { 220 BasicBlock *HandlerBlock = *HI; 221 auto *CatchPad = cast<CatchPadInst>(HandlerBlock->getFirstNonPHI()); 222 for (User *Child : CatchPad->users()) { 223 // Intentionally ignore invokes here -- since the catchswitch is 224 // marked "unwind to caller", it would be a verifier error if it 225 // contained an invoke which unwinds out of it, so any invoke we'd 226 // encounter must unwind to some child of the catch. 227 if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child)) 228 continue; 229 230 Instruction *ChildPad = cast<Instruction>(Child); 231 auto Memo = MemoMap.find(ChildPad); 232 if (Memo == MemoMap.end()) { 233 // Haven't figured out this child pad yet; queue it. 234 Worklist.push_back(ChildPad); 235 continue; 236 } 237 // We've already checked this child, but might have found that 238 // it offers no proof either way. 239 Value *ChildUnwindDestToken = Memo->second; 240 if (!ChildUnwindDestToken) 241 continue; 242 // We already know the child's unwind dest, which can either 243 // be ConstantTokenNone to indicate unwind to caller, or can 244 // be another child of the catchpad. Only the former indicates 245 // the unwind dest of the catchswitch. 
246 if (isa<ConstantTokenNone>(ChildUnwindDestToken)) { 247 UnwindDestToken = ChildUnwindDestToken; 248 break; 249 } 250 assert(getParentPad(ChildUnwindDestToken) == CatchPad); 251 } 252 } 253 } 254 } else { 255 auto *CleanupPad = cast<CleanupPadInst>(CurrentPad); 256 for (User *U : CleanupPad->users()) { 257 if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) { 258 if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest()) 259 UnwindDestToken = RetUnwindDest->getFirstNonPHI(); 260 else 261 UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext()); 262 break; 263 } 264 Value *ChildUnwindDestToken; 265 if (auto *Invoke = dyn_cast<InvokeInst>(U)) { 266 ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI(); 267 } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) { 268 Instruction *ChildPad = cast<Instruction>(U); 269 auto Memo = MemoMap.find(ChildPad); 270 if (Memo == MemoMap.end()) { 271 // Haven't resolved this child yet; queue it and keep searching. 272 Worklist.push_back(ChildPad); 273 continue; 274 } 275 // We've checked this child, but still need to ignore it if it 276 // had no proof either way. 277 ChildUnwindDestToken = Memo->second; 278 if (!ChildUnwindDestToken) 279 continue; 280 } else { 281 // Not a relevant user of the cleanuppad 282 continue; 283 } 284 // In a well-formed program, the child/invoke must either unwind to 285 // an(other) child of the cleanup, or exit the cleanup. In the 286 // first case, continue searching. 287 if (isa<Instruction>(ChildUnwindDestToken) && 288 getParentPad(ChildUnwindDestToken) == CleanupPad) 289 continue; 290 UnwindDestToken = ChildUnwindDestToken; 291 break; 292 } 293 } 294 // If we haven't found an unwind dest for CurrentPad, we may have queued its 295 // children, so move on to the next in the worklist. 296 if (!UnwindDestToken) 297 continue; 298 299 // Now we know that CurrentPad unwinds to UnwindDestToken. It also exits 300 // any ancestors of CurrentPad up to but not including UnwindDestToken's 301 // parent pad. Record this in the memo map, and check to see if the 302 // original EHPad being queried is one of the ones exited. 303 Value *UnwindParent; 304 if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken)) 305 UnwindParent = getParentPad(UnwindPad); 306 else 307 UnwindParent = nullptr; 308 bool ExitedOriginalPad = false; 309 for (Instruction *ExitedPad = CurrentPad; 310 ExitedPad && ExitedPad != UnwindParent; 311 ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) { 312 // Skip over catchpads since they just follow their catchswitches. 313 if (isa<CatchPadInst>(ExitedPad)) 314 continue; 315 MemoMap[ExitedPad] = UnwindDestToken; 316 ExitedOriginalPad |= (ExitedPad == EHPad); 317 } 318 319 if (ExitedOriginalPad) 320 return UnwindDestToken; 321 322 // Continue the search. 323 } 324 325 // No definitive information is contained within this funclet. 326 return nullptr; 327 } 328 329 /// Given an EH pad, find where it unwinds. If it unwinds to an EH pad, 330 /// return that pad instruction. If it unwinds to caller, return 331 /// ConstantTokenNone. If it does not have a definitive unwind destination, 332 /// return nullptr. 333 /// 334 /// This routine gets invoked for calls in funclets in inlinees when inlining 335 /// an invoke. Since many funclets don't have calls inside them, it's queried 336 /// on-demand rather than building a map of pads to unwind dests up front. 
337 /// Determining a funclet's unwind dest may require recursively searching its
338 /// descendants, and also ancestors and cousins if the descendants don't provide
339 /// an answer. Since most funclets will have their unwind dest immediately
340 /// available as the unwind dest of a catchswitch or cleanupret, this routine
341 /// searches top-down from the given pad and then up. To avoid worst-case
342 /// quadratic run-time given that approach, it uses a memo map to avoid
343 /// re-processing funclet trees. The callers that rewrite the IR as they go
344 /// take advantage of this, for correctness, by checking/forcing rewritten
345 /// pads' entries to match the original callee view.
346 static Value *getUnwindDestToken(Instruction *EHPad,
347 UnwindDestMemoTy &MemoMap) {
348 // Catchpads unwind to the same place as their catchswitch;
349 // redirect any queries on catchpads so the code below can
350 // deal with just catchswitches and cleanuppads.
351 if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))
352 EHPad = CPI->getCatchSwitch();
353
354 // Check if we've already determined the unwind dest for this pad.
355 auto Memo = MemoMap.find(EHPad);
356 if (Memo != MemoMap.end())
357 return Memo->second;
358
359 // Search EHPad and, if necessary, its descendants.
360 Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
361 assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));
362 if (UnwindDestToken)
363 return UnwindDestToken;
364
365 // No information is available for this EHPad from itself or any of its
366 // descendants. An unwind all the way out to a pad in the caller would
367 // also need to agree with the unwind dest of the parent funclet, so
368 // search up the chain to try to find a funclet with information. Put
369 // null entries in the memo map to avoid re-processing as we go up.
370 MemoMap[EHPad] = nullptr;
371 #ifndef NDEBUG
372 SmallPtrSet<Instruction *, 4> TempMemos;
373 TempMemos.insert(EHPad);
374 #endif
375 Instruction *LastUselessPad = EHPad;
376 Value *AncestorToken;
377 for (AncestorToken = getParentPad(EHPad);
378 auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
379 AncestorToken = getParentPad(AncestorToken)) {
380 // Skip over catchpads since they just follow their catchswitches.
381 if (isa<CatchPadInst>(AncestorPad))
382 continue;
383 // If the MemoMap had an entry mapping AncestorPad to nullptr, since we
384 // haven't yet called getUnwindDestTokenHelper for AncestorPad in this
385 // call to getUnwindDestToken, that would mean that AncestorPad had no
386 // information in itself, its descendants, or its ancestors. If that
387 // were the case, then we should also have recorded the lack of information
388 // for the descendant that we're coming from. So assert that we don't
389 // find a null entry in the MemoMap for AncestorPad.
390 assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);
391 auto AncestorMemo = MemoMap.find(AncestorPad);
392 if (AncestorMemo == MemoMap.end()) {
393 UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
394 } else {
395 UnwindDestToken = AncestorMemo->second;
396 }
397 if (UnwindDestToken)
398 break;
399 LastUselessPad = AncestorPad;
400 MemoMap[LastUselessPad] = nullptr;
401 #ifndef NDEBUG
402 TempMemos.insert(LastUselessPad);
403 #endif
404 }
405
406 // We know that getUnwindDestTokenHelper was called on LastUselessPad and
407 // returned nullptr (and likewise for EHPad and any of its ancestors up to
408 // LastUselessPad), so LastUselessPad has no information from below. Since
409 // getUnwindDestTokenHelper must investigate all downward paths through
410 // no-information nodes to prove that a node has no information like this,
411 // and since any time it finds information it records it in the MemoMap for
412 // not just the immediately-containing funclet but also any ancestors also
413 // exited, it must be the case that, walking downward from LastUselessPad,
414 // visiting just those nodes which have not been mapped to an unwind dest
415 // by getUnwindDestTokenHelper (the nullptr TempMemos notwithstanding, since
416 // they are just used to keep getUnwindDestTokenHelper from repeating work),
417 // any node visited must have been exhaustively searched with no information
418 // for it found.
419 SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
420 while (!Worklist.empty()) {
421 Instruction *UselessPad = Worklist.pop_back_val();
422 auto Memo = MemoMap.find(UselessPad);
423 if (Memo != MemoMap.end() && Memo->second) {
424 // Here the name 'UselessPad' is a bit of a misnomer, because we've found
425 // that it is a funclet that does have information about unwinding to
426 // a particular destination; its parent was a useless pad.
427 // Since its parent has no information, the unwind edge must not escape
428 // the parent, and must target a sibling of this pad. This local unwind
429 // gives us no information about EHPad. Leave it and the subtree rooted
430 // at it alone.
431 assert(getParentPad(Memo->second) == getParentPad(UselessPad));
432 continue;
433 }
434 // We know we don't have information for UselessPad. If it has an entry in
435 // the MemoMap (mapping it to nullptr), it must be one of the TempMemos
436 // added on this invocation of getUnwindDestToken; if a previous invocation
437 // recorded nullptr, it would have had to prove that the ancestors of
438 // UselessPad, which include LastUselessPad, had no information, and that
439 // in turn would have required proving that the descendants of
440 // LastUselessPad, which include EHPad, have no information about
441 // LastUselessPad, which would imply that EHPad was mapped to nullptr in
442 // the MemoMap on that invocation, which isn't the case if we got here.
443 assert(!MemoMap.count(UselessPad) || TempMemos.count(UselessPad));
444 // Assert as we enumerate users that 'UselessPad' doesn't have any unwind
445 // information that we'd be contradicting by making a map entry for it
446 // (which is something that getUnwindDestTokenHelper must have proved for
447 // us to get here). Just assert on its direct users here; the checks in
448 // this downward walk at its descendants will verify that they don't have
449 // any unwind edges that exit 'UselessPad' either (i.e. they either have no
450 // unwind edges or unwind to a sibling).
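// Memoize the result of the ancestor search (possibly nullptr) for this pad
// so later queries short-circuit here rather than repeating the walk.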
451 MemoMap[UselessPad] = UnwindDestToken;
452 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
453 assert(CatchSwitch->getUnwindDest() == nullptr && "Expected useless pad");
454 for (BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
455 auto *CatchPad = HandlerBlock->getFirstNonPHI();
456 for (User *U : CatchPad->users()) {
457 assert(
458 (!isa<InvokeInst>(U) ||
459 (getParentPad(
460 cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
461 CatchPad)) &&
462 "Expected useless pad");
463 if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
464 Worklist.push_back(cast<Instruction>(U));
465 }
466 }
467 } else {
468 assert(isa<CleanupPadInst>(UselessPad));
469 for (User *U : UselessPad->users()) {
470 assert(!isa<CleanupReturnInst>(U) && "Expected useless pad");
471 assert((!isa<InvokeInst>(U) ||
472 (getParentPad(
473 cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
474 UselessPad)) &&
475 "Expected useless pad");
476 if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
477 Worklist.push_back(cast<Instruction>(U));
478 }
479 }
480 }
481
482 return UnwindDestToken;
483 }
484
485 /// When we inline a basic block into an invoke,
486 /// we have to turn all of the calls that can throw into invokes.
487 /// This function analyzes BB to see if there are any such calls, and if so,
488 /// it rewrites them to be invokes that unwind to UnwindEdge and returns the
489 /// block so the caller can update the unwind destination's PHI nodes.
490 static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
491 BasicBlock *BB, BasicBlock *UnwindEdge,
492 UnwindDestMemoTy *FuncletUnwindMap = nullptr) {
493 for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
494 Instruction *I = &*BBI++;
495
496 // We only need to check for function calls: inlined invoke
497 // instructions require no special handling.
498 CallInst *CI = dyn_cast<CallInst>(I);
499
500 if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
501 continue;
502
503 // We do not need to (and in fact, cannot) convert possibly throwing calls
504 // to @llvm.experimental.deoptimize (resp. @llvm.experimental.guard) into
505 // invokes. The caller's "segment" of the deoptimization continuation
506 // attached to the newly inlined @llvm.experimental.deoptimize
507 // (resp. @llvm.experimental.guard) call should contain the exception
508 // handling logic, if any.
509 if (auto *F = CI->getCalledFunction())
510 if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
511 F->getIntrinsicID() == Intrinsic::experimental_guard)
512 continue;
513
514 if (auto FuncletBundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
515 // This call is nested inside a funclet. If that funclet has an unwind
516 // destination within the inlinee, then unwinding out of this call would
517 // be UB. Rewriting this call to an invoke which targets the inlined
518 // invoke's unwind dest would give the call's parent funclet multiple
519 // unwind destinations, which is something that subsequent EH table
520 // generation can't handle and that the verifier rejects. So when we
521 // see such a call, leave it as a call.
522 auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]); 523 Value *UnwindDestToken = 524 getUnwindDestToken(FuncletPad, *FuncletUnwindMap); 525 if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken)) 526 continue; 527 #ifndef NDEBUG 528 Instruction *MemoKey; 529 if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad)) 530 MemoKey = CatchPad->getCatchSwitch(); 531 else 532 MemoKey = FuncletPad; 533 assert(FuncletUnwindMap->count(MemoKey) && 534 (*FuncletUnwindMap)[MemoKey] == UnwindDestToken && 535 "must get memoized to avoid confusing later searches"); 536 #endif // NDEBUG 537 } 538 539 changeToInvokeAndSplitBasicBlock(CI, UnwindEdge); 540 return BB; 541 } 542 return nullptr; 543 } 544 545 /// If we inlined an invoke site, we need to convert calls 546 /// in the body of the inlined function into invokes. 547 /// 548 /// II is the invoke instruction being inlined. FirstNewBlock is the first 549 /// block of the inlined code (the last block is the end of the function), 550 /// and InlineCodeInfo is information about the code that got inlined. 551 static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock, 552 ClonedCodeInfo &InlinedCodeInfo) { 553 BasicBlock *InvokeDest = II->getUnwindDest(); 554 555 Function *Caller = FirstNewBlock->getParent(); 556 557 // The inlined code is currently at the end of the function, scan from the 558 // start of the inlined code to its end, checking for stuff we need to 559 // rewrite. 560 LandingPadInliningInfo Invoke(II); 561 562 // Get all of the inlined landing pad instructions. 563 SmallPtrSet<LandingPadInst*, 16> InlinedLPads; 564 for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end(); 565 I != E; ++I) 566 if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) 567 InlinedLPads.insert(II->getLandingPadInst()); 568 569 // Append the clauses from the outer landing pad instruction into the inlined 570 // landing pad instructions. 571 LandingPadInst *OuterLPad = Invoke.getLandingPadInst(); 572 for (LandingPadInst *InlinedLPad : InlinedLPads) { 573 unsigned OuterNum = OuterLPad->getNumClauses(); 574 InlinedLPad->reserveClauses(OuterNum); 575 for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx) 576 InlinedLPad->addClause(OuterLPad->getClause(OuterIdx)); 577 if (OuterLPad->isCleanup()) 578 InlinedLPad->setCleanup(true); 579 } 580 581 for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end(); 582 BB != E; ++BB) { 583 if (InlinedCodeInfo.ContainsCalls) 584 if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke( 585 &*BB, Invoke.getOuterResumeDest())) 586 // Update any PHI nodes in the exceptional block to indicate that there 587 // is now a new entry in them. 588 Invoke.addIncomingPHIValuesFor(NewBB); 589 590 // Forward any resumes that are remaining here. 591 if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator())) 592 Invoke.forwardResume(RI, InlinedLPads); 593 } 594 595 // Now that everything is happy, we have one final detail. The PHI nodes in 596 // the exception destination block still have entries due to the original 597 // invoke instruction. Eliminate these entries (which might even delete the 598 // PHI node) now. 599 InvokeDest->removePredecessor(II->getParent()); 600 } 601 602 /// If we inlined an invoke site, we need to convert calls 603 /// in the body of the inlined function into invokes. 604 /// 605 /// II is the invoke instruction being inlined. 
FirstNewBlock is the first
606 /// block of the inlined code (the last block is the end of the function),
607 /// and InlinedCodeInfo is information about the code that got inlined.
608 static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock,
609 ClonedCodeInfo &InlinedCodeInfo) {
610 BasicBlock *UnwindDest = II->getUnwindDest();
611 Function *Caller = FirstNewBlock->getParent();
612
613 assert(UnwindDest->getFirstNonPHI()->isEHPad() && "unexpected BasicBlock!");
614
615 // If there are PHI nodes in the unwind destination block, we need to keep
616 // track of which values came into them from the invoke before removing the
617 // edge from this block.
618 SmallVector<Value *, 8> UnwindDestPHIValues;
619 llvm::BasicBlock *InvokeBB = II->getParent();
620 for (Instruction &I : *UnwindDest) {
621 // Save the value to use for this edge.
622 PHINode *PHI = dyn_cast<PHINode>(&I);
623 if (!PHI)
624 break;
625 UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
626 }
627
628 // Add incoming-PHI values to the unwind destination block for the given basic
629 // block, using the values for the original invoke's source block.
630 auto UpdatePHINodes = [&](BasicBlock *Src) {
631 BasicBlock::iterator I = UnwindDest->begin();
632 for (Value *V : UnwindDestPHIValues) {
633 PHINode *PHI = cast<PHINode>(I);
634 PHI->addIncoming(V, Src);
635 ++I;
636 }
637 };
638
639 // This connects all the instructions which 'unwind to caller' to the invoke
640 // destination.
641 UnwindDestMemoTy FuncletUnwindMap;
642 for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
643 BB != E; ++BB) {
644 if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
645 if (CRI->unwindsToCaller()) {
646 auto *CleanupPad = CRI->getCleanupPad();
647 CleanupReturnInst::Create(CleanupPad, UnwindDest, CRI);
648 CRI->eraseFromParent();
649 UpdatePHINodes(&*BB);
650 // Finding a cleanupret with an unwind destination would confuse
651 // subsequent calls to getUnwindDestToken, so map the cleanuppad
652 // to short-circuit any such calls and recognize this as an "unwind
653 // to caller" cleanup.
654 assert(!FuncletUnwindMap.count(CleanupPad) ||
655 isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
656 FuncletUnwindMap[CleanupPad] =
657 ConstantTokenNone::get(Caller->getContext());
658 }
659 }
660
661 Instruction *I = BB->getFirstNonPHI();
662 if (!I->isEHPad())
663 continue;
664
665 Instruction *Replacement = nullptr;
666 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
667 if (CatchSwitch->unwindsToCaller()) {
668 Value *UnwindDestToken;
669 if (auto *ParentPad =
670 dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
671 // This catchswitch is nested inside another funclet. If that
672 // funclet has an unwind destination within the inlinee, then
673 // unwinding out of this catchswitch would be UB. Rewriting this
674 // catchswitch to unwind to the inlined invoke's unwind dest would
675 // give the parent funclet multiple unwind destinations, which is
676 // something that subsequent EH table generation can't handle and
677 // that the verifier rejects. So when we see such a catchswitch,
678 // leave it as "unwind to caller".
679 UnwindDestToken = getUnwindDestToken(ParentPad, FuncletUnwindMap); 680 if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken)) 681 continue; 682 } else { 683 // This catchswitch has no parent to inherit constraints from, and 684 // none of its descendants can have an unwind edge that exits it and 685 // targets another funclet in the inlinee. It may or may not have a 686 // descendant that definitively has an unwind to caller. In either 687 // case, we'll have to assume that any unwinds out of it may need to 688 // be routed to the caller, so treat it as though it has a definitive 689 // unwind to caller. 690 UnwindDestToken = ConstantTokenNone::get(Caller->getContext()); 691 } 692 auto *NewCatchSwitch = CatchSwitchInst::Create( 693 CatchSwitch->getParentPad(), UnwindDest, 694 CatchSwitch->getNumHandlers(), CatchSwitch->getName(), 695 CatchSwitch); 696 for (BasicBlock *PadBB : CatchSwitch->handlers()) 697 NewCatchSwitch->addHandler(PadBB); 698 // Propagate info for the old catchswitch over to the new one in 699 // the unwind map. This also serves to short-circuit any subsequent 700 // checks for the unwind dest of this catchswitch, which would get 701 // confused if they found the outer handler in the callee. 702 FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken; 703 Replacement = NewCatchSwitch; 704 } 705 } else if (!isa<FuncletPadInst>(I)) { 706 llvm_unreachable("unexpected EHPad!"); 707 } 708 709 if (Replacement) { 710 Replacement->takeName(I); 711 I->replaceAllUsesWith(Replacement); 712 I->eraseFromParent(); 713 UpdatePHINodes(&*BB); 714 } 715 } 716 717 if (InlinedCodeInfo.ContainsCalls) 718 for (Function::iterator BB = FirstNewBlock->getIterator(), 719 E = Caller->end(); 720 BB != E; ++BB) 721 if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke( 722 &*BB, UnwindDest, &FuncletUnwindMap)) 723 // Update any PHI nodes in the exceptional block to indicate that there 724 // is now a new entry in them. 725 UpdatePHINodes(NewBB); 726 727 // Now that everything is happy, we have one final detail. The PHI nodes in 728 // the exception destination block still have entries due to the original 729 // invoke instruction. Eliminate these entries (which might even delete the 730 // PHI node) now. 731 UnwindDest->removePredecessor(InvokeBB); 732 } 733 734 /// When inlining a call site that has !llvm.mem.parallel_loop_access metadata, 735 /// that metadata should be propagated to all memory-accessing cloned 736 /// instructions. 737 static void PropagateParallelLoopAccessMetadata(CallSite CS, 738 ValueToValueMapTy &VMap) { 739 MDNode *M = 740 CS.getInstruction()->getMetadata(LLVMContext::MD_mem_parallel_loop_access); 741 if (!M) 742 return; 743 744 for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end(); 745 VMI != VMIE; ++VMI) { 746 if (!VMI->second) 747 continue; 748 749 Instruction *NI = dyn_cast<Instruction>(VMI->second); 750 if (!NI) 751 continue; 752 753 if (MDNode *PM = NI->getMetadata(LLVMContext::MD_mem_parallel_loop_access)) { 754 M = MDNode::concatenate(PM, M); 755 NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M); 756 } else if (NI->mayReadOrWriteMemory()) { 757 NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M); 758 } 759 } 760 } 761 762 /// When inlining a function that contains noalias scope metadata, 763 /// this metadata needs to be cloned so that the inlined blocks 764 /// have different "unique scopes" at every call site. 
Were this not done, then
765 /// aliasing scopes from a function inlined into a caller multiple times could
766 /// not be differentiated (and this would lead to miscompiles because the
767 /// non-aliasing property communicated by the metadata could have
768 /// call-site-specific control dependencies).
769 static void CloneAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap) {
770 const Function *CalledFunc = CS.getCalledFunction();
771 SetVector<const MDNode *> MD;
772
773 // Note: We could only clone the metadata if it is already used in the
774 // caller. I'm omitting that check here because it might confuse
775 // inter-procedural alias analysis passes. We can revisit this if it becomes
776 // an efficiency or overhead problem.
777
778 for (const BasicBlock &I : *CalledFunc)
779 for (const Instruction &J : I) {
780 if (const MDNode *M = J.getMetadata(LLVMContext::MD_alias_scope))
781 MD.insert(M);
782 if (const MDNode *M = J.getMetadata(LLVMContext::MD_noalias))
783 MD.insert(M);
784 }
785
786 if (MD.empty())
787 return;
788
789 // Walk the existing metadata, adding the complete (perhaps cyclic) chain to
790 // the set.
791 SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
792 while (!Queue.empty()) {
793 const MDNode *M = cast<MDNode>(Queue.pop_back_val());
794 for (unsigned i = 0, ie = M->getNumOperands(); i != ie; ++i)
795 if (const MDNode *M1 = dyn_cast<MDNode>(M->getOperand(i)))
796 if (MD.insert(M1))
797 Queue.push_back(M1);
798 }
799
800 // Now we have a complete set of all metadata in the chains used to specify
801 // the noalias scopes and the lists of those scopes.
802 SmallVector<TempMDTuple, 16> DummyNodes;
803 DenseMap<const MDNode *, TrackingMDNodeRef> MDMap;
804 for (const MDNode *I : MD) {
805 DummyNodes.push_back(MDTuple::getTemporary(CalledFunc->getContext(), None));
806 MDMap[I].reset(DummyNodes.back().get());
807 }
808
809 // Create new metadata nodes to replace the dummy nodes, replacing old
810 // metadata references with either a dummy node or an already-created new
811 // node.
812 for (const MDNode *I : MD) {
813 SmallVector<Metadata *, 4> NewOps;
814 for (unsigned i = 0, ie = I->getNumOperands(); i != ie; ++i) {
815 const Metadata *V = I->getOperand(i);
816 if (const MDNode *M = dyn_cast<MDNode>(V))
817 NewOps.push_back(MDMap[M]);
818 else
819 NewOps.push_back(const_cast<Metadata *>(V));
820 }
821
822 MDNode *NewM = MDNode::get(CalledFunc->getContext(), NewOps);
823 MDTuple *TempM = cast<MDTuple>(MDMap[I]);
824 assert(TempM->isTemporary() && "Expected temporary node");
825
826 TempM->replaceAllUsesWith(NewM);
827 }
828
829 // Now replace the metadata in the new inlined instructions with the
830 // replacements from the map.
831 for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
832 VMI != VMIE; ++VMI) {
833 if (!VMI->second)
834 continue;
835
836 Instruction *NI = dyn_cast<Instruction>(VMI->second);
837 if (!NI)
838 continue;
839
840 if (MDNode *M = NI->getMetadata(LLVMContext::MD_alias_scope)) {
841 MDNode *NewMD = MDMap[M];
842 // If the call site also had alias scope metadata (a list of scopes to
843 // which instructions inside it might belong), propagate those scopes to
844 // the inlined instructions.
845 if (MDNode *CSM = 846 CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope)) 847 NewMD = MDNode::concatenate(NewMD, CSM); 848 NI->setMetadata(LLVMContext::MD_alias_scope, NewMD); 849 } else if (NI->mayReadOrWriteMemory()) { 850 if (MDNode *M = 851 CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope)) 852 NI->setMetadata(LLVMContext::MD_alias_scope, M); 853 } 854 855 if (MDNode *M = NI->getMetadata(LLVMContext::MD_noalias)) { 856 MDNode *NewMD = MDMap[M]; 857 // If the call site also had noalias metadata (a list of scopes with 858 // which instructions inside it don't alias), propagate those scopes to 859 // the inlined instructions. 860 if (MDNode *CSM = 861 CS.getInstruction()->getMetadata(LLVMContext::MD_noalias)) 862 NewMD = MDNode::concatenate(NewMD, CSM); 863 NI->setMetadata(LLVMContext::MD_noalias, NewMD); 864 } else if (NI->mayReadOrWriteMemory()) { 865 if (MDNode *M = CS.getInstruction()->getMetadata(LLVMContext::MD_noalias)) 866 NI->setMetadata(LLVMContext::MD_noalias, M); 867 } 868 } 869 } 870 871 /// If the inlined function has noalias arguments, 872 /// then add new alias scopes for each noalias argument, tag the mapped noalias 873 /// parameters with noalias metadata specifying the new scope, and tag all 874 /// non-derived loads, stores and memory intrinsics with the new alias scopes. 875 static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap, 876 const DataLayout &DL, AAResults *CalleeAAR) { 877 if (!EnableNoAliasConversion) 878 return; 879 880 const Function *CalledFunc = CS.getCalledFunction(); 881 SmallVector<const Argument *, 4> NoAliasArgs; 882 883 for (const Argument &Arg : CalledFunc->args()) 884 if (Arg.hasNoAliasAttr() && !Arg.use_empty()) 885 NoAliasArgs.push_back(&Arg); 886 887 if (NoAliasArgs.empty()) 888 return; 889 890 // To do a good job, if a noalias variable is captured, we need to know if 891 // the capture point dominates the particular use we're considering. 892 DominatorTree DT; 893 DT.recalculate(const_cast<Function&>(*CalledFunc)); 894 895 // noalias indicates that pointer values based on the argument do not alias 896 // pointer values which are not based on it. So we add a new "scope" for each 897 // noalias function argument. Accesses using pointers based on that argument 898 // become part of that alias scope, accesses using pointers not based on that 899 // argument are tagged as noalias with that scope. 900 901 DenseMap<const Argument *, MDNode *> NewScopes; 902 MDBuilder MDB(CalledFunc->getContext()); 903 904 // Create a new scope domain for this function. 905 MDNode *NewDomain = 906 MDB.createAnonymousAliasScopeDomain(CalledFunc->getName()); 907 for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) { 908 const Argument *A = NoAliasArgs[i]; 909 910 std::string Name = CalledFunc->getName(); 911 if (A->hasName()) { 912 Name += ": %"; 913 Name += A->getName(); 914 } else { 915 Name += ": argument "; 916 Name += utostr(i); 917 } 918 919 // Note: We always create a new anonymous root here. This is true regardless 920 // of the linkage of the callee because the aliasing "scope" is not just a 921 // property of the callee, but also all control dependencies in the caller. 922 MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name); 923 NewScopes.insert(std::make_pair(A, NewScope)); 924 } 925 926 // Iterate over all new instructions in the map; for all memory-access 927 // instructions, add the alias scope metadata. 
928 for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
929 VMI != VMIE; ++VMI) {
930 if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
931 if (!VMI->second)
932 continue;
933
934 Instruction *NI = dyn_cast<Instruction>(VMI->second);
935 if (!NI)
936 continue;
937
938 bool IsArgMemOnlyCall = false, IsFuncCall = false;
939 SmallVector<const Value *, 2> PtrArgs;
940
941 if (const LoadInst *LI = dyn_cast<LoadInst>(I))
942 PtrArgs.push_back(LI->getPointerOperand());
943 else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
944 PtrArgs.push_back(SI->getPointerOperand());
945 else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
946 PtrArgs.push_back(VAAI->getPointerOperand());
947 else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
948 PtrArgs.push_back(CXI->getPointerOperand());
949 else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
950 PtrArgs.push_back(RMWI->getPointerOperand());
951 else if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
952 // If we know that the call does not access memory, then we'll still
953 // know that about the inlined clone of this call site, and we don't
954 // need to add metadata.
955 if (ICS.doesNotAccessMemory())
956 continue;
957
958 IsFuncCall = true;
959 if (CalleeAAR) {
960 FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(ICS);
961 if (MRB == FMRB_OnlyAccessesArgumentPointees ||
962 MRB == FMRB_OnlyReadsArgumentPointees)
963 IsArgMemOnlyCall = true;
964 }
965
966 for (Value *Arg : ICS.args()) {
967 // We need to check the underlying objects of all arguments, not just
968 // the pointer arguments, because we might be passing pointers as
969 // integers, etc.
970 // However, if we know that the call only accesses pointer arguments,
971 // then we only need to check the pointer arguments.
972 if (IsArgMemOnlyCall && !Arg->getType()->isPointerTy())
973 continue;
974
975 PtrArgs.push_back(Arg);
976 }
977 }
978
979 // If we found no pointers, then this instruction is not suitable for
980 // pairing with an instruction to receive aliasing metadata.
981 // However, if this is a call, we might just alias with none of the
982 // noalias arguments.
983 if (PtrArgs.empty() && !IsFuncCall)
984 continue;
985
986 // It is possible that there is only one underlying object, but it might
987 // take looking through several PHIs to see it, and thus it could be
988 // repeated in the Objects list.
989 SmallPtrSet<const Value *, 4> ObjSet;
990 SmallVector<Metadata *, 4> Scopes, NoAliases;
991
992 SmallSetVector<const Argument *, 4> NAPtrArgs;
993 for (const Value *V : PtrArgs) {
994 SmallVector<Value *, 4> Objects;
995 GetUnderlyingObjects(const_cast<Value*>(V),
996 Objects, DL, /* LI = */ nullptr);
997
998 for (Value *O : Objects)
999 ObjSet.insert(O);
1000 }
1001
1002 // Figure out if we're derived from anything that is not a noalias
1003 // argument.
1004 bool CanDeriveViaCapture = false, UsesAliasingPtr = false;
1005 for (const Value *V : ObjSet) {
1006 // Is this value a constant that cannot be derived from any pointer
1007 // value (we need to exclude constant expressions, for example, that
1008 // are formed from arithmetic on global symbols)?
1009 bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
1010 isa<ConstantPointerNull>(V) ||
1011 isa<ConstantDataVector>(V) || isa<UndefValue>(V);
1012 if (IsNonPtrConst)
1013 continue;
1014
1015 // If this is anything other than a noalias argument, then we cannot
1016 // completely describe the aliasing properties using alias.scope
1017 // metadata (and, thus, won't add any).
1018 if (const Argument *A = dyn_cast<Argument>(V)) {
1019 if (!A->hasNoAliasAttr())
1020 UsesAliasingPtr = true;
1021 } else {
1022 UsesAliasingPtr = true;
1023 }
1024
1025 // If this is not some identified function-local object (which cannot
1026 // directly alias a noalias argument), or some other argument (which,
1027 // by definition, also cannot alias a noalias argument), then we could
1028 // alias a noalias argument that has been captured.
1029 if (!isa<Argument>(V) &&
1030 !isIdentifiedFunctionLocal(const_cast<Value*>(V)))
1031 CanDeriveViaCapture = true;
1032 }
1033
1034 // A function call can always get captured noalias pointers (via other
1035 // parameters, globals, etc.).
1036 if (IsFuncCall && !IsArgMemOnlyCall)
1037 CanDeriveViaCapture = true;
1038
1039 // First, we want to figure out all of the sets with which we definitely
1040 // don't alias. Iterate over all noalias sets, and add those for which:
1041 // 1. The noalias argument is not in the set of objects from which we
1042 // definitely derive.
1043 // 2. The noalias argument has not yet been captured.
1044 // An arbitrary function that might load pointers could see captured
1045 // noalias arguments via other noalias arguments or globals, and so we
1046 // must always check for prior capture.
1047 for (const Argument *A : NoAliasArgs) {
1048 if (!ObjSet.count(A) && (!CanDeriveViaCapture ||
1049 // It might be tempting to skip the
1050 // PointerMayBeCapturedBefore check if
1051 // A->hasNoCaptureAttr() is true, but this is
1052 // incorrect because nocapture only guarantees
1053 // that no copies outlive the function, not
1054 // that the value cannot be locally captured.
1055 !PointerMayBeCapturedBefore(A,
1056 /* ReturnCaptures */ false,
1057 /* StoreCaptures */ false, I, &DT)))
1058 NoAliases.push_back(NewScopes[A]);
1059 }
1060
1061 if (!NoAliases.empty())
1062 NI->setMetadata(LLVMContext::MD_noalias,
1063 MDNode::concatenate(
1064 NI->getMetadata(LLVMContext::MD_noalias),
1065 MDNode::get(CalledFunc->getContext(), NoAliases)));
1066
1067 // Next, we want to figure out all of the sets to which we might belong.
1068 // We might belong to a set if the noalias argument is in the set of
1069 // underlying objects. If there is some non-noalias argument in our list
1070 // of underlying objects, then we cannot add a scope because the fact
1071 // that some access does not alias with any set of our noalias arguments
1072 // cannot itself guarantee that it does not alias with this access
1073 // (because there is some pointer of unknown origin involved and the
1074 // other access might also depend on this pointer). We also cannot add
1075 // scopes to arbitrary functions unless we know they don't access any
1076 // non-parameter pointer-values.
1077 bool CanAddScopes = !UsesAliasingPtr; 1078 if (CanAddScopes && IsFuncCall) 1079 CanAddScopes = IsArgMemOnlyCall; 1080 1081 if (CanAddScopes) 1082 for (const Argument *A : NoAliasArgs) { 1083 if (ObjSet.count(A)) 1084 Scopes.push_back(NewScopes[A]); 1085 } 1086 1087 if (!Scopes.empty()) 1088 NI->setMetadata( 1089 LLVMContext::MD_alias_scope, 1090 MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope), 1091 MDNode::get(CalledFunc->getContext(), Scopes))); 1092 } 1093 } 1094 } 1095 1096 /// If the inlined function has non-byval align arguments, then 1097 /// add @llvm.assume-based alignment assumptions to preserve this information. 1098 static void AddAlignmentAssumptions(CallSite CS, InlineFunctionInfo &IFI) { 1099 if (!PreserveAlignmentAssumptions || !IFI.GetAssumptionCache) 1100 return; 1101 1102 AssumptionCache *AC = &(*IFI.GetAssumptionCache)(*CS.getCaller()); 1103 auto &DL = CS.getCaller()->getParent()->getDataLayout(); 1104 1105 // To avoid inserting redundant assumptions, we should check for assumptions 1106 // already in the caller. To do this, we might need a DT of the caller. 1107 DominatorTree DT; 1108 bool DTCalculated = false; 1109 1110 Function *CalledFunc = CS.getCalledFunction(); 1111 for (Argument &Arg : CalledFunc->args()) { 1112 unsigned Align = Arg.getType()->isPointerTy() ? Arg.getParamAlignment() : 0; 1113 if (Align && !Arg.hasByValOrInAllocaAttr() && !Arg.hasNUses(0)) { 1114 if (!DTCalculated) { 1115 DT.recalculate(*CS.getCaller()); 1116 DTCalculated = true; 1117 } 1118 1119 // If we can already prove the asserted alignment in the context of the 1120 // caller, then don't bother inserting the assumption. 1121 Value *ArgVal = CS.getArgument(Arg.getArgNo()); 1122 if (getKnownAlignment(ArgVal, DL, CS.getInstruction(), AC, &DT) >= Align) 1123 continue; 1124 1125 CallInst *NewAsmp = IRBuilder<>(CS.getInstruction()) 1126 .CreateAlignmentAssumption(DL, ArgVal, Align); 1127 AC->registerAssumption(NewAsmp); 1128 } 1129 } 1130 } 1131 1132 /// Once we have cloned code over from a callee into the caller, 1133 /// update the specified callgraph to reflect the changes we made. 1134 /// Note that it's possible that not all code was copied over, so only 1135 /// some edges of the callgraph may remain. 1136 static void UpdateCallGraphAfterInlining(CallSite CS, 1137 Function::iterator FirstNewBlock, 1138 ValueToValueMapTy &VMap, 1139 InlineFunctionInfo &IFI) { 1140 CallGraph &CG = *IFI.CG; 1141 const Function *Caller = CS.getCaller(); 1142 const Function *Callee = CS.getCalledFunction(); 1143 CallGraphNode *CalleeNode = CG[Callee]; 1144 CallGraphNode *CallerNode = CG[Caller]; 1145 1146 // Since we inlined some uninlined call sites in the callee into the caller, 1147 // add edges from the caller to all of the callees of the callee. 1148 CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end(); 1149 1150 // Consider the case where CalleeNode == CallerNode. 1151 CallGraphNode::CalledFunctionsVector CallCache; 1152 if (CalleeNode == CallerNode) { 1153 CallCache.assign(I, E); 1154 I = CallCache.begin(); 1155 E = CallCache.end(); 1156 } 1157 1158 for (; I != E; ++I) { 1159 const Value *OrigCall = I->first; 1160 1161 ValueToValueMapTy::iterator VMI = VMap.find(OrigCall); 1162 // Only copy the edge if the call was inlined! 1163 if (VMI == VMap.end() || VMI->second == nullptr) 1164 continue; 1165 1166 // If the call was inlined, but then constant folded, there is no edge to 1167 // add. Check for this case. 
1168 Instruction *NewCall = dyn_cast<Instruction>(VMI->second); 1169 if (!NewCall) 1170 continue; 1171 1172 // We do not treat intrinsic calls like real function calls because we 1173 // expect them to become inline code; do not add an edge for an intrinsic. 1174 CallSite CS = CallSite(NewCall); 1175 if (CS && CS.getCalledFunction() && CS.getCalledFunction()->isIntrinsic()) 1176 continue; 1177 1178 // Remember that this call site got inlined for the client of 1179 // InlineFunction. 1180 IFI.InlinedCalls.push_back(NewCall); 1181 1182 // It's possible that inlining the callsite will cause it to go from an 1183 // indirect to a direct call by resolving a function pointer. If this 1184 // happens, set the callee of the new call site to a more precise 1185 // destination. This can also happen if the call graph node of the caller 1186 // was just unnecessarily imprecise. 1187 if (!I->second->getFunction()) 1188 if (Function *F = CallSite(NewCall).getCalledFunction()) { 1189 // Indirect call site resolved to direct call. 1190 CallerNode->addCalledFunction(CallSite(NewCall), CG[F]); 1191 1192 continue; 1193 } 1194 1195 CallerNode->addCalledFunction(CallSite(NewCall), I->second); 1196 } 1197 1198 // Update the call graph by deleting the edge from Callee to Caller. We must 1199 // do this after the loop above in case Caller and Callee are the same. 1200 CallerNode->removeCallEdgeFor(CS); 1201 } 1202 1203 static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M, 1204 BasicBlock *InsertBlock, 1205 InlineFunctionInfo &IFI) { 1206 Type *AggTy = cast<PointerType>(Src->getType())->getElementType(); 1207 IRBuilder<> Builder(InsertBlock, InsertBlock->begin()); 1208 1209 Value *Size = Builder.getInt64(M->getDataLayout().getTypeStoreSize(AggTy)); 1210 1211 // Always generate a memcpy of alignment 1 here because we don't know 1212 // the alignment of the src pointer. Other optimizations can infer 1213 // better alignment. 1214 Builder.CreateMemCpy(Dst, Src, Size, /*Align=*/1); 1215 } 1216 1217 /// When inlining a call site that has a byval argument, 1218 /// we have to make the implicit memcpy explicit by adding it. 1219 static Value *HandleByValArgument(Value *Arg, Instruction *TheCall, 1220 const Function *CalledFunc, 1221 InlineFunctionInfo &IFI, 1222 unsigned ByValAlignment) { 1223 PointerType *ArgTy = cast<PointerType>(Arg->getType()); 1224 Type *AggTy = ArgTy->getElementType(); 1225 1226 Function *Caller = TheCall->getFunction(); 1227 1228 // If the called function is readonly, then it could not mutate the caller's 1229 // copy of the byval'd memory. In this case, it is safe to elide the copy and 1230 // temporary. 1231 if (CalledFunc->onlyReadsMemory()) { 1232 // If the byval argument has a specified alignment that is greater than the 1233 // passed in pointer, then we either have to round up the input pointer or 1234 // give up on this transformation. 1235 if (ByValAlignment <= 1) // 0 = unspecified, 1 = no particular alignment. 1236 return Arg; 1237 1238 AssumptionCache *AC = 1239 IFI.GetAssumptionCache ? &(*IFI.GetAssumptionCache)(*Caller) : nullptr; 1240 const DataLayout &DL = Caller->getParent()->getDataLayout(); 1241 1242 // If the pointer is already known to be sufficiently aligned, or if we can 1243 // round it up to a larger alignment, then we don't need a temporary. 1244 if (getOrEnforceKnownAlignment(Arg, ByValAlignment, DL, TheCall, AC) >= 1245 ByValAlignment) 1246 return Arg; 1247 1248 // Otherwise, we have to make a memcpy to get a safe alignment. 
This is bad 1249 // for code quality, but rarely happens and is required for correctness. 1250 } 1251 1252 // Create the alloca. If we have DataLayout, use nice alignment. 1253 unsigned Align = 1254 Caller->getParent()->getDataLayout().getPrefTypeAlignment(AggTy); 1255 1256 // If the byval had an alignment specified, we *must* use at least that 1257 // alignment, as it is required by the byval argument (and uses of the 1258 // pointer inside the callee). 1259 Align = std::max(Align, ByValAlignment); 1260 1261 Value *NewAlloca = new AllocaInst(AggTy, nullptr, Align, Arg->getName(), 1262 &*Caller->begin()->begin()); 1263 IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca)); 1264 1265 // Uses of the argument in the function should use our new alloca 1266 // instead. 1267 return NewAlloca; 1268 } 1269 1270 // Check whether this Value is used by a lifetime intrinsic. 1271 static bool isUsedByLifetimeMarker(Value *V) { 1272 for (User *U : V->users()) { 1273 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) { 1274 switch (II->getIntrinsicID()) { 1275 default: break; 1276 case Intrinsic::lifetime_start: 1277 case Intrinsic::lifetime_end: 1278 return true; 1279 } 1280 } 1281 } 1282 return false; 1283 } 1284 1285 // Check whether the given alloca already has 1286 // lifetime.start or lifetime.end intrinsics. 1287 static bool hasLifetimeMarkers(AllocaInst *AI) { 1288 Type *Ty = AI->getType(); 1289 Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(), 1290 Ty->getPointerAddressSpace()); 1291 if (Ty == Int8PtrTy) 1292 return isUsedByLifetimeMarker(AI); 1293 1294 // Do a scan to find all the casts to i8*. 1295 for (User *U : AI->users()) { 1296 if (U->getType() != Int8PtrTy) continue; 1297 if (U->stripPointerCasts() != AI) continue; 1298 if (isUsedByLifetimeMarker(U)) 1299 return true; 1300 } 1301 return false; 1302 } 1303 1304 /// Rebuild the entire inlined-at chain for this instruction so that the top of 1305 /// the chain now is inlined-at the new call site. 1306 static DebugLoc 1307 updateInlinedAtInfo(const DebugLoc &DL, DILocation *InlinedAtNode, 1308 LLVMContext &Ctx, 1309 DenseMap<const DILocation *, DILocation *> &IANodes) { 1310 SmallVector<DILocation *, 3> InlinedAtLocations; 1311 DILocation *Last = InlinedAtNode; 1312 DILocation *CurInlinedAt = DL; 1313 1314 // Gather all the inlined-at nodes 1315 while (DILocation *IA = CurInlinedAt->getInlinedAt()) { 1316 // Skip any we've already built nodes for 1317 if (DILocation *Found = IANodes[IA]) { 1318 Last = Found; 1319 break; 1320 } 1321 1322 InlinedAtLocations.push_back(IA); 1323 CurInlinedAt = IA; 1324 } 1325 1326 // Starting from the top, rebuild the nodes to point to the new inlined-at 1327 // location (then rebuilding the rest of the chain behind it) and update the 1328 // map of already-constructed inlined-at nodes. 1329 for (const DILocation *MD : reverse(InlinedAtLocations)) { 1330 Last = IANodes[MD] = DILocation::getDistinct( 1331 Ctx, MD->getLine(), MD->getColumn(), MD->getScope(), Last); 1332 } 1333 1334 // And finally create the normal location for this instruction, referring to 1335 // the new inlined-at chain. 1336 return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(), Last); 1337 } 1338 1339 /// Return the result of AI->isStaticAlloca() if AI were moved to the entry 1340 /// block. Allocas used in inalloca calls and allocas of dynamic array size 1341 /// cannot be static. 
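/// For example, an alloca with a dynamic array size (`alloca i32, i32 %n`) or
/// one used by an inalloca call would not be static even if moved to the
/// entry block.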
1342 static bool allocaWouldBeStaticInEntry(const AllocaInst *AI) {
1343 return isa<Constant>(AI->getArraySize()) && !AI->isUsedWithInAlloca();
1344 }
1345
1346 /// Update inlined instructions' line numbers to encode the location where
1347 /// these instructions are inlined. Also strip all debug intrinsics that were
1348 /// inlined into a nodebug function; there is no debug info the backend could
1349 /// produce for a function without a DISubprogram attachment.
1350 static void fixupDebugInfo(Function *Fn, Function::iterator FI,
1351 Instruction *TheCall, bool CalleeHasDebugInfo) {
1352 bool CallerHasDebugInfo = Fn->getSubprogram();
1353 bool StripDebugInfo = !CallerHasDebugInfo && CalleeHasDebugInfo;
1354 SmallVector<DbgInfoIntrinsic *, 8> IntrinsicsToErase;
1355 const DebugLoc &TheCallDL = TheCall->getDebugLoc();
1356
1357 auto &Ctx = Fn->getContext();
1358 DILocation *InlinedAtNode = nullptr;
1359
1360 // Create a unique call site, not to be confused with any other call from the
1361 // same location.
1362 if (TheCallDL)
1363 InlinedAtNode = DILocation::getDistinct(
1364 Ctx, TheCallDL->getLine(), TheCallDL->getColumn(),
1365 TheCallDL->getScope(), TheCallDL->getInlinedAt());
1366
1367 // Cache the inlined-at nodes as they're built so they are reused; without
1368 // this, every instruction's inlined-at chain would become distinct from each
1369 // other.
1370 DenseMap<const DILocation *, DILocation *> IANodes;
1371
1372 for (; FI != Fn->end(); ++FI) {
1373 for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
1374 BI != BE; ++BI) {
1375 if (StripDebugInfo) {
1376 // Inlining into a nodebug function.
1377 if (auto *DI = dyn_cast<DbgInfoIntrinsic>(BI))
1378 // Mark dead debug intrinsics for deletion.
1379 IntrinsicsToErase.push_back(DI);
1380 else
1381 // Remove the dangling debug location.
1382 BI->setDebugLoc(DebugLoc());
1383 continue;
1384 }
1385
1386 if (DebugLoc DL = BI->getDebugLoc()) {
1387 BI->setDebugLoc(
1388 updateInlinedAtInfo(DL, InlinedAtNode, BI->getContext(), IANodes));
1389 continue;
1390 }
1391
1392 if (CalleeHasDebugInfo)
1393 continue;
1394
1395 // If the inlined instruction has no line number, make it look as if it
1396 // originates from the call location. This is important for
1397 // ((__always_inline__, __nodebug__)) functions which must use caller
1398 // location for all instructions in their function body.
1399
1400 // Don't update static allocas, as they may get moved later.
1401 if (auto *AI = dyn_cast<AllocaInst>(BI))
1402 if (allocaWouldBeStaticInEntry(AI))
1403 continue;
1404
1405 BI->setDebugLoc(TheCallDL);
1406 }
1407 }
1408
1409 for (auto *DI : IntrinsicsToErase)
1410 DI->eraseFromParent();
1411 }
1412 /// Update the block frequencies of the caller after a callee has been inlined.
1413 ///
1414 /// Each block cloned into the caller has its block frequency scaled by the
1415 /// ratio of CallSiteFreq/CalleeEntryFreq. This ensures that the cloned copy of
1416 /// the callee's entry block gets the same frequency as the callsite block and
1417 /// the relative frequencies of all cloned blocks remain the same after cloning.
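/// For example, if the callsite block has frequency 8 and the callee's entry
/// block has frequency 4, each cloned block's frequency is scaled by a factor
/// of 2.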
static void updateCallerBFI(BasicBlock *CallSiteBlock,
                            const ValueToValueMapTy &VMap,
                            BlockFrequencyInfo *CallerBFI,
                            BlockFrequencyInfo *CalleeBFI,
                            const BasicBlock &CalleeEntryBlock) {
  SmallPtrSet<BasicBlock *, 16> ClonedBBs;
  for (auto const &Entry : VMap) {
    if (!isa<BasicBlock>(Entry.first) || !Entry.second)
      continue;
    auto *OrigBB = cast<BasicBlock>(Entry.first);
    auto *ClonedBB = cast<BasicBlock>(Entry.second);
    uint64_t Freq = CalleeBFI->getBlockFreq(OrigBB).getFrequency();
    if (!ClonedBBs.insert(ClonedBB).second) {
      // Multiple blocks in the callee might get mapped to one cloned block in
      // the caller since we prune the callee as we clone it. When that
      // happens, we want to use the maximum among the original blocks'
      // frequencies.
      uint64_t NewFreq = CallerBFI->getBlockFreq(ClonedBB).getFrequency();
      if (NewFreq > Freq)
        Freq = NewFreq;
    }
    CallerBFI->setBlockFreq(ClonedBB, Freq);
  }
  BasicBlock *EntryClone = cast<BasicBlock>(VMap.lookup(&CalleeEntryBlock));
  CallerBFI->setBlockFreqAndScale(
      EntryClone, CallerBFI->getBlockFreq(CallSiteBlock).getFrequency(),
      ClonedBBs);
}

/// Update the entry count of the callee after inlining.
///
/// The callsite's block count is subtracted from the callee's function entry
/// count.
static void updateCalleeCount(BlockFrequencyInfo &CallerBFI, BasicBlock *CallBB,
                              Function *Callee) {
  // If the callee has an original count of N, and the estimated count of the
  // callsite is M, the new callee count is set to N - M. M is estimated from
  // the caller's entry count, its entry block frequency and the block
  // frequency of the callsite.
  Optional<uint64_t> CalleeCount = Callee->getEntryCount();
  if (!CalleeCount)
    return;
  Optional<uint64_t> CallSiteCount = CallerBFI.getBlockProfileCount(CallBB);
  if (!CallSiteCount)
    return;
  // Since CallSiteCount is an estimate, it could exceed the original callee
  // count, in which case the new count is clamped to 0.
  if (CallSiteCount.getValue() > CalleeCount.getValue())
    Callee->setEntryCount(0);
  else
    Callee->setEntryCount(CalleeCount.getValue() - CallSiteCount.getValue());
}

/// This function inlines the called function into the basic block of the
/// caller. This returns false if it is not possible to inline this call.
/// The program is still in a well-defined state if this occurs though.
///
/// Note that this only does one level of inlining. For example, if the
/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
/// exists in the instruction stream. Similarly this will inline a recursive
/// function by one level.
bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
                          AAResults *CalleeAAR, bool InsertLifetime) {
  Instruction *TheCall = CS.getInstruction();
  assert(TheCall->getParent() && TheCall->getFunction() &&
         "Instruction not in function!");

  // If IFI has any state in it, zap it before we fill it in.
  IFI.reset();

  Function *CalledFunc = CS.getCalledFunction();
  if (!CalledFunc ||                 // Can't inline external function or indirect
      CalledFunc->isDeclaration() || // call, or call to a vararg function!
      CalledFunc->getFunctionType()->isVarArg()) return false;

  // The inliner does not know how to inline through calls with operand bundles
  // in general ...
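  // (Illustrative IR only: a call such as
  //     call void @f() [ "deopt"(i32 0), "funclet"(token %pad) ]
  //  carries operand bundles; any bundle tag other than the two handled below
  //  causes us to refuse to inline through the call site.)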
  if (CS.hasOperandBundles()) {
    for (int i = 0, e = CS.getNumOperandBundles(); i != e; ++i) {
      uint32_t Tag = CS.getOperandBundleAt(i).getTagID();
      // ... but it knows how to inline through "deopt" operand bundles ...
      if (Tag == LLVMContext::OB_deopt)
        continue;
      // ... and "funclet" operand bundles.
      if (Tag == LLVMContext::OB_funclet)
        continue;

      return false;
    }
  }

  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CS.doesNotThrow();

  BasicBlock *OrigBB = TheCall->getParent();
  Function *Caller = OrigBB->getParent();

  // GC poses two hazards to inlining, which only occur when the callee has GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to
  //     the caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasGC()) {
    if (!Caller->hasGC())
      Caller->setGC(CalledFunc->getGC());
    else if (CalledFunc->getGC() != Caller->getGC())
      return false;
  }

  // Get the personality function from the callee if it contains a landing pad.
  Constant *CalledPersonality =
      CalledFunc->hasPersonalityFn()
          ? CalledFunc->getPersonalityFn()->stripPointerCasts()
          : nullptr;

  // Find the personality function used by the landing pads of the caller. If
  // it exists, then check to see that it matches the personality function used
  // in the callee.
  Constant *CallerPersonality =
      Caller->hasPersonalityFn()
          ? Caller->getPersonalityFn()->stripPointerCasts()
          : nullptr;
  if (CalledPersonality) {
    if (!CallerPersonality)
      Caller->setPersonalityFn(CalledPersonality);
    // If the personality functions match, then we can perform the
    // inlining. Otherwise, we can't inline.
    // TODO: This isn't 100% true. Some personality functions are proper
    //       supersets of others and can be used in place of the other.
    else if (CalledPersonality != CallerPersonality)
      return false;
  }

  // We need to figure out which funclet the callsite was in so that we may
  // properly nest the callee.
  Instruction *CallSiteEHPad = nullptr;
  if (CallerPersonality) {
    EHPersonality Personality = classifyEHPersonality(CallerPersonality);
    if (isFuncletEHPersonality(Personality)) {
      Optional<OperandBundleUse> ParentFunclet =
          CS.getOperandBundle(LLVMContext::OB_funclet);
      if (ParentFunclet)
        CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());

      // OK, the inlining site is legal.  What about the target function?

      if (CallSiteEHPad) {
        if (Personality == EHPersonality::MSVC_CXX) {
          // The MSVC personality cannot tolerate catches getting inlined into
          // cleanup funclets.
          if (isa<CleanupPadInst>(CallSiteEHPad)) {
            // Ok, the call site is within a cleanuppad.  Let's check the callee
            // for catchpads.
            for (const BasicBlock &CalledBB : *CalledFunc) {
              if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))
                return false;
            }
          }
        } else if (isAsynchronousEHPersonality(Personality)) {
          // SEH is even less tolerant; there may not be any sort of exceptional
          // funclet in the callee.
          for (const BasicBlock &CalledBB : *CalledFunc) {
            if (CalledBB.isEHPad())
              return false;
          }
        }
      }
    }
  }

  // Determine if we are dealing with a call in an EHPad which does not unwind
  // to the caller.
  bool EHPadForCallUnwindsLocally = false;
  if (CallSiteEHPad && CS.isCall()) {
    UnwindDestMemoTy FuncletUnwindMap;
    Value *CallSiteUnwindDestToken =
        getUnwindDestToken(CallSiteEHPad, FuncletUnwindMap);

    EHPadForCallUnwindsLocally =
        CallSiteUnwindDestToken &&
        !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
  }

  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  Function::iterator LastBlock = --Caller->end();

  // Make sure to capture all of the return instructions from the cloned
  // function.
  SmallVector<ReturnInst*, 8> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;

  { // Scope to destroy VMap after cloning.
    ValueToValueMapTy VMap;
    // Keep a list of (dst, src) pairs to emit byval initializations.
    SmallVector<std::pair<Value*, Value*>, 4> ByValInit;

    auto &DL = Caller->getParent()->getDataLayout();

    assert(CalledFunc->arg_size() == CS.arg_size() &&
           "No varargs calls can be inlined!");

    // Calculate the vector of arguments to pass into the function cloner,
    // which matches up the formal to the actual argument values.
    CallSite::arg_iterator AI = CS.arg_begin();
    unsigned ArgNo = 0;
    for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
         E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;

      // When byval arguments are actually inlined, we need to make the copy
      // implied by them explicit. However, we don't do this if the callee is
      // readonly or readnone, because the copy would be unneeded: the callee
      // doesn't modify the struct.
      if (CS.isByValArgument(ArgNo)) {
        ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
                                        CalledFunc->getParamAlignment(ArgNo+1));
        if (ActualArg != *AI)
          ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI));
      }

      VMap[&*I] = ActualArg;
    }

    // Add alignment assumptions if necessary. We do this before the inlined
    // instructions are actually cloned into the caller so that we can easily
    // check what will be known at the start of the inlined code.
    AddAlignmentAssumptions(CS, IFI);

    // We want the inliner to prune the code as it copies.  We would LOVE to
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll be
    // happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
                              /*ModuleLevelChanges=*/false, Returns, ".i",
                              &InlinedFunctionInfo, TheCall);
    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    if (IFI.CallerBFI != nullptr && IFI.CalleeBFI != nullptr) {
      // Update the BFI of blocks cloned into the caller.
      updateCallerBFI(OrigBB, VMap, IFI.CallerBFI, IFI.CalleeBFI,
                      CalledFunc->front());
      // Update the profile count of the callee.
      updateCalleeCount(*IFI.CallerBFI, OrigBB, CalledFunc);
    }

    // Inject byval arguments initialization.
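    // (Each recorded (dst, src) pair is turned into an explicit copy of the
    // aggregate at the start of the inlined body, so the copy implied by the
    // byval attribute is preserved after the attribute itself is dropped.)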
    for (std::pair<Value*, Value*> &Init : ByValInit)
      HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(),
                              &*FirstNewBlock, IFI);

    Optional<OperandBundleUse> ParentDeopt =
        CS.getOperandBundle(LLVMContext::OB_deopt);
    if (ParentDeopt) {
      SmallVector<OperandBundleDef, 2> OpDefs;

      for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) {
        Instruction *I = dyn_cast_or_null<Instruction>(VH);
        if (!I) continue;  // instruction was DCE'd or RAUW'ed to undef

        OpDefs.clear();

        CallSite ICS(I);
        OpDefs.reserve(ICS.getNumOperandBundles());

        for (unsigned i = 0, e = ICS.getNumOperandBundles(); i < e; ++i) {
          auto ChildOB = ICS.getOperandBundleAt(i);
          if (ChildOB.getTagID() != LLVMContext::OB_deopt) {
            // If the inlined call has other operand bundles, let them be.
            OpDefs.emplace_back(ChildOB);
            continue;
          }

          // It may be useful to separate this logic (of handling operand
          // bundles) out to a separate "policy" component if this gets crowded.
          // Prepend the parent's deoptimization continuation to the newly
          // inlined call's deoptimization continuation.
          std::vector<Value *> MergedDeoptArgs;
          MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
                                  ChildOB.Inputs.size());

          MergedDeoptArgs.insert(MergedDeoptArgs.end(),
                                 ParentDeopt->Inputs.begin(),
                                 ParentDeopt->Inputs.end());
          MergedDeoptArgs.insert(MergedDeoptArgs.end(), ChildOB.Inputs.begin(),
                                 ChildOB.Inputs.end());

          OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs));
        }

        Instruction *NewI = nullptr;
        if (isa<CallInst>(I))
          NewI = CallInst::Create(cast<CallInst>(I), OpDefs, I);
        else
          NewI = InvokeInst::Create(cast<InvokeInst>(I), OpDefs, I);

        // Note: the RAUW does the appropriate fixup in VMap, so we need to do
        // this even if the call returns void.
        I->replaceAllUsesWith(NewI);

        VH = nullptr;
        I->eraseFromParent();
      }
    }

    // Update the callgraph if requested.
    if (IFI.CG)
      UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);

    // For 'nodebug' functions, the associated DISubprogram is always null.
    // Conservatively avoid propagating the callsite debug location to
    // instructions inlined from a function whose DISubprogram is not null.
    fixupDebugInfo(Caller, FirstNewBlock, TheCall,
                   CalledFunc->getSubprogram() != nullptr);

    // Clone existing noalias metadata if necessary.
    CloneAliasScopeMetadata(CS, VMap);

    // Add noalias metadata if necessary.
    AddAliasScopeMetadata(CS, VMap, DL, CalleeAAR);

    // Propagate llvm.mem.parallel_loop_access if necessary.
    PropagateParallelLoopAccessMetadata(CS, VMap);

    // Register any cloned assumptions.
    if (IFI.GetAssumptionCache)
      for (BasicBlock &NewBlock :
           make_range(FirstNewBlock->getIterator(), Caller->end()))
        for (Instruction &I : NewBlock) {
          if (auto *II = dyn_cast<IntrinsicInst>(&I))
            if (II->getIntrinsicID() == Intrinsic::assume)
              (*IFI.GetAssumptionCache)(*Caller).registerAssumption(II);
        }
  }

  // If there are any alloca instructions in the block that used to be the
  // entry block for the callee, move them to the entry block of the caller.
  // First calculate which instruction they should be inserted before.  We
  // insert the instructions at the end of the current alloca list.
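  // (Static allocas are only treated as such when they live in the entry
  // block; left where they were cloned, they would look like dynamic stack
  // allocations to later passes, so hoisting them preserves the callee's
  // original static-alloca behavior.)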
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
         E = FirstNewBlock->end(); I != E; ) {
      AllocaInst *AI = dyn_cast<AllocaInst>(I++);
      if (!AI) continue;

      // If the alloca is now dead, remove it.  This often occurs due to code
      // specialization.
      if (AI->use_empty()) {
        AI->eraseFromParent();
        continue;
      }

      if (!allocaWouldBeStaticInEntry(AI))
        continue;

      // Keep track of the static allocas that we inline into the caller.
      IFI.StaticAllocas.push_back(AI);

      // Scan for the block of allocas that we can move over, and move them
      // all at once.
      while (isa<AllocaInst>(I) &&
             allocaWouldBeStaticInEntry(cast<AllocaInst>(I))) {
        IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
        ++I;
      }

      // Transfer all of the allocas over in a block.  Using splice means
      // that the instructions aren't removed from the symbol table, then
      // reinserted.
      Caller->getEntryBlock().getInstList().splice(
          InsertPoint, FirstNewBlock->getInstList(), AI->getIterator(), I);
    }
    // Move any dbg.declares describing the allocas into the entry basic block.
    DIBuilder DIB(*Caller->getParent());
    for (auto &AI : IFI.StaticAllocas)
      replaceDbgDeclareForAlloca(AI, AI, DIB, /*Deref=*/false);
  }

  bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false;
  if (InlinedFunctionInfo.ContainsCalls) {
    CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
    if (CallInst *CI = dyn_cast<CallInst>(TheCall))
      CallSiteTailKind = CI->getTailCallKind();

    for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
         ++BB) {
      for (Instruction &I : *BB) {
        CallInst *CI = dyn_cast<CallInst>(&I);
        if (!CI)
          continue;

        if (Function *F = CI->getCalledFunction())
          InlinedDeoptimizeCalls |=
              F->getIntrinsicID() == Intrinsic::experimental_deoptimize;

        // We need to reduce the strength of any inlined tail calls.  For
        // musttail, we have to avoid introducing potential unbounded stack
        // growth.  For example, if functions 'f' and 'g' are mutually recursive
        // with musttail, we can inline 'g' into 'f' so long as we preserve
        // musttail on the cloned call to 'f'.  If either the inlined call site
        // or the cloned call site is *not* musttail, the program already has
        // one frame of stack growth, so it's safe to remove musttail.  Here is
        // a table of example transformations:
        //
        //    f -> musttail g -> musttail f  ==>  f -> musttail f
        //    f -> musttail g ->     tail f  ==>  f ->     tail f
        //    f ->          g -> musttail f  ==>  f ->          f
        //    f ->          g ->     tail f  ==>  f ->          f
        CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
        ChildTCK = std::min(CallSiteTailKind, ChildTCK);
        CI->setTailCallKind(ChildTCK);
        InlinedMustTailCalls |= CI->isMustTailCall();

        // Calls inlined through a 'nounwind' call site should be marked
        // 'nounwind'.
        if (MarkNoUnwind)
          CI->setDoesNotThrow();
      }
    }
  }

  // Leave lifetime markers for the static allocas, scoping them to the
  // function we just inlined.
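  // (Illustrative IR only, roughly what gets emitted for an alloca of known
  // size:
  //     call void @llvm.lifetime.start(i64 <size>, i8* <alloca cast to i8*>)
  //  at the top of the inlined body, plus a matching @llvm.lifetime.end before
  //  each return cloned from the callee.)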
  if (InsertLifetime && !IFI.StaticAllocas.empty()) {
    IRBuilder<> builder(&FirstNewBlock->front());
    for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
      AllocaInst *AI = IFI.StaticAllocas[ai];
      // Don't mark swifterror allocas. They can't have bitcast uses.
      if (AI->isSwiftError())
        continue;

      // If the alloca is already scoped to something smaller than the whole
      // function then there's no need to add redundant, less accurate markers.
      if (hasLifetimeMarkers(AI))
        continue;

      // Try to determine the size of the allocation.
      ConstantInt *AllocaSize = nullptr;
      if (ConstantInt *AIArraySize =
              dyn_cast<ConstantInt>(AI->getArraySize())) {
        auto &DL = Caller->getParent()->getDataLayout();
        Type *AllocaType = AI->getAllocatedType();
        uint64_t AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
        uint64_t AllocaArraySize = AIArraySize->getLimitedValue();

        // Don't add markers for zero-sized allocas.
        if (AllocaArraySize == 0)
          continue;

        // Check that array size doesn't saturate uint64_t and doesn't
        // overflow when it's multiplied by type size.
        if (AllocaArraySize != ~0ULL &&
            UINT64_MAX / AllocaArraySize >= AllocaTypeSize) {
          AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
                                        AllocaArraySize * AllocaTypeSize);
        }
      }

      builder.CreateLifetimeStart(AI, AllocaSize);
      for (ReturnInst *RI : Returns) {
        // Don't insert llvm.lifetime.end calls between a musttail or
        // deoptimize call and a return.  The return kills all local allocas.
        if (InlinedMustTailCalls &&
            RI->getParent()->getTerminatingMustTailCall())
          continue;
        if (InlinedDeoptimizeCalls &&
            RI->getParent()->getTerminatingDeoptimizeCall())
          continue;
        IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
      }
    }
  }

  // If the inlined code contained dynamic alloca instructions, wrap the
  // inlined code with llvm.stacksave/llvm.stackrestore intrinsics.
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    // Get the two intrinsics we care about.
    Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    Function *StackRestore =
        Intrinsic::getDeclaration(M, Intrinsic::stackrestore);

    // Insert the llvm.stacksave.
    CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
                             .CreateCall(StackSave, {}, "savedstack");

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
    for (ReturnInst *RI : Returns) {
      // Don't insert llvm.stackrestore calls between a musttail or deoptimize
      // call and a return.  The return will restore the stack pointer.
      if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
        continue;
      if (InlinedDeoptimizeCalls &&
          RI->getParent()->getTerminatingDeoptimizeCall())
        continue;
      IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
    }
  }

  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any call instructions into invoke instructions.  This is sensitive to which
  // funclet pads were top-level in the inlinee, so must be done before
  // rewriting the "parent pad" links.
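  // (That is, a call in the inlined body that may throw becomes an invoke
  // whose unwind edge targets the caller's landing pad or EH pad, so
  // exceptions keep flowing to the original invoke's unwind destination.)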
  if (auto *II = dyn_cast<InvokeInst>(TheCall)) {
    BasicBlock *UnwindDest = II->getUnwindDest();
    Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI();
    if (isa<LandingPadInst>(FirstNonPHI)) {
      HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo);
    } else {
      HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo);
    }
  }

  // Update the lexical scopes of the new funclets and callsites.
  // Anything that had 'none' as its parent is now nested inside the callsite's
  // EHPad.

  if (CallSiteEHPad) {
    for (Function::iterator BB = FirstNewBlock->getIterator(),
                            E = Caller->end();
         BB != E; ++BB) {
      // Add bundle operands to any top-level call sites.
      SmallVector<OperandBundleDef, 1> OpBundles;
      for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E;) {
        Instruction *I = &*BBI++;
        CallSite CS(I);
        if (!CS)
          continue;

        // Skip call sites which are nounwind intrinsics.
        auto *CalledFn =
            dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
        if (CalledFn && CalledFn->isIntrinsic() && CS.doesNotThrow())
          continue;

        // Skip call sites which already have a "funclet" bundle.
        if (CS.getOperandBundle(LLVMContext::OB_funclet))
          continue;

        CS.getOperandBundlesAsDefs(OpBundles);
        OpBundles.emplace_back("funclet", CallSiteEHPad);

        Instruction *NewInst;
        if (CS.isCall())
          NewInst = CallInst::Create(cast<CallInst>(I), OpBundles, I);
        else
          NewInst = InvokeInst::Create(cast<InvokeInst>(I), OpBundles, I);
        NewInst->takeName(I);
        I->replaceAllUsesWith(NewInst);
        I->eraseFromParent();

        OpBundles.clear();
      }

      // It is problematic if the inlinee has a cleanupret which unwinds to
      // caller and we inline it into a call site which doesn't unwind but into
      // an EH pad that does.  Such an edge must be dynamically unreachable.
      // As such, we replace the cleanupret with unreachable.
      if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
        if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
          changeToUnreachable(CleanupRet, /*UseLLVMTrap=*/false);

      Instruction *I = BB->getFirstNonPHI();
      if (!I->isEHPad())
        continue;

      if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
        if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
          CatchSwitch->setParentPad(CallSiteEHPad);
      } else {
        auto *FPI = cast<FuncletPadInst>(I);
        if (isa<ConstantTokenNone>(FPI->getParentPad()))
          FPI->setParentPad(CallSiteEHPad);
      }
    }
  }

  if (InlinedDeoptimizeCalls) {
    // We need to at least remove the deoptimizing returns from the Returns
    // set, so that the control flow from those returns does not get merged
    // into the caller (but terminates it instead).  If the caller's return
    // type does not match the callee's return type, we also need to change
    // the return type of the intrinsic.
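    // (A return block ending in a call to @llvm.experimental.deoptimize never
    // falls through to the caller's merge block; the deoptimize call produces
    // the function's return value, so such returns are either dropped from
    // Returns or rebuilt around a re-typed intrinsic below.)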
    if (Caller->getReturnType() == TheCall->getType()) {
      auto NewEnd = remove_if(Returns, [](ReturnInst *RI) {
        return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr;
      });
      Returns.erase(NewEnd, Returns.end());
    } else {
      SmallVector<ReturnInst *, 8> NormalReturns;
      Function *NewDeoptIntrinsic = Intrinsic::getDeclaration(
          Caller->getParent(), Intrinsic::experimental_deoptimize,
          {Caller->getReturnType()});

      for (ReturnInst *RI : Returns) {
        CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall();
        if (!DeoptCall) {
          NormalReturns.push_back(RI);
          continue;
        }

        // The calling convention on the deoptimize call itself may be bogus,
        // since the code we're inlining may have undefined behavior (and may
        // never actually execute at runtime); but all
        // @llvm.experimental.deoptimize declarations have to have the same
        // calling convention in a well-formed module.
        auto CallingConv = DeoptCall->getCalledFunction()->getCallingConv();
        NewDeoptIntrinsic->setCallingConv(CallingConv);
        auto *CurBB = RI->getParent();
        RI->eraseFromParent();

        SmallVector<Value *, 4> CallArgs(DeoptCall->arg_begin(),
                                         DeoptCall->arg_end());

        SmallVector<OperandBundleDef, 1> OpBundles;
        DeoptCall->getOperandBundlesAsDefs(OpBundles);
        DeoptCall->eraseFromParent();
        assert(!OpBundles.empty() &&
               "Expected at least the deopt operand bundle");

        IRBuilder<> Builder(CurBB);
        CallInst *NewDeoptCall =
            Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
        NewDeoptCall->setCallingConv(CallingConv);
        if (NewDeoptCall->getType()->isVoidTy())
          Builder.CreateRetVoid();
        else
          Builder.CreateRet(NewDeoptCall);
      }

      // Leave behind the normal returns so we can merge control flow.
      std::swap(Returns, NormalReturns);
    }
  }

  // Handle any inlined musttail call sites.  In order for a new call site to
  // be musttail, the source of the clone and the inlined call site must have
  // been musttail.  Therefore it's safe to return without merging control into
  // the phi below.
  if (InlinedMustTailCalls) {
    // Check if we need to bitcast the result of any musttail calls.
    Type *NewRetTy = Caller->getReturnType();
    bool NeedBitCast = !TheCall->use_empty() && TheCall->getType() != NewRetTy;

    // Handle the returns preceded by musttail calls separately.
    SmallVector<ReturnInst *, 8> NormalReturns;
    for (ReturnInst *RI : Returns) {
      CallInst *ReturnedMustTail =
          RI->getParent()->getTerminatingMustTailCall();
      if (!ReturnedMustTail) {
        NormalReturns.push_back(RI);
        continue;
      }
      if (!NeedBitCast)
        continue;

      // Delete the old return and any preceding bitcast.
      BasicBlock *CurBB = RI->getParent();
      auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
      RI->eraseFromParent();
      if (OldCast)
        OldCast->eraseFromParent();

      // Insert a new bitcast and return with the right type.
      IRBuilder<> Builder(CurBB);
      Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
    }

    // Leave behind the normal returns so we can merge control flow.
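    // (Returns preceded by a musttail call already produce the caller's return
    // value and must not be routed into the merge block or PHI below, so only
    // the remaining "normal" returns stay in Returns.)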
    std::swap(Returns, NormalReturns);
  }

  // Now that all of the transforms on the inlined code have taken place but
  // before we splice the inlined code into the CFG and lose track of which
  // blocks were actually inlined, collect the call sites. We only do this if
  // call graph updates weren't requested, as those provide value handle based
  // tracking of inlined call sites instead.
  if (InlinedFunctionInfo.ContainsCalls && !IFI.CG) {
    // Otherwise just collect the raw call sites that were inlined.
    for (BasicBlock &NewBB :
         make_range(FirstNewBlock->getIterator(), Caller->end()))
      for (Instruction &I : NewBB)
        if (auto CS = CallSite(&I))
          IFI.InlinedCallSites.push_back(CS);
  }

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->getInstList().splice(TheCall->getIterator(),
                                 FirstNewBlock->getInstList(),
                                 FirstNewBlock->begin(), FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->getBasicBlockList().pop_back();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
      BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);
      NewBr->setDebugLoc(Returns[0]->getDebugLoc());
    }

    // If the return instruction returned a value, replace uses of the call
    // with uses of the returned value.
    if (!TheCall->use_empty()) {
      ReturnInst *R = Returns[0];
      if (TheCall == R->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(R->getReturnValue());
    }
    // Since we are now done with the Call/Invoke, we can delete it.
    TheCall->eraseFromParent();

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->eraseFromParent();

    // We are now done with the inlining.
    return true;
  }

  // Otherwise, we have the normal case of more than one block to inline or
  // multiple return sites.

  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks.  How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
  BasicBlock *AfterCallBB;
  BranchInst *CreatedBranchToNormalDest = nullptr;
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {

    // Add an unconditional branch to make this look like the CallInst case...
    CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), TheCall);

    // Split the basic block.  This guarantees that no PHI nodes will have to
    // be updated due to new incoming edges, and makes the invoke case more
    // symmetric to the call case.
    AfterCallBB =
        OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(),
                                CalledFunc->getName() + ".exit");

  } else {  // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    //
    AfterCallBB = OrigBB->splitBasicBlock(TheCall->getIterator(),
                                          CalledFunc->getName() + ".exit");
  }

  if (IFI.CallerBFI) {
    // Copy the original BB's block frequency to AfterCallBB.
    IFI.CallerBFI->setBlockFreq(
        AfterCallBB, IFI.CallerBFI->getBlockFreq(OrigBB).getFrequency());
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  //
  TerminatorInst *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
  Br->setOperand(0, &*FirstNewBlock);

  // Now that the function is correct, make it a little bit nicer.  In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->getBasicBlockList().splice(AfterCallBB->getIterator(),
                                     Caller->getBasicBlockList(), FirstNewBlock,
                                     Caller->end());

  // Handle all of the return instructions that we just cloned in, and
  // eliminate any users of the original call/invoke instruction.
  Type *RTy = CalledFunc->getReturnType();

  PHINode *PHI = nullptr;
  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    if (!TheCall->use_empty()) {
      PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(),
                            &AfterCallBB->front());
      // Anything that used the result of the function call should now use the
      // PHI node as its operand.
      TheCall->replaceAllUsesWith(PHI);
    }

    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
    if (PHI) {
      for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
        ReturnInst *RI = Returns[i];
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }
    }

    // Add a branch to the merge point and remove return instructions.
    DebugLoc Loc;
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      BranchInst *BI = BranchInst::Create(AfterCallBB, RI);
      Loc = RI->getDebugLoc();
      BI->setDebugLoc(Loc);
      RI->eraseFromParent();
    }
    // We need to set the debug location to *somewhere* inside the
    // inlined function.  The line number may be nonsensical, but the
    // instruction will at least be associated with the right
    // function.
    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Loc);
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!TheCall->use_empty()) {
      if (TheCall == Returns[0]->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
    }

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
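    // (This moves everything, including the return itself, into AfterCallBB;
    // the stray return and the now-empty ReturnBB are erased just below.)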
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());

    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());

    // Delete the return instruction and the now-empty ReturnBB.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!TheCall->use_empty()) {
    // No returns, but something is using the return value of the call.  Just
    // nuke the result.
    TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  TheCall->eraseFromParent();

  // If we inlined any musttail calls and the original return is now
  // unreachable, delete it.  It can only contain a bitcast and ret.
  if (InlinedMustTailCalls && pred_begin(AfterCallBB) == pred_end(AfterCallBB))
    AfterCallBB->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the callee's entry block into the calling block, right before the
  // unconditional branch.
  CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes
  OrigBB->getInstList().splice(Br->getIterator(), CalleeEntry->getInstList());

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);

  // If we inserted a phi node, check to see if it has a single value (e.g. all
  // the entries are the same or undef).  If so, remove the PHI so it doesn't
  // block other optimizations.
  if (PHI) {
    AssumptionCache *AC =
        IFI.GetAssumptionCache ? &(*IFI.GetAssumptionCache)(*Caller) : nullptr;
    auto &DL = Caller->getParent()->getDataLayout();
    if (Value *V = SimplifyInstruction(PHI, DL, nullptr, nullptr, AC)) {
      PHI->replaceAllUsesWith(V);
      PHI->eraseFromParent();
    }
  }

  return true;
}