1 //===- InlineFunction.cpp - Code to perform function inlining -------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file implements inlining of a function into a call site, resolving 11 // parameters and the return value as appropriate. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "llvm/Transforms/Utils/Cloning.h" 16 #include "llvm/ADT/SetVector.h" 17 #include "llvm/ADT/SmallPtrSet.h" 18 #include "llvm/ADT/SmallSet.h" 19 #include "llvm/ADT/SmallVector.h" 20 #include "llvm/ADT/StringExtras.h" 21 #include "llvm/Analysis/AliasAnalysis.h" 22 #include "llvm/Analysis/AssumptionCache.h" 23 #include "llvm/Analysis/CallGraph.h" 24 #include "llvm/Analysis/CaptureTracking.h" 25 #include "llvm/Analysis/EHPersonalities.h" 26 #include "llvm/Analysis/InstructionSimplify.h" 27 #include "llvm/Analysis/ValueTracking.h" 28 #include "llvm/IR/Attributes.h" 29 #include "llvm/IR/CallSite.h" 30 #include "llvm/IR/CFG.h" 31 #include "llvm/IR/Constants.h" 32 #include "llvm/IR/DataLayout.h" 33 #include "llvm/IR/DebugInfo.h" 34 #include "llvm/IR/DerivedTypes.h" 35 #include "llvm/IR/DIBuilder.h" 36 #include "llvm/IR/Dominators.h" 37 #include "llvm/IR/IRBuilder.h" 38 #include "llvm/IR/Instructions.h" 39 #include "llvm/IR/IntrinsicInst.h" 40 #include "llvm/IR/Intrinsics.h" 41 #include "llvm/IR/MDBuilder.h" 42 #include "llvm/IR/Module.h" 43 #include "llvm/Transforms/Utils/Local.h" 44 #include "llvm/Support/CommandLine.h" 45 #include <algorithm> 46 47 using namespace llvm; 48 49 static cl::opt<bool> 50 EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true), 51 cl::Hidden, 52 cl::desc("Convert noalias attributes to metadata during inlining.")); 53 54 static cl::opt<bool> 55 PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining", 56 cl::init(true), cl::Hidden, 57 cl::desc("Convert align attributes to assumptions during inlining.")); 58 59 bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI, 60 AAResults *CalleeAAR, bool InsertLifetime) { 61 return InlineFunction(CallSite(CI), IFI, CalleeAAR, InsertLifetime); 62 } 63 bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI, 64 AAResults *CalleeAAR, bool InsertLifetime) { 65 return InlineFunction(CallSite(II), IFI, CalleeAAR, InsertLifetime); 66 } 67 68 namespace { 69 /// A class for recording information about inlining a landing pad. 70 class LandingPadInliningInfo { 71 BasicBlock *OuterResumeDest; ///< Destination of the invoke's unwind. 72 BasicBlock *InnerResumeDest; ///< Destination for the callee's resume. 73 LandingPadInst *CallerLPad; ///< LandingPadInst associated with the invoke. 74 PHINode *InnerEHValuesPHI; ///< PHI for EH values from landingpad insts. 75 SmallVector<Value*, 8> UnwindDestPHIValues; 76 77 public: 78 LandingPadInliningInfo(InvokeInst *II) 79 : OuterResumeDest(II->getUnwindDest()), InnerResumeDest(nullptr), 80 CallerLPad(nullptr), InnerEHValuesPHI(nullptr) { 81 // If there are PHI nodes in the unwind destination block, we need to keep 82 // track of which values came into them from the invoke before removing 83 // the edge from this block. 
84 llvm::BasicBlock *InvokeBB = II->getParent(); 85 BasicBlock::iterator I = OuterResumeDest->begin(); 86 for (; isa<PHINode>(I); ++I) { 87 // Save the value to use for this edge. 88 PHINode *PHI = cast<PHINode>(I); 89 UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB)); 90 } 91 92 CallerLPad = cast<LandingPadInst>(I); 93 } 94 95 /// The outer unwind destination is the target of 96 /// unwind edges introduced for calls within the inlined function. 97 BasicBlock *getOuterResumeDest() const { 98 return OuterResumeDest; 99 } 100 101 BasicBlock *getInnerResumeDest(); 102 103 LandingPadInst *getLandingPadInst() const { return CallerLPad; } 104 105 /// Forward the 'resume' instruction to the caller's landing pad block. 106 /// When the landing pad block has only one predecessor, this is 107 /// a simple branch. When there is more than one predecessor, we need to 108 /// split the landing pad block after the landingpad instruction and jump 109 /// to there. 110 void forwardResume(ResumeInst *RI, 111 SmallPtrSetImpl<LandingPadInst*> &InlinedLPads); 112 113 /// Add incoming-PHI values to the unwind destination block for the given 114 /// basic block, using the values for the original invoke's source block. 115 void addIncomingPHIValuesFor(BasicBlock *BB) const { 116 addIncomingPHIValuesForInto(BB, OuterResumeDest); 117 } 118 119 void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const { 120 BasicBlock::iterator I = dest->begin(); 121 for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) { 122 PHINode *phi = cast<PHINode>(I); 123 phi->addIncoming(UnwindDestPHIValues[i], src); 124 } 125 } 126 }; 127 } // anonymous namespace 128 129 /// Get or create a target for the branch from ResumeInsts. 130 BasicBlock *LandingPadInliningInfo::getInnerResumeDest() { 131 if (InnerResumeDest) return InnerResumeDest; 132 133 // Split the landing pad. 134 BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator(); 135 InnerResumeDest = 136 OuterResumeDest->splitBasicBlock(SplitPoint, 137 OuterResumeDest->getName() + ".body"); 138 139 // The number of incoming edges we expect to the inner landing pad. 140 const unsigned PHICapacity = 2; 141 142 // Create corresponding new PHIs for all the PHIs in the outer landing pad. 143 Instruction *InsertPoint = &InnerResumeDest->front(); 144 BasicBlock::iterator I = OuterResumeDest->begin(); 145 for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) { 146 PHINode *OuterPHI = cast<PHINode>(I); 147 PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity, 148 OuterPHI->getName() + ".lpad-body", 149 InsertPoint); 150 OuterPHI->replaceAllUsesWith(InnerPHI); 151 InnerPHI->addIncoming(OuterPHI, OuterResumeDest); 152 } 153 154 // Create a PHI for the exception values. 155 InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity, 156 "eh.lpad-body", InsertPoint); 157 CallerLPad->replaceAllUsesWith(InnerEHValuesPHI); 158 InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest); 159 160 // All done. 161 return InnerResumeDest; 162 } 163 164 /// Forward the 'resume' instruction to the caller's landing pad block. 165 /// When the landing pad block has only one predecessor, this is a simple 166 /// branch. When there is more than one predecessor, we need to split the 167 /// landing pad block after the landingpad instruction and jump to there. 
168 void LandingPadInliningInfo::forwardResume( 169 ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) { 170 BasicBlock *Dest = getInnerResumeDest(); 171 BasicBlock *Src = RI->getParent(); 172 173 BranchInst::Create(Dest, Src); 174 175 // Update the PHIs in the destination. They were inserted in an order which 176 // makes this work. 177 addIncomingPHIValuesForInto(Src, Dest); 178 179 InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src); 180 RI->eraseFromParent(); 181 } 182 183 /// Helper for getUnwindDestToken/getUnwindDestTokenHelper. 184 static Value *getParentPad(Value *EHPad) { 185 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad)) 186 return FPI->getParentPad(); 187 return cast<CatchSwitchInst>(EHPad)->getParentPad(); 188 } 189 190 typedef DenseMap<Instruction *, Value *> UnwindDestMemoTy; 191 192 /// Helper for getUnwindDestToken that does the descendant-ward part of 193 /// the search. 194 static Value *getUnwindDestTokenHelper(Instruction *EHPad, 195 UnwindDestMemoTy &MemoMap) { 196 SmallVector<Instruction *, 8> Worklist(1, EHPad); 197 198 while (!Worklist.empty()) { 199 Instruction *CurrentPad = Worklist.pop_back_val(); 200 // We only put pads on the worklist that aren't in the MemoMap. When 201 // we find an unwind dest for a pad we may update its ancestors, but 202 // the queue only ever contains uncles/great-uncles/etc. of CurrentPad, 203 // so they should never get updated while queued on the worklist. 204 assert(!MemoMap.count(CurrentPad)); 205 Value *UnwindDestToken = nullptr; 206 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) { 207 if (CatchSwitch->hasUnwindDest()) { 208 UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI(); 209 } else { 210 // Catchswitch doesn't have a 'nounwind' variant, and one might be 211 // annotated as "unwinds to caller" when really it's nounwind (see 212 // e.g. SimplifyCFGOpt::SimplifyUnreachable), so we can't infer the 213 // parent's unwind dest from this. We can check its catchpads' 214 // descendants, since they might include a cleanuppad with an 215 // "unwinds to caller" cleanupret, which can be trusted. 216 for (auto HI = CatchSwitch->handler_begin(), 217 HE = CatchSwitch->handler_end(); 218 HI != HE && !UnwindDestToken; ++HI) { 219 BasicBlock *HandlerBlock = *HI; 220 auto *CatchPad = cast<CatchPadInst>(HandlerBlock->getFirstNonPHI()); 221 for (User *Child : CatchPad->users()) { 222 // Intentionally ignore invokes here -- since the catchswitch is 223 // marked "unwind to caller", it would be a verifier error if it 224 // contained an invoke which unwinds out of it, so any invoke we'd 225 // encounter must unwind to some child of the catch. 226 if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child)) 227 continue; 228 229 Instruction *ChildPad = cast<Instruction>(Child); 230 auto Memo = MemoMap.find(ChildPad); 231 if (Memo == MemoMap.end()) { 232 // Haven't figured out this child pad yet; queue it. 233 Worklist.push_back(ChildPad); 234 continue; 235 } 236 // We've already checked this child, but might have found that 237 // it offers no proof either way. 238 Value *ChildUnwindDestToken = Memo->second; 239 if (!ChildUnwindDestToken) 240 continue; 241 // We already know the child's unwind dest, which can either 242 // be ConstantTokenNone to indicate unwind to caller, or can 243 // be another child of the catchpad. Only the former indicates 244 // the unwind dest of the catchswitch. 
245 if (isa<ConstantTokenNone>(ChildUnwindDestToken)) { 246 UnwindDestToken = ChildUnwindDestToken; 247 break; 248 } 249 assert(getParentPad(ChildUnwindDestToken) == CatchPad); 250 } 251 } 252 } 253 } else { 254 auto *CleanupPad = cast<CleanupPadInst>(CurrentPad); 255 for (User *U : CleanupPad->users()) { 256 if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) { 257 if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest()) 258 UnwindDestToken = RetUnwindDest->getFirstNonPHI(); 259 else 260 UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext()); 261 break; 262 } 263 Value *ChildUnwindDestToken; 264 if (auto *Invoke = dyn_cast<InvokeInst>(U)) { 265 ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI(); 266 } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) { 267 Instruction *ChildPad = cast<Instruction>(U); 268 auto Memo = MemoMap.find(ChildPad); 269 if (Memo == MemoMap.end()) { 270 // Haven't resolved this child yet; queue it and keep searching. 271 Worklist.push_back(ChildPad); 272 continue; 273 } 274 // We've checked this child, but still need to ignore it if it 275 // had no proof either way. 276 ChildUnwindDestToken = Memo->second; 277 if (!ChildUnwindDestToken) 278 continue; 279 } else { 280 // Not a relevant user of the cleanuppad 281 continue; 282 } 283 // In a well-formed program, the child/invoke must either unwind to 284 // an(other) child of the cleanup, or exit the cleanup. In the 285 // first case, continue searching. 286 if (isa<Instruction>(ChildUnwindDestToken) && 287 getParentPad(ChildUnwindDestToken) == CleanupPad) 288 continue; 289 UnwindDestToken = ChildUnwindDestToken; 290 break; 291 } 292 } 293 // If we haven't found an unwind dest for CurrentPad, we may have queued its 294 // children, so move on to the next in the worklist. 295 if (!UnwindDestToken) 296 continue; 297 298 // Now we know that CurrentPad unwinds to UnwindDestToken. It also exits 299 // any ancestors of CurrentPad up to but not including UnwindDestToken's 300 // parent pad. Record this in the memo map, and check to see if the 301 // original EHPad being queried is one of the ones exited. 302 Value *UnwindParent; 303 if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken)) 304 UnwindParent = getParentPad(UnwindPad); 305 else 306 UnwindParent = nullptr; 307 bool ExitedOriginalPad = false; 308 for (Instruction *ExitedPad = CurrentPad; 309 ExitedPad && ExitedPad != UnwindParent; 310 ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) { 311 // Skip over catchpads since they just follow their catchswitches. 312 if (isa<CatchPadInst>(ExitedPad)) 313 continue; 314 MemoMap[ExitedPad] = UnwindDestToken; 315 ExitedOriginalPad |= (ExitedPad == EHPad); 316 } 317 318 if (ExitedOriginalPad) 319 return UnwindDestToken; 320 321 // Continue the search. 322 } 323 324 // No definitive information is contained within this funclet. 325 return nullptr; 326 } 327 328 /// Given an EH pad, find where it unwinds. If it unwinds to an EH pad, 329 /// return that pad instruction. If it unwinds to caller, return 330 /// ConstantTokenNone. If it does not have a definitive unwind destination, 331 /// return nullptr. 332 /// 333 /// This routine gets invoked for calls in funclets in inlinees when inlining 334 /// an invoke. Since many funclets don't have calls inside them, it's queried 335 /// on-demand rather than building a map of pads to unwind dests up front. 
/// Determining a funclet's unwind dest may require recursively searching its
/// descendants, and also ancestors and cousins if the descendants don't provide
/// an answer. Since most funclets will have their unwind dest immediately
/// available as the unwind dest of a catchswitch or cleanupret, this routine
/// searches top-down from the given pad and then up. To avoid worst-case
/// quadratic run-time given that approach, it uses a memo map to avoid
/// re-processing funclet trees. The callers that rewrite the IR as they go
/// take advantage of this, for correctness, by checking/forcing rewritten
/// pads' entries to match the original callee view.
static Value *getUnwindDestToken(Instruction *EHPad,
                                 UnwindDestMemoTy &MemoMap) {
  // Catchpads unwind to the same place as their catchswitch;
  // redirect any queries on catchpads so the code below can
  // deal with just catchswitches and cleanuppads.
  if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))
    EHPad = CPI->getCatchSwitch();

  // Check if we've already determined the unwind dest for this pad.
  auto Memo = MemoMap.find(EHPad);
  if (Memo != MemoMap.end())
    return Memo->second;

  // Search EHPad and, if necessary, its descendants.
  Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
  assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));
  if (UnwindDestToken)
    return UnwindDestToken;

  // No information is available for this EHPad from itself or any of its
  // descendants. An unwind all the way out to a pad in the caller would
  // need also to agree with the unwind dest of the parent funclet, so
  // search up the chain to try to find a funclet with information. Put
  // null entries in the memo map to avoid re-processing as we go up.
  MemoMap[EHPad] = nullptr;
#ifndef NDEBUG
  SmallPtrSet<Instruction *, 4> TempMemos;
  TempMemos.insert(EHPad);
#endif
  Instruction *LastUselessPad = EHPad;
  Value *AncestorToken;
  for (AncestorToken = getParentPad(EHPad);
       auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
       AncestorToken = getParentPad(AncestorToken)) {
    // Skip over catchpads since they just follow their catchswitches.
    if (isa<CatchPadInst>(AncestorPad))
      continue;
    // If the MemoMap had an entry mapping AncestorPad to nullptr, since we
    // haven't yet called getUnwindDestTokenHelper for AncestorPad in this
    // call to getUnwindDestToken, that would mean that AncestorPad had no
    // information in itself, its descendants, or its ancestors. If that
    // were the case, then we should also have recorded the lack of information
    // for the descendant that we're coming from. So assert that we don't
    // find a null entry in the MemoMap for AncestorPad.
    assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);
    auto AncestorMemo = MemoMap.find(AncestorPad);
    if (AncestorMemo == MemoMap.end()) {
      UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
    } else {
      UnwindDestToken = AncestorMemo->second;
    }
    if (UnwindDestToken)
      break;
    LastUselessPad = AncestorPad;
    MemoMap[LastUselessPad] = nullptr;
#ifndef NDEBUG
    TempMemos.insert(LastUselessPad);
#endif
  }

  // We know that getUnwindDestTokenHelper was called on LastUselessPad and
  // returned nullptr (and likewise for EHPad and any of its ancestors up to
  // LastUselessPad), so LastUselessPad has no information from below. Since
  // getUnwindDestTokenHelper must investigate all downward paths through
  // no-information nodes to prove that a node has no information like this,
  // and since any time it finds information it records it in the MemoMap for
  // not just the immediately-containing funclet but also any ancestors also
  // exited, it must be the case that, walking downward from LastUselessPad,
  // visiting just those nodes which have not been mapped to an unwind dest
  // by getUnwindDestTokenHelper (the nullptr TempMemos notwithstanding, since
  // they are just used to keep getUnwindDestTokenHelper from repeating work),
  // any node visited must have been exhaustively searched with no information
  // for it found.
  SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
  while (!Worklist.empty()) {
    Instruction *UselessPad = Worklist.pop_back_val();
    auto Memo = MemoMap.find(UselessPad);
    if (Memo != MemoMap.end() && Memo->second) {
      // Here the name 'UselessPad' is a bit of a misnomer, because we've found
      // that it is a funclet that does have information about unwinding to
      // a particular destination; its parent was a useless pad.
      // Since its parent has no information, the unwind edge must not escape
      // the parent, and must target a sibling of this pad. This local unwind
      // gives us no information about EHPad. Leave it and the subtree rooted
      // at it alone.
      assert(getParentPad(Memo->second) == getParentPad(UselessPad));
      continue;
    }
    // We know we don't have information for UselessPad. If it has an entry in
    // the MemoMap (mapping it to nullptr), it must be one of the TempMemos
    // added on this invocation of getUnwindDestToken; if a previous invocation
    // recorded nullptr, it would have had to prove that the ancestors of
    // UselessPad, which include LastUselessPad, had no information, and that
    // in turn would have required proving that the descendants of
    // LastUselessPad, which include EHPad, have no information about
    // LastUselessPad, which would imply that EHPad was mapped to nullptr in
    // the MemoMap on that invocation, which isn't the case if we got here.
    assert(!MemoMap.count(UselessPad) || TempMemos.count(UselessPad));
    // Assert as we enumerate users that 'UselessPad' doesn't have any unwind
    // information that we'd be contradicting by making a map entry for it
    // (which is something that getUnwindDestTokenHelper must have proved for
    // us to get here). Just assert on its direct users here; the checks in
    // this downward walk at its descendants will verify that they don't have
    // any unwind edges that exit 'UselessPad' either (i.e. they either have no
    // unwind edges or unwind to a sibling).
    MemoMap[UselessPad] = UnwindDestToken;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
      assert(CatchSwitch->getUnwindDest() == nullptr && "Expected useless pad");
      for (BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
        auto *CatchPad = HandlerBlock->getFirstNonPHI();
        for (User *U : CatchPad->users()) {
          assert(
              (!isa<InvokeInst>(U) ||
               (getParentPad(
                    cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
                CatchPad)) &&
              "Expected useless pad");
          if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
            Worklist.push_back(cast<Instruction>(U));
        }
      }
    } else {
      assert(isa<CleanupPadInst>(UselessPad));
      for (User *U : UselessPad->users()) {
        assert(!isa<CleanupReturnInst>(U) && "Expected useless pad");
        assert((!isa<InvokeInst>(U) ||
                (getParentPad(
                     cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
                 UselessPad)) &&
               "Expected useless pad");
        if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
          Worklist.push_back(cast<Instruction>(U));
      }
    }
  }

  return UnwindDestToken;
}

/// When we inline a basic block into an invoke,
/// we have to turn all of the calls that can throw into invokes.
/// This function analyzes BB to see if there are any calls, and if so,
/// it rewrites them to be invokes that jump to UnwindEdge. The callers are
/// responsible for updating the PHI nodes in the unwind destination block.
static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
    BasicBlock *BB, BasicBlock *UnwindEdge,
    UnwindDestMemoTy *FuncletUnwindMap = nullptr) {
  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = &*BBI++;

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);

    if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
      continue;

    // We do not need to (and in fact, cannot) convert possibly throwing calls
    // to @llvm.experimental_deoptimize (resp. @llvm.experimental.guard) into
    // invokes. The caller's "segment" of the deoptimization continuation
    // attached to the newly inlined @llvm.experimental_deoptimize
    // (resp. @llvm.experimental.guard) call should contain the exception
    // handling logic, if any.
    if (auto *F = CI->getCalledFunction())
      if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
          F->getIntrinsicID() == Intrinsic::experimental_guard)
        continue;

    if (auto FuncletBundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
      // This call is nested inside a funclet. If that funclet has an unwind
      // destination within the inlinee, then unwinding out of this call would
      // be UB. Rewriting this call to an invoke which targets the inlined
      // invoke's unwind dest would give the call's parent funclet multiple
      // unwind destinations, which is something that subsequent EH table
      // generation can't handle and that the verifier rejects. So when we
      // see such a call, leave it as a call.
521 auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]); 522 Value *UnwindDestToken = 523 getUnwindDestToken(FuncletPad, *FuncletUnwindMap); 524 if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken)) 525 continue; 526 #ifndef NDEBUG 527 Instruction *MemoKey; 528 if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad)) 529 MemoKey = CatchPad->getCatchSwitch(); 530 else 531 MemoKey = FuncletPad; 532 assert(FuncletUnwindMap->count(MemoKey) && 533 (*FuncletUnwindMap)[MemoKey] == UnwindDestToken && 534 "must get memoized to avoid confusing later searches"); 535 #endif // NDEBUG 536 } 537 538 // Convert this function call into an invoke instruction. First, split the 539 // basic block. 540 BasicBlock *Split = 541 BB->splitBasicBlock(CI->getIterator(), CI->getName() + ".noexc"); 542 543 // Delete the unconditional branch inserted by splitBasicBlock 544 BB->getInstList().pop_back(); 545 546 // Create the new invoke instruction. 547 SmallVector<Value*, 8> InvokeArgs(CI->arg_begin(), CI->arg_end()); 548 SmallVector<OperandBundleDef, 1> OpBundles; 549 550 CI->getOperandBundlesAsDefs(OpBundles); 551 552 // Note: we're round tripping operand bundles through memory here, and that 553 // can potentially be avoided with a cleverer API design that we do not have 554 // as of this time. 555 556 InvokeInst *II = 557 InvokeInst::Create(CI->getCalledValue(), Split, UnwindEdge, InvokeArgs, 558 OpBundles, CI->getName(), BB); 559 II->setDebugLoc(CI->getDebugLoc()); 560 II->setCallingConv(CI->getCallingConv()); 561 II->setAttributes(CI->getAttributes()); 562 563 // Make sure that anything using the call now uses the invoke! This also 564 // updates the CallGraph if present, because it uses a WeakVH. 565 CI->replaceAllUsesWith(II); 566 567 // Delete the original call 568 Split->getInstList().pop_front(); 569 return BB; 570 } 571 return nullptr; 572 } 573 574 /// If we inlined an invoke site, we need to convert calls 575 /// in the body of the inlined function into invokes. 576 /// 577 /// II is the invoke instruction being inlined. FirstNewBlock is the first 578 /// block of the inlined code (the last block is the end of the function), 579 /// and InlineCodeInfo is information about the code that got inlined. 580 static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock, 581 ClonedCodeInfo &InlinedCodeInfo) { 582 BasicBlock *InvokeDest = II->getUnwindDest(); 583 584 Function *Caller = FirstNewBlock->getParent(); 585 586 // The inlined code is currently at the end of the function, scan from the 587 // start of the inlined code to its end, checking for stuff we need to 588 // rewrite. 589 LandingPadInliningInfo Invoke(II); 590 591 // Get all of the inlined landing pad instructions. 592 SmallPtrSet<LandingPadInst*, 16> InlinedLPads; 593 for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end(); 594 I != E; ++I) 595 if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) 596 InlinedLPads.insert(II->getLandingPadInst()); 597 598 // Append the clauses from the outer landing pad instruction into the inlined 599 // landing pad instructions. 
600 LandingPadInst *OuterLPad = Invoke.getLandingPadInst(); 601 for (LandingPadInst *InlinedLPad : InlinedLPads) { 602 unsigned OuterNum = OuterLPad->getNumClauses(); 603 InlinedLPad->reserveClauses(OuterNum); 604 for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx) 605 InlinedLPad->addClause(OuterLPad->getClause(OuterIdx)); 606 if (OuterLPad->isCleanup()) 607 InlinedLPad->setCleanup(true); 608 } 609 610 for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end(); 611 BB != E; ++BB) { 612 if (InlinedCodeInfo.ContainsCalls) 613 if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke( 614 &*BB, Invoke.getOuterResumeDest())) 615 // Update any PHI nodes in the exceptional block to indicate that there 616 // is now a new entry in them. 617 Invoke.addIncomingPHIValuesFor(NewBB); 618 619 // Forward any resumes that are remaining here. 620 if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator())) 621 Invoke.forwardResume(RI, InlinedLPads); 622 } 623 624 // Now that everything is happy, we have one final detail. The PHI nodes in 625 // the exception destination block still have entries due to the original 626 // invoke instruction. Eliminate these entries (which might even delete the 627 // PHI node) now. 628 InvokeDest->removePredecessor(II->getParent()); 629 } 630 631 /// If we inlined an invoke site, we need to convert calls 632 /// in the body of the inlined function into invokes. 633 /// 634 /// II is the invoke instruction being inlined. FirstNewBlock is the first 635 /// block of the inlined code (the last block is the end of the function), 636 /// and InlineCodeInfo is information about the code that got inlined. 637 static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock, 638 ClonedCodeInfo &InlinedCodeInfo) { 639 BasicBlock *UnwindDest = II->getUnwindDest(); 640 Function *Caller = FirstNewBlock->getParent(); 641 642 assert(UnwindDest->getFirstNonPHI()->isEHPad() && "unexpected BasicBlock!"); 643 644 // If there are PHI nodes in the unwind destination block, we need to keep 645 // track of which values came into them from the invoke before removing the 646 // edge from this block. 647 SmallVector<Value *, 8> UnwindDestPHIValues; 648 llvm::BasicBlock *InvokeBB = II->getParent(); 649 for (Instruction &I : *UnwindDest) { 650 // Save the value to use for this edge. 651 PHINode *PHI = dyn_cast<PHINode>(&I); 652 if (!PHI) 653 break; 654 UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB)); 655 } 656 657 // Add incoming-PHI values to the unwind destination block for the given basic 658 // block, using the values for the original invoke's source block. 659 auto UpdatePHINodes = [&](BasicBlock *Src) { 660 BasicBlock::iterator I = UnwindDest->begin(); 661 for (Value *V : UnwindDestPHIValues) { 662 PHINode *PHI = cast<PHINode>(I); 663 PHI->addIncoming(V, Src); 664 ++I; 665 } 666 }; 667 668 // This connects all the instructions which 'unwind to caller' to the invoke 669 // destination. 
  UnwindDestMemoTy FuncletUnwindMap;
  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
      if (CRI->unwindsToCaller()) {
        auto *CleanupPad = CRI->getCleanupPad();
        CleanupReturnInst::Create(CleanupPad, UnwindDest, CRI);
        CRI->eraseFromParent();
        UpdatePHINodes(&*BB);
        // Finding a cleanupret with an unwind destination would confuse
        // subsequent calls to getUnwindDestToken, so map the cleanuppad
        // to short-circuit any such calls and recognize this as an "unwind
        // to caller" cleanup.
        assert(!FuncletUnwindMap.count(CleanupPad) ||
               isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
        FuncletUnwindMap[CleanupPad] =
            ConstantTokenNone::get(Caller->getContext());
      }
    }

    Instruction *I = BB->getFirstNonPHI();
    if (!I->isEHPad())
      continue;

    Instruction *Replacement = nullptr;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
      if (CatchSwitch->unwindsToCaller()) {
        Value *UnwindDestToken;
        if (auto *ParentPad =
                dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
          // This catchswitch is nested inside another funclet. If that
          // funclet has an unwind destination within the inlinee, then
          // unwinding out of this catchswitch would be UB. Rewriting this
          // catchswitch to unwind to the inlined invoke's unwind dest would
          // give the parent funclet multiple unwind destinations, which is
          // something that subsequent EH table generation can't handle and
          // that the verifier rejects. So when we see such a catchswitch,
          // leave it as "unwind to caller".
          UnwindDestToken = getUnwindDestToken(ParentPad, FuncletUnwindMap);
          if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
            continue;
        } else {
          // This catchswitch has no parent to inherit constraints from, and
          // none of its descendants can have an unwind edge that exits it and
          // targets another funclet in the inlinee. It may or may not have a
          // descendant that definitively has an unwind to caller. In either
          // case, we'll have to assume that any unwinds out of it may need to
          // be routed to the caller, so treat it as though it has a definitive
          // unwind to caller.
          UnwindDestToken = ConstantTokenNone::get(Caller->getContext());
        }
        auto *NewCatchSwitch = CatchSwitchInst::Create(
            CatchSwitch->getParentPad(), UnwindDest,
            CatchSwitch->getNumHandlers(), CatchSwitch->getName(),
            CatchSwitch);
        for (BasicBlock *PadBB : CatchSwitch->handlers())
          NewCatchSwitch->addHandler(PadBB);
        // Propagate info for the old catchswitch over to the new one in
        // the unwind map. This also serves to short-circuit any subsequent
        // checks for the unwind dest of this catchswitch, which would get
        // confused if they found the outer handler in the callee.
        FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
        Replacement = NewCatchSwitch;
      }
    } else if (!isa<FuncletPadInst>(I)) {
      llvm_unreachable("unexpected EHPad!");
    }

    if (Replacement) {
      Replacement->takeName(I);
      I->replaceAllUsesWith(Replacement);
      I->eraseFromParent();
      UpdatePHINodes(&*BB);
    }
  }

  if (InlinedCodeInfo.ContainsCalls)
    for (Function::iterator BB = FirstNewBlock->getIterator(),
                            E = Caller->end();
         BB != E; ++BB)
      if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
              &*BB, UnwindDest, &FuncletUnwindMap))
        // Update any PHI nodes in the exceptional block to indicate that there
        // is now a new entry in them.
        UpdatePHINodes(NewBB);

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  UnwindDest->removePredecessor(InvokeBB);
}

/// When inlining a call site that has !llvm.mem.parallel_loop_access metadata,
/// that metadata should be propagated to all memory-accessing cloned
/// instructions.
static void PropagateParallelLoopAccessMetadata(CallSite CS,
                                                ValueToValueMapTy &VMap) {
  MDNode *M =
      CS.getInstruction()->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
  if (!M)
    return;

  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (!VMI->second)
      continue;

    Instruction *NI = dyn_cast<Instruction>(VMI->second);
    if (!NI)
      continue;

    if (MDNode *PM = NI->getMetadata(LLVMContext::MD_mem_parallel_loop_access)) {
      M = MDNode::concatenate(PM, M);
      NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
    } else if (NI->mayReadOrWriteMemory()) {
      NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
    }
  }
}

/// When inlining a function that contains noalias scope metadata,
/// this metadata needs to be cloned so that the inlined blocks
/// have different "unique scopes" at every call site. Were this not done, then
/// aliasing scopes from a function inlined into a caller multiple times could
/// not be differentiated (and this would lead to miscompiles because the
/// non-aliasing property communicated by the metadata could have
/// call-site-specific control dependencies).
static void CloneAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap) {
  const Function *CalledFunc = CS.getCalledFunction();
  SetVector<const MDNode *> MD;

  // Note: We could only clone the metadata if it is already used in the
  // caller. I'm omitting that check here because it might confuse
  // inter-procedural alias analysis passes. We can revisit this if it becomes
  // an efficiency or overhead problem.

  for (const BasicBlock &I : *CalledFunc)
    for (const Instruction &J : I) {
      if (const MDNode *M = J.getMetadata(LLVMContext::MD_alias_scope))
        MD.insert(M);
      if (const MDNode *M = J.getMetadata(LLVMContext::MD_noalias))
        MD.insert(M);
    }

  if (MD.empty())
    return;

  // Walk the existing metadata, adding the complete (perhaps cyclic) chain to
  // the set.
  SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
  while (!Queue.empty()) {
    const MDNode *M = cast<MDNode>(Queue.pop_back_val());
    for (unsigned i = 0, ie = M->getNumOperands(); i != ie; ++i)
      if (const MDNode *M1 = dyn_cast<MDNode>(M->getOperand(i)))
        if (MD.insert(M1))
          Queue.push_back(M1);
  }

  // Now we have a complete set of all metadata in the chains used to specify
  // the noalias scopes and the lists of those scopes.
  SmallVector<TempMDTuple, 16> DummyNodes;
  DenseMap<const MDNode *, TrackingMDNodeRef> MDMap;
  for (const MDNode *I : MD) {
    DummyNodes.push_back(MDTuple::getTemporary(CalledFunc->getContext(), None));
    MDMap[I].reset(DummyNodes.back().get());
  }

  // Create new metadata nodes to replace the dummy nodes, replacing old
  // metadata references with either a dummy node or an already-created new
  // node.
  for (const MDNode *I : MD) {
    SmallVector<Metadata *, 4> NewOps;
    for (unsigned i = 0, ie = I->getNumOperands(); i != ie; ++i) {
      const Metadata *V = I->getOperand(i);
      if (const MDNode *M = dyn_cast<MDNode>(V))
        NewOps.push_back(MDMap[M]);
      else
        NewOps.push_back(const_cast<Metadata *>(V));
    }

    MDNode *NewM = MDNode::get(CalledFunc->getContext(), NewOps);
    MDTuple *TempM = cast<MDTuple>(MDMap[I]);
    assert(TempM->isTemporary() && "Expected temporary node");

    TempM->replaceAllUsesWith(NewM);
  }

  // Now replace the metadata in the new inlined instructions with the
  // replacements from the map.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (!VMI->second)
      continue;

    Instruction *NI = dyn_cast<Instruction>(VMI->second);
    if (!NI)
      continue;

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_alias_scope)) {
      MDNode *NewMD = MDMap[M];
      // If the call site also had alias scope metadata (a list of scopes to
      // which instructions inside it might belong), propagate those scopes to
      // the inlined instructions.
      if (MDNode *CSM =
              CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
        NewMD = MDNode::concatenate(NewMD, CSM);
      NI->setMetadata(LLVMContext::MD_alias_scope, NewMD);
    } else if (NI->mayReadOrWriteMemory()) {
      if (MDNode *M =
              CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
        NI->setMetadata(LLVMContext::MD_alias_scope, M);
    }

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_noalias)) {
      MDNode *NewMD = MDMap[M];
      // If the call site also had noalias metadata (a list of scopes with
      // which instructions inside it don't alias), propagate those scopes to
      // the inlined instructions.
      if (MDNode *CSM =
              CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
        NewMD = MDNode::concatenate(NewMD, CSM);
      NI->setMetadata(LLVMContext::MD_noalias, NewMD);
    } else if (NI->mayReadOrWriteMemory()) {
      if (MDNode *M = CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
        NI->setMetadata(LLVMContext::MD_noalias, M);
    }
  }
}

/// If the inlined function has noalias arguments,
/// then add new alias scopes for each noalias argument, tag the mapped noalias
/// parameters with noalias metadata specifying the new scope, and tag all
/// non-derived loads, stores and memory intrinsics with the new alias scopes.
904 static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap, 905 const DataLayout &DL, AAResults *CalleeAAR) { 906 if (!EnableNoAliasConversion) 907 return; 908 909 const Function *CalledFunc = CS.getCalledFunction(); 910 SmallVector<const Argument *, 4> NoAliasArgs; 911 912 for (const Argument &Arg : CalledFunc->args()) 913 if (Arg.hasNoAliasAttr() && !Arg.use_empty()) 914 NoAliasArgs.push_back(&Arg); 915 916 if (NoAliasArgs.empty()) 917 return; 918 919 // To do a good job, if a noalias variable is captured, we need to know if 920 // the capture point dominates the particular use we're considering. 921 DominatorTree DT; 922 DT.recalculate(const_cast<Function&>(*CalledFunc)); 923 924 // noalias indicates that pointer values based on the argument do not alias 925 // pointer values which are not based on it. So we add a new "scope" for each 926 // noalias function argument. Accesses using pointers based on that argument 927 // become part of that alias scope, accesses using pointers not based on that 928 // argument are tagged as noalias with that scope. 929 930 DenseMap<const Argument *, MDNode *> NewScopes; 931 MDBuilder MDB(CalledFunc->getContext()); 932 933 // Create a new scope domain for this function. 934 MDNode *NewDomain = 935 MDB.createAnonymousAliasScopeDomain(CalledFunc->getName()); 936 for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) { 937 const Argument *A = NoAliasArgs[i]; 938 939 std::string Name = CalledFunc->getName(); 940 if (A->hasName()) { 941 Name += ": %"; 942 Name += A->getName(); 943 } else { 944 Name += ": argument "; 945 Name += utostr(i); 946 } 947 948 // Note: We always create a new anonymous root here. This is true regardless 949 // of the linkage of the callee because the aliasing "scope" is not just a 950 // property of the callee, but also all control dependencies in the caller. 951 MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name); 952 NewScopes.insert(std::make_pair(A, NewScope)); 953 } 954 955 // Iterate over all new instructions in the map; for all memory-access 956 // instructions, add the alias scope metadata. 957 for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end(); 958 VMI != VMIE; ++VMI) { 959 if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) { 960 if (!VMI->second) 961 continue; 962 963 Instruction *NI = dyn_cast<Instruction>(VMI->second); 964 if (!NI) 965 continue; 966 967 bool IsArgMemOnlyCall = false, IsFuncCall = false; 968 SmallVector<const Value *, 2> PtrArgs; 969 970 if (const LoadInst *LI = dyn_cast<LoadInst>(I)) 971 PtrArgs.push_back(LI->getPointerOperand()); 972 else if (const StoreInst *SI = dyn_cast<StoreInst>(I)) 973 PtrArgs.push_back(SI->getPointerOperand()); 974 else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I)) 975 PtrArgs.push_back(VAAI->getPointerOperand()); 976 else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I)) 977 PtrArgs.push_back(CXI->getPointerOperand()); 978 else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) 979 PtrArgs.push_back(RMWI->getPointerOperand()); 980 else if (ImmutableCallSite ICS = ImmutableCallSite(I)) { 981 // If we know that the call does not access memory, then we'll still 982 // know that about the inlined clone of this call site, and we don't 983 // need to add metadata. 
        if (ICS.doesNotAccessMemory())
          continue;

        IsFuncCall = true;
        if (CalleeAAR) {
          FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(ICS);
          if (MRB == FMRB_OnlyAccessesArgumentPointees ||
              MRB == FMRB_OnlyReadsArgumentPointees)
            IsArgMemOnlyCall = true;
        }

        for (Value *Arg : ICS.args()) {
          // We need to check the underlying objects of all arguments, not just
          // the pointer arguments, because we might be passing pointers as
          // integers, etc.
          // However, if we know that the call only accesses pointer arguments,
          // then we only need to check the pointer arguments.
          if (IsArgMemOnlyCall && !Arg->getType()->isPointerTy())
            continue;

          PtrArgs.push_back(Arg);
        }
      }

      // If we found no pointers, then this instruction is not suitable for
      // pairing with an instruction to receive aliasing metadata.
      // However, if this is a call, then we might just alias with none of the
      // noalias arguments.
      if (PtrArgs.empty() && !IsFuncCall)
        continue;

      // It is possible that there is only one underlying object, but you
      // need to go through several PHIs to see it, and thus could be
      // repeated in the Objects list.
      SmallPtrSet<const Value *, 4> ObjSet;
      SmallVector<Metadata *, 4> Scopes, NoAliases;

      SmallSetVector<const Argument *, 4> NAPtrArgs;
      for (const Value *V : PtrArgs) {
        SmallVector<Value *, 4> Objects;
        GetUnderlyingObjects(const_cast<Value*>(V),
                             Objects, DL, /* LI = */ nullptr);

        for (Value *O : Objects)
          ObjSet.insert(O);
      }

      // Figure out if we're derived from anything that is not a noalias
      // argument.
      bool CanDeriveViaCapture = false, UsesAliasingPtr = false;
      for (const Value *V : ObjSet) {
        // Is this value a constant that cannot be derived from any pointer
        // value (we need to exclude constant expressions, for example, that
        // are formed from arithmetic on global symbols).
        bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
                             isa<ConstantPointerNull>(V) ||
                             isa<ConstantDataVector>(V) || isa<UndefValue>(V);
        if (IsNonPtrConst)
          continue;

        // If this is anything other than a noalias argument, then we cannot
        // completely describe the aliasing properties using alias.scope
        // metadata (and, thus, won't add any).
        if (const Argument *A = dyn_cast<Argument>(V)) {
          if (!A->hasNoAliasAttr())
            UsesAliasingPtr = true;
        } else {
          UsesAliasingPtr = true;
        }

        // If this is not some identified function-local object (which cannot
        // directly alias a noalias argument), or some other argument (which,
        // by definition, also cannot alias a noalias argument), then we could
        // alias a noalias argument that has been captured.
        if (!isa<Argument>(V) &&
            !isIdentifiedFunctionLocal(const_cast<Value*>(V)))
          CanDeriveViaCapture = true;
      }

      // A function call can always get captured noalias pointers (via other
      // parameters, globals, etc.).
      if (IsFuncCall && !IsArgMemOnlyCall)
        CanDeriveViaCapture = true;

      // First, we want to figure out all of the sets with which we definitely
      // don't alias. Iterate over all noalias sets, and add those for which:
      //   1. The noalias argument is not in the set of objects from which we
      //      definitely derive.
      //   2. The noalias argument has not yet been captured.
1073 // An arbitrary function that might load pointers could see captured 1074 // noalias arguments via other noalias arguments or globals, and so we 1075 // must always check for prior capture. 1076 for (const Argument *A : NoAliasArgs) { 1077 if (!ObjSet.count(A) && (!CanDeriveViaCapture || 1078 // It might be tempting to skip the 1079 // PointerMayBeCapturedBefore check if 1080 // A->hasNoCaptureAttr() is true, but this is 1081 // incorrect because nocapture only guarantees 1082 // that no copies outlive the function, not 1083 // that the value cannot be locally captured. 1084 !PointerMayBeCapturedBefore(A, 1085 /* ReturnCaptures */ false, 1086 /* StoreCaptures */ false, I, &DT))) 1087 NoAliases.push_back(NewScopes[A]); 1088 } 1089 1090 if (!NoAliases.empty()) 1091 NI->setMetadata(LLVMContext::MD_noalias, 1092 MDNode::concatenate( 1093 NI->getMetadata(LLVMContext::MD_noalias), 1094 MDNode::get(CalledFunc->getContext(), NoAliases))); 1095 1096 // Next, we want to figure out all of the sets to which we might belong. 1097 // We might belong to a set if the noalias argument is in the set of 1098 // underlying objects. If there is some non-noalias argument in our list 1099 // of underlying objects, then we cannot add a scope because the fact 1100 // that some access does not alias with any set of our noalias arguments 1101 // cannot itself guarantee that it does not alias with this access 1102 // (because there is some pointer of unknown origin involved and the 1103 // other access might also depend on this pointer). We also cannot add 1104 // scopes to arbitrary functions unless we know they don't access any 1105 // non-parameter pointer-values. 1106 bool CanAddScopes = !UsesAliasingPtr; 1107 if (CanAddScopes && IsFuncCall) 1108 CanAddScopes = IsArgMemOnlyCall; 1109 1110 if (CanAddScopes) 1111 for (const Argument *A : NoAliasArgs) { 1112 if (ObjSet.count(A)) 1113 Scopes.push_back(NewScopes[A]); 1114 } 1115 1116 if (!Scopes.empty()) 1117 NI->setMetadata( 1118 LLVMContext::MD_alias_scope, 1119 MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope), 1120 MDNode::get(CalledFunc->getContext(), Scopes))); 1121 } 1122 } 1123 } 1124 1125 /// If the inlined function has non-byval align arguments, then 1126 /// add @llvm.assume-based alignment assumptions to preserve this information. 1127 static void AddAlignmentAssumptions(CallSite CS, InlineFunctionInfo &IFI) { 1128 if (!PreserveAlignmentAssumptions || !IFI.GetAssumptionCache) 1129 return; 1130 AssumptionCache *AC = IFI.GetAssumptionCache 1131 ? &(*IFI.GetAssumptionCache)(*CS.getCaller()) 1132 : nullptr; 1133 auto &DL = CS.getCaller()->getParent()->getDataLayout(); 1134 1135 // To avoid inserting redundant assumptions, we should check for assumptions 1136 // already in the caller. To do this, we might need a DT of the caller. 1137 DominatorTree DT; 1138 bool DTCalculated = false; 1139 1140 Function *CalledFunc = CS.getCalledFunction(); 1141 for (Function::arg_iterator I = CalledFunc->arg_begin(), 1142 E = CalledFunc->arg_end(); 1143 I != E; ++I) { 1144 unsigned Align = I->getType()->isPointerTy() ? I->getParamAlignment() : 0; 1145 if (Align && !I->hasByValOrInAllocaAttr() && !I->hasNUses(0)) { 1146 if (!DTCalculated) { 1147 DT.recalculate(const_cast<Function&>(*CS.getInstruction()->getParent() 1148 ->getParent())); 1149 DTCalculated = true; 1150 } 1151 1152 // If we can already prove the asserted alignment in the context of the 1153 // caller, then don't bother inserting the assumption. 
1154 Value *Arg = CS.getArgument(I->getArgNo()); 1155 if (getKnownAlignment(Arg, DL, CS.getInstruction(), AC, &DT) >= Align) 1156 continue; 1157 1158 CallInst *NewAssumption = IRBuilder<>(CS.getInstruction()) 1159 .CreateAlignmentAssumption(DL, Arg, Align); 1160 if (AC) 1161 AC->registerAssumption(NewAssumption); 1162 } 1163 } 1164 } 1165 1166 /// Once we have cloned code over from a callee into the caller, 1167 /// update the specified callgraph to reflect the changes we made. 1168 /// Note that it's possible that not all code was copied over, so only 1169 /// some edges of the callgraph may remain. 1170 static void UpdateCallGraphAfterInlining(CallSite CS, 1171 Function::iterator FirstNewBlock, 1172 ValueToValueMapTy &VMap, 1173 InlineFunctionInfo &IFI) { 1174 CallGraph &CG = *IFI.CG; 1175 const Function *Caller = CS.getInstruction()->getParent()->getParent(); 1176 const Function *Callee = CS.getCalledFunction(); 1177 CallGraphNode *CalleeNode = CG[Callee]; 1178 CallGraphNode *CallerNode = CG[Caller]; 1179 1180 // Since we inlined some uninlined call sites in the callee into the caller, 1181 // add edges from the caller to all of the callees of the callee. 1182 CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end(); 1183 1184 // Consider the case where CalleeNode == CallerNode. 1185 CallGraphNode::CalledFunctionsVector CallCache; 1186 if (CalleeNode == CallerNode) { 1187 CallCache.assign(I, E); 1188 I = CallCache.begin(); 1189 E = CallCache.end(); 1190 } 1191 1192 for (; I != E; ++I) { 1193 const Value *OrigCall = I->first; 1194 1195 ValueToValueMapTy::iterator VMI = VMap.find(OrigCall); 1196 // Only copy the edge if the call was inlined! 1197 if (VMI == VMap.end() || VMI->second == nullptr) 1198 continue; 1199 1200 // If the call was inlined, but then constant folded, there is no edge to 1201 // add. Check for this case. 1202 Instruction *NewCall = dyn_cast<Instruction>(VMI->second); 1203 if (!NewCall) 1204 continue; 1205 1206 // We do not treat intrinsic calls like real function calls because we 1207 // expect them to become inline code; do not add an edge for an intrinsic. 1208 CallSite CS = CallSite(NewCall); 1209 if (CS && CS.getCalledFunction() && CS.getCalledFunction()->isIntrinsic()) 1210 continue; 1211 1212 // Remember that this call site got inlined for the client of 1213 // InlineFunction. 1214 IFI.InlinedCalls.push_back(NewCall); 1215 1216 // It's possible that inlining the callsite will cause it to go from an 1217 // indirect to a direct call by resolving a function pointer. If this 1218 // happens, set the callee of the new call site to a more precise 1219 // destination. This can also happen if the call graph node of the caller 1220 // was just unnecessarily imprecise. 1221 if (!I->second->getFunction()) 1222 if (Function *F = CallSite(NewCall).getCalledFunction()) { 1223 // Indirect call site resolved to direct call. 1224 CallerNode->addCalledFunction(CallSite(NewCall), CG[F]); 1225 1226 continue; 1227 } 1228 1229 CallerNode->addCalledFunction(CallSite(NewCall), I->second); 1230 } 1231 1232 // Update the call graph by deleting the edge from Callee to Caller. We must 1233 // do this after the loop above in case Caller and Callee are the same. 
1234 CallerNode->removeCallEdgeFor(CS); 1235 } 1236 1237 static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M, 1238 BasicBlock *InsertBlock, 1239 InlineFunctionInfo &IFI) { 1240 Type *AggTy = cast<PointerType>(Src->getType())->getElementType(); 1241 IRBuilder<> Builder(InsertBlock, InsertBlock->begin()); 1242 1243 Value *Size = Builder.getInt64(M->getDataLayout().getTypeStoreSize(AggTy)); 1244 1245 // Always generate a memcpy of alignment 1 here because we don't know 1246 // the alignment of the src pointer. Other optimizations can infer 1247 // better alignment. 1248 Builder.CreateMemCpy(Dst, Src, Size, /*Align=*/1); 1249 } 1250 1251 /// When inlining a call site that has a byval argument, 1252 /// we have to make the implicit memcpy explicit by adding it. 1253 static Value *HandleByValArgument(Value *Arg, Instruction *TheCall, 1254 const Function *CalledFunc, 1255 InlineFunctionInfo &IFI, 1256 unsigned ByValAlignment) { 1257 PointerType *ArgTy = cast<PointerType>(Arg->getType()); 1258 Type *AggTy = ArgTy->getElementType(); 1259 1260 Function *Caller = TheCall->getParent()->getParent(); 1261 1262 // If the called function is readonly, then it could not mutate the caller's 1263 // copy of the byval'd memory. In this case, it is safe to elide the copy and 1264 // temporary. 1265 if (CalledFunc->onlyReadsMemory()) { 1266 // If the byval argument has a specified alignment that is greater than the 1267 // passed in pointer, then we either have to round up the input pointer or 1268 // give up on this transformation. 1269 if (ByValAlignment <= 1) // 0 = unspecified, 1 = no particular alignment. 1270 return Arg; 1271 1272 AssumptionCache *AC = 1273 IFI.GetAssumptionCache ? &(*IFI.GetAssumptionCache)(*Caller) : nullptr; 1274 const DataLayout &DL = Caller->getParent()->getDataLayout(); 1275 1276 // If the pointer is already known to be sufficiently aligned, or if we can 1277 // round it up to a larger alignment, then we don't need a temporary. 1278 if (getOrEnforceKnownAlignment(Arg, ByValAlignment, DL, TheCall, AC) >= 1279 ByValAlignment) 1280 return Arg; 1281 1282 // Otherwise, we have to make a memcpy to get a safe alignment. This is bad 1283 // for code quality, but rarely happens and is required for correctness. 1284 } 1285 1286 // Create the alloca. If we have DataLayout, use nice alignment. 1287 unsigned Align = 1288 Caller->getParent()->getDataLayout().getPrefTypeAlignment(AggTy); 1289 1290 // If the byval had an alignment specified, we *must* use at least that 1291 // alignment, as it is required by the byval argument (and uses of the 1292 // pointer inside the callee). 1293 Align = std::max(Align, ByValAlignment); 1294 1295 Value *NewAlloca = new AllocaInst(AggTy, nullptr, Align, Arg->getName(), 1296 &*Caller->begin()->begin()); 1297 IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca)); 1298 1299 // Uses of the argument in the function should use our new alloca 1300 // instead. 1301 return NewAlloca; 1302 } 1303 1304 // Check whether this Value is used by a lifetime intrinsic. 1305 static bool isUsedByLifetimeMarker(Value *V) { 1306 for (User *U : V->users()) { 1307 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) { 1308 switch (II->getIntrinsicID()) { 1309 default: break; 1310 case Intrinsic::lifetime_start: 1311 case Intrinsic::lifetime_end: 1312 return true; 1313 } 1314 } 1315 } 1316 return false; 1317 } 1318 1319 // Check whether the given alloca already has 1320 // lifetime.start or lifetime.end intrinsics. 
static bool hasLifetimeMarkers(AllocaInst *AI) {
  Type *Ty = AI->getType();
  Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
                                       Ty->getPointerAddressSpace());
  if (Ty == Int8PtrTy)
    return isUsedByLifetimeMarker(AI);

  // Do a scan to find all the casts to i8*.
  for (User *U : AI->users()) {
    if (U->getType() != Int8PtrTy) continue;
    if (U->stripPointerCasts() != AI) continue;
    if (isUsedByLifetimeMarker(U))
      return true;
  }
  return false;
}

/// Rebuild the entire inlined-at chain for this instruction so that the top of
/// the chain now is inlined-at the new call site.
static DebugLoc
updateInlinedAtInfo(const DebugLoc &DL, DILocation *InlinedAtNode,
                    LLVMContext &Ctx,
                    DenseMap<const DILocation *, DILocation *> &IANodes) {
  SmallVector<DILocation *, 3> InlinedAtLocations;
  DILocation *Last = InlinedAtNode;
  DILocation *CurInlinedAt = DL;

  // Gather all the inlined-at nodes.
  while (DILocation *IA = CurInlinedAt->getInlinedAt()) {
    // Skip any we've already built nodes for.
    if (DILocation *Found = IANodes[IA]) {
      Last = Found;
      break;
    }

    InlinedAtLocations.push_back(IA);
    CurInlinedAt = IA;
  }

  // Starting from the top, rebuild the nodes to point to the new inlined-at
  // location (then rebuilding the rest of the chain behind it) and update the
  // map of already-constructed inlined-at nodes.
  for (const DILocation *MD : reverse(InlinedAtLocations)) {
    Last = IANodes[MD] = DILocation::getDistinct(
        Ctx, MD->getLine(), MD->getColumn(), MD->getScope(), Last);
  }

  // And finally create the normal location for this instruction, referring to
  // the new inlined-at chain.
  return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(), Last);
}

/// Return the result of AI->isStaticAlloca() if AI were moved to the entry
/// block. Allocas used in inalloca calls and allocas of dynamic array size
/// cannot be static.
static bool allocaWouldBeStaticInEntry(const AllocaInst *AI) {
  return isa<Constant>(AI->getArraySize()) && !AI->isUsedWithInAlloca();
}

/// Update inlined instructions' line numbers to encode the location where
/// these instructions are inlined.
static void fixupLineNumbers(Function *Fn, Function::iterator FI,
                             Instruction *TheCall) {
  const DebugLoc &TheCallDL = TheCall->getDebugLoc();
  if (!TheCallDL)
    return;

  auto &Ctx = Fn->getContext();
  DILocation *InlinedAtNode = TheCallDL;

  // Create a unique call site, not to be confused with any other call from the
  // same location.
  InlinedAtNode = DILocation::getDistinct(
      Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
      InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());

  // Cache the inlined-at nodes as they're built so they are reused; without
  // this, every instruction's inlined-at chain would become distinct from the
  // others.
  DenseMap<const DILocation *, DILocation *> IANodes;

  for (; FI != Fn->end(); ++FI) {
    for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
         BI != BE; ++BI) {
      DebugLoc DL = BI->getDebugLoc();
      if (!DL) {
        // If the inlined instruction has no line number, make it look as if it
        // originates from the call location.
        // This is important for ((__always_inline__, __nodebug__)) functions
        // which must use caller location for all instructions in their
        // function body.

        // Don't update static allocas, as they may get moved later.
        if (auto *AI = dyn_cast<AllocaInst>(BI))
          if (allocaWouldBeStaticInEntry(AI))
            continue;

        BI->setDebugLoc(TheCallDL);
      } else {
        BI->setDebugLoc(updateInlinedAtInfo(DL, InlinedAtNode,
                                            BI->getContext(), IANodes));
      }
    }
  }
}

/// This function inlines the called function into the basic block of the
/// caller. This returns false if it is not possible to inline this call.
/// The program is still in a well defined state if this occurs though.
///
/// Note that this only does one level of inlining. For example, if the
/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
/// exists in the instruction stream. Similarly this will inline a recursive
/// function by one level.
bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
                          AAResults *CalleeAAR, bool InsertLifetime) {
  Instruction *TheCall = CS.getInstruction();
  assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
         "Instruction not in function!");

  // If IFI has any state in it, zap it before we fill it in.
  IFI.reset();

  const Function *CalledFunc = CS.getCalledFunction();
  if (!CalledFunc ||              // Can't inline external function or indirect
      CalledFunc->isDeclaration() || // call, or call to a vararg function!
      CalledFunc->getFunctionType()->isVarArg()) return false;

  // The inliner does not know how to inline through calls with operand bundles
  // in general ...
  if (CS.hasOperandBundles()) {
    for (int i = 0, e = CS.getNumOperandBundles(); i != e; ++i) {
      uint32_t Tag = CS.getOperandBundleAt(i).getTagID();
      // ... but it knows how to inline through "deopt" operand bundles ...
      if (Tag == LLVMContext::OB_deopt)
        continue;
      // ... and "funclet" operand bundles.
      if (Tag == LLVMContext::OB_funclet)
        continue;

      return false;
    }
  }

  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CS.doesNotThrow();

  BasicBlock *OrigBB = TheCall->getParent();
  Function *Caller = OrigBB->getParent();

  // GC poses two hazards to inlining, which only occur when the callee has GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to
  //     the caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasGC()) {
    if (!Caller->hasGC())
      Caller->setGC(CalledFunc->getGC());
    else if (CalledFunc->getGC() != Caller->getGC())
      return false;
  }

  // Get the personality function from the callee if it contains a landing pad.
  Constant *CalledPersonality =
      CalledFunc->hasPersonalityFn()
          ? CalledFunc->getPersonalityFn()->stripPointerCasts()
          : nullptr;

  // Find the personality function used by the landing pads of the caller. If
  // it exists, then check to see that it matches the personality function used
  // in the callee.
  Constant *CallerPersonality =
      Caller->hasPersonalityFn() ?
          Caller->getPersonalityFn()->stripPointerCasts() : nullptr;
  if (CalledPersonality) {
    if (!CallerPersonality)
      Caller->setPersonalityFn(CalledPersonality);
    // If the personality functions match, then we can perform the
    // inlining. Otherwise, we can't inline.
    // TODO: This isn't 100% true. Some personality functions are proper
    //       supersets of others and can be used in place of the other.
    else if (CalledPersonality != CallerPersonality)
      return false;
  }

  // We need to figure out which funclet the callsite was in so that we may
  // properly nest the callee.
  Instruction *CallSiteEHPad = nullptr;
  if (CallerPersonality) {
    EHPersonality Personality = classifyEHPersonality(CallerPersonality);
    if (isFuncletEHPersonality(Personality)) {
      Optional<OperandBundleUse> ParentFunclet =
          CS.getOperandBundle(LLVMContext::OB_funclet);
      if (ParentFunclet)
        CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());

      // OK, the inlining site is legal. What about the target function?

      if (CallSiteEHPad) {
        if (Personality == EHPersonality::MSVC_CXX) {
          // The MSVC personality cannot tolerate catches getting inlined into
          // cleanup funclets.
          if (isa<CleanupPadInst>(CallSiteEHPad)) {
            // Ok, the call site is within a cleanuppad. Let's check the callee
            // for catchpads.
            for (const BasicBlock &CalledBB : *CalledFunc) {
              if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))
                return false;
            }
          }
        } else if (isAsynchronousEHPersonality(Personality)) {
          // SEH is even less tolerant; there may not be any sort of exceptional
          // funclet in the callee.
          for (const BasicBlock &CalledBB : *CalledFunc) {
            if (CalledBB.isEHPad())
              return false;
          }
        }
      }
    }
  }

  // Determine if we are dealing with a call in an EHPad which does not unwind
  // to caller.
  bool EHPadForCallUnwindsLocally = false;
  if (CallSiteEHPad && CS.isCall()) {
    UnwindDestMemoTy FuncletUnwindMap;
    Value *CallSiteUnwindDestToken =
        getUnwindDestToken(CallSiteEHPad, FuncletUnwindMap);

    EHPadForCallUnwindsLocally =
        CallSiteUnwindDestToken &&
        !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
  }

  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  Function::iterator LastBlock = --Caller->end();

  // Make sure to capture all of the return instructions from the cloned
  // function.
  SmallVector<ReturnInst*, 8> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;

  { // Scope to destroy VMap after cloning.
    ValueToValueMapTy VMap;
    // Keep a list of (dst, src) pairs to emit byval initializations.
    SmallVector<std::pair<Value*, Value*>, 4> ByValInit;

    auto &DL = Caller->getParent()->getDataLayout();

    assert(CalledFunc->arg_size() == CS.arg_size() &&
           "No varargs calls can be inlined!");

    // Calculate the vector of arguments to pass into the function cloner,
    // which matches up the formal to the actual argument values.
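    //
    // For instance (illustrative names): when inlining
    //   %r = call i32 @callee(i32 %v)
    // into a caller, where the callee is "define i32 @callee(i32 %x)", the
    // mapping VMap[%x] = %v makes the cloner rewrite every use of %x into %v.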
    CallSite::arg_iterator AI = CS.arg_begin();
    unsigned ArgNo = 0;
    for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
         E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;

      // When byval arguments are actually inlined, we need to make the copy
      // implied by them explicit. However, we don't do this if the callee is
      // readonly or readnone, because the copy would be unneeded: the callee
      // doesn't modify the struct.
      if (CS.isByValArgument(ArgNo)) {
        ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
                                        CalledFunc->getParamAlignment(ArgNo+1));
        if (ActualArg != *AI)
          ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI));
      }

      VMap[&*I] = ActualArg;
    }

    // Add alignment assumptions if necessary. We do this before the inlined
    // instructions are actually cloned into the caller so that we can easily
    // check what will be known at the start of the inlined code.
    AddAlignmentAssumptions(CS, IFI);

    // We want the inliner to prune the code as it copies. We would LOVE to
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll be
    // happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
                              /*ModuleLevelChanges=*/false, Returns, ".i",
                              &InlinedFunctionInfo, TheCall);

    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    // Inject byval arguments initialization.
    for (std::pair<Value*, Value*> &Init : ByValInit)
      HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(),
                              &*FirstNewBlock, IFI);

    Optional<OperandBundleUse> ParentDeopt =
        CS.getOperandBundle(LLVMContext::OB_deopt);
    if (ParentDeopt) {
      SmallVector<OperandBundleDef, 2> OpDefs;

      for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) {
        Instruction *I = dyn_cast_or_null<Instruction>(VH);
        if (!I) continue;  // instruction was DCE'd or RAUW'ed to undef

        OpDefs.clear();

        CallSite ICS(I);
        OpDefs.reserve(ICS.getNumOperandBundles());

        for (unsigned i = 0, e = ICS.getNumOperandBundles(); i < e; ++i) {
          auto ChildOB = ICS.getOperandBundleAt(i);
          if (ChildOB.getTagID() != LLVMContext::OB_deopt) {
            // If the inlined call has other operand bundles, let them be.
            OpDefs.emplace_back(ChildOB);
            continue;
          }

          // It may be useful to separate this logic (of handling operand
          // bundles) out to a separate "policy" component if this gets crowded.
          // Prepend the parent's deoptimization continuation to the newly
          // inlined call's deoptimization continuation.
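          //
          // For example (illustrative): if the call site being inlined carries
          //   [ "deopt"(i32 10) ]
          // then an inlined "call void @f() [ "deopt"(i32 20) ]" is rewritten
          // below to "call void @f() [ "deopt"(i32 10, i32 20) ]".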
          std::vector<Value *> MergedDeoptArgs;
          MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
                                  ChildOB.Inputs.size());

          MergedDeoptArgs.insert(MergedDeoptArgs.end(),
                                 ParentDeopt->Inputs.begin(),
                                 ParentDeopt->Inputs.end());
          MergedDeoptArgs.insert(MergedDeoptArgs.end(), ChildOB.Inputs.begin(),
                                 ChildOB.Inputs.end());

          OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs));
        }

        Instruction *NewI = nullptr;
        if (isa<CallInst>(I))
          NewI = CallInst::Create(cast<CallInst>(I), OpDefs, I);
        else
          NewI = InvokeInst::Create(cast<InvokeInst>(I), OpDefs, I);

        // Note: the RAUW does the appropriate fixup in VMap, so we need to do
        // this even if the call returns void.
        I->replaceAllUsesWith(NewI);

        VH = nullptr;
        I->eraseFromParent();
      }
    }

    // Update the callgraph if requested.
    if (IFI.CG)
      UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);

    // Update inlined instructions' line number information.
    fixupLineNumbers(Caller, FirstNewBlock, TheCall);

    // Clone existing noalias metadata if necessary.
    CloneAliasScopeMetadata(CS, VMap);

    // Add noalias metadata if necessary.
    AddAliasScopeMetadata(CS, VMap, DL, CalleeAAR);

    // Propagate llvm.mem.parallel_loop_access if necessary.
    PropagateParallelLoopAccessMetadata(CS, VMap);

    // Register any cloned assumptions.
    if (IFI.GetAssumptionCache)
      for (BasicBlock &NewBlock :
           make_range(FirstNewBlock->getIterator(), Caller->end()))
        for (Instruction &I : NewBlock) {
          if (auto *II = dyn_cast<IntrinsicInst>(&I))
            if (II->getIntrinsicID() == Intrinsic::assume)
              (*IFI.GetAssumptionCache)(*Caller).registerAssumption(II);
        }
  }

  // If there are any alloca instructions in the block that used to be the
  // entry block for the callee, move them to the entry block of the caller.
  // First calculate which instruction they should be inserted before. We
  // insert the instructions at the end of the current alloca list.
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
         E = FirstNewBlock->end(); I != E; ) {
      AllocaInst *AI = dyn_cast<AllocaInst>(I++);
      if (!AI) continue;

      // If the alloca is now dead, remove it. This often occurs due to code
      // specialization.
      if (AI->use_empty()) {
        AI->eraseFromParent();
        continue;
      }

      if (!allocaWouldBeStaticInEntry(AI))
        continue;

      // Keep track of the static allocas that we inline into the caller.
      IFI.StaticAllocas.push_back(AI);

      // Scan for the block of allocas that we can move over, and move them
      // all at once.
      while (isa<AllocaInst>(I) &&
             allocaWouldBeStaticInEntry(cast<AllocaInst>(I))) {
        IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
        ++I;
      }

      // Transfer all of the allocas over in a block. Using splice means
      // that the instructions aren't removed from the symbol table, then
      // reinserted.
      Caller->getEntryBlock().getInstList().splice(
          InsertPoint, FirstNewBlock->getInstList(), AI->getIterator(), I);
    }
    // Move any dbg.declares describing the allocas into the entry basic block.
    DIBuilder DIB(*Caller->getParent());
    for (auto &AI : IFI.StaticAllocas)
      replaceDbgDeclareForAlloca(AI, AI, DIB, /*Deref=*/false);
  }

  bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false;
  if (InlinedFunctionInfo.ContainsCalls) {
    CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
    if (CallInst *CI = dyn_cast<CallInst>(TheCall))
      CallSiteTailKind = CI->getTailCallKind();

    for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
         ++BB) {
      for (Instruction &I : *BB) {
        CallInst *CI = dyn_cast<CallInst>(&I);
        if (!CI)
          continue;

        if (Function *F = CI->getCalledFunction())
          InlinedDeoptimizeCalls |=
              F->getIntrinsicID() == Intrinsic::experimental_deoptimize;

        // We need to reduce the strength of any inlined tail calls. For
        // musttail, we have to avoid introducing potential unbounded stack
        // growth. For example, if functions 'f' and 'g' are mutually recursive
        // with musttail, we can inline 'g' into 'f' so long as we preserve
        // musttail on the cloned call to 'f'. If either the inlined call site
        // or the cloned call site is *not* musttail, the program already has
        // one frame of stack growth, so it's safe to remove musttail. Here is
        // a table of example transformations:
        //
        //    f -> musttail g -> musttail f  ==>  f -> musttail f
        //    f -> musttail g ->     tail f  ==>  f ->     tail f
        //    f ->          g -> musttail f  ==>  f ->          f
        //    f ->          g ->     tail f  ==>  f ->          f
        CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
        ChildTCK = std::min(CallSiteTailKind, ChildTCK);
        CI->setTailCallKind(ChildTCK);
        InlinedMustTailCalls |= CI->isMustTailCall();

        // Calls inlined through a 'nounwind' call site should be marked
        // 'nounwind'.
        if (MarkNoUnwind)
          CI->setDoesNotThrow();
      }
    }
  }

  // Leave lifetime markers for the static allocas, scoping them to the
  // function we just inlined.
  if (InsertLifetime && !IFI.StaticAllocas.empty()) {
    IRBuilder<> builder(&FirstNewBlock->front());
    for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
      AllocaInst *AI = IFI.StaticAllocas[ai];
      // Don't mark swifterror allocas. They can't have bitcast uses.
      if (AI->isSwiftError())
        continue;

      // If the alloca is already scoped to something smaller than the whole
      // function then there's no need to add redundant, less accurate markers.
      if (hasLifetimeMarkers(AI))
        continue;

      // Try to determine the size of the allocation.
      ConstantInt *AllocaSize = nullptr;
      if (ConstantInt *AIArraySize =
              dyn_cast<ConstantInt>(AI->getArraySize())) {
        auto &DL = Caller->getParent()->getDataLayout();
        Type *AllocaType = AI->getAllocatedType();
        uint64_t AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
        uint64_t AllocaArraySize = AIArraySize->getLimitedValue();

        // Don't add markers for zero-sized allocas.
        if (AllocaArraySize == 0)
          continue;

        // Check that array size doesn't saturate uint64_t and doesn't
        // overflow when it's multiplied by type size.
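        //
        // For example (illustrative): "alloca i32, i64 10" has
        // AllocaTypeSize = 4 and AllocaArraySize = 10, so an i64 size of 40 is
        // emitted below; if the product could wrap, AllocaSize is left null.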
        if (AllocaArraySize != ~0ULL &&
            UINT64_MAX / AllocaArraySize >= AllocaTypeSize) {
          AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
                                        AllocaArraySize * AllocaTypeSize);
        }
      }

      builder.CreateLifetimeStart(AI, AllocaSize);
      for (ReturnInst *RI : Returns) {
        // Don't insert llvm.lifetime.end calls between a musttail or deoptimize
        // call and a return. The return kills all local allocas.
        if (InlinedMustTailCalls &&
            RI->getParent()->getTerminatingMustTailCall())
          continue;
        if (InlinedDeoptimizeCalls &&
            RI->getParent()->getTerminatingDeoptimizeCall())
          continue;
        IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
      }
    }
  }

  // If the inlined code contained dynamic alloca instructions, wrap the
  // inlined code with llvm.stacksave/llvm.stackrestore intrinsics.
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    // Get the two intrinsics we care about.
    Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    Function *StackRestore =
        Intrinsic::getDeclaration(M, Intrinsic::stackrestore);

    // Insert the llvm.stacksave.
    CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
                             .CreateCall(StackSave, {}, "savedstack");

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
    for (ReturnInst *RI : Returns) {
      // Don't insert llvm.stackrestore calls between a musttail or deoptimize
      // call and a return. The return will restore the stack pointer.
      if (InlinedMustTailCalls &&
          RI->getParent()->getTerminatingMustTailCall())
        continue;
      if (InlinedDeoptimizeCalls &&
          RI->getParent()->getTerminatingDeoptimizeCall())
        continue;
      IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
    }
  }

  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any call instructions into invoke instructions. This is sensitive to which
  // funclet pads were top-level in the inlinee, so must be done before
  // rewriting the "parent pad" links.
  if (auto *II = dyn_cast<InvokeInst>(TheCall)) {
    BasicBlock *UnwindDest = II->getUnwindDest();
    Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI();
    if (isa<LandingPadInst>(FirstNonPHI)) {
      HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo);
    } else {
      HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo);
    }
  }

  // Update the lexical scopes of the new funclets and callsites.
  // Anything that had 'none' as its parent is now nested inside the callsite's
  // EHPad.

  if (CallSiteEHPad) {
    for (Function::iterator BB = FirstNewBlock->getIterator(),
                            E = Caller->end();
         BB != E; ++BB) {
      // Add bundle operands to any top-level call sites.
      SmallVector<OperandBundleDef, 1> OpBundles;
      for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E;) {
        Instruction *I = &*BBI++;
        CallSite CS(I);
        if (!CS)
          continue;

        // Skip call sites which are nounwind intrinsics.
        auto *CalledFn =
            dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
        if (CalledFn && CalledFn->isIntrinsic() && CS.doesNotThrow())
          continue;

        // Skip call sites which already have a "funclet" bundle.
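        // Such a call already records its parent funclet, e.g. (illustrative):
        //   call void @g() [ "funclet"(token %pad) ]
        // so it must not be given a second "funclet" bundle here.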
        if (CS.getOperandBundle(LLVMContext::OB_funclet))
          continue;

        CS.getOperandBundlesAsDefs(OpBundles);
        OpBundles.emplace_back("funclet", CallSiteEHPad);

        Instruction *NewInst;
        if (CS.isCall())
          NewInst = CallInst::Create(cast<CallInst>(I), OpBundles, I);
        else
          NewInst = InvokeInst::Create(cast<InvokeInst>(I), OpBundles, I);
        NewInst->takeName(I);
        I->replaceAllUsesWith(NewInst);
        I->eraseFromParent();

        OpBundles.clear();
      }

      // It is problematic if the inlinee has a cleanupret which unwinds to
      // caller and we inline it into a call site which doesn't unwind but into
      // an EH pad that does. Such an edge must be dynamically unreachable.
      // As such, we replace the cleanupret with unreachable.
      if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
        if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
          changeToUnreachable(CleanupRet, /*UseLLVMTrap=*/false);

      Instruction *I = BB->getFirstNonPHI();
      if (!I->isEHPad())
        continue;

      if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
        if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
          CatchSwitch->setParentPad(CallSiteEHPad);
      } else {
        auto *FPI = cast<FuncletPadInst>(I);
        if (isa<ConstantTokenNone>(FPI->getParentPad()))
          FPI->setParentPad(CallSiteEHPad);
      }
    }
  }

  if (InlinedDeoptimizeCalls) {
    // We need to at least remove the deoptimizing returns from the Returns
    // set, so that control flow from those returns is not merged into the
    // caller (it terminates the caller instead). If the caller's return type
    // does not match the callee's return type, we also need to change the
    // return type of the intrinsic.
    if (Caller->getReturnType() == TheCall->getType()) {
      auto NewEnd = remove_if(Returns, [](ReturnInst *RI) {
        return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr;
      });
      Returns.erase(NewEnd, Returns.end());
    } else {
      SmallVector<ReturnInst *, 8> NormalReturns;
      Function *NewDeoptIntrinsic = Intrinsic::getDeclaration(
          Caller->getParent(), Intrinsic::experimental_deoptimize,
          {Caller->getReturnType()});

      for (ReturnInst *RI : Returns) {
        CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall();
        if (!DeoptCall) {
          NormalReturns.push_back(RI);
          continue;
        }

        // The calling convention on the deoptimize call itself may be bogus,
        // since the code we're inlining may have undefined behavior (and may
        // never actually execute at runtime); but all
        // @llvm.experimental.deoptimize declarations have to have the same
        // calling convention in a well-formed module.
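        //
        // A rough sketch of the rewrite performed below (types and value names
        // hypothetical): a cloned block ending in
        //   call void @llvm.experimental.deoptimize.isVoid(...) [ "deopt"(...) ]
        //   ret void
        // becomes, when the caller returns i32,
        //   %dv = call i32 @llvm.experimental.deoptimize.i32(...) [ "deopt"(...) ]
        //   ret i32 %dv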
        auto CallingConv = DeoptCall->getCalledFunction()->getCallingConv();
        NewDeoptIntrinsic->setCallingConv(CallingConv);
        auto *CurBB = RI->getParent();
        RI->eraseFromParent();

        SmallVector<Value *, 4> CallArgs(DeoptCall->arg_begin(),
                                         DeoptCall->arg_end());

        SmallVector<OperandBundleDef, 1> OpBundles;
        DeoptCall->getOperandBundlesAsDefs(OpBundles);
        DeoptCall->eraseFromParent();
        assert(!OpBundles.empty() &&
               "Expected at least the deopt operand bundle");

        IRBuilder<> Builder(CurBB);
        CallInst *NewDeoptCall =
            Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
        NewDeoptCall->setCallingConv(CallingConv);
        if (NewDeoptCall->getType()->isVoidTy())
          Builder.CreateRetVoid();
        else
          Builder.CreateRet(NewDeoptCall);
      }

      // Leave behind the normal returns so we can merge control flow.
      std::swap(Returns, NormalReturns);
    }
  }

  // Handle any inlined musttail call sites. In order for a new call site to be
  // musttail, the source of the clone and the inlined call site must have been
  // musttail. Therefore it's safe to return without merging control into the
  // phi below.
  if (InlinedMustTailCalls) {
    // Check if we need to bitcast the result of any musttail calls.
    Type *NewRetTy = Caller->getReturnType();
    bool NeedBitCast = !TheCall->use_empty() && TheCall->getType() != NewRetTy;

    // Handle the returns preceded by musttail calls separately.
    SmallVector<ReturnInst *, 8> NormalReturns;
    for (ReturnInst *RI : Returns) {
      CallInst *ReturnedMustTail =
          RI->getParent()->getTerminatingMustTailCall();
      if (!ReturnedMustTail) {
        NormalReturns.push_back(RI);
        continue;
      }
      if (!NeedBitCast)
        continue;

      // Delete the old return and any preceding bitcast.
      BasicBlock *CurBB = RI->getParent();
      auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
      RI->eraseFromParent();
      if (OldCast)
        OldCast->eraseFromParent();

      // Insert a new bitcast and return with the right type.
      IRBuilder<> Builder(CurBB);
      Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
    }

    // Leave behind the normal returns so we can merge control flow.
    std::swap(Returns, NormalReturns);
  }

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->getInstList().splice(TheCall->getIterator(),
                                 FirstNewBlock->getInstList(),
                                 FirstNewBlock->begin(), FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->getBasicBlockList().pop_back();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
      BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);
      NewBr->setDebugLoc(Returns[0]->getDebugLoc());
    }

    // If the return instruction returned a value, replace uses of the call
    // with uses of the returned value.
    if (!TheCall->use_empty()) {
      ReturnInst *R = Returns[0];
      if (TheCall == R->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(R->getReturnValue());
    }
    // Since we are now done with the Call/Invoke, we can delete it.
    TheCall->eraseFromParent();

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->eraseFromParent();

    // We are now done with the inlining.
    return true;
  }

  // Otherwise, we have the normal case, of more than one block to inline or
  // multiple return sites.

  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks. How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
  BasicBlock *AfterCallBB;
  BranchInst *CreatedBranchToNormalDest = nullptr;
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {

    // Add an unconditional branch to make this look like the CallInst case...
    CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), TheCall);

    // Split the basic block. This guarantees that no PHI nodes will have to be
    // updated due to new incoming edges, and makes the invoke case more
    // symmetric to the call case.
    AfterCallBB =
        OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(),
                                CalledFunc->getName() + ".exit");

  } else {  // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    //
    AfterCallBB = OrigBB->splitBasicBlock(TheCall->getIterator(),
                                          CalledFunc->getName() + ".exit");
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  //
  TerminatorInst *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
  Br->setOperand(0, &*FirstNewBlock);

  // Now that the function is correct, make it a little bit nicer. In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->getBasicBlockList().splice(AfterCallBB->getIterator(),
                                     Caller->getBasicBlockList(), FirstNewBlock,
                                     Caller->end());

  // Handle all of the return instructions that we just cloned in, and
  // eliminate any users of the original call/invoke instruction.
  Type *RTy = CalledFunc->getReturnType();

  PHINode *PHI = nullptr;
  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    if (!TheCall->use_empty()) {
      PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(),
                            &AfterCallBB->front());
      // Anything that used the result of the function call should now use the
      // PHI node as its operand.
      TheCall->replaceAllUsesWith(PHI);
    }

    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
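    //
    // For example (illustrative): with two cloned returns, the merge block
    // ends up starting with
    //   %r = phi i32 [ %a, %ret.bb1 ], [ %b, %ret.bb2 ]
    // and every former user of the call now uses %r instead.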
    if (PHI) {
      for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
        ReturnInst *RI = Returns[i];
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }
    }

    // Add a branch to the merge point and remove return instructions.
    DebugLoc Loc;
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      BranchInst *BI = BranchInst::Create(AfterCallBB, RI);
      Loc = RI->getDebugLoc();
      BI->setDebugLoc(Loc);
      RI->eraseFromParent();
    }
    // We need to set the debug location to *somewhere* inside the
    // inlined function. The line number may be nonsensical, but the
    // instruction will at least be associated with the right
    // function.
    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Loc);
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!TheCall->use_empty()) {
      if (TheCall == Returns[0]->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
    }

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());

    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());

    // Delete the return instruction and the now-empty ReturnBB.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!TheCall->use_empty()) {
    // No returns, but something is using the return value of the call. Just
    // nuke the result.
    TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  TheCall->eraseFromParent();

  // If we inlined any musttail calls and the original return is now
  // unreachable, delete it. It can only contain a bitcast and ret.
  if (InlinedMustTailCalls && pred_begin(AfterCallBB) == pred_end(AfterCallBB))
    AfterCallBB->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the code from the callee's entry block into the calling block,
  // right before the unconditional branch.
  CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes
  OrigBB->getInstList().splice(Br->getIterator(), CalleeEntry->getInstList());

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);

  // If we inserted a phi node, check to see if it has a single value (e.g. all
  // the entries are the same or undef).
  // If so, remove the PHI so it doesn't
  // block other optimizations.
  if (PHI) {
    AssumptionCache *AC =
        IFI.GetAssumptionCache ? &(*IFI.GetAssumptionCache)(*Caller) : nullptr;
    auto &DL = Caller->getParent()->getDataLayout();
    if (Value *V = SimplifyInstruction(PHI, DL, nullptr, nullptr, AC)) {
      PHI->replaceAllUsesWith(V);
      PHI->eraseFromParent();
    }
  }

  return true;
}