//===- InlineFunction.cpp - Code to perform function inlining ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <string>
#include <utility>
#include <vector>

using namespace llvm;

static cl::opt<bool>
EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
  cl::Hidden,
  cl::desc("Convert noalias attributes to metadata during inlining."));

static cl::opt<bool>
PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining",
  cl::init(true), cl::Hidden,
  cl::desc("Convert align attributes to assumptions during inlining."));

bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI,
                          AAResults *CalleeAAR, bool InsertLifetime) {
  return InlineFunction(CallSite(CI), IFI, CalleeAAR, InsertLifetime);
}

bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI,
                          AAResults *CalleeAAR, bool InsertLifetime) {
  return InlineFunction(CallSite(II), IFI, CalleeAAR, InsertLifetime);
}

namespace {

  /// A class for recording information about inlining a landing pad.
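  ///
  /// When an invoke is inlined, may-throw calls in the inlined body must be
  /// routed to the invoke's landing pad. Illustrative IR (names made up):
  ///
  ///   invoke void @callee() to label %cont unwind label %lpad
  ///
  /// After inlining, a may-throw call inside @callee becomes an invoke whose
  /// unwind edge targets %lpad (or a block derived from it by splitting).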
  class LandingPadInliningInfo {
    /// Destination of the invoke's unwind.
    BasicBlock *OuterResumeDest;

    /// Destination for the callee's resume.
    BasicBlock *InnerResumeDest = nullptr;

    /// LandingPadInst associated with the invoke.
    LandingPadInst *CallerLPad = nullptr;

    /// PHI for EH values from landingpad insts.
    PHINode *InnerEHValuesPHI = nullptr;

    SmallVector<Value*, 8> UnwindDestPHIValues;

  public:
    LandingPadInliningInfo(InvokeInst *II)
        : OuterResumeDest(II->getUnwindDest()) {
      // If there are PHI nodes in the unwind destination block, we need to keep
      // track of which values came into them from the invoke before removing
      // the edge from this block.
      BasicBlock *InvokeBB = II->getParent();
      BasicBlock::iterator I = OuterResumeDest->begin();
      for (; isa<PHINode>(I); ++I) {
        // Save the value to use for this edge.
        PHINode *PHI = cast<PHINode>(I);
        UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
      }

      CallerLPad = cast<LandingPadInst>(I);
    }

    /// The outer unwind destination is the target of
    /// unwind edges introduced for calls within the inlined function.
    BasicBlock *getOuterResumeDest() const {
      return OuterResumeDest;
    }

    BasicBlock *getInnerResumeDest();

    LandingPadInst *getLandingPadInst() const { return CallerLPad; }

    /// Forward the 'resume' instruction to the caller's landing pad block.
    /// When the landing pad block has only one predecessor, this is
    /// a simple branch. When there is more than one predecessor, we need to
    /// split the landing pad block after the landingpad instruction and jump
    /// to there.
    void forwardResume(ResumeInst *RI,
                       SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);

    /// Add incoming-PHI values to the unwind destination block for the given
    /// basic block, using the values for the original invoke's source block.
    void addIncomingPHIValuesFor(BasicBlock *BB) const {
      addIncomingPHIValuesForInto(BB, OuterResumeDest);
    }

    void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
      BasicBlock::iterator I = dest->begin();
      for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
        PHINode *phi = cast<PHINode>(I);
        phi->addIncoming(UnwindDestPHIValues[i], src);
      }
    }
  };

} // end anonymous namespace

/// Get or create a target for the branch from ResumeInsts.
BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
  if (InnerResumeDest) return InnerResumeDest;

  // Split the landing pad.
  BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator();
  InnerResumeDest =
    OuterResumeDest->splitBasicBlock(SplitPoint,
                                     OuterResumeDest->getName() + ".body");

  // The number of incoming edges we expect to the inner landing pad.
  const unsigned PHICapacity = 2;

  // Create corresponding new PHIs for all the PHIs in the outer landing pad.
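  // Illustrative sketch (made-up names): splitting
  //   lpad:
  //     %v = phi i32 ...
  //     %lp = landingpad { i8*, i32 } cleanup
  //     <body>
  // produces
  //   lpad:
  //     %v = phi i32 ...
  //     %lp = landingpad { i8*, i32 } cleanup
  //     br label %lpad.body
  //   lpad.body:
  //     %v.lpad-body = phi i32 [ %v, %lpad ], ...
  //     %eh.lpad-body = phi { i8*, i32 } [ %lp, %lpad ], ...
  //     <body>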
  Instruction *InsertPoint = &InnerResumeDest->front();
  BasicBlock::iterator I = OuterResumeDest->begin();
  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
    PHINode *OuterPHI = cast<PHINode>(I);
    PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
                                        OuterPHI->getName() + ".lpad-body",
                                        InsertPoint);
    OuterPHI->replaceAllUsesWith(InnerPHI);
    InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
  }

  // Create a PHI for the exception values.
  InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
                                     "eh.lpad-body", InsertPoint);
  CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
  InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);

  // All done.
  return InnerResumeDest;
}

/// Forward the 'resume' instruction to the caller's landing pad block.
/// When the landing pad block has only one predecessor, this is a simple
/// branch. When there is more than one predecessor, we need to split the
/// landing pad block after the landingpad instruction and jump to there.
void LandingPadInliningInfo::forwardResume(
    ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) {
  BasicBlock *Dest = getInnerResumeDest();
  BasicBlock *Src = RI->getParent();

  BranchInst::Create(Dest, Src);

  // Update the PHIs in the destination. They were inserted in an order which
  // makes this work.
  addIncomingPHIValuesForInto(Src, Dest);

  InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
  RI->eraseFromParent();
}

/// Helper for getUnwindDestToken/getUnwindDestTokenHelper.
static Value *getParentPad(Value *EHPad) {
  if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
    return FPI->getParentPad();
  return cast<CatchSwitchInst>(EHPad)->getParentPad();
}

using UnwindDestMemoTy = DenseMap<Instruction *, Value *>;

/// Helper for getUnwindDestToken that does the descendant-ward part of
/// the search.
static Value *getUnwindDestTokenHelper(Instruction *EHPad,
                                       UnwindDestMemoTy &MemoMap) {
  SmallVector<Instruction *, 8> Worklist(1, EHPad);

  while (!Worklist.empty()) {
    Instruction *CurrentPad = Worklist.pop_back_val();
    // We only put pads on the worklist that aren't in the MemoMap.  When
    // we find an unwind dest for a pad we may update its ancestors, but
    // the queue only ever contains uncles/great-uncles/etc. of CurrentPad,
    // so they should never get updated while queued on the worklist.
    assert(!MemoMap.count(CurrentPad));
    Value *UnwindDestToken = nullptr;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
      if (CatchSwitch->hasUnwindDest()) {
        UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI();
      } else {
        // Catchswitch doesn't have a 'nounwind' variant, and one might be
        // annotated as "unwinds to caller" when really it's nounwind (see
        // e.g. SimplifyCFGOpt::SimplifyUnreachable), so we can't infer the
        // parent's unwind dest from this.  We can check its catchpads'
        // descendants, since they might include a cleanuppad with an
        // "unwinds to caller" cleanupret, which can be trusted.
        for (auto HI = CatchSwitch->handler_begin(),
                  HE = CatchSwitch->handler_end();
             HI != HE && !UnwindDestToken; ++HI) {
          BasicBlock *HandlerBlock = *HI;
          auto *CatchPad = cast<CatchPadInst>(HandlerBlock->getFirstNonPHI());
          for (User *Child : CatchPad->users()) {
            // Intentionally ignore invokes here -- since the catchswitch is
            // marked "unwind to caller", it would be a verifier error if it
            // contained an invoke which unwinds out of it, so any invoke we'd
            // encounter must unwind to some child of the catch.
            if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
              continue;

            Instruction *ChildPad = cast<Instruction>(Child);
            auto Memo = MemoMap.find(ChildPad);
            if (Memo == MemoMap.end()) {
              // Haven't figured out this child pad yet; queue it.
              Worklist.push_back(ChildPad);
              continue;
            }
            // We've already checked this child, but might have found that
            // it offers no proof either way.
            Value *ChildUnwindDestToken = Memo->second;
            if (!ChildUnwindDestToken)
              continue;
            // We already know the child's unwind dest, which can either
            // be ConstantTokenNone to indicate unwind to caller, or can
            // be another child of the catchpad.  Only the former indicates
            // the unwind dest of the catchswitch.
            if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
              UnwindDestToken = ChildUnwindDestToken;
              break;
            }
            assert(getParentPad(ChildUnwindDestToken) == CatchPad);
          }
        }
      }
    } else {
      auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
      for (User *U : CleanupPad->users()) {
        if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
          if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
            UnwindDestToken = RetUnwindDest->getFirstNonPHI();
          else
            UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext());
          break;
        }
        Value *ChildUnwindDestToken;
        if (auto *Invoke = dyn_cast<InvokeInst>(U)) {
          ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI();
        } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
          Instruction *ChildPad = cast<Instruction>(U);
          auto Memo = MemoMap.find(ChildPad);
          if (Memo == MemoMap.end()) {
            // Haven't resolved this child yet; queue it and keep searching.
            Worklist.push_back(ChildPad);
            continue;
          }
          // We've checked this child, but still need to ignore it if it
          // had no proof either way.
          ChildUnwindDestToken = Memo->second;
          if (!ChildUnwindDestToken)
            continue;
        } else {
          // Not a relevant user of the cleanuppad.
          continue;
        }
        // In a well-formed program, the child/invoke must either unwind to
        // an(other) child of the cleanup, or exit the cleanup.  In the
        // first case, continue searching.
        if (isa<Instruction>(ChildUnwindDestToken) &&
            getParentPad(ChildUnwindDestToken) == CleanupPad)
          continue;
        UnwindDestToken = ChildUnwindDestToken;
        break;
      }
    }
    // If we haven't found an unwind dest for CurrentPad, we may have queued its
    // children, so move on to the next in the worklist.
    if (!UnwindDestToken)
      continue;

    // Now we know that CurrentPad unwinds to UnwindDestToken.  It also exits
    // any ancestors of CurrentPad up to but not including UnwindDestToken's
    // parent pad.  Record this in the memo map, and check to see if the
    // original EHPad being queried is one of the ones exited.
    Value *UnwindParent;
    if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
      UnwindParent = getParentPad(UnwindPad);
    else
      UnwindParent = nullptr;
    bool ExitedOriginalPad = false;
    for (Instruction *ExitedPad = CurrentPad;
         ExitedPad && ExitedPad != UnwindParent;
         ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) {
      // Skip over catchpads since they just follow their catchswitches.
      if (isa<CatchPadInst>(ExitedPad))
        continue;
      MemoMap[ExitedPad] = UnwindDestToken;
      ExitedOriginalPad |= (ExitedPad == EHPad);
    }

    if (ExitedOriginalPad)
      return UnwindDestToken;

    // Continue the search.
  }

  // No definitive information is contained within this funclet.
  return nullptr;
}

/// Given an EH pad, find where it unwinds.  If it unwinds to an EH pad,
/// return that pad instruction.  If it unwinds to caller, return
/// ConstantTokenNone.  If it does not have a definitive unwind destination,
/// return nullptr.
///
/// This routine gets invoked for calls in funclets in inlinees when inlining
/// an invoke.  Since many funclets don't have calls inside them, it's queried
/// on-demand rather than building a map of pads to unwind dests up front.
/// Determining a funclet's unwind dest may require recursively searching its
/// descendants, and also ancestors and cousins if the descendants don't provide
/// an answer.  Since most funclets will have their unwind dest immediately
/// available as the unwind dest of a catchswitch or cleanupret, this routine
/// searches top-down from the given pad and then up.  To avoid worst-case
/// quadratic run-time given that approach, it uses a memo map to avoid
/// re-processing funclet trees.  The callers that rewrite the IR as they go
/// take advantage of this, for correctness, by checking/forcing rewritten
/// pads' entries to match the original callee view.
static Value *getUnwindDestToken(Instruction *EHPad,
                                 UnwindDestMemoTy &MemoMap) {
  // Catchpads unwind to the same place as their catchswitch;
  // redirect any queries on catchpads so the code below can
  // deal with just catchswitches and cleanuppads.
  if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))
    EHPad = CPI->getCatchSwitch();

  // Check if we've already determined the unwind dest for this pad.
  auto Memo = MemoMap.find(EHPad);
  if (Memo != MemoMap.end())
    return Memo->second;

  // Search EHPad and, if necessary, its descendants.
  Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
  assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));
  if (UnwindDestToken)
    return UnwindDestToken;

  // No information is available for this EHPad from itself or any of its
  // descendants.  An unwind all the way out to a pad in the caller would
  // need also to agree with the unwind dest of the parent funclet, so
  // search up the chain to try to find a funclet with information.  Put
  // null entries in the memo map to avoid re-processing as we go up.
  MemoMap[EHPad] = nullptr;
#ifndef NDEBUG
  SmallPtrSet<Instruction *, 4> TempMemos;
  TempMemos.insert(EHPad);
#endif
  Instruction *LastUselessPad = EHPad;
  Value *AncestorToken;
  for (AncestorToken = getParentPad(EHPad);
       auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
       AncestorToken = getParentPad(AncestorToken)) {
    // Skip over catchpads since they just follow their catchswitches.
    if (isa<CatchPadInst>(AncestorPad))
      continue;
    // If the MemoMap had an entry mapping AncestorPad to nullptr, since we
    // haven't yet called getUnwindDestTokenHelper for AncestorPad in this
    // call to getUnwindDestToken, that would mean that AncestorPad had no
    // information in itself, its descendants, or its ancestors.  If that
    // were the case, then we should also have recorded the lack of information
    // for the descendant that we're coming from.  So assert that we don't
    // find a null entry in the MemoMap for AncestorPad.
    assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);
    auto AncestorMemo = MemoMap.find(AncestorPad);
    if (AncestorMemo == MemoMap.end()) {
      UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
    } else {
      UnwindDestToken = AncestorMemo->second;
    }
    if (UnwindDestToken)
      break;
    LastUselessPad = AncestorPad;
    MemoMap[LastUselessPad] = nullptr;
#ifndef NDEBUG
    TempMemos.insert(LastUselessPad);
#endif
  }

  // We know that getUnwindDestTokenHelper was called on LastUselessPad and
  // returned nullptr (and likewise for EHPad and any of its ancestors up to
  // LastUselessPad), so LastUselessPad has no information from below.  Since
  // getUnwindDestTokenHelper must investigate all downward paths through
  // no-information nodes to prove that a node has no information like this,
  // and since any time it finds information it records it in the MemoMap for
  // not just the immediately-containing funclet but also any ancestors also
  // exited, it must be the case that, walking downward from LastUselessPad,
  // visiting just those nodes which have not been mapped to an unwind dest
  // by getUnwindDestTokenHelper (the nullptr TempMemos notwithstanding, since
  // they are just used to keep getUnwindDestTokenHelper from repeating work),
  // any node visited must have been exhaustively searched with no information
  // for it found.
  SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
  while (!Worklist.empty()) {
    Instruction *UselessPad = Worklist.pop_back_val();
    auto Memo = MemoMap.find(UselessPad);
    if (Memo != MemoMap.end() && Memo->second) {
      // Here the name 'UselessPad' is a bit of a misnomer, because we've found
      // that it is a funclet that does have information about unwinding to
      // a particular destination; its parent was a useless pad.
      // Since its parent has no information, the unwind edge must not escape
      // the parent, and must target a sibling of this pad.  This local unwind
      // gives us no information about EHPad.  Leave it and the subtree rooted
      // at it alone.
      assert(getParentPad(Memo->second) == getParentPad(UselessPad));
      continue;
    }
    // We know we don't have information for UselessPad.  If it has an entry in
    // the MemoMap (mapping it to nullptr), it must be one of the TempMemos
    // added on this invocation of getUnwindDestToken; if a previous invocation
    // recorded nullptr, it would have had to prove that the ancestors of
    // UselessPad, which include LastUselessPad, had no information, and that
    // in turn would have required proving that the descendants of
    // LastUselessPad, which include EHPad, have no information about
    // LastUselessPad, which would imply that EHPad was mapped to nullptr in
    // the MemoMap on that invocation, which isn't the case if we got here.
    assert(!MemoMap.count(UselessPad) || TempMemos.count(UselessPad));
    // Assert as we enumerate users that 'UselessPad' doesn't have any unwind
    // information that we'd be contradicting by making a map entry for it
    // (which is something that getUnwindDestTokenHelper must have proved for
    // us to get here).  Just assert on its direct users here; the checks in
    // this downward walk at its descendants will verify that they don't have
    // any unwind edges that exit 'UselessPad' either (i.e. they either have no
    // unwind edges or unwind to a sibling).
    MemoMap[UselessPad] = UnwindDestToken;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
      assert(CatchSwitch->getUnwindDest() == nullptr && "Expected useless pad");
      for (BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
        auto *CatchPad = HandlerBlock->getFirstNonPHI();
        for (User *U : CatchPad->users()) {
          assert(
              (!isa<InvokeInst>(U) ||
               (getParentPad(
                    cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
                CatchPad)) &&
              "Expected useless pad");
          if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
            Worklist.push_back(cast<Instruction>(U));
        }
      }
    } else {
      assert(isa<CleanupPadInst>(UselessPad));
      for (User *U : UselessPad->users()) {
        assert(!isa<CleanupReturnInst>(U) && "Expected useless pad");
        assert((!isa<InvokeInst>(U) ||
                (getParentPad(
                     cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
                 UselessPad)) &&
               "Expected useless pad");
        if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
          Worklist.push_back(cast<Instruction>(U));
      }
    }
  }

  return UnwindDestToken;
}

/// When we inline a basic block into an invoke,
/// we have to turn all of the calls that can throw into invokes.
/// This function analyzes BB to see if there are any calls, and if so,
/// it rewrites them to be invokes that jump to UnwindEdge; the caller is
/// responsible for updating any PHI nodes in the unwind destination.
static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
    BasicBlock *BB, BasicBlock *UnwindEdge,
    UnwindDestMemoTy *FuncletUnwindMap = nullptr) {
  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = &*BBI++;

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);

    if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
      continue;

    // We do not need to (and in fact, cannot) convert possibly throwing calls
    // to @llvm.experimental.deoptimize (resp. @llvm.experimental.guard) into
    // invokes.  The caller's "segment" of the deoptimization continuation
    // attached to the newly inlined @llvm.experimental.deoptimize
    // (resp. @llvm.experimental.guard) call should contain the exception
    // handling logic, if any.
    if (auto *F = CI->getCalledFunction())
      if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
          F->getIntrinsicID() == Intrinsic::experimental_guard)
        continue;

    if (auto FuncletBundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
      // This call is nested inside a funclet.  If that funclet has an unwind
      // destination within the inlinee, then unwinding out of this call would
      // be UB.  Rewriting this call to an invoke which targets the inlined
      // invoke's unwind dest would give the call's parent funclet multiple
      // unwind destinations, which is something that subsequent EH table
      // generation can't handle and that the verifier rejects.  So when we
      // see such a call, leave it as a call.
      auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
      Value *UnwindDestToken =
          getUnwindDestToken(FuncletPad, *FuncletUnwindMap);
      if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
        continue;
#ifndef NDEBUG
      Instruction *MemoKey;
      if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
        MemoKey = CatchPad->getCatchSwitch();
      else
        MemoKey = FuncletPad;
      assert(FuncletUnwindMap->count(MemoKey) &&
             (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
             "must get memoized to avoid confusing later searches");
#endif // NDEBUG
    }

    changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);
    return BB;
  }
  return nullptr;
}

/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined.  FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                                    ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.
  LandingPadInliningInfo Invoke(II);

  // Get all of the inlined landing pad instructions.
  SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
  for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end();
       I != E; ++I)
    if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
      InlinedLPads.insert(II->getLandingPadInst());

  // Append the clauses from the outer landing pad instruction into the inlined
  // landing pad instructions.
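  // Illustrative sketch (made-up types): if the outer landing pad is
  //   %lp = landingpad { i8*, i32 } catch i8* @TypeA
  // then an inlined
  //   landingpad { i8*, i32 } catch i8* @TypeB
  // is extended below to
  //   landingpad { i8*, i32 } catch i8* @TypeB catch i8* @TypeA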
  LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
  for (LandingPadInst *InlinedLPad : InlinedLPads) {
    unsigned OuterNum = OuterLPad->getNumClauses();
    InlinedLPad->reserveClauses(OuterNum);
    for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
      InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
    if (OuterLPad->isCleanup())
      InlinedLPad->setCleanup(true);
  }

  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    if (InlinedCodeInfo.ContainsCalls)
      if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
              &*BB, Invoke.getOuterResumeDest()))
        // Update any PHI nodes in the exceptional block to indicate that there
        // is now a new entry in them.
        Invoke.addIncomingPHIValuesFor(NewBB);

    // Forward any resumes that are remaining here.
    if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
      Invoke.forwardResume(RI, InlinedLPads);
  }

  // Now that everything is happy, we have one final detail.  The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction.  Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}

/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined.  FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                               ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *UnwindDest = II->getUnwindDest();
  Function *Caller = FirstNewBlock->getParent();

  assert(UnwindDest->getFirstNonPHI()->isEHPad() && "unexpected BasicBlock!");

  // If there are PHI nodes in the unwind destination block, we need to keep
  // track of which values came into them from the invoke before removing the
  // edge from this block.
  SmallVector<Value *, 8> UnwindDestPHIValues;
  BasicBlock *InvokeBB = II->getParent();
  for (Instruction &I : *UnwindDest) {
    // Save the value to use for this edge.
    PHINode *PHI = dyn_cast<PHINode>(&I);
    if (!PHI)
      break;
    UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
  }

  // Add incoming-PHI values to the unwind destination block for the given basic
  // block, using the values for the original invoke's source block.
  auto UpdatePHINodes = [&](BasicBlock *Src) {
    BasicBlock::iterator I = UnwindDest->begin();
    for (Value *V : UnwindDestPHIValues) {
      PHINode *PHI = cast<PHINode>(I);
      PHI->addIncoming(V, Src);
      ++I;
    }
  };

  // This connects all the instructions which 'unwind to caller' to the invoke
  // destination.
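  // E.g. (illustrative), an inlined
  //   cleanupret from %cp unwind to caller
  // is rewritten below to
  //   cleanupret from %cp unwind label %unwind.dest
  // where %unwind.dest stands for the inlined invoke's unwind destination.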
  UnwindDestMemoTy FuncletUnwindMap;
  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
      if (CRI->unwindsToCaller()) {
        auto *CleanupPad = CRI->getCleanupPad();
        CleanupReturnInst::Create(CleanupPad, UnwindDest, CRI);
        CRI->eraseFromParent();
        UpdatePHINodes(&*BB);
        // Finding a cleanupret with an unwind destination would confuse
        // subsequent calls to getUnwindDestToken, so map the cleanuppad
        // to short-circuit any such calls and recognize this as an "unwind
        // to caller" cleanup.
        assert(!FuncletUnwindMap.count(CleanupPad) ||
               isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
        FuncletUnwindMap[CleanupPad] =
            ConstantTokenNone::get(Caller->getContext());
      }
    }

    Instruction *I = BB->getFirstNonPHI();
    if (!I->isEHPad())
      continue;

    Instruction *Replacement = nullptr;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
      if (CatchSwitch->unwindsToCaller()) {
        Value *UnwindDestToken;
        if (auto *ParentPad =
                dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
          // This catchswitch is nested inside another funclet.  If that
          // funclet has an unwind destination within the inlinee, then
          // unwinding out of this catchswitch would be UB.  Rewriting this
          // catchswitch to unwind to the inlined invoke's unwind dest would
          // give the parent funclet multiple unwind destinations, which is
          // something that subsequent EH table generation can't handle and
          // that the verifier rejects.  So when we see such a catchswitch,
          // leave it as "unwind to caller".
          UnwindDestToken = getUnwindDestToken(ParentPad, FuncletUnwindMap);
          if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
            continue;
        } else {
          // This catchswitch has no parent to inherit constraints from, and
          // none of its descendants can have an unwind edge that exits it and
          // targets another funclet in the inlinee.  It may or may not have a
          // descendant that definitively has an unwind to caller.  In either
          // case, we'll have to assume that any unwinds out of it may need to
          // be routed to the caller, so treat it as though it has a definitive
          // unwind to caller.
          UnwindDestToken = ConstantTokenNone::get(Caller->getContext());
        }
        auto *NewCatchSwitch = CatchSwitchInst::Create(
            CatchSwitch->getParentPad(), UnwindDest,
            CatchSwitch->getNumHandlers(), CatchSwitch->getName(),
            CatchSwitch);
        for (BasicBlock *PadBB : CatchSwitch->handlers())
          NewCatchSwitch->addHandler(PadBB);
        // Propagate info for the old catchswitch over to the new one in
        // the unwind map.  This also serves to short-circuit any subsequent
        // checks for the unwind dest of this catchswitch, which would get
        // confused if they found the outer handler in the callee.
        FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
        Replacement = NewCatchSwitch;
      }
    } else if (!isa<FuncletPadInst>(I)) {
      llvm_unreachable("unexpected EHPad!");
    }

    if (Replacement) {
      Replacement->takeName(I);
      I->replaceAllUsesWith(Replacement);
      I->eraseFromParent();
      UpdatePHINodes(&*BB);
    }
  }

  if (InlinedCodeInfo.ContainsCalls)
    for (Function::iterator BB = FirstNewBlock->getIterator(),
                            E = Caller->end();
         BB != E; ++BB)
      if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
              &*BB, UnwindDest, &FuncletUnwindMap))
        // Update any PHI nodes in the exceptional block to indicate that there
        // is now a new entry in them.
        UpdatePHINodes(NewBB);

  // Now that everything is happy, we have one final detail.  The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction.  Eliminate these entries (which might even delete the
  // PHI node) now.
  UnwindDest->removePredecessor(InvokeBB);
}

/// When inlining a call site that has !llvm.mem.parallel_loop_access metadata,
/// that metadata should be propagated to all memory-accessing cloned
/// instructions.
static void PropagateParallelLoopAccessMetadata(CallSite CS,
                                                ValueToValueMapTy &VMap) {
  MDNode *M =
      CS.getInstruction()->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
  if (!M)
    return;

  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (!VMI->second)
      continue;

    Instruction *NI = dyn_cast<Instruction>(VMI->second);
    if (!NI)
      continue;

    if (MDNode *PM = NI->getMetadata(LLVMContext::MD_mem_parallel_loop_access)) {
      M = MDNode::concatenate(PM, M);
      NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
    } else if (NI->mayReadOrWriteMemory()) {
      NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
    }
  }
}

/// When inlining a function that contains noalias scope metadata,
/// this metadata needs to be cloned so that the inlined blocks
/// have different "unique scopes" at every call site.  Were this not done, then
/// aliasing scopes from a function inlined into a caller multiple times could
/// not be differentiated (and this would lead to miscompiles because the
/// non-aliasing property communicated by the metadata could have
/// call-site-specific control dependencies).
static void CloneAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap) {
  const Function *CalledFunc = CS.getCalledFunction();
  SetVector<const MDNode *> MD;

  // Note: We could only clone the metadata if it is already used in the
  // caller.  I'm omitting that check here because it might confuse
  // inter-procedural alias analysis passes.  We can revisit this if it becomes
  // an efficiency or overhead problem.

  for (const BasicBlock &I : *CalledFunc)
    for (const Instruction &J : I) {
      if (const MDNode *M = J.getMetadata(LLVMContext::MD_alias_scope))
        MD.insert(M);
      if (const MDNode *M = J.getMetadata(LLVMContext::MD_noalias))
        MD.insert(M);
    }

  if (MD.empty())
    return;

  // Walk the existing metadata, adding the complete (perhaps cyclic) chain to
  // the set.
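  // For reference, a scope list points at scopes, which point at a domain,
  // e.g. (illustrative):
  //   !0 = !{!1}                          ; scope list used by !alias.scope
  //   !1 = distinct !{!1, !2, "f: %a"}    ; a scope in domain !2
  //   !2 = distinct !{!2, "f"}            ; the domain
  // The self-references in !1 and !2 are why the chain can be cyclic.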
  SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
  while (!Queue.empty()) {
    const MDNode *M = cast<MDNode>(Queue.pop_back_val());
    for (unsigned i = 0, ie = M->getNumOperands(); i != ie; ++i)
      if (const MDNode *M1 = dyn_cast<MDNode>(M->getOperand(i)))
        if (MD.insert(M1))
          Queue.push_back(M1);
  }

  // Now we have a complete set of all metadata in the chains used to specify
  // the noalias scopes and the lists of those scopes.
  SmallVector<TempMDTuple, 16> DummyNodes;
  DenseMap<const MDNode *, TrackingMDNodeRef> MDMap;
  for (const MDNode *I : MD) {
    DummyNodes.push_back(MDTuple::getTemporary(CalledFunc->getContext(), None));
    MDMap[I].reset(DummyNodes.back().get());
  }

  // Create new metadata nodes to replace the dummy nodes, replacing old
  // metadata references with either a dummy node or an already-created new
  // node.
  for (const MDNode *I : MD) {
    SmallVector<Metadata *, 4> NewOps;
    for (unsigned i = 0, ie = I->getNumOperands(); i != ie; ++i) {
      const Metadata *V = I->getOperand(i);
      if (const MDNode *M = dyn_cast<MDNode>(V))
        NewOps.push_back(MDMap[M]);
      else
        NewOps.push_back(const_cast<Metadata *>(V));
    }

    MDNode *NewM = MDNode::get(CalledFunc->getContext(), NewOps);
    MDTuple *TempM = cast<MDTuple>(MDMap[I]);
    assert(TempM->isTemporary() && "Expected temporary node");

    TempM->replaceAllUsesWith(NewM);
  }

  // Now replace the metadata in the new inlined instructions with the
  // replacements from the map.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (!VMI->second)
      continue;

    Instruction *NI = dyn_cast<Instruction>(VMI->second);
    if (!NI)
      continue;

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_alias_scope)) {
      MDNode *NewMD = MDMap[M];
      // If the call site also had alias scope metadata (a list of scopes to
      // which instructions inside it might belong), propagate those scopes to
      // the inlined instructions.
      if (MDNode *CSM =
              CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
        NewMD = MDNode::concatenate(NewMD, CSM);
      NI->setMetadata(LLVMContext::MD_alias_scope, NewMD);
    } else if (NI->mayReadOrWriteMemory()) {
      if (MDNode *M =
              CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
        NI->setMetadata(LLVMContext::MD_alias_scope, M);
    }

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_noalias)) {
      MDNode *NewMD = MDMap[M];
      // If the call site also had noalias metadata (a list of scopes with
      // which instructions inside it don't alias), propagate those scopes to
      // the inlined instructions.
      if (MDNode *CSM =
              CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
        NewMD = MDNode::concatenate(NewMD, CSM);
      NI->setMetadata(LLVMContext::MD_noalias, NewMD);
    } else if (NI->mayReadOrWriteMemory()) {
      if (MDNode *M = CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
        NI->setMetadata(LLVMContext::MD_noalias, M);
    }
  }
}

/// If the inlined function has noalias arguments,
/// then add new alias scopes for each noalias argument, tag the mapped noalias
/// parameters with noalias metadata specifying the new scope, and tag all
/// non-derived loads, stores and memory intrinsics with the new alias scopes.
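///
/// Illustrative sketch (made-up names): after inlining
///   define void @f(float* noalias %a, float* %b) { ... }
/// accesses in the cloned body that are based on %a get !alias.scope pointing
/// at a fresh scope for %a, while accesses provably not based on %a get
/// !noalias pointing at that same scope.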
static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap,
                                  const DataLayout &DL, AAResults *CalleeAAR) {
  if (!EnableNoAliasConversion)
    return;

  const Function *CalledFunc = CS.getCalledFunction();
  SmallVector<const Argument *, 4> NoAliasArgs;

  for (const Argument &Arg : CalledFunc->args())
    if (Arg.hasNoAliasAttr() && !Arg.use_empty())
      NoAliasArgs.push_back(&Arg);

  if (NoAliasArgs.empty())
    return;

  // To do a good job, if a noalias variable is captured, we need to know if
  // the capture point dominates the particular use we're considering.
  DominatorTree DT;
  DT.recalculate(const_cast<Function&>(*CalledFunc));

  // noalias indicates that pointer values based on the argument do not alias
  // pointer values which are not based on it.  So we add a new "scope" for each
  // noalias function argument.  Accesses using pointers based on that argument
  // become part of that alias scope, accesses using pointers not based on that
  // argument are tagged as noalias with that scope.

  DenseMap<const Argument *, MDNode *> NewScopes;
  MDBuilder MDB(CalledFunc->getContext());

  // Create a new scope domain for this function.
  MDNode *NewDomain =
      MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
  for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
    const Argument *A = NoAliasArgs[i];

    std::string Name = CalledFunc->getName();
    if (A->hasName()) {
      Name += ": %";
      Name += A->getName();
    } else {
      Name += ": argument ";
      Name += utostr(i);
    }

    // Note: We always create a new anonymous root here.  This is true regardless
    // of the linkage of the callee because the aliasing "scope" is not just a
    // property of the callee, but also all control dependencies in the caller.
    MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
    NewScopes.insert(std::make_pair(A, NewScope));
  }

  // Iterate over all new instructions in the map; for all memory-access
  // instructions, add the alias scope metadata.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
      if (!VMI->second)
        continue;

      Instruction *NI = dyn_cast<Instruction>(VMI->second);
      if (!NI)
        continue;

      bool IsArgMemOnlyCall = false, IsFuncCall = false;
      SmallVector<const Value *, 2> PtrArgs;

      if (const LoadInst *LI = dyn_cast<LoadInst>(I))
        PtrArgs.push_back(LI->getPointerOperand());
      else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
        PtrArgs.push_back(SI->getPointerOperand());
      else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
        PtrArgs.push_back(VAAI->getPointerOperand());
      else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
        PtrArgs.push_back(CXI->getPointerOperand());
      else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
        PtrArgs.push_back(RMWI->getPointerOperand());
      else if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
        // If we know that the call does not access memory, then we'll still
        // know that about the inlined clone of this call site, and we don't
        // need to add metadata.
        if (ICS.doesNotAccessMemory())
          continue;

        IsFuncCall = true;
        if (CalleeAAR) {
          FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(ICS);
          if (MRB == FMRB_OnlyAccessesArgumentPointees ||
              MRB == FMRB_OnlyReadsArgumentPointees)
            IsArgMemOnlyCall = true;
        }

        for (Value *Arg : ICS.args()) {
          // We need to check the underlying objects of all arguments, not just
          // the pointer arguments, because we might be passing pointers as
          // integers, etc.
          // However, if we know that the call only accesses pointer arguments,
          // then we only need to check the pointer arguments.
          if (IsArgMemOnlyCall && !Arg->getType()->isPointerTy())
            continue;

          PtrArgs.push_back(Arg);
        }
      }

      // If we found no pointers, then this instruction is not suitable for
      // pairing with an instruction to receive aliasing metadata.
      // However, if this is a call, then we might just alias with none of the
      // noalias arguments.
      if (PtrArgs.empty() && !IsFuncCall)
        continue;

      // It is possible that there is only one underlying object, but you
      // need to go through several PHIs to see it, and thus could be
      // repeated in the Objects list.
      SmallPtrSet<const Value *, 4> ObjSet;
      SmallVector<Metadata *, 4> Scopes, NoAliases;

      SmallSetVector<const Argument *, 4> NAPtrArgs;
      for (const Value *V : PtrArgs) {
        SmallVector<Value *, 4> Objects;
        GetUnderlyingObjects(const_cast<Value*>(V),
                             Objects, DL, /* LI = */ nullptr);

        for (Value *O : Objects)
          ObjSet.insert(O);
      }

      // Figure out if we're derived from anything that is not a noalias
      // argument.
      bool CanDeriveViaCapture = false, UsesAliasingPtr = false;
      for (const Value *V : ObjSet) {
        // Is this value a constant that cannot be derived from any pointer
        // value (we need to exclude constant expressions, for example, that
        // are formed from arithmetic on global symbols)?
        bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
                             isa<ConstantPointerNull>(V) ||
                             isa<ConstantDataVector>(V) || isa<UndefValue>(V);
        if (IsNonPtrConst)
          continue;

        // If this is anything other than a noalias argument, then we cannot
        // completely describe the aliasing properties using alias.scope
        // metadata (and, thus, won't add any).
        if (const Argument *A = dyn_cast<Argument>(V)) {
          if (!A->hasNoAliasAttr())
            UsesAliasingPtr = true;
        } else {
          UsesAliasingPtr = true;
        }

        // If this is not some identified function-local object (which cannot
        // directly alias a noalias argument), or some other argument (which,
        // by definition, also cannot alias a noalias argument), then we could
        // alias a noalias argument that has been captured.
        if (!isa<Argument>(V) &&
            !isIdentifiedFunctionLocal(const_cast<Value*>(V)))
          CanDeriveViaCapture = true;
      }

      // A function call can always get captured noalias pointers (via other
      // parameters, globals, etc.).
      if (IsFuncCall && !IsArgMemOnlyCall)
        CanDeriveViaCapture = true;

      // First, we want to figure out all of the sets with which we definitely
      // don't alias.  Iterate over all noalias sets, and add those for which:
      //   1. The noalias argument is not in the set of objects from which we
      //      definitely derive.
      //   2. The noalias argument has not yet been captured.
      // An arbitrary function that might load pointers could see captured
      // noalias arguments via other noalias arguments or globals, and so we
      // must always check for prior capture.
      for (const Argument *A : NoAliasArgs) {
        if (!ObjSet.count(A) && (!CanDeriveViaCapture ||
                                 // It might be tempting to skip the
                                 // PointerMayBeCapturedBefore check if
                                 // A->hasNoCaptureAttr() is true, but this is
                                 // incorrect because nocapture only guarantees
                                 // that no copies outlive the function, not
                                 // that the value cannot be locally captured.
                                 !PointerMayBeCapturedBefore(A,
                                     /* ReturnCaptures */ false,
                                     /* StoreCaptures */ false, I, &DT)))
          NoAliases.push_back(NewScopes[A]);
      }

      if (!NoAliases.empty())
        NI->setMetadata(LLVMContext::MD_noalias,
                        MDNode::concatenate(
                            NI->getMetadata(LLVMContext::MD_noalias),
                            MDNode::get(CalledFunc->getContext(), NoAliases)));

      // Next, we want to figure out all of the sets to which we might belong.
      // We might belong to a set if the noalias argument is in the set of
      // underlying objects.  If there is some non-noalias argument in our list
      // of underlying objects, then we cannot add a scope because the fact
      // that some access does not alias with any set of our noalias arguments
      // cannot itself guarantee that it does not alias with this access
      // (because there is some pointer of unknown origin involved and the
      // other access might also depend on this pointer).  We also cannot add
      // scopes to arbitrary functions unless we know they don't access any
      // non-parameter pointer-values.
      bool CanAddScopes = !UsesAliasingPtr;
      if (CanAddScopes && IsFuncCall)
        CanAddScopes = IsArgMemOnlyCall;

      if (CanAddScopes)
        for (const Argument *A : NoAliasArgs) {
          if (ObjSet.count(A))
            Scopes.push_back(NewScopes[A]);
        }

      if (!Scopes.empty())
        NI->setMetadata(
            LLVMContext::MD_alias_scope,
            MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
                                MDNode::get(CalledFunc->getContext(), Scopes)));
    }
  }
}

/// If the inlined function has non-byval align arguments, then
/// add @llvm.assume-based alignment assumptions to preserve this information.
static void AddAlignmentAssumptions(CallSite CS, InlineFunctionInfo &IFI) {
  if (!PreserveAlignmentAssumptions || !IFI.GetAssumptionCache)
    return;

  AssumptionCache *AC = &(*IFI.GetAssumptionCache)(*CS.getCaller());
  auto &DL = CS.getCaller()->getParent()->getDataLayout();

  // To avoid inserting redundant assumptions, we should check for assumptions
  // already in the caller.  To do this, we might need a DT of the caller.
  DominatorTree DT;
  bool DTCalculated = false;

  Function *CalledFunc = CS.getCalledFunction();
  for (Argument &Arg : CalledFunc->args()) {
    unsigned Align = Arg.getType()->isPointerTy() ? Arg.getParamAlignment() : 0;
    if (Align && !Arg.hasByValOrInAllocaAttr() && !Arg.hasNUses(0)) {
      if (!DTCalculated) {
        DT.recalculate(*CS.getCaller());
        DTCalculated = true;
      }

      // If we can already prove the asserted alignment in the context of the
      // caller, then don't bother inserting the assumption.
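      // For reference, CreateAlignmentAssumption emits roughly the following
      // (illustrative, for a 32-byte alignment on a pointer %arg):
      //   %ptrint = ptrtoint i8* %arg to i64
      //   %maskedptr = and i64 %ptrint, 31
      //   %maskcond = icmp eq i64 %maskedptr, 0
      //   call void @llvm.assume(i1 %maskcond)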
      Value *ArgVal = CS.getArgument(Arg.getArgNo());
      if (getKnownAlignment(ArgVal, DL, CS.getInstruction(), AC, &DT) >= Align)
        continue;

      CallInst *NewAsmp = IRBuilder<>(CS.getInstruction())
                              .CreateAlignmentAssumption(DL, ArgVal, Align);
      AC->registerAssumption(NewAsmp);
    }
  }
}

/// Once we have cloned code over from a callee into the caller,
/// update the specified callgraph to reflect the changes we made.
/// Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
static void UpdateCallGraphAfterInlining(CallSite CS,
                                         Function::iterator FirstNewBlock,
                                         ValueToValueMapTy &VMap,
                                         InlineFunctionInfo &IFI) {
  CallGraph &CG = *IFI.CG;
  const Function *Caller = CS.getCaller();
  const Function *Callee = CS.getCalledFunction();
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();

  // Consider the case where CalleeNode == CallerNode.
  CallGraphNode::CalledFunctionsVector CallCache;
  if (CalleeNode == CallerNode) {
    CallCache.assign(I, E);
    I = CallCache.begin();
    E = CallCache.end();
  }

  for (; I != E; ++I) {
    const Value *OrigCall = I->first;

    ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI == VMap.end() || VMI->second == nullptr)
      continue;

    // If the call was inlined, but then constant folded, there is no edge to
    // add.  Check for this case.
    Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
    if (!NewCall)
      continue;

    // We do not treat intrinsic calls like real function calls because we
    // expect them to become inline code; do not add an edge for an intrinsic.
    CallSite CS = CallSite(NewCall);
    if (CS && CS.getCalledFunction() && CS.getCalledFunction()->isIntrinsic())
      continue;

    // Remember that this call site got inlined for the client of
    // InlineFunction.
    IFI.InlinedCalls.push_back(NewCall);

    // It's possible that inlining the callsite will cause it to go from an
    // indirect to a direct call by resolving a function pointer.  If this
    // happens, set the callee of the new call site to a more precise
    // destination.  This can also happen if the call graph node of the caller
    // was just unnecessarily imprecise.
    if (!I->second->getFunction())
      if (Function *F = CallSite(NewCall).getCalledFunction()) {
        // Indirect call site resolved to direct call.
        CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);

        continue;
      }

    CallerNode->addCalledFunction(CallSite(NewCall), I->second);
  }

  // Update the call graph by deleting the edge from Callee to Caller.  We must
  // do this after the loop above in case Caller and Callee are the same.
  CallerNode->removeCallEdgeFor(CS);
}

static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
                                    BasicBlock *InsertBlock,
                                    InlineFunctionInfo &IFI) {
  Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
  IRBuilder<> Builder(InsertBlock, InsertBlock->begin());

  Value *Size = Builder.getInt64(M->getDataLayout().getTypeStoreSize(AggTy));

  // Always generate a memcpy of alignment 1 here because we don't know
  // the alignment of the src pointer.  Other optimizations can infer
  // better alignment.
  Builder.CreateMemCpy(Dst, Src, Size, /*Align=*/1);
}

/// When inlining a call site that has a byval argument,
/// we have to make the implicit memcpy explicit by adding it.
static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
                                  const Function *CalledFunc,
                                  InlineFunctionInfo &IFI,
                                  unsigned ByValAlignment) {
  PointerType *ArgTy = cast<PointerType>(Arg->getType());
  Type *AggTy = ArgTy->getElementType();

  Function *Caller = TheCall->getFunction();
  const DataLayout &DL = Caller->getParent()->getDataLayout();

  // If the called function is readonly, then it could not mutate the caller's
  // copy of the byval'd memory.  In this case, it is safe to elide the copy and
  // temporary.
  if (CalledFunc->onlyReadsMemory()) {
    // If the byval argument has a specified alignment that is greater than the
    // passed in pointer, then we either have to round up the input pointer or
    // give up on this transformation.
    if (ByValAlignment <= 1)  // 0 = unspecified, 1 = no particular alignment.
      return Arg;

    AssumptionCache *AC =
        IFI.GetAssumptionCache ? &(*IFI.GetAssumptionCache)(*Caller) : nullptr;

    // If the pointer is already known to be sufficiently aligned, or if we can
    // round it up to a larger alignment, then we don't need a temporary.
    if (getOrEnforceKnownAlignment(Arg, ByValAlignment, DL, TheCall, AC) >=
        ByValAlignment)
      return Arg;

    // Otherwise, we have to make a memcpy to get a safe alignment.  This is bad
    // for code quality, but rarely happens and is required for correctness.
  }

  // Create the alloca.  If we have DataLayout, use nice alignment.
  unsigned Align = DL.getPrefTypeAlignment(AggTy);

  // If the byval had an alignment specified, we *must* use at least that
  // alignment, as it is required by the byval argument (and uses of the
  // pointer inside the callee).
  Align = std::max(Align, ByValAlignment);

  Value *NewAlloca = new AllocaInst(AggTy, DL.getAllocaAddrSpace(),
                                    nullptr, Align, Arg->getName(),
                                    &*Caller->begin()->begin());
  IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));

  // Uses of the argument in the function should use our new alloca
  // instead.
  return NewAlloca;
}

// Check whether this Value is used by a lifetime intrinsic.
static bool isUsedByLifetimeMarker(Value *V) {
  for (User *U : V->users()) {
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
        return true;
      }
    }
  }
  return false;
}

// Check whether the given alloca already has
// lifetime.start or lifetime.end intrinsics.
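// E.g. (illustrative), markers are typically attached through an i8* bitcast
// of the alloca:
//   %buf = alloca [16 x i8]
//   %p = bitcast [16 x i8]* %buf to i8*
//   call void @llvm.lifetime.start.p0i8(i64 16, i8* %p)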
static bool hasLifetimeMarkers(AllocaInst *AI) {
  Type *Ty = AI->getType();
  Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
                                       Ty->getPointerAddressSpace());
  if (Ty == Int8PtrTy)
    return isUsedByLifetimeMarker(AI);

  // Do a scan to find all the casts to i8*.
  for (User *U : AI->users()) {
    if (U->getType() != Int8PtrTy) continue;
    if (U->stripPointerCasts() != AI) continue;
    if (isUsedByLifetimeMarker(U))
      return true;
  }
  return false;
}

/// Return the result of AI->isStaticAlloca() if AI were moved to the entry
/// block. Allocas used in inalloca calls and allocas of dynamic array size
/// cannot be static.
static bool allocaWouldBeStaticInEntry(const AllocaInst *AI) {
  return isa<Constant>(AI->getArraySize()) && !AI->isUsedWithInAlloca();
}

/// Update inlined instructions' line numbers to encode the location where
/// these instructions are inlined.
static void fixupLineNumbers(Function *Fn, Function::iterator FI,
                             Instruction *TheCall, bool CalleeHasDebugInfo) {
  const DebugLoc &TheCallDL = TheCall->getDebugLoc();
  if (!TheCallDL)
    return;

  auto &Ctx = Fn->getContext();
  DILocation *InlinedAtNode = TheCallDL;

  // Create a unique call site, not to be confused with any other call from the
  // same location.
  InlinedAtNode = DILocation::getDistinct(
      Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
      InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());

  // Cache the inlined-at nodes as they're built so they are reused; without
  // this, every instruction's inlined-at chain would become distinct from each
  // other.
  DenseMap<const MDNode *, MDNode *> IANodes;

  for (; FI != Fn->end(); ++FI) {
    for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
         BI != BE; ++BI) {
      if (DebugLoc DL = BI->getDebugLoc()) {
        auto IA = DebugLoc::appendInlinedAt(DL, InlinedAtNode, BI->getContext(),
                                            IANodes);
        auto IDL = DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(), IA);
        BI->setDebugLoc(IDL);
        continue;
      }

      if (CalleeHasDebugInfo)
        continue;

      // If the inlined instruction has no line number, make it look as if it
      // originates from the call location.  This is important for
      // ((__always_inline__, __nodebug__)) functions which must use caller
      // location for all instructions in their function body.

      // Don't update static allocas, as they may get moved later.
      if (auto *AI = dyn_cast<AllocaInst>(BI))
        if (allocaWouldBeStaticInEntry(AI))
          continue;

      BI->setDebugLoc(TheCallDL);
    }
  }
}

/// Update the block frequencies of the caller after a callee has been inlined.
///
/// Each block cloned into the caller has its block frequency scaled by the
/// ratio of CallSiteFreq/CalleeEntryFreq.  This ensures that the cloned copy of
/// the callee's entry block gets the same frequency as the callsite block and
/// the relative frequencies of all cloned blocks remain the same after cloning.
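///
/// E.g. (illustrative), with CalleeEntryFreq = 16 and CallSiteFreq = 8, every
/// cloned block's frequency is halved, so a callee block with frequency 4
/// becomes a cloned block with frequency 2.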
1404 static void updateCallerBFI(BasicBlock *CallSiteBlock,
1405                             const ValueToValueMapTy &VMap,
1406                             BlockFrequencyInfo *CallerBFI,
1407                             BlockFrequencyInfo *CalleeBFI,
1408                             const BasicBlock &CalleeEntryBlock) {
1409   SmallPtrSet<BasicBlock *, 16> ClonedBBs;
1410   for (auto const &Entry : VMap) {
1411     if (!isa<BasicBlock>(Entry.first) || !Entry.second)
1412       continue;
1413     auto *OrigBB = cast<BasicBlock>(Entry.first);
1414     auto *ClonedBB = cast<BasicBlock>(Entry.second);
1415     uint64_t Freq = CalleeBFI->getBlockFreq(OrigBB).getFrequency();
1416     if (!ClonedBBs.insert(ClonedBB).second) {
1417       // Multiple blocks in the callee might get mapped to one cloned block in
1418       // the caller since we prune the callee as we clone it. When that happens,
1419       // we want to use the maximum among the original blocks' frequencies.
1420       uint64_t NewFreq = CallerBFI->getBlockFreq(ClonedBB).getFrequency();
1421       if (NewFreq > Freq)
1422         Freq = NewFreq;
1423     }
1424     CallerBFI->setBlockFreq(ClonedBB, Freq);
1425   }
1426   BasicBlock *EntryClone = cast<BasicBlock>(VMap.lookup(&CalleeEntryBlock));
1427   CallerBFI->setBlockFreqAndScale(
1428       EntryClone, CallerBFI->getBlockFreq(CallSiteBlock).getFrequency(),
1429       ClonedBBs);
1430 }
1431
1432 /// Update the branch metadata for cloned call instructions.
1433 static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap,
1434                               const Optional<uint64_t> &CalleeEntryCount,
1435                               const Instruction *TheCall,
1436                               ProfileSummaryInfo *PSI,
1437                               BlockFrequencyInfo *CallerBFI) {
1438   if (!CalleeEntryCount.hasValue() || CalleeEntryCount.getValue() < 1)
1439     return;
1440   Optional<uint64_t> CallSiteCount =
1441       PSI ? PSI->getProfileCount(TheCall, CallerBFI) : None;
1442   uint64_t CallCount =
1443       std::min(CallSiteCount.hasValue() ? CallSiteCount.getValue() : 0,
1444                CalleeEntryCount.getValue());
1445
1446   for (auto const &Entry : VMap)
1447     if (isa<CallInst>(Entry.first))
1448       if (auto *CI = dyn_cast_or_null<CallInst>(Entry.second))
1449         CI->updateProfWeight(CallCount, CalleeEntryCount.getValue());
1450   for (BasicBlock &BB : *Callee)
1451     // No need to update the callsite if it is pruned during inlining.
1452     if (VMap.count(&BB))
1453       for (Instruction &I : BB)
1454         if (CallInst *CI = dyn_cast<CallInst>(&I))
1455           CI->updateProfWeight(CalleeEntryCount.getValue() - CallCount,
1456                                CalleeEntryCount.getValue());
1457 }
1458
1459 /// Update the entry count of the callee after inlining.
1460 ///
1461 /// The callsite's block count is subtracted from the callee's function entry
1462 /// count.
1463 static void updateCalleeCount(BlockFrequencyInfo *CallerBFI, BasicBlock *CallBB,
1464                               Instruction *CallInst, Function *Callee,
1465                               ProfileSummaryInfo *PSI) {
1466   // If the callee has an original count of N, and the estimated count of the
1467   // callsite is M, the new callee count is set to N - M. M is estimated from
1468   // the caller's entry count, its entry block frequency and the block frequency
1469   // of the callsite.
1470   Optional<uint64_t> CalleeCount = Callee->getEntryCount();
1471   if (!CalleeCount.hasValue() || !PSI)
1472     return;
1473   Optional<uint64_t> CallCount = PSI->getProfileCount(CallInst, CallerBFI);
1474   if (!CallCount.hasValue())
1475     return;
1476   // Since CallCount is only an estimate, it could exceed the original callee
1477   // count; in that case the new callee count is clamped to 0.
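  // For example (illustrative numbers): with an original callee count of 100
  // and an estimated callsite count of 120, the callee's entry count becomes 0
  // rather than going negative.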
1478 if (CallCount.getValue() > CalleeCount.getValue()) 1479 Callee->setEntryCount(0); 1480 else 1481 Callee->setEntryCount(CalleeCount.getValue() - CallCount.getValue()); 1482 } 1483 1484 /// This function inlines the called function into the basic block of the 1485 /// caller. This returns false if it is not possible to inline this call. 1486 /// The program is still in a well defined state if this occurs though. 1487 /// 1488 /// Note that this only does one level of inlining. For example, if the 1489 /// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now 1490 /// exists in the instruction stream. Similarly this will inline a recursive 1491 /// function by one level. 1492 bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI, 1493 AAResults *CalleeAAR, bool InsertLifetime) { 1494 Instruction *TheCall = CS.getInstruction(); 1495 assert(TheCall->getParent() && TheCall->getFunction() 1496 && "Instruction not in function!"); 1497 1498 // If IFI has any state in it, zap it before we fill it in. 1499 IFI.reset(); 1500 1501 Function *CalledFunc = CS.getCalledFunction(); 1502 if (!CalledFunc || // Can't inline external function or indirect 1503 CalledFunc->isDeclaration() || // call, or call to a vararg function! 1504 CalledFunc->getFunctionType()->isVarArg()) return false; 1505 1506 // The inliner does not know how to inline through calls with operand bundles 1507 // in general ... 1508 if (CS.hasOperandBundles()) { 1509 for (int i = 0, e = CS.getNumOperandBundles(); i != e; ++i) { 1510 uint32_t Tag = CS.getOperandBundleAt(i).getTagID(); 1511 // ... but it knows how to inline through "deopt" operand bundles ... 1512 if (Tag == LLVMContext::OB_deopt) 1513 continue; 1514 // ... and "funclet" operand bundles. 1515 if (Tag == LLVMContext::OB_funclet) 1516 continue; 1517 1518 return false; 1519 } 1520 } 1521 1522 // If the call to the callee cannot throw, set the 'nounwind' flag on any 1523 // calls that we inline. 1524 bool MarkNoUnwind = CS.doesNotThrow(); 1525 1526 BasicBlock *OrigBB = TheCall->getParent(); 1527 Function *Caller = OrigBB->getParent(); 1528 1529 // GC poses two hazards to inlining, which only occur when the callee has GC: 1530 // 1. If the caller has no GC, then the callee's GC must be propagated to the 1531 // caller. 1532 // 2. If the caller has a differing GC, it is invalid to inline. 1533 if (CalledFunc->hasGC()) { 1534 if (!Caller->hasGC()) 1535 Caller->setGC(CalledFunc->getGC()); 1536 else if (CalledFunc->getGC() != Caller->getGC()) 1537 return false; 1538 } 1539 1540 // Get the personality function from the callee if it contains a landing pad. 1541 Constant *CalledPersonality = 1542 CalledFunc->hasPersonalityFn() 1543 ? CalledFunc->getPersonalityFn()->stripPointerCasts() 1544 : nullptr; 1545 1546 // Find the personality function used by the landing pads of the caller. If it 1547 // exists, then check to see that it matches the personality function used in 1548 // the callee. 1549 Constant *CallerPersonality = 1550 Caller->hasPersonalityFn() 1551 ? Caller->getPersonalityFn()->stripPointerCasts() 1552 : nullptr; 1553 if (CalledPersonality) { 1554 if (!CallerPersonality) 1555 Caller->setPersonalityFn(CalledPersonality); 1556 // If the personality functions match, then we can perform the 1557 // inlining. Otherwise, we can't inline. 1558 // TODO: This isn't 100% true. Some personality functions are proper 1559 // supersets of others and can be used in place of the other. 
1560     else if (CalledPersonality != CallerPersonality)
1561       return false;
1562   }
1563
1564   // We need to figure out which funclet the callsite was in so that we may
1565   // properly nest the callee.
1566   Instruction *CallSiteEHPad = nullptr;
1567   if (CallerPersonality) {
1568     EHPersonality Personality = classifyEHPersonality(CallerPersonality);
1569     if (isFuncletEHPersonality(Personality)) {
1570       Optional<OperandBundleUse> ParentFunclet =
1571           CS.getOperandBundle(LLVMContext::OB_funclet);
1572       if (ParentFunclet)
1573         CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());
1574
1575       // OK, the inlining site is legal. What about the target function?
1576
1577       if (CallSiteEHPad) {
1578         if (Personality == EHPersonality::MSVC_CXX) {
1579           // The MSVC personality cannot tolerate catches getting inlined into
1580           // cleanup funclets.
1581           if (isa<CleanupPadInst>(CallSiteEHPad)) {
1582             // Ok, the call site is within a cleanuppad. Let's check the callee
1583             // for catchpads.
1584             for (const BasicBlock &CalledBB : *CalledFunc) {
1585               if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))
1586                 return false;
1587             }
1588           }
1589         } else if (isAsynchronousEHPersonality(Personality)) {
1590           // SEH is even less tolerant; there may not be any sort of exceptional
1591           // funclet in the callee.
1592           for (const BasicBlock &CalledBB : *CalledFunc) {
1593             if (CalledBB.isEHPad())
1594               return false;
1595           }
1596         }
1597       }
1598     }
1599   }
1600
1601   // Determine if we are dealing with a call in an EHPad which does not unwind
1602   // to the caller.
1603   bool EHPadForCallUnwindsLocally = false;
1604   if (CallSiteEHPad && CS.isCall()) {
1605     UnwindDestMemoTy FuncletUnwindMap;
1606     Value *CallSiteUnwindDestToken =
1607         getUnwindDestToken(CallSiteEHPad, FuncletUnwindMap);
1608
1609     EHPadForCallUnwindsLocally =
1610         CallSiteUnwindDestToken &&
1611         !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
1612   }
1613
1614   // Get an iterator to the last basic block in the function, which will have
1615   // the new function inlined after it.
1616   Function::iterator LastBlock = --Caller->end();
1617
1618   // Make sure to capture all of the return instructions from the cloned
1619   // function.
1620   SmallVector<ReturnInst*, 8> Returns;
1621   ClonedCodeInfo InlinedFunctionInfo;
1622   Function::iterator FirstNewBlock;
1623
1624   { // Scope to destroy VMap after cloning.
1625     ValueToValueMapTy VMap;
1626     // Keep a list of pairs (dst, src) to emit byval initializations.
1627     SmallVector<std::pair<Value*, Value*>, 4> ByValInit;
1628
1629     auto &DL = Caller->getParent()->getDataLayout();
1630
1631     assert(CalledFunc->arg_size() == CS.arg_size() &&
1632            "No varargs calls can be inlined!");
1633
1634     // Calculate the vector of arguments to pass into the function cloner, which
1635     // matches up the formal to the actual argument values.
1636     CallSite::arg_iterator AI = CS.arg_begin();
1637     unsigned ArgNo = 0;
1638     for (Function::arg_iterator I = CalledFunc->arg_begin(),
1639          E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
1640       Value *ActualArg = *AI;
1641
1642       // When byval arguments are actually inlined, we need to make the copy
1643       // implied by them explicit. However, we don't do this if the callee is
1644       // readonly or readnone, because the copy would be unneeded: the callee
1645       // doesn't modify the struct.
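      // As a rough sketch (hypothetical IR, simplified): for a byval argument
      // %s of type %struct.S*, HandleByValArgument may create
      //   %s.copy = alloca %struct.S
      // in the caller's entry block, and HandleByValArgumentInit later emits a
      // @llvm.memcpy of the struct's store size (with alignment 1) from %s to
      // %s.copy; the cloned callee body then uses %s.copy instead of %s.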
1646 if (CS.isByValArgument(ArgNo)) { 1647 ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI, 1648 CalledFunc->getParamAlignment(ArgNo)); 1649 if (ActualArg != *AI) 1650 ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI)); 1651 } 1652 1653 VMap[&*I] = ActualArg; 1654 } 1655 1656 // Add alignment assumptions if necessary. We do this before the inlined 1657 // instructions are actually cloned into the caller so that we can easily 1658 // check what will be known at the start of the inlined code. 1659 AddAlignmentAssumptions(CS, IFI); 1660 1661 // We want the inliner to prune the code as it copies. We would LOVE to 1662 // have no dead or constant instructions leftover after inlining occurs 1663 // (which can happen, e.g., because an argument was constant), but we'll be 1664 // happy with whatever the cloner can do. 1665 CloneAndPruneFunctionInto(Caller, CalledFunc, VMap, 1666 /*ModuleLevelChanges=*/false, Returns, ".i", 1667 &InlinedFunctionInfo, TheCall); 1668 // Remember the first block that is newly cloned over. 1669 FirstNewBlock = LastBlock; ++FirstNewBlock; 1670 1671 if (IFI.CallerBFI != nullptr && IFI.CalleeBFI != nullptr) 1672 // Update the BFI of blocks cloned into the caller. 1673 updateCallerBFI(OrigBB, VMap, IFI.CallerBFI, IFI.CalleeBFI, 1674 CalledFunc->front()); 1675 1676 updateCallProfile(CalledFunc, VMap, CalledFunc->getEntryCount(), TheCall, 1677 IFI.PSI, IFI.CallerBFI); 1678 // Update the profile count of callee. 1679 updateCalleeCount(IFI.CallerBFI, OrigBB, TheCall, CalledFunc, IFI.PSI); 1680 1681 // Inject byval arguments initialization. 1682 for (std::pair<Value*, Value*> &Init : ByValInit) 1683 HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(), 1684 &*FirstNewBlock, IFI); 1685 1686 Optional<OperandBundleUse> ParentDeopt = 1687 CS.getOperandBundle(LLVMContext::OB_deopt); 1688 if (ParentDeopt) { 1689 SmallVector<OperandBundleDef, 2> OpDefs; 1690 1691 for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) { 1692 Instruction *I = dyn_cast_or_null<Instruction>(VH); 1693 if (!I) continue; // instruction was DCE'd or RAUW'ed to undef 1694 1695 OpDefs.clear(); 1696 1697 CallSite ICS(I); 1698 OpDefs.reserve(ICS.getNumOperandBundles()); 1699 1700 for (unsigned i = 0, e = ICS.getNumOperandBundles(); i < e; ++i) { 1701 auto ChildOB = ICS.getOperandBundleAt(i); 1702 if (ChildOB.getTagID() != LLVMContext::OB_deopt) { 1703 // If the inlined call has other operand bundles, let them be 1704 OpDefs.emplace_back(ChildOB); 1705 continue; 1706 } 1707 1708 // It may be useful to separate this logic (of handling operand 1709 // bundles) out to a separate "policy" component if this gets crowded. 1710 // Prepend the parent's deoptimization continuation to the newly 1711 // inlined call's deoptimization continuation. 
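          // For example (illustrative operands): a parent bundle
          // "deopt"(i32 1, i32 2) merged with a child bundle "deopt"(i32 3)
          // yields "deopt"(i32 1, i32 2, i32 3) on the rewritten call below.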
1712 std::vector<Value *> MergedDeoptArgs; 1713 MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() + 1714 ChildOB.Inputs.size()); 1715 1716 MergedDeoptArgs.insert(MergedDeoptArgs.end(), 1717 ParentDeopt->Inputs.begin(), 1718 ParentDeopt->Inputs.end()); 1719 MergedDeoptArgs.insert(MergedDeoptArgs.end(), ChildOB.Inputs.begin(), 1720 ChildOB.Inputs.end()); 1721 1722 OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs)); 1723 } 1724 1725 Instruction *NewI = nullptr; 1726 if (isa<CallInst>(I)) 1727 NewI = CallInst::Create(cast<CallInst>(I), OpDefs, I); 1728 else 1729 NewI = InvokeInst::Create(cast<InvokeInst>(I), OpDefs, I); 1730 1731 // Note: the RAUW does the appropriate fixup in VMap, so we need to do 1732 // this even if the call returns void. 1733 I->replaceAllUsesWith(NewI); 1734 1735 VH = nullptr; 1736 I->eraseFromParent(); 1737 } 1738 } 1739 1740 // Update the callgraph if requested. 1741 if (IFI.CG) 1742 UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI); 1743 1744 // For 'nodebug' functions, the associated DISubprogram is always null. 1745 // Conservatively avoid propagating the callsite debug location to 1746 // instructions inlined from a function whose DISubprogram is not null. 1747 fixupLineNumbers(Caller, FirstNewBlock, TheCall, 1748 CalledFunc->getSubprogram() != nullptr); 1749 1750 // Clone existing noalias metadata if necessary. 1751 CloneAliasScopeMetadata(CS, VMap); 1752 1753 // Add noalias metadata if necessary. 1754 AddAliasScopeMetadata(CS, VMap, DL, CalleeAAR); 1755 1756 // Propagate llvm.mem.parallel_loop_access if necessary. 1757 PropagateParallelLoopAccessMetadata(CS, VMap); 1758 1759 // Register any cloned assumptions. 1760 if (IFI.GetAssumptionCache) 1761 for (BasicBlock &NewBlock : 1762 make_range(FirstNewBlock->getIterator(), Caller->end())) 1763 for (Instruction &I : NewBlock) { 1764 if (auto *II = dyn_cast<IntrinsicInst>(&I)) 1765 if (II->getIntrinsicID() == Intrinsic::assume) 1766 (*IFI.GetAssumptionCache)(*Caller).registerAssumption(II); 1767 } 1768 } 1769 1770 // If there are any alloca instructions in the block that used to be the entry 1771 // block for the callee, move them to the entry block of the caller. First 1772 // calculate which instruction they should be inserted before. We insert the 1773 // instructions at the end of the current alloca list. 1774 { 1775 BasicBlock::iterator InsertPoint = Caller->begin()->begin(); 1776 for (BasicBlock::iterator I = FirstNewBlock->begin(), 1777 E = FirstNewBlock->end(); I != E; ) { 1778 AllocaInst *AI = dyn_cast<AllocaInst>(I++); 1779 if (!AI) continue; 1780 1781 // If the alloca is now dead, remove it. This often occurs due to code 1782 // specialization. 1783 if (AI->use_empty()) { 1784 AI->eraseFromParent(); 1785 continue; 1786 } 1787 1788 if (!allocaWouldBeStaticInEntry(AI)) 1789 continue; 1790 1791 // Keep track of the static allocas that we inline into the caller. 1792 IFI.StaticAllocas.push_back(AI); 1793 1794 // Scan for the block of allocas that we can move over, and move them 1795 // all at once. 1796 while (isa<AllocaInst>(I) && 1797 allocaWouldBeStaticInEntry(cast<AllocaInst>(I))) { 1798 IFI.StaticAllocas.push_back(cast<AllocaInst>(I)); 1799 ++I; 1800 } 1801 1802 // Transfer all of the allocas over in a block. Using splice means 1803 // that the instructions aren't removed from the symbol table, then 1804 // reinserted. 
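      // For illustration (hypothetical IR): a static alloca from the callee's
      // entry block such as
      //   %tmp.i = alloca [16 x i8]
      // is moved into the caller's entry block together with any neighboring
      // static allocas, so it behaves as a static alloca of the caller.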
1805 Caller->getEntryBlock().getInstList().splice( 1806 InsertPoint, FirstNewBlock->getInstList(), AI->getIterator(), I); 1807 } 1808 // Move any dbg.declares describing the allocas into the entry basic block. 1809 DIBuilder DIB(*Caller->getParent()); 1810 for (auto &AI : IFI.StaticAllocas) 1811 replaceDbgDeclareForAlloca(AI, AI, DIB, /*Deref=*/false); 1812 } 1813 1814 bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false; 1815 if (InlinedFunctionInfo.ContainsCalls) { 1816 CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None; 1817 if (CallInst *CI = dyn_cast<CallInst>(TheCall)) 1818 CallSiteTailKind = CI->getTailCallKind(); 1819 1820 for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E; 1821 ++BB) { 1822 for (Instruction &I : *BB) { 1823 CallInst *CI = dyn_cast<CallInst>(&I); 1824 if (!CI) 1825 continue; 1826 1827 if (Function *F = CI->getCalledFunction()) 1828 InlinedDeoptimizeCalls |= 1829 F->getIntrinsicID() == Intrinsic::experimental_deoptimize; 1830 1831 // We need to reduce the strength of any inlined tail calls. For 1832 // musttail, we have to avoid introducing potential unbounded stack 1833 // growth. For example, if functions 'f' and 'g' are mutually recursive 1834 // with musttail, we can inline 'g' into 'f' so long as we preserve 1835 // musttail on the cloned call to 'f'. If either the inlined call site 1836 // or the cloned call site is *not* musttail, the program already has 1837 // one frame of stack growth, so it's safe to remove musttail. Here is 1838 // a table of example transformations: 1839 // 1840 // f -> musttail g -> musttail f ==> f -> musttail f 1841 // f -> musttail g -> tail f ==> f -> tail f 1842 // f -> g -> musttail f ==> f -> f 1843 // f -> g -> tail f ==> f -> f 1844 CallInst::TailCallKind ChildTCK = CI->getTailCallKind(); 1845 ChildTCK = std::min(CallSiteTailKind, ChildTCK); 1846 CI->setTailCallKind(ChildTCK); 1847 InlinedMustTailCalls |= CI->isMustTailCall(); 1848 1849 // Calls inlined through a 'nounwind' call site should be marked 1850 // 'nounwind'. 1851 if (MarkNoUnwind) 1852 CI->setDoesNotThrow(); 1853 } 1854 } 1855 } 1856 1857 // Leave lifetime markers for the static alloca's, scoping them to the 1858 // function we just inlined. 1859 if (InsertLifetime && !IFI.StaticAllocas.empty()) { 1860 IRBuilder<> builder(&FirstNewBlock->front()); 1861 for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) { 1862 AllocaInst *AI = IFI.StaticAllocas[ai]; 1863 // Don't mark swifterror allocas. They can't have bitcast uses. 1864 if (AI->isSwiftError()) 1865 continue; 1866 1867 // If the alloca is already scoped to something smaller than the whole 1868 // function then there's no need to add redundant, less accurate markers. 1869 if (hasLifetimeMarkers(AI)) 1870 continue; 1871 1872 // Try to determine the size of the allocation. 1873 ConstantInt *AllocaSize = nullptr; 1874 if (ConstantInt *AIArraySize = 1875 dyn_cast<ConstantInt>(AI->getArraySize())) { 1876 auto &DL = Caller->getParent()->getDataLayout(); 1877 Type *AllocaType = AI->getAllocatedType(); 1878 uint64_t AllocaTypeSize = DL.getTypeAllocSize(AllocaType); 1879 uint64_t AllocaArraySize = AIArraySize->getLimitedValue(); 1880 1881 // Don't add markers for zero-sized allocas. 1882 if (AllocaArraySize == 0) 1883 continue; 1884 1885 // Check that array size doesn't saturate uint64_t and doesn't 1886 // overflow when it's multiplied by type size. 
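        // Restating the check below: we require that AllocaArraySize is not
        // the saturated value UINT64_MAX and that
        //   AllocaTypeSize <= UINT64_MAX / AllocaArraySize
        // so that the multiplication AllocaArraySize * AllocaTypeSize cannot
        // overflow a uint64_t.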
1887 if (AllocaArraySize != std::numeric_limits<uint64_t>::max() && 1888 std::numeric_limits<uint64_t>::max() / AllocaArraySize >= 1889 AllocaTypeSize) { 1890 AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()), 1891 AllocaArraySize * AllocaTypeSize); 1892 } 1893 } 1894 1895 builder.CreateLifetimeStart(AI, AllocaSize); 1896 for (ReturnInst *RI : Returns) { 1897 // Don't insert llvm.lifetime.end calls between a musttail or deoptimize 1898 // call and a return. The return kills all local allocas. 1899 if (InlinedMustTailCalls && 1900 RI->getParent()->getTerminatingMustTailCall()) 1901 continue; 1902 if (InlinedDeoptimizeCalls && 1903 RI->getParent()->getTerminatingDeoptimizeCall()) 1904 continue; 1905 IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize); 1906 } 1907 } 1908 } 1909 1910 // If the inlined code contained dynamic alloca instructions, wrap the inlined 1911 // code with llvm.stacksave/llvm.stackrestore intrinsics. 1912 if (InlinedFunctionInfo.ContainsDynamicAllocas) { 1913 Module *M = Caller->getParent(); 1914 // Get the two intrinsics we care about. 1915 Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave); 1916 Function *StackRestore=Intrinsic::getDeclaration(M,Intrinsic::stackrestore); 1917 1918 // Insert the llvm.stacksave. 1919 CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin()) 1920 .CreateCall(StackSave, {}, "savedstack"); 1921 1922 // Insert a call to llvm.stackrestore before any return instructions in the 1923 // inlined function. 1924 for (ReturnInst *RI : Returns) { 1925 // Don't insert llvm.stackrestore calls between a musttail or deoptimize 1926 // call and a return. The return will restore the stack pointer. 1927 if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall()) 1928 continue; 1929 if (InlinedDeoptimizeCalls && RI->getParent()->getTerminatingDeoptimizeCall()) 1930 continue; 1931 IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr); 1932 } 1933 } 1934 1935 // If we are inlining for an invoke instruction, we must make sure to rewrite 1936 // any call instructions into invoke instructions. This is sensitive to which 1937 // funclet pads were top-level in the inlinee, so must be done before 1938 // rewriting the "parent pad" links. 1939 if (auto *II = dyn_cast<InvokeInst>(TheCall)) { 1940 BasicBlock *UnwindDest = II->getUnwindDest(); 1941 Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI(); 1942 if (isa<LandingPadInst>(FirstNonPHI)) { 1943 HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo); 1944 } else { 1945 HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo); 1946 } 1947 } 1948 1949 // Update the lexical scopes of the new funclets and callsites. 1950 // Anything that had 'none' as its parent is now nested inside the callsite's 1951 // EHPad. 1952 1953 if (CallSiteEHPad) { 1954 for (Function::iterator BB = FirstNewBlock->getIterator(), 1955 E = Caller->end(); 1956 BB != E; ++BB) { 1957 // Add bundle operands to any top-level call sites. 1958 SmallVector<OperandBundleDef, 1> OpBundles; 1959 for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E;) { 1960 Instruction *I = &*BBI++; 1961 CallSite CS(I); 1962 if (!CS) 1963 continue; 1964 1965 // Skip call sites which are nounwind intrinsics. 1966 auto *CalledFn = 1967 dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts()); 1968 if (CalledFn && CalledFn->isIntrinsic() && CS.doesNotThrow()) 1969 continue; 1970 1971 // Skip call sites which already have a "funclet" bundle. 
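        // As an illustration (hypothetical IR): a call that already carries
        //   call void @g() [ "funclet"(token %cleanup.pad) ]
        // is left alone; the code below adds exactly this kind of bundle to
        // the remaining top-level call sites.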
1972         if (CS.getOperandBundle(LLVMContext::OB_funclet))
1973           continue;
1974
1975         CS.getOperandBundlesAsDefs(OpBundles);
1976         OpBundles.emplace_back("funclet", CallSiteEHPad);
1977
1978         Instruction *NewInst;
1979         if (CS.isCall())
1980           NewInst = CallInst::Create(cast<CallInst>(I), OpBundles, I);
1981         else
1982           NewInst = InvokeInst::Create(cast<InvokeInst>(I), OpBundles, I);
1983         NewInst->takeName(I);
1984         I->replaceAllUsesWith(NewInst);
1985         I->eraseFromParent();
1986
1987         OpBundles.clear();
1988       }
1989
1990       // It is problematic if the inlinee has a cleanupret which unwinds to
1991       // the caller and we inline it into a call site which doesn't itself
1992       // unwind but sits inside an EH pad that does. Such an edge must be
1993       // dynamically unreachable, so we replace the cleanupret with unreachable.
1994       if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
1995         if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
1996           changeToUnreachable(CleanupRet, /*UseLLVMTrap=*/false);
1997
1998       Instruction *I = BB->getFirstNonPHI();
1999       if (!I->isEHPad())
2000         continue;
2001
2002       if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
2003         if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
2004           CatchSwitch->setParentPad(CallSiteEHPad);
2005       } else {
2006         auto *FPI = cast<FuncletPadInst>(I);
2007         if (isa<ConstantTokenNone>(FPI->getParentPad()))
2008           FPI->setParentPad(CallSiteEHPad);
2009       }
2010     }
2011   }
2012
2013   if (InlinedDeoptimizeCalls) {
2014     // We need to at least remove the deoptimizing returns from the Returns set,
2015     // so that the control flow from those returns does not get merged into the
2016     // caller (but terminates it instead). If the caller's return type does not
2017     // match the callee's return type, we also need to change the return type of
2018     // the intrinsic.
2019     if (Caller->getReturnType() == TheCall->getType()) {
2020       auto NewEnd = llvm::remove_if(Returns, [](ReturnInst *RI) {
2021         return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr;
2022       });
2023       Returns.erase(NewEnd, Returns.end());
2024     } else {
2025       SmallVector<ReturnInst *, 8> NormalReturns;
2026       Function *NewDeoptIntrinsic = Intrinsic::getDeclaration(
2027           Caller->getParent(), Intrinsic::experimental_deoptimize,
2028           {Caller->getReturnType()});
2029
2030       for (ReturnInst *RI : Returns) {
2031         CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall();
2032         if (!DeoptCall) {
2033           NormalReturns.push_back(RI);
2034           continue;
2035         }
2036
2037         // The calling convention on the deoptimize call itself may be bogus,
2038         // since the code we're inlining may have undefined behavior (and may
2039         // never actually execute at runtime); but all
2040         // @llvm.experimental.deoptimize declarations have to have the same
2041         // calling convention in a well-formed module.
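        // Illustrative sketch (hypothetical types): a cloned block ending in
        //   %v = call i32 @llvm.experimental.deoptimize.i32(...) [ "deopt"(...) ]
        //   ret i32 %v
        // is rewritten, when the caller returns i64, to call
        // @llvm.experimental.deoptimize.i64 with the same arguments and
        // operand bundles, followed by a ret of the new call's result.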
2042 auto CallingConv = DeoptCall->getCalledFunction()->getCallingConv(); 2043 NewDeoptIntrinsic->setCallingConv(CallingConv); 2044 auto *CurBB = RI->getParent(); 2045 RI->eraseFromParent(); 2046 2047 SmallVector<Value *, 4> CallArgs(DeoptCall->arg_begin(), 2048 DeoptCall->arg_end()); 2049 2050 SmallVector<OperandBundleDef, 1> OpBundles; 2051 DeoptCall->getOperandBundlesAsDefs(OpBundles); 2052 DeoptCall->eraseFromParent(); 2053 assert(!OpBundles.empty() && 2054 "Expected at least the deopt operand bundle"); 2055 2056 IRBuilder<> Builder(CurBB); 2057 CallInst *NewDeoptCall = 2058 Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles); 2059 NewDeoptCall->setCallingConv(CallingConv); 2060 if (NewDeoptCall->getType()->isVoidTy()) 2061 Builder.CreateRetVoid(); 2062 else 2063 Builder.CreateRet(NewDeoptCall); 2064 } 2065 2066 // Leave behind the normal returns so we can merge control flow. 2067 std::swap(Returns, NormalReturns); 2068 } 2069 } 2070 2071 // Handle any inlined musttail call sites. In order for a new call site to be 2072 // musttail, the source of the clone and the inlined call site must have been 2073 // musttail. Therefore it's safe to return without merging control into the 2074 // phi below. 2075 if (InlinedMustTailCalls) { 2076 // Check if we need to bitcast the result of any musttail calls. 2077 Type *NewRetTy = Caller->getReturnType(); 2078 bool NeedBitCast = !TheCall->use_empty() && TheCall->getType() != NewRetTy; 2079 2080 // Handle the returns preceded by musttail calls separately. 2081 SmallVector<ReturnInst *, 8> NormalReturns; 2082 for (ReturnInst *RI : Returns) { 2083 CallInst *ReturnedMustTail = 2084 RI->getParent()->getTerminatingMustTailCall(); 2085 if (!ReturnedMustTail) { 2086 NormalReturns.push_back(RI); 2087 continue; 2088 } 2089 if (!NeedBitCast) 2090 continue; 2091 2092 // Delete the old return and any preceding bitcast. 2093 BasicBlock *CurBB = RI->getParent(); 2094 auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue()); 2095 RI->eraseFromParent(); 2096 if (OldCast) 2097 OldCast->eraseFromParent(); 2098 2099 // Insert a new bitcast and return with the right type. 2100 IRBuilder<> Builder(CurBB); 2101 Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy)); 2102 } 2103 2104 // Leave behind the normal returns so we can merge control flow. 2105 std::swap(Returns, NormalReturns); 2106 } 2107 2108 // Now that all of the transforms on the inlined code have taken place but 2109 // before we splice the inlined code into the CFG and lose track of which 2110 // blocks were actually inlined, collect the call sites. We only do this if 2111 // call graph updates weren't requested, as those provide value handle based 2112 // tracking of inlined call sites instead. 2113 if (InlinedFunctionInfo.ContainsCalls && !IFI.CG) { 2114 // Otherwise just collect the raw call sites that were inlined. 2115 for (BasicBlock &NewBB : 2116 make_range(FirstNewBlock->getIterator(), Caller->end())) 2117 for (Instruction &I : NewBB) 2118 if (auto CS = CallSite(&I)) 2119 IFI.InlinedCallSites.push_back(CS); 2120 } 2121 2122 // If we cloned in _exactly one_ basic block, and if that block ends in a 2123 // return instruction, we splice the body of the inlined callee directly into 2124 // the calling basic block. 2125 if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) { 2126 // Move all of the instructions right before the call. 
2127 OrigBB->getInstList().splice(TheCall->getIterator(), 2128 FirstNewBlock->getInstList(), 2129 FirstNewBlock->begin(), FirstNewBlock->end()); 2130 // Remove the cloned basic block. 2131 Caller->getBasicBlockList().pop_back(); 2132 2133 // If the call site was an invoke instruction, add a branch to the normal 2134 // destination. 2135 if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) { 2136 BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall); 2137 NewBr->setDebugLoc(Returns[0]->getDebugLoc()); 2138 } 2139 2140 // If the return instruction returned a value, replace uses of the call with 2141 // uses of the returned value. 2142 if (!TheCall->use_empty()) { 2143 ReturnInst *R = Returns[0]; 2144 if (TheCall == R->getReturnValue()) 2145 TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType())); 2146 else 2147 TheCall->replaceAllUsesWith(R->getReturnValue()); 2148 } 2149 // Since we are now done with the Call/Invoke, we can delete it. 2150 TheCall->eraseFromParent(); 2151 2152 // Since we are now done with the return instruction, delete it also. 2153 Returns[0]->eraseFromParent(); 2154 2155 // We are now done with the inlining. 2156 return true; 2157 } 2158 2159 // Otherwise, we have the normal case, of more than one block to inline or 2160 // multiple return sites. 2161 2162 // We want to clone the entire callee function into the hole between the 2163 // "starter" and "ender" blocks. How we accomplish this depends on whether 2164 // this is an invoke instruction or a call instruction. 2165 BasicBlock *AfterCallBB; 2166 BranchInst *CreatedBranchToNormalDest = nullptr; 2167 if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) { 2168 2169 // Add an unconditional branch to make this look like the CallInst case... 2170 CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), TheCall); 2171 2172 // Split the basic block. This guarantees that no PHI nodes will have to be 2173 // updated due to new incoming edges, and make the invoke case more 2174 // symmetric to the call case. 2175 AfterCallBB = 2176 OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(), 2177 CalledFunc->getName() + ".exit"); 2178 2179 } else { // It's a call 2180 // If this is a call instruction, we need to split the basic block that 2181 // the call lives in. 2182 // 2183 AfterCallBB = OrigBB->splitBasicBlock(TheCall->getIterator(), 2184 CalledFunc->getName() + ".exit"); 2185 } 2186 2187 if (IFI.CallerBFI) { 2188 // Copy original BB's block frequency to AfterCallBB 2189 IFI.CallerBFI->setBlockFreq( 2190 AfterCallBB, IFI.CallerBFI->getBlockFreq(OrigBB).getFrequency()); 2191 } 2192 2193 // Change the branch that used to go to AfterCallBB to branch to the first 2194 // basic block of the inlined function. 2195 // 2196 TerminatorInst *Br = OrigBB->getTerminator(); 2197 assert(Br && Br->getOpcode() == Instruction::Br && 2198 "splitBasicBlock broken!"); 2199 Br->setOperand(0, &*FirstNewBlock); 2200 2201 // Now that the function is correct, make it a little bit nicer. In 2202 // particular, move the basic blocks inserted from the end of the function 2203 // into the space made by splitting the source basic block. 2204 Caller->getBasicBlockList().splice(AfterCallBB->getIterator(), 2205 Caller->getBasicBlockList(), FirstNewBlock, 2206 Caller->end()); 2207 2208 // Handle all of the return instructions that we just cloned in, and eliminate 2209 // any users of the original call/invoke instruction. 
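  // As an illustrative example (hypothetical values): if the inlined body
  // returns %x from block %bb1 and %y from block %bb2, the code below creates
  //   %r = phi i32 [ %x, %bb1 ], [ %y, %bb2 ]
  // at the top of the ".exit" block and redirects users of the original
  // call/invoke to %r.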
2210   Type *RTy = CalledFunc->getReturnType();
2211
2212   PHINode *PHI = nullptr;
2213   if (Returns.size() > 1) {
2214     // The PHI node should go at the front of the new basic block to merge all
2215     // possible incoming values.
2216     if (!TheCall->use_empty()) {
2217       PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(),
2218                             &AfterCallBB->front());
2219       // Anything that used the result of the function call should now use the
2220       // PHI node as their operand.
2221       TheCall->replaceAllUsesWith(PHI);
2222     }
2223
2224     // Loop over all of the return instructions adding entries to the PHI node
2225     // as appropriate.
2226     if (PHI) {
2227       for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2228         ReturnInst *RI = Returns[i];
2229         assert(RI->getReturnValue()->getType() == PHI->getType() &&
2230                "Ret value not consistent in function!");
2231         PHI->addIncoming(RI->getReturnValue(), RI->getParent());
2232       }
2233     }
2234
2235     // Add a branch to the merge point and remove return instructions.
2236     DebugLoc Loc;
2237     for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2238       ReturnInst *RI = Returns[i];
2239       BranchInst* BI = BranchInst::Create(AfterCallBB, RI);
2240       Loc = RI->getDebugLoc();
2241       BI->setDebugLoc(Loc);
2242       RI->eraseFromParent();
2243     }
2244     // We need to set the debug location to *somewhere* inside the
2245     // inlined function. The line number may be nonsensical, but the
2246     // instruction will at least be associated with the right
2247     // function.
2248     if (CreatedBranchToNormalDest)
2249       CreatedBranchToNormalDest->setDebugLoc(Loc);
2250   } else if (!Returns.empty()) {
2251     // Otherwise, if there is exactly one return value, just replace anything
2252     // using the return value of the call with the computed value.
2253     if (!TheCall->use_empty()) {
2254       if (TheCall == Returns[0]->getReturnValue())
2255         TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
2256       else
2257         TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
2258     }
2259
2260     // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
2261     BasicBlock *ReturnBB = Returns[0]->getParent();
2262     ReturnBB->replaceAllUsesWith(AfterCallBB);
2263
2264     // Splice the code from the return block into the block that it will return
2265     // to, which contains the code that was after the call.
2266     AfterCallBB->getInstList().splice(AfterCallBB->begin(),
2267                                       ReturnBB->getInstList());
2268
2269     if (CreatedBranchToNormalDest)
2270       CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());
2271
2272     // Delete the return instruction and the now-empty ReturnBB.
2273     Returns[0]->eraseFromParent();
2274     ReturnBB->eraseFromParent();
2275   } else if (!TheCall->use_empty()) {
2276     // No returns, but something is using the return value of the call. Just
2277     // nuke the result.
2278     TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
2279   }
2280
2281   // Since we are now done with the Call/Invoke, we can delete it.
2282   TheCall->eraseFromParent();
2283
2284   // If we inlined any musttail calls and the original return is now
2285   // unreachable, delete it. It can only contain a bitcast and ret.
2286   if (InlinedMustTailCalls && pred_begin(AfterCallBB) == pred_end(AfterCallBB))
2287     AfterCallBB->eraseFromParent();
2288
2289   // We should always be able to fold the entry block of the function into the
2290   // single predecessor of the block...
2291 assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!"); 2292 BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0); 2293 2294 // Splice the code entry block into calling block, right before the 2295 // unconditional branch. 2296 CalleeEntry->replaceAllUsesWith(OrigBB); // Update PHI nodes 2297 OrigBB->getInstList().splice(Br->getIterator(), CalleeEntry->getInstList()); 2298 2299 // Remove the unconditional branch. 2300 OrigBB->getInstList().erase(Br); 2301 2302 // Now we can remove the CalleeEntry block, which is now empty. 2303 Caller->getBasicBlockList().erase(CalleeEntry); 2304 2305 // If we inserted a phi node, check to see if it has a single value (e.g. all 2306 // the entries are the same or undef). If so, remove the PHI so it doesn't 2307 // block other optimizations. 2308 if (PHI) { 2309 AssumptionCache *AC = 2310 IFI.GetAssumptionCache ? &(*IFI.GetAssumptionCache)(*Caller) : nullptr; 2311 auto &DL = Caller->getParent()->getDataLayout(); 2312 if (Value *V = SimplifyInstruction(PHI, {DL, nullptr, nullptr, AC})) { 2313 PHI->replaceAllUsesWith(V); 2314 PHI->eraseFromParent(); 2315 } 2316 } 2317 2318 return true; 2319 } 2320