1 //===- InlineFunction.cpp - Code to perform function inlining -------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file implements inlining of a function into a call site, resolving 11 // parameters and the return value as appropriate. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "llvm/ADT/DenseMap.h" 16 #include "llvm/ADT/None.h" 17 #include "llvm/ADT/Optional.h" 18 #include "llvm/ADT/STLExtras.h" 19 #include "llvm/ADT/SetVector.h" 20 #include "llvm/ADT/SmallPtrSet.h" 21 #include "llvm/ADT/SmallVector.h" 22 #include "llvm/ADT/StringExtras.h" 23 #include "llvm/ADT/iterator_range.h" 24 #include "llvm/Analysis/AliasAnalysis.h" 25 #include "llvm/Analysis/AssumptionCache.h" 26 #include "llvm/Analysis/BlockFrequencyInfo.h" 27 #include "llvm/Analysis/CallGraph.h" 28 #include "llvm/Analysis/CaptureTracking.h" 29 #include "llvm/Analysis/EHPersonalities.h" 30 #include "llvm/Analysis/InstructionSimplify.h" 31 #include "llvm/Analysis/ProfileSummaryInfo.h" 32 #include "llvm/Transforms/Utils/Local.h" 33 #include "llvm/Analysis/ValueTracking.h" 34 #include "llvm/IR/Argument.h" 35 #include "llvm/IR/BasicBlock.h" 36 #include "llvm/IR/CFG.h" 37 #include "llvm/IR/CallSite.h" 38 #include "llvm/IR/Constant.h" 39 #include "llvm/IR/Constants.h" 40 #include "llvm/IR/DIBuilder.h" 41 #include "llvm/IR/DataLayout.h" 42 #include "llvm/IR/DebugInfoMetadata.h" 43 #include "llvm/IR/DebugLoc.h" 44 #include "llvm/IR/DerivedTypes.h" 45 #include "llvm/IR/Dominators.h" 46 #include "llvm/IR/Function.h" 47 #include "llvm/IR/IRBuilder.h" 48 #include "llvm/IR/InstrTypes.h" 49 #include "llvm/IR/Instruction.h" 50 #include "llvm/IR/Instructions.h" 51 #include "llvm/IR/IntrinsicInst.h" 52 #include "llvm/IR/Intrinsics.h" 53 #include "llvm/IR/LLVMContext.h" 54 #include "llvm/IR/MDBuilder.h" 55 #include "llvm/IR/Metadata.h" 56 #include "llvm/IR/Module.h" 57 #include "llvm/IR/Type.h" 58 #include "llvm/IR/User.h" 59 #include "llvm/IR/Value.h" 60 #include "llvm/Support/Casting.h" 61 #include "llvm/Support/CommandLine.h" 62 #include "llvm/Support/ErrorHandling.h" 63 #include "llvm/Transforms/Utils/Cloning.h" 64 #include "llvm/Transforms/Utils/ValueMapper.h" 65 #include <algorithm> 66 #include <cassert> 67 #include <cstdint> 68 #include <iterator> 69 #include <limits> 70 #include <string> 71 #include <utility> 72 #include <vector> 73 74 using namespace llvm; 75 using ProfileCount = Function::ProfileCount; 76 77 static cl::opt<bool> 78 EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true), 79 cl::Hidden, 80 cl::desc("Convert noalias attributes to metadata during inlining.")); 81 82 static cl::opt<bool> 83 PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining", 84 cl::init(true), cl::Hidden, 85 cl::desc("Convert align attributes to assumptions during inlining.")); 86 87 llvm::InlineResult llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI, 88 AAResults *CalleeAAR, 89 bool InsertLifetime) { 90 return InlineFunction(CallSite(CI), IFI, CalleeAAR, InsertLifetime); 91 } 92 93 llvm::InlineResult llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI, 94 AAResults *CalleeAAR, 95 bool InsertLifetime) { 96 return InlineFunction(CallSite(II), IFI, CalleeAAR, InsertLifetime); 97 } 98 99 namespace { 100 
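// For a rough picture of the problem the next class solves (an informal
// sketch, not taken from any particular test): when a call site such as
//
//   invoke void @callee() to label %normal unwind label %lpad
//
// is inlined, the calls and 'resume' instructions inside the inlined body of
// @callee must be rewired so that exceptions still reach %lpad in the caller.
// The helper below records the state needed to do that rewiring.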
101 /// A class for recording information about inlining a landing pad. 102 class LandingPadInliningInfo { 103 /// Destination of the invoke's unwind. 104 BasicBlock *OuterResumeDest; 105 106 /// Destination for the callee's resume. 107 BasicBlock *InnerResumeDest = nullptr; 108 109 /// LandingPadInst associated with the invoke. 110 LandingPadInst *CallerLPad = nullptr; 111 112 /// PHI for EH values from landingpad insts. 113 PHINode *InnerEHValuesPHI = nullptr; 114 115 SmallVector<Value*, 8> UnwindDestPHIValues; 116 117 public: 118 LandingPadInliningInfo(InvokeInst *II) 119 : OuterResumeDest(II->getUnwindDest()) { 120 // If there are PHI nodes in the unwind destination block, we need to keep 121 // track of which values came into them from the invoke before removing 122 // the edge from this block. 123 BasicBlock *InvokeBB = II->getParent(); 124 BasicBlock::iterator I = OuterResumeDest->begin(); 125 for (; isa<PHINode>(I); ++I) { 126 // Save the value to use for this edge. 127 PHINode *PHI = cast<PHINode>(I); 128 UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB)); 129 } 130 131 CallerLPad = cast<LandingPadInst>(I); 132 } 133 134 /// The outer unwind destination is the target of 135 /// unwind edges introduced for calls within the inlined function. 136 BasicBlock *getOuterResumeDest() const { 137 return OuterResumeDest; 138 } 139 140 BasicBlock *getInnerResumeDest(); 141 142 LandingPadInst *getLandingPadInst() const { return CallerLPad; } 143 144 /// Forward the 'resume' instruction to the caller's landing pad block. 145 /// When the landing pad block has only one predecessor, this is 146 /// a simple branch. When there is more than one predecessor, we need to 147 /// split the landing pad block after the landingpad instruction and jump 148 /// to there. 149 void forwardResume(ResumeInst *RI, 150 SmallPtrSetImpl<LandingPadInst*> &InlinedLPads); 151 152 /// Add incoming-PHI values to the unwind destination block for the given 153 /// basic block, using the values for the original invoke's source block. 154 void addIncomingPHIValuesFor(BasicBlock *BB) const { 155 addIncomingPHIValuesForInto(BB, OuterResumeDest); 156 } 157 158 void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const { 159 BasicBlock::iterator I = dest->begin(); 160 for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) { 161 PHINode *phi = cast<PHINode>(I); 162 phi->addIncoming(UnwindDestPHIValues[i], src); 163 } 164 } 165 }; 166 167 } // end anonymous namespace 168 169 /// Get or create a target for the branch from ResumeInsts. 170 BasicBlock *LandingPadInliningInfo::getInnerResumeDest() { 171 if (InnerResumeDest) return InnerResumeDest; 172 173 // Split the landing pad. 174 BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator(); 175 InnerResumeDest = 176 OuterResumeDest->splitBasicBlock(SplitPoint, 177 OuterResumeDest->getName() + ".body"); 178 179 // The number of incoming edges we expect to the inner landing pad. 180 const unsigned PHICapacity = 2; 181 182 // Create corresponding new PHIs for all the PHIs in the outer landing pad. 
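  // (Rough picture: after the split above, OuterResumeDest keeps only its PHI
  // nodes and the landingpad instruction, while the rest of the block now
  // lives in InnerResumeDest. Each outer PHI therefore gets a counterpart in
  // the inner block, created below, which merges the original value with
  // whatever the forwarded resume instructions later supply.)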
183 Instruction *InsertPoint = &InnerResumeDest->front(); 184 BasicBlock::iterator I = OuterResumeDest->begin(); 185 for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) { 186 PHINode *OuterPHI = cast<PHINode>(I); 187 PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity, 188 OuterPHI->getName() + ".lpad-body", 189 InsertPoint); 190 OuterPHI->replaceAllUsesWith(InnerPHI); 191 InnerPHI->addIncoming(OuterPHI, OuterResumeDest); 192 } 193 194 // Create a PHI for the exception values. 195 InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity, 196 "eh.lpad-body", InsertPoint); 197 CallerLPad->replaceAllUsesWith(InnerEHValuesPHI); 198 InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest); 199 200 // All done. 201 return InnerResumeDest; 202 } 203 204 /// Forward the 'resume' instruction to the caller's landing pad block. 205 /// When the landing pad block has only one predecessor, this is a simple 206 /// branch. When there is more than one predecessor, we need to split the 207 /// landing pad block after the landingpad instruction and jump to there. 208 void LandingPadInliningInfo::forwardResume( 209 ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) { 210 BasicBlock *Dest = getInnerResumeDest(); 211 BasicBlock *Src = RI->getParent(); 212 213 BranchInst::Create(Dest, Src); 214 215 // Update the PHIs in the destination. They were inserted in an order which 216 // makes this work. 217 addIncomingPHIValuesForInto(Src, Dest); 218 219 InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src); 220 RI->eraseFromParent(); 221 } 222 223 /// Helper for getUnwindDestToken/getUnwindDestTokenHelper. 224 static Value *getParentPad(Value *EHPad) { 225 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad)) 226 return FPI->getParentPad(); 227 return cast<CatchSwitchInst>(EHPad)->getParentPad(); 228 } 229 230 using UnwindDestMemoTy = DenseMap<Instruction *, Value *>; 231 232 /// Helper for getUnwindDestToken that does the descendant-ward part of 233 /// the search. 234 static Value *getUnwindDestTokenHelper(Instruction *EHPad, 235 UnwindDestMemoTy &MemoMap) { 236 SmallVector<Instruction *, 8> Worklist(1, EHPad); 237 238 while (!Worklist.empty()) { 239 Instruction *CurrentPad = Worklist.pop_back_val(); 240 // We only put pads on the worklist that aren't in the MemoMap. When 241 // we find an unwind dest for a pad we may update its ancestors, but 242 // the queue only ever contains uncles/great-uncles/etc. of CurrentPad, 243 // so they should never get updated while queued on the worklist. 244 assert(!MemoMap.count(CurrentPad)); 245 Value *UnwindDestToken = nullptr; 246 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) { 247 if (CatchSwitch->hasUnwindDest()) { 248 UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI(); 249 } else { 250 // Catchswitch doesn't have a 'nounwind' variant, and one might be 251 // annotated as "unwinds to caller" when really it's nounwind (see 252 // e.g. SimplifyCFGOpt::SimplifyUnreachable), so we can't infer the 253 // parent's unwind dest from this. We can check its catchpads' 254 // descendants, since they might include a cleanuppad with an 255 // "unwinds to caller" cleanupret, which can be trusted. 
256 for (auto HI = CatchSwitch->handler_begin(), 257 HE = CatchSwitch->handler_end(); 258 HI != HE && !UnwindDestToken; ++HI) { 259 BasicBlock *HandlerBlock = *HI; 260 auto *CatchPad = cast<CatchPadInst>(HandlerBlock->getFirstNonPHI()); 261 for (User *Child : CatchPad->users()) { 262 // Intentionally ignore invokes here -- since the catchswitch is 263 // marked "unwind to caller", it would be a verifier error if it 264 // contained an invoke which unwinds out of it, so any invoke we'd 265 // encounter must unwind to some child of the catch. 266 if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child)) 267 continue; 268 269 Instruction *ChildPad = cast<Instruction>(Child); 270 auto Memo = MemoMap.find(ChildPad); 271 if (Memo == MemoMap.end()) { 272 // Haven't figured out this child pad yet; queue it. 273 Worklist.push_back(ChildPad); 274 continue; 275 } 276 // We've already checked this child, but might have found that 277 // it offers no proof either way. 278 Value *ChildUnwindDestToken = Memo->second; 279 if (!ChildUnwindDestToken) 280 continue; 281 // We already know the child's unwind dest, which can either 282 // be ConstantTokenNone to indicate unwind to caller, or can 283 // be another child of the catchpad. Only the former indicates 284 // the unwind dest of the catchswitch. 285 if (isa<ConstantTokenNone>(ChildUnwindDestToken)) { 286 UnwindDestToken = ChildUnwindDestToken; 287 break; 288 } 289 assert(getParentPad(ChildUnwindDestToken) == CatchPad); 290 } 291 } 292 } 293 } else { 294 auto *CleanupPad = cast<CleanupPadInst>(CurrentPad); 295 for (User *U : CleanupPad->users()) { 296 if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) { 297 if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest()) 298 UnwindDestToken = RetUnwindDest->getFirstNonPHI(); 299 else 300 UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext()); 301 break; 302 } 303 Value *ChildUnwindDestToken; 304 if (auto *Invoke = dyn_cast<InvokeInst>(U)) { 305 ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI(); 306 } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) { 307 Instruction *ChildPad = cast<Instruction>(U); 308 auto Memo = MemoMap.find(ChildPad); 309 if (Memo == MemoMap.end()) { 310 // Haven't resolved this child yet; queue it and keep searching. 311 Worklist.push_back(ChildPad); 312 continue; 313 } 314 // We've checked this child, but still need to ignore it if it 315 // had no proof either way. 316 ChildUnwindDestToken = Memo->second; 317 if (!ChildUnwindDestToken) 318 continue; 319 } else { 320 // Not a relevant user of the cleanuppad 321 continue; 322 } 323 // In a well-formed program, the child/invoke must either unwind to 324 // an(other) child of the cleanup, or exit the cleanup. In the 325 // first case, continue searching. 326 if (isa<Instruction>(ChildUnwindDestToken) && 327 getParentPad(ChildUnwindDestToken) == CleanupPad) 328 continue; 329 UnwindDestToken = ChildUnwindDestToken; 330 break; 331 } 332 } 333 // If we haven't found an unwind dest for CurrentPad, we may have queued its 334 // children, so move on to the next in the worklist. 335 if (!UnwindDestToken) 336 continue; 337 338 // Now we know that CurrentPad unwinds to UnwindDestToken. It also exits 339 // any ancestors of CurrentPad up to but not including UnwindDestToken's 340 // parent pad. Record this in the memo map, and check to see if the 341 // original EHPad being queried is one of the ones exited. 
    Value *UnwindParent;
    if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
      UnwindParent = getParentPad(UnwindPad);
    else
      UnwindParent = nullptr;
    bool ExitedOriginalPad = false;
    for (Instruction *ExitedPad = CurrentPad;
         ExitedPad && ExitedPad != UnwindParent;
         ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) {
      // Skip over catchpads since they just follow their catchswitches.
      if (isa<CatchPadInst>(ExitedPad))
        continue;
      MemoMap[ExitedPad] = UnwindDestToken;
      ExitedOriginalPad |= (ExitedPad == EHPad);
    }

    if (ExitedOriginalPad)
      return UnwindDestToken;

    // Continue the search.
  }

  // No definitive information is contained within this funclet.
  return nullptr;
}

/// Given an EH pad, find where it unwinds. If it unwinds to an EH pad,
/// return that pad instruction. If it unwinds to caller, return
/// ConstantTokenNone. If it does not have a definitive unwind destination,
/// return nullptr.
///
/// This routine gets invoked for calls in funclets in inlinees when inlining
/// an invoke. Since many funclets don't have calls inside them, it's queried
/// on-demand rather than building a map of pads to unwind dests up front.
/// Determining a funclet's unwind dest may require recursively searching its
/// descendants, and also ancestors and cousins if the descendants don't provide
/// an answer. Since most funclets will have their unwind dest immediately
/// available as the unwind dest of a catchswitch or cleanupret, this routine
/// searches top-down from the given pad and then up. To avoid worst-case
/// quadratic run-time given that approach, it uses a memo map to avoid
/// re-processing funclet trees. The callers that rewrite the IR as they go
/// take advantage of this, for correctness, by checking/forcing rewritten
/// pads' entries to match the original callee view.
static Value *getUnwindDestToken(Instruction *EHPad,
                                 UnwindDestMemoTy &MemoMap) {
  // Catchpads unwind to the same place as their catchswitch;
  // redirect any queries on catchpads so the code below can
  // deal with just catchswitches and cleanuppads.
  if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))
    EHPad = CPI->getCatchSwitch();

  // Check if we've already determined the unwind dest for this pad.
  auto Memo = MemoMap.find(EHPad);
  if (Memo != MemoMap.end())
    return Memo->second;

  // Search EHPad and, if necessary, its descendants.
  Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
  assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));
  if (UnwindDestToken)
    return UnwindDestToken;

  // No information is available for this EHPad from itself or any of its
  // descendants. An unwind all the way out to a pad in the caller would
  // need also to agree with the unwind dest of the parent funclet, so
  // search up the chain to try to find a funclet with information. Put
  // null entries in the memo map to avoid re-processing as we go up.
  MemoMap[EHPad] = nullptr;
#ifndef NDEBUG
  SmallPtrSet<Instruction *, 4> TempMemos;
  TempMemos.insert(EHPad);
#endif
  Instruction *LastUselessPad = EHPad;
  Value *AncestorToken;
  for (AncestorToken = getParentPad(EHPad);
       auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
       AncestorToken = getParentPad(AncestorToken)) {
    // Skip over catchpads since they just follow their catchswitches.
    if (isa<CatchPadInst>(AncestorPad))
      continue;
    // If the MemoMap had an entry mapping AncestorPad to nullptr, since we
    // haven't yet called getUnwindDestTokenHelper for AncestorPad in this
    // call to getUnwindDestToken, that would mean that AncestorPad had no
    // information in itself, its descendants, or its ancestors. If that
    // were the case, then we should also have recorded the lack of information
    // for the descendant that we're coming from. So assert that we don't
    // find a null entry in the MemoMap for AncestorPad.
    assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);
    auto AncestorMemo = MemoMap.find(AncestorPad);
    if (AncestorMemo == MemoMap.end()) {
      UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
    } else {
      UnwindDestToken = AncestorMemo->second;
    }
    if (UnwindDestToken)
      break;
    LastUselessPad = AncestorPad;
    MemoMap[LastUselessPad] = nullptr;
#ifndef NDEBUG
    TempMemos.insert(LastUselessPad);
#endif
  }

  // We know that getUnwindDestTokenHelper was called on LastUselessPad and
  // returned nullptr (and likewise for EHPad and any of its ancestors up to
  // LastUselessPad), so LastUselessPad has no information from below. Since
  // getUnwindDestTokenHelper must investigate all downward paths through
  // no-information nodes to prove that a node has no information like this,
  // and since any time it finds information it records it in the MemoMap for
  // not just the immediately-containing funclet but also any ancestors also
  // exited, it must be the case that, walking downward from LastUselessPad,
  // visiting just those nodes which have not been mapped to an unwind dest
  // by getUnwindDestTokenHelper (the nullptr TempMemos notwithstanding, since
  // they are just used to keep getUnwindDestTokenHelper from repeating work),
  // any node visited must have been exhaustively searched with no information
  // for it found.
  SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
  while (!Worklist.empty()) {
    Instruction *UselessPad = Worklist.pop_back_val();
    auto Memo = MemoMap.find(UselessPad);
    if (Memo != MemoMap.end() && Memo->second) {
      // Here the name 'UselessPad' is a bit of a misnomer, because we've found
      // that it is a funclet that does have information about unwinding to
      // a particular destination; its parent was a useless pad.
      // Since its parent has no information, the unwind edge must not escape
      // the parent, and must target a sibling of this pad. This local unwind
      // gives us no information about EHPad. Leave it and the subtree rooted
      // at it alone.
      assert(getParentPad(Memo->second) == getParentPad(UselessPad));
      continue;
    }
    // We know we don't have information for UselessPad. If it has an entry in
    // the MemoMap (mapping it to nullptr), it must be one of the TempMemos
    // added on this invocation of getUnwindDestToken; if a previous invocation
    // recorded nullptr, it would have had to prove that the ancestors of
    // UselessPad, which include LastUselessPad, had no information, and that
    // in turn would have required proving that the descendants of
    // LastUselessPad, which include EHPad, have no information about
    // LastUselessPad, which would imply that EHPad was mapped to nullptr in
    // the MemoMap on that invocation, which isn't the case if we got here.
    assert(!MemoMap.count(UselessPad) || TempMemos.count(UselessPad));
    // Assert as we enumerate users that 'UselessPad' doesn't have any unwind
    // information that we'd be contradicting by making a map entry for it
    // (which is something that getUnwindDestTokenHelper must have proved for
    // us to get here). Just assert on its direct users here; the checks in
    // this downward walk at its descendants will verify that they don't have
    // any unwind edges that exit 'UselessPad' either (i.e. they either have no
    // unwind edges or unwind to a sibling).
    MemoMap[UselessPad] = UnwindDestToken;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
      assert(CatchSwitch->getUnwindDest() == nullptr && "Expected useless pad");
      for (BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
        auto *CatchPad = HandlerBlock->getFirstNonPHI();
        for (User *U : CatchPad->users()) {
          assert(
              (!isa<InvokeInst>(U) ||
               (getParentPad(
                    cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
                CatchPad)) &&
              "Expected useless pad");
          if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
            Worklist.push_back(cast<Instruction>(U));
        }
      }
    } else {
      assert(isa<CleanupPadInst>(UselessPad));
      for (User *U : UselessPad->users()) {
        assert(!isa<CleanupReturnInst>(U) && "Expected useless pad");
        assert((!isa<InvokeInst>(U) ||
                (getParentPad(
                     cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
                 UselessPad)) &&
               "Expected useless pad");
        if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
          Worklist.push_back(cast<Instruction>(U));
      }
    }
  }

  return UnwindDestToken;
}

/// When we inline a basic block into an invoke,
/// we have to turn all of the calls that can throw into invokes.
/// This function analyzes BB to see if there are any calls, and if so,
/// it rewrites them to be invokes that jump to InvokeDest and fills in the PHI
/// nodes in that block with the values specified in InvokeDestPHIValues.
static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
    BasicBlock *BB, BasicBlock *UnwindEdge,
    UnwindDestMemoTy *FuncletUnwindMap = nullptr) {
  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = &*BBI++;

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);

    if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
      continue;

    // We do not need to (and in fact, cannot) convert possibly throwing calls
    // to @llvm.experimental.deoptimize (resp. @llvm.experimental.guard) into
    // invokes. The caller's "segment" of the deoptimization continuation
    // attached to the newly inlined @llvm.experimental.deoptimize
    // (resp. @llvm.experimental.guard) call should contain the exception
    // handling logic, if any.
    if (auto *F = CI->getCalledFunction())
      if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
          F->getIntrinsicID() == Intrinsic::experimental_guard)
        continue;

    if (auto FuncletBundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
      // This call is nested inside a funclet. If that funclet has an unwind
      // destination within the inlinee, then unwinding out of this call would
      // be UB. Rewriting this call to an invoke which targets the inlined
      // invoke's unwind dest would give the call's parent funclet multiple
      // unwind destinations, which is something that subsequent EH table
      // generation can't handle and that the verifier rejects. So when we
      // see such a call, leave it as a call.
      auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
      Value *UnwindDestToken =
          getUnwindDestToken(FuncletPad, *FuncletUnwindMap);
      if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
        continue;
#ifndef NDEBUG
      Instruction *MemoKey;
      if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
        MemoKey = CatchPad->getCatchSwitch();
      else
        MemoKey = FuncletPad;
      assert(FuncletUnwindMap->count(MemoKey) &&
             (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
             "must get memoized to avoid confusing later searches");
#endif // NDEBUG
    }

    changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);
    return BB;
  }
  return nullptr;
}

/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                                    ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.
  LandingPadInliningInfo Invoke(II);

  // Get all of the inlined landing pad instructions.
  SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
  for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end();
       I != E; ++I)
    if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
      InlinedLPads.insert(II->getLandingPadInst());

  // Append the clauses from the outer landing pad instruction into the inlined
  // landing pad instructions.
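  // For example (informal sketch): if the outer landingpad has a clause list
  // of "catch @TypeB" plus the cleanup flag, and an inlined landingpad has
  // only "catch @TypeA", the inlined one ends up with "catch @TypeA,
  // catch @TypeB" and the cleanup flag set, so it can handle everything the
  // outer landing pad could have handled.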
610 LandingPadInst *OuterLPad = Invoke.getLandingPadInst(); 611 for (LandingPadInst *InlinedLPad : InlinedLPads) { 612 unsigned OuterNum = OuterLPad->getNumClauses(); 613 InlinedLPad->reserveClauses(OuterNum); 614 for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx) 615 InlinedLPad->addClause(OuterLPad->getClause(OuterIdx)); 616 if (OuterLPad->isCleanup()) 617 InlinedLPad->setCleanup(true); 618 } 619 620 for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end(); 621 BB != E; ++BB) { 622 if (InlinedCodeInfo.ContainsCalls) 623 if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke( 624 &*BB, Invoke.getOuterResumeDest())) 625 // Update any PHI nodes in the exceptional block to indicate that there 626 // is now a new entry in them. 627 Invoke.addIncomingPHIValuesFor(NewBB); 628 629 // Forward any resumes that are remaining here. 630 if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator())) 631 Invoke.forwardResume(RI, InlinedLPads); 632 } 633 634 // Now that everything is happy, we have one final detail. The PHI nodes in 635 // the exception destination block still have entries due to the original 636 // invoke instruction. Eliminate these entries (which might even delete the 637 // PHI node) now. 638 InvokeDest->removePredecessor(II->getParent()); 639 } 640 641 /// If we inlined an invoke site, we need to convert calls 642 /// in the body of the inlined function into invokes. 643 /// 644 /// II is the invoke instruction being inlined. FirstNewBlock is the first 645 /// block of the inlined code (the last block is the end of the function), 646 /// and InlineCodeInfo is information about the code that got inlined. 647 static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock, 648 ClonedCodeInfo &InlinedCodeInfo) { 649 BasicBlock *UnwindDest = II->getUnwindDest(); 650 Function *Caller = FirstNewBlock->getParent(); 651 652 assert(UnwindDest->getFirstNonPHI()->isEHPad() && "unexpected BasicBlock!"); 653 654 // If there are PHI nodes in the unwind destination block, we need to keep 655 // track of which values came into them from the invoke before removing the 656 // edge from this block. 657 SmallVector<Value *, 8> UnwindDestPHIValues; 658 BasicBlock *InvokeBB = II->getParent(); 659 for (Instruction &I : *UnwindDest) { 660 // Save the value to use for this edge. 661 PHINode *PHI = dyn_cast<PHINode>(&I); 662 if (!PHI) 663 break; 664 UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB)); 665 } 666 667 // Add incoming-PHI values to the unwind destination block for the given basic 668 // block, using the values for the original invoke's source block. 669 auto UpdatePHINodes = [&](BasicBlock *Src) { 670 BasicBlock::iterator I = UnwindDest->begin(); 671 for (Value *V : UnwindDestPHIValues) { 672 PHINode *PHI = cast<PHINode>(I); 673 PHI->addIncoming(V, Src); 674 ++I; 675 } 676 }; 677 678 // This connects all the instructions which 'unwind to caller' to the invoke 679 // destination. 
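  // Concretely, in the loop below, "cleanupret from %pad unwind to caller"
  // becomes "cleanupret from %pad unwind label %UnwindDest", and a catchswitch
  // marked "unwind to caller" is rebuilt with %UnwindDest as its unwind
  // destination (subject to the funclet constraints described inline).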
  UnwindDestMemoTy FuncletUnwindMap;
  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
      if (CRI->unwindsToCaller()) {
        auto *CleanupPad = CRI->getCleanupPad();
        CleanupReturnInst::Create(CleanupPad, UnwindDest, CRI);
        CRI->eraseFromParent();
        UpdatePHINodes(&*BB);
        // Finding a cleanupret with an unwind destination would confuse
        // subsequent calls to getUnwindDestToken, so map the cleanuppad
        // to short-circuit any such calls and recognize this as an "unwind
        // to caller" cleanup.
        assert(!FuncletUnwindMap.count(CleanupPad) ||
               isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
        FuncletUnwindMap[CleanupPad] =
            ConstantTokenNone::get(Caller->getContext());
      }
    }

    Instruction *I = BB->getFirstNonPHI();
    if (!I->isEHPad())
      continue;

    Instruction *Replacement = nullptr;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
      if (CatchSwitch->unwindsToCaller()) {
        Value *UnwindDestToken;
        if (auto *ParentPad =
                dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
          // This catchswitch is nested inside another funclet. If that
          // funclet has an unwind destination within the inlinee, then
          // unwinding out of this catchswitch would be UB. Rewriting this
          // catchswitch to unwind to the inlined invoke's unwind dest would
          // give the parent funclet multiple unwind destinations, which is
          // something that subsequent EH table generation can't handle and
          // that the verifier rejects. So when we see such a catchswitch,
          // leave it as "unwind to caller".
          UnwindDestToken = getUnwindDestToken(ParentPad, FuncletUnwindMap);
          if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
            continue;
        } else {
          // This catchswitch has no parent to inherit constraints from, and
          // none of its descendants can have an unwind edge that exits it and
          // targets another funclet in the inlinee. It may or may not have a
          // descendant that definitively has an unwind to caller. In either
          // case, we'll have to assume that any unwinds out of it may need to
          // be routed to the caller, so treat it as though it has a definitive
          // unwind to caller.
          UnwindDestToken = ConstantTokenNone::get(Caller->getContext());
        }
        auto *NewCatchSwitch = CatchSwitchInst::Create(
            CatchSwitch->getParentPad(), UnwindDest,
            CatchSwitch->getNumHandlers(), CatchSwitch->getName(),
            CatchSwitch);
        for (BasicBlock *PadBB : CatchSwitch->handlers())
          NewCatchSwitch->addHandler(PadBB);
        // Propagate info for the old catchswitch over to the new one in
        // the unwind map. This also serves to short-circuit any subsequent
        // checks for the unwind dest of this catchswitch, which would get
        // confused if they found the outer handler in the callee.
741 FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken; 742 Replacement = NewCatchSwitch; 743 } 744 } else if (!isa<FuncletPadInst>(I)) { 745 llvm_unreachable("unexpected EHPad!"); 746 } 747 748 if (Replacement) { 749 Replacement->takeName(I); 750 I->replaceAllUsesWith(Replacement); 751 I->eraseFromParent(); 752 UpdatePHINodes(&*BB); 753 } 754 } 755 756 if (InlinedCodeInfo.ContainsCalls) 757 for (Function::iterator BB = FirstNewBlock->getIterator(), 758 E = Caller->end(); 759 BB != E; ++BB) 760 if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke( 761 &*BB, UnwindDest, &FuncletUnwindMap)) 762 // Update any PHI nodes in the exceptional block to indicate that there 763 // is now a new entry in them. 764 UpdatePHINodes(NewBB); 765 766 // Now that everything is happy, we have one final detail. The PHI nodes in 767 // the exception destination block still have entries due to the original 768 // invoke instruction. Eliminate these entries (which might even delete the 769 // PHI node) now. 770 UnwindDest->removePredecessor(InvokeBB); 771 } 772 773 /// When inlining a call site that has !llvm.mem.parallel_loop_access metadata, 774 /// that metadata should be propagated to all memory-accessing cloned 775 /// instructions. 776 static void PropagateParallelLoopAccessMetadata(CallSite CS, 777 ValueToValueMapTy &VMap) { 778 MDNode *M = 779 CS.getInstruction()->getMetadata(LLVMContext::MD_mem_parallel_loop_access); 780 if (!M) 781 return; 782 783 for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end(); 784 VMI != VMIE; ++VMI) { 785 if (!VMI->second) 786 continue; 787 788 Instruction *NI = dyn_cast<Instruction>(VMI->second); 789 if (!NI) 790 continue; 791 792 if (MDNode *PM = NI->getMetadata(LLVMContext::MD_mem_parallel_loop_access)) { 793 M = MDNode::concatenate(PM, M); 794 NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M); 795 } else if (NI->mayReadOrWriteMemory()) { 796 NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M); 797 } 798 } 799 } 800 801 /// When inlining a function that contains noalias scope metadata, 802 /// this metadata needs to be cloned so that the inlined blocks 803 /// have different "unique scopes" at every call site. Were this not done, then 804 /// aliasing scopes from a function inlined into a caller multiple times could 805 /// not be differentiated (and this would lead to miscompiles because the 806 /// non-aliasing property communicated by the metadata could have 807 /// call-site-specific control dependencies). 808 static void CloneAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap) { 809 const Function *CalledFunc = CS.getCalledFunction(); 810 SetVector<const MDNode *> MD; 811 812 // Note: We could only clone the metadata if it is already used in the 813 // caller. I'm omitting that check here because it might confuse 814 // inter-procedural alias analysis passes. We can revisit this if it becomes 815 // an efficiency or overhead problem. 816 817 for (const BasicBlock &I : *CalledFunc) 818 for (const Instruction &J : I) { 819 if (const MDNode *M = J.getMetadata(LLVMContext::MD_alias_scope)) 820 MD.insert(M); 821 if (const MDNode *M = J.getMetadata(LLVMContext::MD_noalias)) 822 MD.insert(M); 823 } 824 825 if (MD.empty()) 826 return; 827 828 // Walk the existing metadata, adding the complete (perhaps cyclic) chain to 829 // the set. 
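  // (Alias scope and domain nodes are self-referential, looking roughly like
  //    !0 = distinct !{!0, !"callee"}            ; scope domain
  //    !1 = distinct !{!1, !0, !"callee: %arg"}  ; scope
  //  so the walk can revisit nodes; the SetVector insertion check below keeps
  //  the queue from cycling forever.)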
  SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
  while (!Queue.empty()) {
    const MDNode *M = cast<MDNode>(Queue.pop_back_val());
    for (unsigned i = 0, ie = M->getNumOperands(); i != ie; ++i)
      if (const MDNode *M1 = dyn_cast<MDNode>(M->getOperand(i)))
        if (MD.insert(M1))
          Queue.push_back(M1);
  }

  // Now we have a complete set of all metadata in the chains used to specify
  // the noalias scopes and the lists of those scopes.
  SmallVector<TempMDTuple, 16> DummyNodes;
  DenseMap<const MDNode *, TrackingMDNodeRef> MDMap;
  for (const MDNode *I : MD) {
    DummyNodes.push_back(MDTuple::getTemporary(CalledFunc->getContext(), None));
    MDMap[I].reset(DummyNodes.back().get());
  }

  // Create new metadata nodes to replace the dummy nodes, replacing old
  // metadata references with either a dummy node or an already-created new
  // node.
  for (const MDNode *I : MD) {
    SmallVector<Metadata *, 4> NewOps;
    for (unsigned i = 0, ie = I->getNumOperands(); i != ie; ++i) {
      const Metadata *V = I->getOperand(i);
      if (const MDNode *M = dyn_cast<MDNode>(V))
        NewOps.push_back(MDMap[M]);
      else
        NewOps.push_back(const_cast<Metadata *>(V));
    }

    MDNode *NewM = MDNode::get(CalledFunc->getContext(), NewOps);
    MDTuple *TempM = cast<MDTuple>(MDMap[I]);
    assert(TempM->isTemporary() && "Expected temporary node");

    TempM->replaceAllUsesWith(NewM);
  }

  // Now replace the metadata in the new inlined instructions with the
  // replacements from the map.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (!VMI->second)
      continue;

    Instruction *NI = dyn_cast<Instruction>(VMI->second);
    if (!NI)
      continue;

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_alias_scope)) {
      MDNode *NewMD = MDMap[M];
      // If the call site also had alias scope metadata (a list of scopes to
      // which instructions inside it might belong), propagate those scopes to
      // the inlined instructions.
      if (MDNode *CSM =
              CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
        NewMD = MDNode::concatenate(NewMD, CSM);
      NI->setMetadata(LLVMContext::MD_alias_scope, NewMD);
    } else if (NI->mayReadOrWriteMemory()) {
      if (MDNode *M =
              CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
        NI->setMetadata(LLVMContext::MD_alias_scope, M);
    }

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_noalias)) {
      MDNode *NewMD = MDMap[M];
      // If the call site also had noalias metadata (a list of scopes with
      // which instructions inside it don't alias), propagate those scopes to
      // the inlined instructions.
      if (MDNode *CSM =
              CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
        NewMD = MDNode::concatenate(NewMD, CSM);
      NI->setMetadata(LLVMContext::MD_noalias, NewMD);
    } else if (NI->mayReadOrWriteMemory()) {
      if (MDNode *M = CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
        NI->setMetadata(LLVMContext::MD_noalias, M);
    }
  }
}

/// If the inlined function has noalias arguments,
/// then add new alias scopes for each noalias argument, tag the mapped noalias
/// parameters with noalias metadata specifying the new scope, and tag all
/// non-derived loads, stores and memory intrinsics with the new alias scopes.
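// A rough illustration (assumed shapes, not from a specific test): after
// inlining
//
//   define void @callee(i32* noalias %p, i32* %q) { ... }
//
// the cloned accesses that are known to be based on %p are tagged with
// !alias.scope metadata naming a fresh scope created for %p, while cloned
// accesses that provably do not derive from %p are tagged with !noalias
// metadata listing that same scope, so alias analysis in the caller can keep
// treating the two groups as non-aliasing.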
914 static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap, 915 const DataLayout &DL, AAResults *CalleeAAR) { 916 if (!EnableNoAliasConversion) 917 return; 918 919 const Function *CalledFunc = CS.getCalledFunction(); 920 SmallVector<const Argument *, 4> NoAliasArgs; 921 922 for (const Argument &Arg : CalledFunc->args()) 923 if (Arg.hasNoAliasAttr() && !Arg.use_empty()) 924 NoAliasArgs.push_back(&Arg); 925 926 if (NoAliasArgs.empty()) 927 return; 928 929 // To do a good job, if a noalias variable is captured, we need to know if 930 // the capture point dominates the particular use we're considering. 931 DominatorTree DT; 932 DT.recalculate(const_cast<Function&>(*CalledFunc)); 933 934 // noalias indicates that pointer values based on the argument do not alias 935 // pointer values which are not based on it. So we add a new "scope" for each 936 // noalias function argument. Accesses using pointers based on that argument 937 // become part of that alias scope, accesses using pointers not based on that 938 // argument are tagged as noalias with that scope. 939 940 DenseMap<const Argument *, MDNode *> NewScopes; 941 MDBuilder MDB(CalledFunc->getContext()); 942 943 // Create a new scope domain for this function. 944 MDNode *NewDomain = 945 MDB.createAnonymousAliasScopeDomain(CalledFunc->getName()); 946 for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) { 947 const Argument *A = NoAliasArgs[i]; 948 949 std::string Name = CalledFunc->getName(); 950 if (A->hasName()) { 951 Name += ": %"; 952 Name += A->getName(); 953 } else { 954 Name += ": argument "; 955 Name += utostr(i); 956 } 957 958 // Note: We always create a new anonymous root here. This is true regardless 959 // of the linkage of the callee because the aliasing "scope" is not just a 960 // property of the callee, but also all control dependencies in the caller. 961 MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name); 962 NewScopes.insert(std::make_pair(A, NewScope)); 963 } 964 965 // Iterate over all new instructions in the map; for all memory-access 966 // instructions, add the alias scope metadata. 967 for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end(); 968 VMI != VMIE; ++VMI) { 969 if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) { 970 if (!VMI->second) 971 continue; 972 973 Instruction *NI = dyn_cast<Instruction>(VMI->second); 974 if (!NI) 975 continue; 976 977 bool IsArgMemOnlyCall = false, IsFuncCall = false; 978 SmallVector<const Value *, 2> PtrArgs; 979 980 if (const LoadInst *LI = dyn_cast<LoadInst>(I)) 981 PtrArgs.push_back(LI->getPointerOperand()); 982 else if (const StoreInst *SI = dyn_cast<StoreInst>(I)) 983 PtrArgs.push_back(SI->getPointerOperand()); 984 else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I)) 985 PtrArgs.push_back(VAAI->getPointerOperand()); 986 else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I)) 987 PtrArgs.push_back(CXI->getPointerOperand()); 988 else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I)) 989 PtrArgs.push_back(RMWI->getPointerOperand()); 990 else if (ImmutableCallSite ICS = ImmutableCallSite(I)) { 991 // If we know that the call does not access memory, then we'll still 992 // know that about the inlined clone of this call site, and we don't 993 // need to add metadata. 
        if (ICS.doesNotAccessMemory())
          continue;

        IsFuncCall = true;
        if (CalleeAAR) {
          FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(ICS);
          if (MRB == FMRB_OnlyAccessesArgumentPointees ||
              MRB == FMRB_OnlyReadsArgumentPointees)
            IsArgMemOnlyCall = true;
        }

        for (Value *Arg : ICS.args()) {
          // We need to check the underlying objects of all arguments, not just
          // the pointer arguments, because we might be passing pointers as
          // integers, etc.
          // However, if we know that the call only accesses pointer arguments,
          // then we only need to check the pointer arguments.
          if (IsArgMemOnlyCall && !Arg->getType()->isPointerTy())
            continue;

          PtrArgs.push_back(Arg);
        }
      }

      // If we found no pointers, then this instruction is not suitable for
      // pairing with an instruction to receive aliasing metadata.
      // However, if this is a call, then we might just alias with none of the
      // noalias arguments.
      if (PtrArgs.empty() && !IsFuncCall)
        continue;

      // It is possible that there is only one underlying object, but you
      // need to go through several PHIs to see it, and thus it could be
      // repeated in the Objects list.
      SmallPtrSet<const Value *, 4> ObjSet;
      SmallVector<Metadata *, 4> Scopes, NoAliases;

      SmallSetVector<const Argument *, 4> NAPtrArgs;
      for (const Value *V : PtrArgs) {
        SmallVector<Value *, 4> Objects;
        GetUnderlyingObjects(const_cast<Value*>(V),
                             Objects, DL, /* LI = */ nullptr);

        for (Value *O : Objects)
          ObjSet.insert(O);
      }

      // Figure out if we're derived from anything that is not a noalias
      // argument.
      bool CanDeriveViaCapture = false, UsesAliasingPtr = false;
      for (const Value *V : ObjSet) {
        // Is this value a constant that cannot be derived from any pointer
        // value (we need to exclude constant expressions, for example, that
        // are formed from arithmetic on global symbols)?
        bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
                             isa<ConstantPointerNull>(V) ||
                             isa<ConstantDataVector>(V) || isa<UndefValue>(V);
        if (IsNonPtrConst)
          continue;

        // If this is anything other than a noalias argument, then we cannot
        // completely describe the aliasing properties using alias.scope
        // metadata (and, thus, won't add any).
        if (const Argument *A = dyn_cast<Argument>(V)) {
          if (!A->hasNoAliasAttr())
            UsesAliasingPtr = true;
        } else {
          UsesAliasingPtr = true;
        }

        // If this is not some identified function-local object (which cannot
        // directly alias a noalias argument), or some other argument (which,
        // by definition, also cannot alias a noalias argument), then we could
        // alias a noalias argument that has been captured.
        if (!isa<Argument>(V) &&
            !isIdentifiedFunctionLocal(const_cast<Value*>(V)))
          CanDeriveViaCapture = true;
      }

      // A function call can always get captured noalias pointers (via other
      // parameters, globals, etc.).
      if (IsFuncCall && !IsArgMemOnlyCall)
        CanDeriveViaCapture = true;

      // First, we want to figure out all of the sets with which we definitely
      // don't alias. Iterate over all noalias sets, and add those for which:
      //   1. The noalias argument is not in the set of objects from which we
      //      definitely derive.
      //   2. The noalias argument has not yet been captured.
1083 // An arbitrary function that might load pointers could see captured 1084 // noalias arguments via other noalias arguments or globals, and so we 1085 // must always check for prior capture. 1086 for (const Argument *A : NoAliasArgs) { 1087 if (!ObjSet.count(A) && (!CanDeriveViaCapture || 1088 // It might be tempting to skip the 1089 // PointerMayBeCapturedBefore check if 1090 // A->hasNoCaptureAttr() is true, but this is 1091 // incorrect because nocapture only guarantees 1092 // that no copies outlive the function, not 1093 // that the value cannot be locally captured. 1094 !PointerMayBeCapturedBefore(A, 1095 /* ReturnCaptures */ false, 1096 /* StoreCaptures */ false, I, &DT))) 1097 NoAliases.push_back(NewScopes[A]); 1098 } 1099 1100 if (!NoAliases.empty()) 1101 NI->setMetadata(LLVMContext::MD_noalias, 1102 MDNode::concatenate( 1103 NI->getMetadata(LLVMContext::MD_noalias), 1104 MDNode::get(CalledFunc->getContext(), NoAliases))); 1105 1106 // Next, we want to figure out all of the sets to which we might belong. 1107 // We might belong to a set if the noalias argument is in the set of 1108 // underlying objects. If there is some non-noalias argument in our list 1109 // of underlying objects, then we cannot add a scope because the fact 1110 // that some access does not alias with any set of our noalias arguments 1111 // cannot itself guarantee that it does not alias with this access 1112 // (because there is some pointer of unknown origin involved and the 1113 // other access might also depend on this pointer). We also cannot add 1114 // scopes to arbitrary functions unless we know they don't access any 1115 // non-parameter pointer-values. 1116 bool CanAddScopes = !UsesAliasingPtr; 1117 if (CanAddScopes && IsFuncCall) 1118 CanAddScopes = IsArgMemOnlyCall; 1119 1120 if (CanAddScopes) 1121 for (const Argument *A : NoAliasArgs) { 1122 if (ObjSet.count(A)) 1123 Scopes.push_back(NewScopes[A]); 1124 } 1125 1126 if (!Scopes.empty()) 1127 NI->setMetadata( 1128 LLVMContext::MD_alias_scope, 1129 MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope), 1130 MDNode::get(CalledFunc->getContext(), Scopes))); 1131 } 1132 } 1133 } 1134 1135 /// If the inlined function has non-byval align arguments, then 1136 /// add @llvm.assume-based alignment assumptions to preserve this information. 1137 static void AddAlignmentAssumptions(CallSite CS, InlineFunctionInfo &IFI) { 1138 if (!PreserveAlignmentAssumptions || !IFI.GetAssumptionCache) 1139 return; 1140 1141 AssumptionCache *AC = &(*IFI.GetAssumptionCache)(*CS.getCaller()); 1142 auto &DL = CS.getCaller()->getParent()->getDataLayout(); 1143 1144 // To avoid inserting redundant assumptions, we should check for assumptions 1145 // already in the caller. To do this, we might need a DT of the caller. 1146 DominatorTree DT; 1147 bool DTCalculated = false; 1148 1149 Function *CalledFunc = CS.getCalledFunction(); 1150 for (Argument &Arg : CalledFunc->args()) { 1151 unsigned Align = Arg.getType()->isPointerTy() ? Arg.getParamAlignment() : 0; 1152 if (Align && !Arg.hasByValOrInAllocaAttr() && !Arg.hasNUses(0)) { 1153 if (!DTCalculated) { 1154 DT.recalculate(*CS.getCaller()); 1155 DTCalculated = true; 1156 } 1157 1158 // If we can already prove the asserted alignment in the context of the 1159 // caller, then don't bother inserting the assumption. 
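      // (What gets inserted is an @llvm.assume of an alignment check on the
      //  passed pointer, conceptually "assume((ptrtoint %arg & (Align - 1)) ==
      //  0)"; the exact IR is whatever IRBuilder::CreateAlignmentAssumption
      //  emits.)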
1160 Value *ArgVal = CS.getArgument(Arg.getArgNo()); 1161 if (getKnownAlignment(ArgVal, DL, CS.getInstruction(), AC, &DT) >= Align) 1162 continue; 1163 1164 CallInst *NewAsmp = IRBuilder<>(CS.getInstruction()) 1165 .CreateAlignmentAssumption(DL, ArgVal, Align); 1166 AC->registerAssumption(NewAsmp); 1167 } 1168 } 1169 } 1170 1171 /// Once we have cloned code over from a callee into the caller, 1172 /// update the specified callgraph to reflect the changes we made. 1173 /// Note that it's possible that not all code was copied over, so only 1174 /// some edges of the callgraph may remain. 1175 static void UpdateCallGraphAfterInlining(CallSite CS, 1176 Function::iterator FirstNewBlock, 1177 ValueToValueMapTy &VMap, 1178 InlineFunctionInfo &IFI) { 1179 CallGraph &CG = *IFI.CG; 1180 const Function *Caller = CS.getCaller(); 1181 const Function *Callee = CS.getCalledFunction(); 1182 CallGraphNode *CalleeNode = CG[Callee]; 1183 CallGraphNode *CallerNode = CG[Caller]; 1184 1185 // Since we inlined some uninlined call sites in the callee into the caller, 1186 // add edges from the caller to all of the callees of the callee. 1187 CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end(); 1188 1189 // Consider the case where CalleeNode == CallerNode. 1190 CallGraphNode::CalledFunctionsVector CallCache; 1191 if (CalleeNode == CallerNode) { 1192 CallCache.assign(I, E); 1193 I = CallCache.begin(); 1194 E = CallCache.end(); 1195 } 1196 1197 for (; I != E; ++I) { 1198 const Value *OrigCall = I->first; 1199 1200 ValueToValueMapTy::iterator VMI = VMap.find(OrigCall); 1201 // Only copy the edge if the call was inlined! 1202 if (VMI == VMap.end() || VMI->second == nullptr) 1203 continue; 1204 1205 // If the call was inlined, but then constant folded, there is no edge to 1206 // add. Check for this case. 1207 Instruction *NewCall = dyn_cast<Instruction>(VMI->second); 1208 if (!NewCall) 1209 continue; 1210 1211 // We do not treat intrinsic calls like real function calls because we 1212 // expect them to become inline code; do not add an edge for an intrinsic. 1213 CallSite CS = CallSite(NewCall); 1214 if (CS && CS.getCalledFunction() && CS.getCalledFunction()->isIntrinsic()) 1215 continue; 1216 1217 // Remember that this call site got inlined for the client of 1218 // InlineFunction. 1219 IFI.InlinedCalls.push_back(NewCall); 1220 1221 // It's possible that inlining the callsite will cause it to go from an 1222 // indirect to a direct call by resolving a function pointer. If this 1223 // happens, set the callee of the new call site to a more precise 1224 // destination. This can also happen if the call graph node of the caller 1225 // was just unnecessarily imprecise. 1226 if (!I->second->getFunction()) 1227 if (Function *F = CallSite(NewCall).getCalledFunction()) { 1228 // Indirect call site resolved to direct call. 1229 CallerNode->addCalledFunction(CallSite(NewCall), CG[F]); 1230 1231 continue; 1232 } 1233 1234 CallerNode->addCalledFunction(CallSite(NewCall), I->second); 1235 } 1236 1237 // Update the call graph by deleting the edge from Callee to Caller. We must 1238 // do this after the loop above in case Caller and Callee are the same. 
1239 CallerNode->removeCallEdgeFor(CS); 1240 } 1241 1242 static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M, 1243 BasicBlock *InsertBlock, 1244 InlineFunctionInfo &IFI) { 1245 Type *AggTy = cast<PointerType>(Src->getType())->getElementType(); 1246 IRBuilder<> Builder(InsertBlock, InsertBlock->begin()); 1247 1248 Value *Size = Builder.getInt64(M->getDataLayout().getTypeStoreSize(AggTy)); 1249 1250 // Always generate a memcpy of alignment 1 here because we don't know 1251 // the alignment of the src pointer. Other optimizations can infer 1252 // better alignment. 1253 Builder.CreateMemCpy(Dst, /*DstAlign*/1, Src, /*SrcAlign*/1, Size); 1254 } 1255 1256 /// When inlining a call site that has a byval argument, 1257 /// we have to make the implicit memcpy explicit by adding it. 1258 static Value *HandleByValArgument(Value *Arg, Instruction *TheCall, 1259 const Function *CalledFunc, 1260 InlineFunctionInfo &IFI, 1261 unsigned ByValAlignment) { 1262 PointerType *ArgTy = cast<PointerType>(Arg->getType()); 1263 Type *AggTy = ArgTy->getElementType(); 1264 1265 Function *Caller = TheCall->getFunction(); 1266 const DataLayout &DL = Caller->getParent()->getDataLayout(); 1267 1268 // If the called function is readonly, then it could not mutate the caller's 1269 // copy of the byval'd memory. In this case, it is safe to elide the copy and 1270 // temporary. 1271 if (CalledFunc->onlyReadsMemory()) { 1272 // If the byval argument has a specified alignment that is greater than the 1273 // passed in pointer, then we either have to round up the input pointer or 1274 // give up on this transformation. 1275 if (ByValAlignment <= 1) // 0 = unspecified, 1 = no particular alignment. 1276 return Arg; 1277 1278 AssumptionCache *AC = 1279 IFI.GetAssumptionCache ? &(*IFI.GetAssumptionCache)(*Caller) : nullptr; 1280 1281 // If the pointer is already known to be sufficiently aligned, or if we can 1282 // round it up to a larger alignment, then we don't need a temporary. 1283 if (getOrEnforceKnownAlignment(Arg, ByValAlignment, DL, TheCall, AC) >= 1284 ByValAlignment) 1285 return Arg; 1286 1287 // Otherwise, we have to make a memcpy to get a safe alignment. This is bad 1288 // for code quality, but rarely happens and is required for correctness. 1289 } 1290 1291 // Create the alloca. If we have DataLayout, use nice alignment. 1292 unsigned Align = DL.getPrefTypeAlignment(AggTy); 1293 1294 // If the byval had an alignment specified, we *must* use at least that 1295 // alignment, as it is required by the byval argument (and uses of the 1296 // pointer inside the callee). 1297 Align = std::max(Align, ByValAlignment); 1298 1299 Value *NewAlloca = new AllocaInst(AggTy, DL.getAllocaAddrSpace(), 1300 nullptr, Align, Arg->getName(), 1301 &*Caller->begin()->begin()); 1302 IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca)); 1303 1304 // Uses of the argument in the function should use our new alloca 1305 // instead. 1306 return NewAlloca; 1307 } 1308 1309 // Check whether this Value is used by a lifetime intrinsic. 1310 static bool isUsedByLifetimeMarker(Value *V) { 1311 for (User *U : V->users()) { 1312 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) { 1313 switch (II->getIntrinsicID()) { 1314 default: break; 1315 case Intrinsic::lifetime_start: 1316 case Intrinsic::lifetime_end: 1317 return true; 1318 } 1319 } 1320 } 1321 return false; 1322 } 1323 1324 // Check whether the given alloca already has 1325 // lifetime.start or lifetime.end intrinsics. 
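// (The markers being looked for look roughly like
//    %p = bitcast [N x i8]* %buf to i8*
//    call void @llvm.lifetime.start.p0i8(i64 N, i8* %p)
//    ...
//    call void @llvm.lifetime.end.p0i8(i64 N, i8* %p)
//  possibly through bitcasts of the alloca, which the scan below follows.)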
static bool hasLifetimeMarkers(AllocaInst *AI) {
  Type *Ty = AI->getType();
  Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
                                       Ty->getPointerAddressSpace());
  if (Ty == Int8PtrTy)
    return isUsedByLifetimeMarker(AI);

  // Do a scan to find all the casts to i8*.
  for (User *U : AI->users()) {
    if (U->getType() != Int8PtrTy) continue;
    if (U->stripPointerCasts() != AI) continue;
    if (isUsedByLifetimeMarker(U))
      return true;
  }
  return false;
}

/// Return the result of AI->isStaticAlloca() if AI were moved to the entry
/// block. Allocas used in inalloca calls and allocas of dynamic array size
/// cannot be static.
static bool allocaWouldBeStaticInEntry(const AllocaInst *AI) {
  return isa<Constant>(AI->getArraySize()) && !AI->isUsedWithInAlloca();
}

/// Update inlined instructions' line numbers to encode the location where
/// these instructions are inlined.
static void fixupLineNumbers(Function *Fn, Function::iterator FI,
                             Instruction *TheCall, bool CalleeHasDebugInfo) {
  const DebugLoc &TheCallDL = TheCall->getDebugLoc();
  if (!TheCallDL)
    return;

  auto &Ctx = Fn->getContext();
  DILocation *InlinedAtNode = TheCallDL;

  // Create a unique call site, not to be confused with any other call from the
  // same location.
  InlinedAtNode = DILocation::getDistinct(
      Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
      InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());

  // Cache the inlined-at nodes as they're built so they are reused, without
  // this every instruction's inlined-at chain would become distinct from each
  // other.
  DenseMap<const MDNode *, MDNode *> IANodes;

  for (; FI != Fn->end(); ++FI) {
    for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
         BI != BE; ++BI) {
      if (DebugLoc DL = BI->getDebugLoc()) {
        auto IA = DebugLoc::appendInlinedAt(DL, InlinedAtNode, BI->getContext(),
                                            IANodes);
        auto IDL = DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(), IA);
        BI->setDebugLoc(IDL);
        continue;
      }

      if (CalleeHasDebugInfo)
        continue;

      // If the inlined instruction has no line number, make it look as if it
      // originates from the call location. This is important for
      // ((__always_inline__, __nodebug__)) functions which must use caller
      // location for all instructions in their function body.

      // Don't update static allocas, as they may get moved later.
      if (auto *AI = dyn_cast<AllocaInst>(BI))
        if (allocaWouldBeStaticInEntry(AI))
          continue;

      BI->setDebugLoc(TheCallDL);
    }
  }
}

/// Update the block frequencies of the caller after a callee has been inlined.
///
/// Each block cloned into the caller has its block frequency scaled by the
/// ratio of CallSiteFreq/CalleeEntryFreq. This ensures that the cloned copy of
/// callee's entry block gets the same frequency as the callsite block and the
/// relative frequencies of all cloned blocks remain the same after cloning.
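// For example (informal arithmetic): if the callee's entry block has frequency
// 100 in the callee's BFI and the call site block has frequency 25 in the
// caller, every cloned block is scaled by 25/100, so a callee block with
// frequency 400 ends up with frequency 100 in the caller.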
1407 static void updateCallerBFI(BasicBlock *CallSiteBlock,
1408 const ValueToValueMapTy &VMap,
1409 BlockFrequencyInfo *CallerBFI,
1410 BlockFrequencyInfo *CalleeBFI,
1411 const BasicBlock &CalleeEntryBlock) {
1412 SmallPtrSet<BasicBlock *, 16> ClonedBBs;
1413 for (auto const &Entry : VMap) {
1414 if (!isa<BasicBlock>(Entry.first) || !Entry.second)
1415 continue;
1416 auto *OrigBB = cast<BasicBlock>(Entry.first);
1417 auto *ClonedBB = cast<BasicBlock>(Entry.second);
1418 uint64_t Freq = CalleeBFI->getBlockFreq(OrigBB).getFrequency();
1419 if (!ClonedBBs.insert(ClonedBB).second) {
1420 // Multiple blocks in the callee might get mapped to one cloned block in
1421 // the caller since we prune the callee as we clone it. When that happens,
1422 // we want to use the maximum among the original blocks' frequencies.
1423 uint64_t NewFreq = CallerBFI->getBlockFreq(ClonedBB).getFrequency();
1424 if (NewFreq > Freq)
1425 Freq = NewFreq;
1426 }
1427 CallerBFI->setBlockFreq(ClonedBB, Freq);
1428 }
1429 BasicBlock *EntryClone = cast<BasicBlock>(VMap.lookup(&CalleeEntryBlock));
1430 CallerBFI->setBlockFreqAndScale(
1431 EntryClone, CallerBFI->getBlockFreq(CallSiteBlock).getFrequency(),
1432 ClonedBBs);
1433 }
1434
1435 /// Update the branch metadata for cloned call instructions.
1436 static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap,
1437 const ProfileCount &CalleeEntryCount,
1438 const Instruction *TheCall,
1439 ProfileSummaryInfo *PSI,
1440 BlockFrequencyInfo *CallerBFI) {
1441 if (!CalleeEntryCount.hasValue() || CalleeEntryCount.isSynthetic() ||
1442 CalleeEntryCount.getCount() < 1)
1443 return;
1444 auto CallSiteCount = PSI ? PSI->getProfileCount(TheCall, CallerBFI) : None;
1445 uint64_t CallCount =
1446 std::min(CallSiteCount.hasValue() ? CallSiteCount.getValue() : 0,
1447 CalleeEntryCount.getCount());
1448
1449 for (auto const &Entry : VMap)
1450 if (isa<CallInst>(Entry.first))
1451 if (auto *CI = dyn_cast_or_null<CallInst>(Entry.second))
1452 CI->updateProfWeight(CallCount, CalleeEntryCount.getCount());
1453 for (BasicBlock &BB : *Callee)
1454 // No need to update the callsite if it is pruned during inlining.
1455 if (VMap.count(&BB))
1456 for (Instruction &I : BB)
1457 if (CallInst *CI = dyn_cast<CallInst>(&I))
1458 CI->updateProfWeight(CalleeEntryCount.getCount() - CallCount,
1459 CalleeEntryCount.getCount());
1460 }
1461
1462 /// Update the entry count of the callee after inlining.
1463 ///
1464 /// The callsite's block count is subtracted from the callee's function entry
1465 /// count.
1466 static void updateCalleeCount(BlockFrequencyInfo *CallerBFI, BasicBlock *CallBB,
1467 Instruction *CallInst, Function *Callee,
1468 ProfileSummaryInfo *PSI) {
1469 // If the callee has an original count of N, and the estimated count of the
1470 // callsite is M, the new callee count is set to N - M. M is estimated from
1471 // the caller's entry count, its entry block frequency and the block frequency
1472 // of the callsite.
1473 auto CalleeCount = Callee->getEntryCount();
1474 if (!CalleeCount.hasValue() || !PSI)
1475 return;
1476 auto CallCount = PSI->getProfileCount(CallInst, CallerBFI);
1477 if (!CallCount.hasValue())
1478 return;
1479 // Since CallSiteCount is an estimate, it could exceed the original callee
1480 // count; in that case the new callee count is clamped to 0.
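// For example (hypothetical counts): with an original callee entry count of
// 100, an estimated callsite count of 30 leaves a new count of 70, whereas an
// estimate of 120 is clamped to 0 below.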
1481 if (CallCount.getValue() > CalleeCount.getCount()) 1482 CalleeCount.setCount(0); 1483 else 1484 CalleeCount.setCount(CalleeCount.getCount() - CallCount.getValue()); 1485 Callee->setEntryCount(CalleeCount); 1486 } 1487 1488 /// This function inlines the called function into the basic block of the 1489 /// caller. This returns false if it is not possible to inline this call. 1490 /// The program is still in a well defined state if this occurs though. 1491 /// 1492 /// Note that this only does one level of inlining. For example, if the 1493 /// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now 1494 /// exists in the instruction stream. Similarly this will inline a recursive 1495 /// function by one level. 1496 llvm::InlineResult llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI, 1497 AAResults *CalleeAAR, 1498 bool InsertLifetime, 1499 Function *ForwardVarArgsTo) { 1500 Instruction *TheCall = CS.getInstruction(); 1501 assert(TheCall->getParent() && TheCall->getFunction() 1502 && "Instruction not in function!"); 1503 1504 // If IFI has any state in it, zap it before we fill it in. 1505 IFI.reset(); 1506 1507 Function *CalledFunc = CS.getCalledFunction(); 1508 if (!CalledFunc || // Can't inline external function or indirect 1509 CalledFunc->isDeclaration()) // call! 1510 return "external or indirect"; 1511 1512 // The inliner does not know how to inline through calls with operand bundles 1513 // in general ... 1514 if (CS.hasOperandBundles()) { 1515 for (int i = 0, e = CS.getNumOperandBundles(); i != e; ++i) { 1516 uint32_t Tag = CS.getOperandBundleAt(i).getTagID(); 1517 // ... but it knows how to inline through "deopt" operand bundles ... 1518 if (Tag == LLVMContext::OB_deopt) 1519 continue; 1520 // ... and "funclet" operand bundles. 1521 if (Tag == LLVMContext::OB_funclet) 1522 continue; 1523 1524 return "unsupported operand bundle"; 1525 } 1526 } 1527 1528 // If the call to the callee cannot throw, set the 'nounwind' flag on any 1529 // calls that we inline. 1530 bool MarkNoUnwind = CS.doesNotThrow(); 1531 1532 BasicBlock *OrigBB = TheCall->getParent(); 1533 Function *Caller = OrigBB->getParent(); 1534 1535 // GC poses two hazards to inlining, which only occur when the callee has GC: 1536 // 1. If the caller has no GC, then the callee's GC must be propagated to the 1537 // caller. 1538 // 2. If the caller has a differing GC, it is invalid to inline. 1539 if (CalledFunc->hasGC()) { 1540 if (!Caller->hasGC()) 1541 Caller->setGC(CalledFunc->getGC()); 1542 else if (CalledFunc->getGC() != Caller->getGC()) 1543 return "incompatible GC"; 1544 } 1545 1546 // Get the personality function from the callee if it contains a landing pad. 1547 Constant *CalledPersonality = 1548 CalledFunc->hasPersonalityFn() 1549 ? CalledFunc->getPersonalityFn()->stripPointerCasts() 1550 : nullptr; 1551 1552 // Find the personality function used by the landing pads of the caller. If it 1553 // exists, then check to see that it matches the personality function used in 1554 // the callee. 1555 Constant *CallerPersonality = 1556 Caller->hasPersonalityFn() 1557 ? Caller->getPersonalityFn()->stripPointerCasts() 1558 : nullptr; 1559 if (CalledPersonality) { 1560 if (!CallerPersonality) 1561 Caller->setPersonalityFn(CalledPersonality); 1562 // If the personality functions match, then we can perform the 1563 // inlining. Otherwise, we can't inline. 1564 // TODO: This isn't 100% true. Some personality functions are proper 1565 // supersets of others and can be used in place of the other. 
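// For example, a caller whose landing pads use __gxx_personality_v0 cannot
// have a callee that relies on __CxxFrameHandler3 inlined into it under this
// rule.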
1566 else if (CalledPersonality != CallerPersonality)
1567 return "incompatible personality";
1568 }
1569
1570 // We need to figure out which funclet the callsite was in so that we may
1571 // properly nest the callee.
1572 Instruction *CallSiteEHPad = nullptr;
1573 if (CallerPersonality) {
1574 EHPersonality Personality = classifyEHPersonality(CallerPersonality);
1575 if (isScopedEHPersonality(Personality)) {
1576 Optional<OperandBundleUse> ParentFunclet =
1577 CS.getOperandBundle(LLVMContext::OB_funclet);
1578 if (ParentFunclet)
1579 CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());
1580
1581 // OK, the inlining site is legal. What about the target function?
1582
1583 if (CallSiteEHPad) {
1584 if (Personality == EHPersonality::MSVC_CXX) {
1585 // The MSVC personality cannot tolerate catches getting inlined into
1586 // cleanup funclets.
1587 if (isa<CleanupPadInst>(CallSiteEHPad)) {
1588 // Ok, the call site is within a cleanuppad. Let's check the callee
1589 // for catchpads.
1590 for (const BasicBlock &CalledBB : *CalledFunc) {
1591 if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))
1592 return "catch in cleanup funclet";
1593 }
1594 }
1595 } else if (isAsynchronousEHPersonality(Personality)) {
1596 // SEH is even less tolerant; there may not be any sort of exceptional
1597 // funclet in the callee.
1598 for (const BasicBlock &CalledBB : *CalledFunc) {
1599 if (CalledBB.isEHPad())
1600 return "SEH in cleanup funclet";
1601 }
1602 }
1603 }
1604 }
1605 }
1606
1607 // Determine if we are dealing with a call in an EHPad which does not unwind
1608 // to the caller.
1609 bool EHPadForCallUnwindsLocally = false;
1610 if (CallSiteEHPad && CS.isCall()) {
1611 UnwindDestMemoTy FuncletUnwindMap;
1612 Value *CallSiteUnwindDestToken =
1613 getUnwindDestToken(CallSiteEHPad, FuncletUnwindMap);
1614
1615 EHPadForCallUnwindsLocally =
1616 CallSiteUnwindDestToken &&
1617 !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
1618 }
1619
1620 // Get an iterator to the last basic block in the function, which will have
1621 // the new function inlined after it.
1622 Function::iterator LastBlock = --Caller->end();
1623
1624 // Make sure to capture all of the return instructions from the cloned
1625 // function.
1626 SmallVector<ReturnInst*, 8> Returns;
1627 ClonedCodeInfo InlinedFunctionInfo;
1628 Function::iterator FirstNewBlock;
1629
1630 { // Scope to destroy VMap after cloning.
1631 ValueToValueMapTy VMap;
1632 // Keep a list of (dst, src) pairs to emit byval initializations.
1633 SmallVector<std::pair<Value*, Value*>, 4> ByValInit;
1634
1635 auto &DL = Caller->getParent()->getDataLayout();
1636
1637 // Calculate the vector of arguments to pass into the function cloner, which
1638 // matches up the formal to the actual argument values.
1639 CallSite::arg_iterator AI = CS.arg_begin();
1640 unsigned ArgNo = 0;
1641 for (Function::arg_iterator I = CalledFunc->arg_begin(),
1642 E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
1643 Value *ActualArg = *AI;
1644
1645 // When byval arguments are actually inlined, we need to make the copy
1646 // implied by them explicit. However, we don't do this if the callee is
1647 // readonly or readnone, because the copy would be unneeded: the callee
1648 // doesn't modify the struct.
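// Illustrative IR (names and types are hypothetical): a call such as
//   call void @f(%struct.S* byval align 4 %p)
// is given an explicit caller-local copy during inlining, roughly
//   %p.copy = alloca %struct.S
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 <size>, i1 false)
// and uses of the byval parameter in the inlined body are remapped to the
// copy.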
1649 if (CS.isByValArgument(ArgNo)) { 1650 ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI, 1651 CalledFunc->getParamAlignment(ArgNo)); 1652 if (ActualArg != *AI) 1653 ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI)); 1654 } 1655 1656 VMap[&*I] = ActualArg; 1657 } 1658 1659 // Add alignment assumptions if necessary. We do this before the inlined 1660 // instructions are actually cloned into the caller so that we can easily 1661 // check what will be known at the start of the inlined code. 1662 AddAlignmentAssumptions(CS, IFI); 1663 1664 // We want the inliner to prune the code as it copies. We would LOVE to 1665 // have no dead or constant instructions leftover after inlining occurs 1666 // (which can happen, e.g., because an argument was constant), but we'll be 1667 // happy with whatever the cloner can do. 1668 CloneAndPruneFunctionInto(Caller, CalledFunc, VMap, 1669 /*ModuleLevelChanges=*/false, Returns, ".i", 1670 &InlinedFunctionInfo, TheCall); 1671 // Remember the first block that is newly cloned over. 1672 FirstNewBlock = LastBlock; ++FirstNewBlock; 1673 1674 if (IFI.CallerBFI != nullptr && IFI.CalleeBFI != nullptr) 1675 // Update the BFI of blocks cloned into the caller. 1676 updateCallerBFI(OrigBB, VMap, IFI.CallerBFI, IFI.CalleeBFI, 1677 CalledFunc->front()); 1678 1679 updateCallProfile(CalledFunc, VMap, CalledFunc->getEntryCount(), TheCall, 1680 IFI.PSI, IFI.CallerBFI); 1681 // Update the profile count of callee. 1682 updateCalleeCount(IFI.CallerBFI, OrigBB, TheCall, CalledFunc, IFI.PSI); 1683 1684 // Inject byval arguments initialization. 1685 for (std::pair<Value*, Value*> &Init : ByValInit) 1686 HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(), 1687 &*FirstNewBlock, IFI); 1688 1689 Optional<OperandBundleUse> ParentDeopt = 1690 CS.getOperandBundle(LLVMContext::OB_deopt); 1691 if (ParentDeopt) { 1692 SmallVector<OperandBundleDef, 2> OpDefs; 1693 1694 for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) { 1695 Instruction *I = dyn_cast_or_null<Instruction>(VH); 1696 if (!I) continue; // instruction was DCE'd or RAUW'ed to undef 1697 1698 OpDefs.clear(); 1699 1700 CallSite ICS(I); 1701 OpDefs.reserve(ICS.getNumOperandBundles()); 1702 1703 for (unsigned i = 0, e = ICS.getNumOperandBundles(); i < e; ++i) { 1704 auto ChildOB = ICS.getOperandBundleAt(i); 1705 if (ChildOB.getTagID() != LLVMContext::OB_deopt) { 1706 // If the inlined call has other operand bundles, let them be 1707 OpDefs.emplace_back(ChildOB); 1708 continue; 1709 } 1710 1711 // It may be useful to separate this logic (of handling operand 1712 // bundles) out to a separate "policy" component if this gets crowded. 1713 // Prepend the parent's deoptimization continuation to the newly 1714 // inlined call's deoptimization continuation. 
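// For example (operand names are hypothetical), a parent bundle
// "deopt"(i32 %a, i32 %b) and a child bundle "deopt"(i32 %c) are merged
// into a single "deopt"(i32 %a, i32 %b, i32 %c) bundle on the rewritten
// call.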
1715 std::vector<Value *> MergedDeoptArgs; 1716 MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() + 1717 ChildOB.Inputs.size()); 1718 1719 MergedDeoptArgs.insert(MergedDeoptArgs.end(), 1720 ParentDeopt->Inputs.begin(), 1721 ParentDeopt->Inputs.end()); 1722 MergedDeoptArgs.insert(MergedDeoptArgs.end(), ChildOB.Inputs.begin(), 1723 ChildOB.Inputs.end()); 1724 1725 OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs)); 1726 } 1727 1728 Instruction *NewI = nullptr; 1729 if (isa<CallInst>(I)) 1730 NewI = CallInst::Create(cast<CallInst>(I), OpDefs, I); 1731 else 1732 NewI = InvokeInst::Create(cast<InvokeInst>(I), OpDefs, I); 1733 1734 // Note: the RAUW does the appropriate fixup in VMap, so we need to do 1735 // this even if the call returns void. 1736 I->replaceAllUsesWith(NewI); 1737 1738 VH = nullptr; 1739 I->eraseFromParent(); 1740 } 1741 } 1742 1743 // Update the callgraph if requested. 1744 if (IFI.CG) 1745 UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI); 1746 1747 // For 'nodebug' functions, the associated DISubprogram is always null. 1748 // Conservatively avoid propagating the callsite debug location to 1749 // instructions inlined from a function whose DISubprogram is not null. 1750 fixupLineNumbers(Caller, FirstNewBlock, TheCall, 1751 CalledFunc->getSubprogram() != nullptr); 1752 1753 // Clone existing noalias metadata if necessary. 1754 CloneAliasScopeMetadata(CS, VMap); 1755 1756 // Add noalias metadata if necessary. 1757 AddAliasScopeMetadata(CS, VMap, DL, CalleeAAR); 1758 1759 // Propagate llvm.mem.parallel_loop_access if necessary. 1760 PropagateParallelLoopAccessMetadata(CS, VMap); 1761 1762 // Register any cloned assumptions. 1763 if (IFI.GetAssumptionCache) 1764 for (BasicBlock &NewBlock : 1765 make_range(FirstNewBlock->getIterator(), Caller->end())) 1766 for (Instruction &I : NewBlock) { 1767 if (auto *II = dyn_cast<IntrinsicInst>(&I)) 1768 if (II->getIntrinsicID() == Intrinsic::assume) 1769 (*IFI.GetAssumptionCache)(*Caller).registerAssumption(II); 1770 } 1771 } 1772 1773 // If there are any alloca instructions in the block that used to be the entry 1774 // block for the callee, move them to the entry block of the caller. First 1775 // calculate which instruction they should be inserted before. We insert the 1776 // instructions at the end of the current alloca list. 1777 { 1778 BasicBlock::iterator InsertPoint = Caller->begin()->begin(); 1779 for (BasicBlock::iterator I = FirstNewBlock->begin(), 1780 E = FirstNewBlock->end(); I != E; ) { 1781 AllocaInst *AI = dyn_cast<AllocaInst>(I++); 1782 if (!AI) continue; 1783 1784 // If the alloca is now dead, remove it. This often occurs due to code 1785 // specialization. 1786 if (AI->use_empty()) { 1787 AI->eraseFromParent(); 1788 continue; 1789 } 1790 1791 if (!allocaWouldBeStaticInEntry(AI)) 1792 continue; 1793 1794 // Keep track of the static allocas that we inline into the caller. 1795 IFI.StaticAllocas.push_back(AI); 1796 1797 // Scan for the block of allocas that we can move over, and move them 1798 // all at once. 1799 while (isa<AllocaInst>(I) && 1800 allocaWouldBeStaticInEntry(cast<AllocaInst>(I))) { 1801 IFI.StaticAllocas.push_back(cast<AllocaInst>(I)); 1802 ++I; 1803 } 1804 1805 // Transfer all of the allocas over in a block. Using splice means 1806 // that the instructions aren't removed from the symbol table, then 1807 // reinserted. 
1808 Caller->getEntryBlock().getInstList().splice( 1809 InsertPoint, FirstNewBlock->getInstList(), AI->getIterator(), I); 1810 } 1811 // Move any dbg.declares describing the allocas into the entry basic block. 1812 DIBuilder DIB(*Caller->getParent()); 1813 for (auto &AI : IFI.StaticAllocas) 1814 replaceDbgDeclareForAlloca(AI, AI, DIB, DIExpression::NoDeref, 0, 1815 DIExpression::NoDeref); 1816 } 1817 1818 SmallVector<Value*,4> VarArgsToForward; 1819 SmallVector<AttributeSet, 4> VarArgsAttrs; 1820 for (unsigned i = CalledFunc->getFunctionType()->getNumParams(); 1821 i < CS.getNumArgOperands(); i++) { 1822 VarArgsToForward.push_back(CS.getArgOperand(i)); 1823 VarArgsAttrs.push_back(CS.getAttributes().getParamAttributes(i)); 1824 } 1825 1826 bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false; 1827 if (InlinedFunctionInfo.ContainsCalls) { 1828 CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None; 1829 if (CallInst *CI = dyn_cast<CallInst>(TheCall)) 1830 CallSiteTailKind = CI->getTailCallKind(); 1831 1832 // For inlining purposes, the "notail" marker is the same as no marker. 1833 if (CallSiteTailKind == CallInst::TCK_NoTail) 1834 CallSiteTailKind = CallInst::TCK_None; 1835 1836 for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E; 1837 ++BB) { 1838 for (auto II = BB->begin(); II != BB->end();) { 1839 Instruction &I = *II++; 1840 CallInst *CI = dyn_cast<CallInst>(&I); 1841 if (!CI) 1842 continue; 1843 1844 // Forward varargs from inlined call site to calls to the 1845 // ForwardVarArgsTo function, if requested, and to musttail calls. 1846 if (!VarArgsToForward.empty() && 1847 ((ForwardVarArgsTo && 1848 CI->getCalledFunction() == ForwardVarArgsTo) || 1849 CI->isMustTailCall())) { 1850 // Collect attributes for non-vararg parameters. 1851 AttributeList Attrs = CI->getAttributes(); 1852 SmallVector<AttributeSet, 8> ArgAttrs; 1853 if (!Attrs.isEmpty() || !VarArgsAttrs.empty()) { 1854 for (unsigned ArgNo = 0; 1855 ArgNo < CI->getFunctionType()->getNumParams(); ++ArgNo) 1856 ArgAttrs.push_back(Attrs.getParamAttributes(ArgNo)); 1857 } 1858 1859 // Add VarArg attributes. 1860 ArgAttrs.append(VarArgsAttrs.begin(), VarArgsAttrs.end()); 1861 Attrs = AttributeList::get(CI->getContext(), Attrs.getFnAttributes(), 1862 Attrs.getRetAttributes(), ArgAttrs); 1863 // Add VarArgs to existing parameters. 1864 SmallVector<Value *, 6> Params(CI->arg_operands()); 1865 Params.append(VarArgsToForward.begin(), VarArgsToForward.end()); 1866 CallInst *NewCI = 1867 CallInst::Create(CI->getCalledFunction() ? CI->getCalledFunction() 1868 : CI->getCalledValue(), 1869 Params, "", CI); 1870 NewCI->setDebugLoc(CI->getDebugLoc()); 1871 NewCI->setAttributes(Attrs); 1872 NewCI->setCallingConv(CI->getCallingConv()); 1873 CI->replaceAllUsesWith(NewCI); 1874 CI->eraseFromParent(); 1875 CI = NewCI; 1876 } 1877 1878 if (Function *F = CI->getCalledFunction()) 1879 InlinedDeoptimizeCalls |= 1880 F->getIntrinsicID() == Intrinsic::experimental_deoptimize; 1881 1882 // We need to reduce the strength of any inlined tail calls. For 1883 // musttail, we have to avoid introducing potential unbounded stack 1884 // growth. For example, if functions 'f' and 'g' are mutually recursive 1885 // with musttail, we can inline 'g' into 'f' so long as we preserve 1886 // musttail on the cloned call to 'f'. If either the inlined call site 1887 // or the cloned call site is *not* musttail, the program already has 1888 // one frame of stack growth, so it's safe to remove musttail. 
Here is 1889 // a table of example transformations: 1890 // 1891 // f -> musttail g -> musttail f ==> f -> musttail f 1892 // f -> musttail g -> tail f ==> f -> tail f 1893 // f -> g -> musttail f ==> f -> f 1894 // f -> g -> tail f ==> f -> f 1895 // 1896 // Inlined notail calls should remain notail calls. 1897 CallInst::TailCallKind ChildTCK = CI->getTailCallKind(); 1898 if (ChildTCK != CallInst::TCK_NoTail) 1899 ChildTCK = std::min(CallSiteTailKind, ChildTCK); 1900 CI->setTailCallKind(ChildTCK); 1901 InlinedMustTailCalls |= CI->isMustTailCall(); 1902 1903 // Calls inlined through a 'nounwind' call site should be marked 1904 // 'nounwind'. 1905 if (MarkNoUnwind) 1906 CI->setDoesNotThrow(); 1907 } 1908 } 1909 } 1910 1911 // Leave lifetime markers for the static alloca's, scoping them to the 1912 // function we just inlined. 1913 if (InsertLifetime && !IFI.StaticAllocas.empty()) { 1914 IRBuilder<> builder(&FirstNewBlock->front()); 1915 for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) { 1916 AllocaInst *AI = IFI.StaticAllocas[ai]; 1917 // Don't mark swifterror allocas. They can't have bitcast uses. 1918 if (AI->isSwiftError()) 1919 continue; 1920 1921 // If the alloca is already scoped to something smaller than the whole 1922 // function then there's no need to add redundant, less accurate markers. 1923 if (hasLifetimeMarkers(AI)) 1924 continue; 1925 1926 // Try to determine the size of the allocation. 1927 ConstantInt *AllocaSize = nullptr; 1928 if (ConstantInt *AIArraySize = 1929 dyn_cast<ConstantInt>(AI->getArraySize())) { 1930 auto &DL = Caller->getParent()->getDataLayout(); 1931 Type *AllocaType = AI->getAllocatedType(); 1932 uint64_t AllocaTypeSize = DL.getTypeAllocSize(AllocaType); 1933 uint64_t AllocaArraySize = AIArraySize->getLimitedValue(); 1934 1935 // Don't add markers for zero-sized allocas. 1936 if (AllocaArraySize == 0) 1937 continue; 1938 1939 // Check that array size doesn't saturate uint64_t and doesn't 1940 // overflow when it's multiplied by type size. 1941 if (AllocaArraySize != std::numeric_limits<uint64_t>::max() && 1942 std::numeric_limits<uint64_t>::max() / AllocaArraySize >= 1943 AllocaTypeSize) { 1944 AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()), 1945 AllocaArraySize * AllocaTypeSize); 1946 } 1947 } 1948 1949 builder.CreateLifetimeStart(AI, AllocaSize); 1950 for (ReturnInst *RI : Returns) { 1951 // Don't insert llvm.lifetime.end calls between a musttail or deoptimize 1952 // call and a return. The return kills all local allocas. 1953 if (InlinedMustTailCalls && 1954 RI->getParent()->getTerminatingMustTailCall()) 1955 continue; 1956 if (InlinedDeoptimizeCalls && 1957 RI->getParent()->getTerminatingDeoptimizeCall()) 1958 continue; 1959 IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize); 1960 } 1961 } 1962 } 1963 1964 // If the inlined code contained dynamic alloca instructions, wrap the inlined 1965 // code with llvm.stacksave/llvm.stackrestore intrinsics. 1966 if (InlinedFunctionInfo.ContainsDynamicAllocas) { 1967 Module *M = Caller->getParent(); 1968 // Get the two intrinsics we care about. 1969 Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave); 1970 Function *StackRestore=Intrinsic::getDeclaration(M,Intrinsic::stackrestore); 1971 1972 // Insert the llvm.stacksave. 1973 CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin()) 1974 .CreateCall(StackSave, {}, "savedstack"); 1975 1976 // Insert a call to llvm.stackrestore before any return instructions in the 1977 // inlined function. 
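// Illustrative shape of the rewritten code (hypothetical IR):
//   %savedstack = call i8* @llvm.stacksave()
//   ... inlined body containing dynamic allocas ...
//   call void @llvm.stackrestore(i8* %savedstack)
//   ret ...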
1978 for (ReturnInst *RI : Returns) { 1979 // Don't insert llvm.stackrestore calls between a musttail or deoptimize 1980 // call and a return. The return will restore the stack pointer. 1981 if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall()) 1982 continue; 1983 if (InlinedDeoptimizeCalls && RI->getParent()->getTerminatingDeoptimizeCall()) 1984 continue; 1985 IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr); 1986 } 1987 } 1988 1989 // If we are inlining for an invoke instruction, we must make sure to rewrite 1990 // any call instructions into invoke instructions. This is sensitive to which 1991 // funclet pads were top-level in the inlinee, so must be done before 1992 // rewriting the "parent pad" links. 1993 if (auto *II = dyn_cast<InvokeInst>(TheCall)) { 1994 BasicBlock *UnwindDest = II->getUnwindDest(); 1995 Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI(); 1996 if (isa<LandingPadInst>(FirstNonPHI)) { 1997 HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo); 1998 } else { 1999 HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo); 2000 } 2001 } 2002 2003 // Update the lexical scopes of the new funclets and callsites. 2004 // Anything that had 'none' as its parent is now nested inside the callsite's 2005 // EHPad. 2006 2007 if (CallSiteEHPad) { 2008 for (Function::iterator BB = FirstNewBlock->getIterator(), 2009 E = Caller->end(); 2010 BB != E; ++BB) { 2011 // Add bundle operands to any top-level call sites. 2012 SmallVector<OperandBundleDef, 1> OpBundles; 2013 for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E;) { 2014 Instruction *I = &*BBI++; 2015 CallSite CS(I); 2016 if (!CS) 2017 continue; 2018 2019 // Skip call sites which are nounwind intrinsics. 2020 auto *CalledFn = 2021 dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts()); 2022 if (CalledFn && CalledFn->isIntrinsic() && CS.doesNotThrow()) 2023 continue; 2024 2025 // Skip call sites which already have a "funclet" bundle. 2026 if (CS.getOperandBundle(LLVMContext::OB_funclet)) 2027 continue; 2028 2029 CS.getOperandBundlesAsDefs(OpBundles); 2030 OpBundles.emplace_back("funclet", CallSiteEHPad); 2031 2032 Instruction *NewInst; 2033 if (CS.isCall()) 2034 NewInst = CallInst::Create(cast<CallInst>(I), OpBundles, I); 2035 else 2036 NewInst = InvokeInst::Create(cast<InvokeInst>(I), OpBundles, I); 2037 NewInst->takeName(I); 2038 I->replaceAllUsesWith(NewInst); 2039 I->eraseFromParent(); 2040 2041 OpBundles.clear(); 2042 } 2043 2044 // It is problematic if the inlinee has a cleanupret which unwinds to 2045 // caller and we inline it into a call site which doesn't unwind but into 2046 // an EH pad that does. Such an edge must be dynamically unreachable. 2047 // As such, we replace the cleanupret with unreachable. 
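// For example (hypothetical IR), a cloned 'cleanupret from %cp unwind to
// caller' is rewritten to 'unreachable' when the call site's own EH pad is
// known to unwind locally, since that edge can never be taken.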
2048 if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator())) 2049 if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally) 2050 changeToUnreachable(CleanupRet, /*UseLLVMTrap=*/false); 2051 2052 Instruction *I = BB->getFirstNonPHI(); 2053 if (!I->isEHPad()) 2054 continue; 2055 2056 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) { 2057 if (isa<ConstantTokenNone>(CatchSwitch->getParentPad())) 2058 CatchSwitch->setParentPad(CallSiteEHPad); 2059 } else { 2060 auto *FPI = cast<FuncletPadInst>(I); 2061 if (isa<ConstantTokenNone>(FPI->getParentPad())) 2062 FPI->setParentPad(CallSiteEHPad); 2063 } 2064 } 2065 } 2066 2067 if (InlinedDeoptimizeCalls) { 2068 // We need to at least remove the deoptimizing returns from the Return set, 2069 // so that the control flow from those returns does not get merged into the 2070 // caller (but terminate it instead). If the caller's return type does not 2071 // match the callee's return type, we also need to change the return type of 2072 // the intrinsic. 2073 if (Caller->getReturnType() == TheCall->getType()) { 2074 auto NewEnd = llvm::remove_if(Returns, [](ReturnInst *RI) { 2075 return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr; 2076 }); 2077 Returns.erase(NewEnd, Returns.end()); 2078 } else { 2079 SmallVector<ReturnInst *, 8> NormalReturns; 2080 Function *NewDeoptIntrinsic = Intrinsic::getDeclaration( 2081 Caller->getParent(), Intrinsic::experimental_deoptimize, 2082 {Caller->getReturnType()}); 2083 2084 for (ReturnInst *RI : Returns) { 2085 CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall(); 2086 if (!DeoptCall) { 2087 NormalReturns.push_back(RI); 2088 continue; 2089 } 2090 2091 // The calling convention on the deoptimize call itself may be bogus, 2092 // since the code we're inlining may have undefined behavior (and may 2093 // never actually execute at runtime); but all 2094 // @llvm.experimental.deoptimize declarations have to have the same 2095 // calling convention in a well-formed module. 2096 auto CallingConv = DeoptCall->getCalledFunction()->getCallingConv(); 2097 NewDeoptIntrinsic->setCallingConv(CallingConv); 2098 auto *CurBB = RI->getParent(); 2099 RI->eraseFromParent(); 2100 2101 SmallVector<Value *, 4> CallArgs(DeoptCall->arg_begin(), 2102 DeoptCall->arg_end()); 2103 2104 SmallVector<OperandBundleDef, 1> OpBundles; 2105 DeoptCall->getOperandBundlesAsDefs(OpBundles); 2106 DeoptCall->eraseFromParent(); 2107 assert(!OpBundles.empty() && 2108 "Expected at least the deopt operand bundle"); 2109 2110 IRBuilder<> Builder(CurBB); 2111 CallInst *NewDeoptCall = 2112 Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles); 2113 NewDeoptCall->setCallingConv(CallingConv); 2114 if (NewDeoptCall->getType()->isVoidTy()) 2115 Builder.CreateRetVoid(); 2116 else 2117 Builder.CreateRet(NewDeoptCall); 2118 } 2119 2120 // Leave behind the normal returns so we can merge control flow. 2121 std::swap(Returns, NormalReturns); 2122 } 2123 } 2124 2125 // Handle any inlined musttail call sites. In order for a new call site to be 2126 // musttail, the source of the clone and the inlined call site must have been 2127 // musttail. Therefore it's safe to return without merging control into the 2128 // phi below. 2129 if (InlinedMustTailCalls) { 2130 // Check if we need to bitcast the result of any musttail calls. 2131 Type *NewRetTy = Caller->getReturnType(); 2132 bool NeedBitCast = !TheCall->use_empty() && TheCall->getType() != NewRetTy; 2133 2134 // Handle the returns preceded by musttail calls separately. 
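// Illustrative rewrite when a bitcast is needed (hypothetical IR), assuming
// the callee returns i8* while the caller returns i32*:
//   %r = musttail call i8* @callee(...)
//   ret i8* %r
// becomes
//   %r = musttail call i8* @callee(...)
//   %c = bitcast i8* %r to i32*
//   ret i32* %c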
2135 SmallVector<ReturnInst *, 8> NormalReturns; 2136 for (ReturnInst *RI : Returns) { 2137 CallInst *ReturnedMustTail = 2138 RI->getParent()->getTerminatingMustTailCall(); 2139 if (!ReturnedMustTail) { 2140 NormalReturns.push_back(RI); 2141 continue; 2142 } 2143 if (!NeedBitCast) 2144 continue; 2145 2146 // Delete the old return and any preceding bitcast. 2147 BasicBlock *CurBB = RI->getParent(); 2148 auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue()); 2149 RI->eraseFromParent(); 2150 if (OldCast) 2151 OldCast->eraseFromParent(); 2152 2153 // Insert a new bitcast and return with the right type. 2154 IRBuilder<> Builder(CurBB); 2155 Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy)); 2156 } 2157 2158 // Leave behind the normal returns so we can merge control flow. 2159 std::swap(Returns, NormalReturns); 2160 } 2161 2162 // Now that all of the transforms on the inlined code have taken place but 2163 // before we splice the inlined code into the CFG and lose track of which 2164 // blocks were actually inlined, collect the call sites. We only do this if 2165 // call graph updates weren't requested, as those provide value handle based 2166 // tracking of inlined call sites instead. 2167 if (InlinedFunctionInfo.ContainsCalls && !IFI.CG) { 2168 // Otherwise just collect the raw call sites that were inlined. 2169 for (BasicBlock &NewBB : 2170 make_range(FirstNewBlock->getIterator(), Caller->end())) 2171 for (Instruction &I : NewBB) 2172 if (auto CS = CallSite(&I)) 2173 IFI.InlinedCallSites.push_back(CS); 2174 } 2175 2176 // If we cloned in _exactly one_ basic block, and if that block ends in a 2177 // return instruction, we splice the body of the inlined callee directly into 2178 // the calling basic block. 2179 if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) { 2180 // Move all of the instructions right before the call. 2181 OrigBB->getInstList().splice(TheCall->getIterator(), 2182 FirstNewBlock->getInstList(), 2183 FirstNewBlock->begin(), FirstNewBlock->end()); 2184 // Remove the cloned basic block. 2185 Caller->getBasicBlockList().pop_back(); 2186 2187 // If the call site was an invoke instruction, add a branch to the normal 2188 // destination. 2189 if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) { 2190 BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall); 2191 NewBr->setDebugLoc(Returns[0]->getDebugLoc()); 2192 } 2193 2194 // If the return instruction returned a value, replace uses of the call with 2195 // uses of the returned value. 2196 if (!TheCall->use_empty()) { 2197 ReturnInst *R = Returns[0]; 2198 if (TheCall == R->getReturnValue()) 2199 TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType())); 2200 else 2201 TheCall->replaceAllUsesWith(R->getReturnValue()); 2202 } 2203 // Since we are now done with the Call/Invoke, we can delete it. 2204 TheCall->eraseFromParent(); 2205 2206 // Since we are now done with the return instruction, delete it also. 2207 Returns[0]->eraseFromParent(); 2208 2209 // We are now done with the inlining. 2210 return true; 2211 } 2212 2213 // Otherwise, we have the normal case, of more than one block to inline or 2214 // multiple return sites. 2215 2216 // We want to clone the entire callee function into the hole between the 2217 // "starter" and "ender" blocks. How we accomplish this depends on whether 2218 // this is an invoke instruction or a call instruction. 
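// Roughly (block and function names are hypothetical): for a plain call, the
// caller block
//   OrigBB:  ...; %r = call i32 @callee(); <rest>
// is split just before the call, the cloned callee blocks are spliced in
// between, and <rest> ends up in a new "callee.exit" block (AfterCallBB)
// that the inlined return sites branch to.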
2219 BasicBlock *AfterCallBB; 2220 BranchInst *CreatedBranchToNormalDest = nullptr; 2221 if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) { 2222 2223 // Add an unconditional branch to make this look like the CallInst case... 2224 CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), TheCall); 2225 2226 // Split the basic block. This guarantees that no PHI nodes will have to be 2227 // updated due to new incoming edges, and make the invoke case more 2228 // symmetric to the call case. 2229 AfterCallBB = 2230 OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(), 2231 CalledFunc->getName() + ".exit"); 2232 2233 } else { // It's a call 2234 // If this is a call instruction, we need to split the basic block that 2235 // the call lives in. 2236 // 2237 AfterCallBB = OrigBB->splitBasicBlock(TheCall->getIterator(), 2238 CalledFunc->getName() + ".exit"); 2239 } 2240 2241 if (IFI.CallerBFI) { 2242 // Copy original BB's block frequency to AfterCallBB 2243 IFI.CallerBFI->setBlockFreq( 2244 AfterCallBB, IFI.CallerBFI->getBlockFreq(OrigBB).getFrequency()); 2245 } 2246 2247 // Change the branch that used to go to AfterCallBB to branch to the first 2248 // basic block of the inlined function. 2249 // 2250 TerminatorInst *Br = OrigBB->getTerminator(); 2251 assert(Br && Br->getOpcode() == Instruction::Br && 2252 "splitBasicBlock broken!"); 2253 Br->setOperand(0, &*FirstNewBlock); 2254 2255 // Now that the function is correct, make it a little bit nicer. In 2256 // particular, move the basic blocks inserted from the end of the function 2257 // into the space made by splitting the source basic block. 2258 Caller->getBasicBlockList().splice(AfterCallBB->getIterator(), 2259 Caller->getBasicBlockList(), FirstNewBlock, 2260 Caller->end()); 2261 2262 // Handle all of the return instructions that we just cloned in, and eliminate 2263 // any users of the original call/invoke instruction. 2264 Type *RTy = CalledFunc->getReturnType(); 2265 2266 PHINode *PHI = nullptr; 2267 if (Returns.size() > 1) { 2268 // The PHI node should go at the front of the new basic block to merge all 2269 // possible incoming values. 2270 if (!TheCall->use_empty()) { 2271 PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(), 2272 &AfterCallBB->front()); 2273 // Anything that used the result of the function call should now use the 2274 // PHI node as their operand. 2275 TheCall->replaceAllUsesWith(PHI); 2276 } 2277 2278 // Loop over all of the return instructions adding entries to the PHI node 2279 // as appropriate. 2280 if (PHI) { 2281 for (unsigned i = 0, e = Returns.size(); i != e; ++i) { 2282 ReturnInst *RI = Returns[i]; 2283 assert(RI->getReturnValue()->getType() == PHI->getType() && 2284 "Ret value not consistent in function!"); 2285 PHI->addIncoming(RI->getReturnValue(), RI->getParent()); 2286 } 2287 } 2288 2289 // Add a branch to the merge points and remove return instructions. 2290 DebugLoc Loc; 2291 for (unsigned i = 0, e = Returns.size(); i != e; ++i) { 2292 ReturnInst *RI = Returns[i]; 2293 BranchInst* BI = BranchInst::Create(AfterCallBB, RI); 2294 Loc = RI->getDebugLoc(); 2295 BI->setDebugLoc(Loc); 2296 RI->eraseFromParent(); 2297 } 2298 // We need to set the debug location to *somewhere* inside the 2299 // inlined function. The line number may be nonsensical, but the 2300 // instruction will at least be associated with the right 2301 // function. 
2302 if (CreatedBranchToNormalDest) 2303 CreatedBranchToNormalDest->setDebugLoc(Loc); 2304 } else if (!Returns.empty()) { 2305 // Otherwise, if there is exactly one return value, just replace anything 2306 // using the return value of the call with the computed value. 2307 if (!TheCall->use_empty()) { 2308 if (TheCall == Returns[0]->getReturnValue()) 2309 TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType())); 2310 else 2311 TheCall->replaceAllUsesWith(Returns[0]->getReturnValue()); 2312 } 2313 2314 // Update PHI nodes that use the ReturnBB to use the AfterCallBB. 2315 BasicBlock *ReturnBB = Returns[0]->getParent(); 2316 ReturnBB->replaceAllUsesWith(AfterCallBB); 2317 2318 // Splice the code from the return block into the block that it will return 2319 // to, which contains the code that was after the call. 2320 AfterCallBB->getInstList().splice(AfterCallBB->begin(), 2321 ReturnBB->getInstList()); 2322 2323 if (CreatedBranchToNormalDest) 2324 CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc()); 2325 2326 // Delete the return instruction now and empty ReturnBB now. 2327 Returns[0]->eraseFromParent(); 2328 ReturnBB->eraseFromParent(); 2329 } else if (!TheCall->use_empty()) { 2330 // No returns, but something is using the return value of the call. Just 2331 // nuke the result. 2332 TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType())); 2333 } 2334 2335 // Since we are now done with the Call/Invoke, we can delete it. 2336 TheCall->eraseFromParent(); 2337 2338 // If we inlined any musttail calls and the original return is now 2339 // unreachable, delete it. It can only contain a bitcast and ret. 2340 if (InlinedMustTailCalls && pred_begin(AfterCallBB) == pred_end(AfterCallBB)) 2341 AfterCallBB->eraseFromParent(); 2342 2343 // We should always be able to fold the entry block of the function into the 2344 // single predecessor of the block... 2345 assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!"); 2346 BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0); 2347 2348 // Splice the code entry block into calling block, right before the 2349 // unconditional branch. 2350 CalleeEntry->replaceAllUsesWith(OrigBB); // Update PHI nodes 2351 OrigBB->getInstList().splice(Br->getIterator(), CalleeEntry->getInstList()); 2352 2353 // Remove the unconditional branch. 2354 OrigBB->getInstList().erase(Br); 2355 2356 // Now we can remove the CalleeEntry block, which is now empty. 2357 Caller->getBasicBlockList().erase(CalleeEntry); 2358 2359 // If we inserted a phi node, check to see if it has a single value (e.g. all 2360 // the entries are the same or undef). If so, remove the PHI so it doesn't 2361 // block other optimizations. 2362 if (PHI) { 2363 AssumptionCache *AC = 2364 IFI.GetAssumptionCache ? &(*IFI.GetAssumptionCache)(*Caller) : nullptr; 2365 auto &DL = Caller->getParent()->getDataLayout(); 2366 if (Value *V = SimplifyInstruction(PHI, {DL, nullptr, nullptr, AC})) { 2367 PHI->replaceAllUsesWith(V); 2368 PHI->eraseFromParent(); 2369 } 2370 } 2371 2372 return true; 2373 } 2374