1 //===- InlineFunction.cpp - Code to perform function inlining -------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements inlining of a function into a call site, resolving
11 // parameters and the return value as appropriate.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Transforms/Utils/Cloning.h"
16 #include "llvm/ADT/SetVector.h"
17 #include "llvm/ADT/SmallSet.h"
18 #include "llvm/ADT/SmallVector.h"
19 #include "llvm/ADT/StringExtras.h"
20 #include "llvm/Analysis/AliasAnalysis.h"
21 #include "llvm/Analysis/AssumptionCache.h"
22 #include "llvm/Analysis/CallGraph.h"
23 #include "llvm/Analysis/CaptureTracking.h"
24 #include "llvm/Analysis/EHPersonalities.h"
25 #include "llvm/Analysis/InstructionSimplify.h"
26 #include "llvm/Analysis/ValueTracking.h"
27 #include "llvm/IR/Attributes.h"
28 #include "llvm/IR/CallSite.h"
29 #include "llvm/IR/CFG.h"
30 #include "llvm/IR/Constants.h"
31 #include "llvm/IR/DataLayout.h"
32 #include "llvm/IR/DebugInfo.h"
33 #include "llvm/IR/DerivedTypes.h"
34 #include "llvm/IR/DIBuilder.h"
35 #include "llvm/IR/Dominators.h"
36 #include "llvm/IR/IRBuilder.h"
37 #include "llvm/IR/Instructions.h"
38 #include "llvm/IR/IntrinsicInst.h"
39 #include "llvm/IR/Intrinsics.h"
40 #include "llvm/IR/MDBuilder.h"
41 #include "llvm/IR/Module.h"
42 #include "llvm/Transforms/Utils/Local.h"
43 #include "llvm/Support/CommandLine.h"
44 #include <algorithm>
45 
46 using namespace llvm;
47 
48 static cl::opt<bool>
49 EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
50   cl::Hidden,
51   cl::desc("Convert noalias attributes to metadata during inlining."));
52 
53 static cl::opt<bool>
54 PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining",
55   cl::init(true), cl::Hidden,
56   cl::desc("Convert align attributes to assumptions during inlining."));
57 
58 bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI,
59                           AAResults *CalleeAAR, bool InsertLifetime) {
60   return InlineFunction(CallSite(CI), IFI, CalleeAAR, InsertLifetime);
61 }
62 bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI,
63                           AAResults *CalleeAAR, bool InsertLifetime) {
64   return InlineFunction(CallSite(II), IFI, CalleeAAR, InsertLifetime);
65 }
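
// Illustrative usage (a sketch, not code from this file; CS, CG, and ACT are
// hypothetical caller-side variables, and the InlineFunctionInfo constructor
// arguments are an assumption about the usual API): clients construct an
// InlineFunctionInfo, pass it together with a call or invoke whose callee has
// a definition to one of the overloads above (or to the CallSite overload they
// forward to), and on success the call site has been replaced by the inlined
// body:
//
//   InlineFunctionInfo IFI(&CG, &ACT);
//   if (InlineFunction(CS, IFI))
//     ; // CS's instruction is gone; IFI.StaticAllocas and IFI.InlinedCalls
//       // describe what the inlining introduced into the caller.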
66 
67 namespace {
68   /// A class for recording information about inlining a landing pad.
69   class LandingPadInliningInfo {
70     BasicBlock *OuterResumeDest; ///< Destination of the invoke's unwind.
71     BasicBlock *InnerResumeDest; ///< Destination for the callee's resume.
72     LandingPadInst *CallerLPad;  ///< LandingPadInst associated with the invoke.
73     PHINode *InnerEHValuesPHI;   ///< PHI for EH values from landingpad insts.
74     SmallVector<Value*, 8> UnwindDestPHIValues;
75 
76   public:
77     LandingPadInliningInfo(InvokeInst *II)
78       : OuterResumeDest(II->getUnwindDest()), InnerResumeDest(nullptr),
79         CallerLPad(nullptr), InnerEHValuesPHI(nullptr) {
80       // If there are PHI nodes in the unwind destination block, we need to keep
81       // track of which values came into them from the invoke before removing
82       // the edge from this block.
83       llvm::BasicBlock *InvokeBB = II->getParent();
84       BasicBlock::iterator I = OuterResumeDest->begin();
85       for (; isa<PHINode>(I); ++I) {
86         // Save the value to use for this edge.
87         PHINode *PHI = cast<PHINode>(I);
88         UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
89       }
90 
91       CallerLPad = cast<LandingPadInst>(I);
92     }
93 
94     /// The outer unwind destination is the target of
95     /// unwind edges introduced for calls within the inlined function.
96     BasicBlock *getOuterResumeDest() const {
97       return OuterResumeDest;
98     }
99 
100     BasicBlock *getInnerResumeDest();
101 
102     LandingPadInst *getLandingPadInst() const { return CallerLPad; }
103 
104     /// Forward the 'resume' instruction to the caller's landing pad block.
105     /// When the landing pad block has only one predecessor, this is
106     /// a simple branch. When there is more than one predecessor, we need to
107     /// split the landing pad block after the landingpad instruction and jump
108     /// to there.
109     void forwardResume(ResumeInst *RI,
110                        SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);
111 
112     /// Add incoming-PHI values to the unwind destination block for the given
113     /// basic block, using the values for the original invoke's source block.
114     void addIncomingPHIValuesFor(BasicBlock *BB) const {
115       addIncomingPHIValuesForInto(BB, OuterResumeDest);
116     }
117 
118     void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
119       BasicBlock::iterator I = dest->begin();
120       for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
121         PHINode *phi = cast<PHINode>(I);
122         phi->addIncoming(UnwindDestPHIValues[i], src);
123       }
124     }
125   };
126 } // anonymous namespace
127 
128 /// Get or create a target for the branch from ResumeInsts.
129 BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
130   if (InnerResumeDest) return InnerResumeDest;
131 
132   // Split the landing pad.
133   BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator();
134   InnerResumeDest =
135     OuterResumeDest->splitBasicBlock(SplitPoint,
136                                      OuterResumeDest->getName() + ".body");
137 
138   // The number of incoming edges we expect to the inner landing pad.
139   const unsigned PHICapacity = 2;
140 
141   // Create corresponding new PHIs for all the PHIs in the outer landing pad.
142   Instruction *InsertPoint = &InnerResumeDest->front();
143   BasicBlock::iterator I = OuterResumeDest->begin();
144   for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
145     PHINode *OuterPHI = cast<PHINode>(I);
146     PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
147                                         OuterPHI->getName() + ".lpad-body",
148                                         InsertPoint);
149     OuterPHI->replaceAllUsesWith(InnerPHI);
150     InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
151   }
152 
153   // Create a PHI for the exception values.
154   InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
155                                      "eh.lpad-body", InsertPoint);
156   CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
157   InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);
158 
159   // All done.
160   return InnerResumeDest;
161 }
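
// For illustration, the split performed above turns a caller landing pad of
// roughly this shape (names illustrative):
//
//   lpad:
//     %lp = landingpad ...        ; CallerLPad
//     <rest of handler>
//
// into
//
//   lpad:
//     %lp = landingpad ...
//     br label %lpad.body
//
//   lpad.body:                    ; InnerResumeDest
//     %eh.lpad-body = phi ...     ; InnerEHValuesPHI; merges %lp with the
//     <rest of handler>           ; values forwarded from inlined 'resume's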
162 
163 /// Forward the 'resume' instruction to the caller's landing pad block.
164 /// When the landing pad block has only one predecessor, this is a simple
165 /// branch. When there is more than one predecessor, we need to split the
166 /// landing pad block after the landingpad instruction and jump to there.
167 void LandingPadInliningInfo::forwardResume(
168     ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) {
169   BasicBlock *Dest = getInnerResumeDest();
170   BasicBlock *Src = RI->getParent();
171 
172   BranchInst::Create(Dest, Src);
173 
174   // Update the PHIs in the destination. They were inserted in an order which
175   // makes this work.
176   addIncomingPHIValuesForInto(Src, Dest);
177 
178   InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
179   RI->eraseFromParent();
180 }
181 
182 /// Helper for getUnwindDestToken/getUnwindDestTokenHelper.
183 static Value *getParentPad(Value *EHPad) {
184   if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
185     return FPI->getParentPad();
186   return cast<CatchSwitchInst>(EHPad)->getParentPad();
187 }
188 
189 typedef DenseMap<Instruction *, Value *> UnwindDestMemoTy;
190 
191 /// Helper for getUnwindDestToken that does the descendant-ward part of
192 /// the search.
193 static Value *getUnwindDestTokenHelper(Instruction *EHPad,
194                                        UnwindDestMemoTy &MemoMap) {
195   SmallVector<Instruction *, 8> Worklist(1, EHPad);
196 
197   while (!Worklist.empty()) {
198     Instruction *CurrentPad = Worklist.pop_back_val();
199     // We only put pads on the worklist that aren't in the MemoMap.  When
200     // we find an unwind dest for a pad we may update its ancestors, but
201     // the queue only ever contains uncles/great-uncles/etc. of CurrentPad,
202     // so they should never get updated while queued on the worklist.
203     assert(!MemoMap.count(CurrentPad));
204     Value *UnwindDestToken = nullptr;
205     if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
206       if (CatchSwitch->hasUnwindDest()) {
207         UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI();
208       } else {
209         // Catchswitch doesn't have a 'nounwind' variant, and one might be
210         // annotated as "unwinds to caller" when really it's nounwind (see
211         // e.g. SimplifyCFGOpt::SimplifyUnreachable), so we can't infer the
212         // parent's unwind dest from this.  We can check its catchpads'
213         // descendants, since they might include a cleanuppad with an
214         // "unwinds to caller" cleanupret, which can be trusted.
215         for (auto HI = CatchSwitch->handler_begin(),
216                   HE = CatchSwitch->handler_end();
217              HI != HE && !UnwindDestToken; ++HI) {
218           BasicBlock *HandlerBlock = *HI;
219           auto *CatchPad = cast<CatchPadInst>(HandlerBlock->getFirstNonPHI());
220           for (User *Child : CatchPad->users()) {
221             // Intentionally ignore invokes here -- since the catchswitch is
222             // marked "unwind to caller", it would be a verifier error if it
223             // contained an invoke which unwinds out of it, so any invoke we'd
224             // encounter must unwind to some child of the catch.
225             if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
226               continue;
227 
228             Instruction *ChildPad = cast<Instruction>(Child);
229             auto Memo = MemoMap.find(ChildPad);
230             if (Memo == MemoMap.end()) {
              // Haven't figured out this child pad yet; queue it.
232               Worklist.push_back(ChildPad);
233               continue;
234             }
235             // We've already checked this child, but might have found that
236             // it offers no proof either way.
237             Value *ChildUnwindDestToken = Memo->second;
238             if (!ChildUnwindDestToken)
239               continue;
240             // We already know the child's unwind dest, which can either
241             // be ConstantTokenNone to indicate unwind to caller, or can
242             // be another child of the catchpad.  Only the former indicates
243             // the unwind dest of the catchswitch.
244             if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
245               UnwindDestToken = ChildUnwindDestToken;
246               break;
247             }
248             assert(getParentPad(ChildUnwindDestToken) == CatchPad);
249           }
250         }
251       }
252     } else {
253       auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
254       for (User *U : CleanupPad->users()) {
255         if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
256           if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
257             UnwindDestToken = RetUnwindDest->getFirstNonPHI();
258           else
259             UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext());
260           break;
261         }
262         Value *ChildUnwindDestToken;
263         if (auto *Invoke = dyn_cast<InvokeInst>(U)) {
264           ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI();
265         } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
266           Instruction *ChildPad = cast<Instruction>(U);
267           auto Memo = MemoMap.find(ChildPad);
268           if (Memo == MemoMap.end()) {
269             // Haven't resolved this child yet; queue it and keep searching.
270             Worklist.push_back(ChildPad);
271             continue;
272           }
273           // We've checked this child, but still need to ignore it if it
274           // had no proof either way.
275           ChildUnwindDestToken = Memo->second;
276           if (!ChildUnwindDestToken)
277             continue;
278         } else {
279           // Not a relevant user of the cleanuppad
280           continue;
281         }
282         // In a well-formed program, the child/invoke must either unwind to
283         // an(other) child of the cleanup, or exit the cleanup.  In the
284         // first case, continue searching.
285         if (isa<Instruction>(ChildUnwindDestToken) &&
286             getParentPad(ChildUnwindDestToken) == CleanupPad)
287           continue;
288         UnwindDestToken = ChildUnwindDestToken;
289         break;
290       }
291     }
292     // If we haven't found an unwind dest for CurrentPad, we may have queued its
293     // children, so move on to the next in the worklist.
294     if (!UnwindDestToken)
295       continue;
296 
297     // Now we know that CurrentPad unwinds to UnwindDestToken.  It also exits
298     // any ancestors of CurrentPad up to but not including UnwindDestToken's
299     // parent pad.  Record this in the memo map, and check to see if the
300     // original EHPad being queried is one of the ones exited.
301     Value *UnwindParent;
302     if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
303       UnwindParent = getParentPad(UnwindPad);
304     else
305       UnwindParent = nullptr;
306     bool ExitedOriginalPad = false;
307     for (Instruction *ExitedPad = CurrentPad;
308          ExitedPad && ExitedPad != UnwindParent;
309          ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) {
310       // Skip over catchpads since they just follow their catchswitches.
311       if (isa<CatchPadInst>(ExitedPad))
312         continue;
313       MemoMap[ExitedPad] = UnwindDestToken;
314       ExitedOriginalPad |= (ExitedPad == EHPad);
315     }
316 
317     if (ExitedOriginalPad)
318       return UnwindDestToken;
319 
320     // Continue the search.
321   }
322 
323   // No definitive information is contained within this funclet.
324   return nullptr;
325 }
326 
327 /// Given an EH pad, find where it unwinds.  If it unwinds to an EH pad,
328 /// return that pad instruction.  If it unwinds to caller, return
329 /// ConstantTokenNone.  If it does not have a definitive unwind destination,
330 /// return nullptr.
331 ///
332 /// This routine gets invoked for calls in funclets in inlinees when inlining
333 /// an invoke.  Since many funclets don't have calls inside them, it's queried
334 /// on-demand rather than building a map of pads to unwind dests up front.
335 /// Determining a funclet's unwind dest may require recursively searching its
336 /// descendants, and also ancestors and cousins if the descendants don't provide
337 /// an answer.  Since most funclets will have their unwind dest immediately
338 /// available as the unwind dest of a catchswitch or cleanupret, this routine
339 /// searches top-down from the given pad and then up. To avoid worst-case
340 /// quadratic run-time given that approach, it uses a memo map to avoid
341 /// re-processing funclet trees.  The callers that rewrite the IR as they go
342 /// take advantage of this, for correctness, by checking/forcing rewritten
343 /// pads' entries to match the original callee view.
344 static Value *getUnwindDestToken(Instruction *EHPad,
345                                  UnwindDestMemoTy &MemoMap) {
346   // Catchpads unwind to the same place as their catchswitch;
  // redirect any queries on catchpads so the code below can
348   // deal with just catchswitches and cleanuppads.
349   if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))
350     EHPad = CPI->getCatchSwitch();
351 
352   // Check if we've already determined the unwind dest for this pad.
353   auto Memo = MemoMap.find(EHPad);
354   if (Memo != MemoMap.end())
355     return Memo->second;
356 
357   // Search EHPad and, if necessary, its descendants.
358   Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
359   assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));
360   if (UnwindDestToken)
361     return UnwindDestToken;
362 
363   // No information is available for this EHPad from itself or any of its
364   // descendants.  An unwind all the way out to a pad in the caller would
  // also need to agree with the unwind dest of the parent funclet, so
366   // search up the chain to try to find a funclet with information.  Put
367   // null entries in the memo map to avoid re-processing as we go up.
368   MemoMap[EHPad] = nullptr;
369   Instruction *LastUselessPad = EHPad;
370   Value *AncestorToken;
371   for (AncestorToken = getParentPad(EHPad);
372        auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
373        AncestorToken = getParentPad(AncestorToken)) {
374     // Skip over catchpads since they just follow their catchswitches.
375     if (isa<CatchPadInst>(AncestorPad))
376       continue;
377     assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);
378     auto AncestorMemo = MemoMap.find(AncestorPad);
379     if (AncestorMemo == MemoMap.end()) {
380       UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
381     } else {
382       UnwindDestToken = AncestorMemo->second;
383     }
384     if (UnwindDestToken)
385       break;
386     LastUselessPad = AncestorPad;
387   }
388 
389   // Since the whole tree under LastUselessPad has no information, it all must
390   // match UnwindDestToken; record that to avoid repeating the search.
391   SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
392   while (!Worklist.empty()) {
393     Instruction *UselessPad = Worklist.pop_back_val();
394     assert(!MemoMap.count(UselessPad) || MemoMap[UselessPad] == nullptr);
395     MemoMap[UselessPad] = UnwindDestToken;
396     if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
397       for (BasicBlock *HandlerBlock : CatchSwitch->handlers())
398         for (User *U : HandlerBlock->getFirstNonPHI()->users())
399           if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
400             Worklist.push_back(cast<Instruction>(U));
401     } else {
402       assert(isa<CleanupPadInst>(UselessPad));
403       for (User *U : UselessPad->users())
404         if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
405           Worklist.push_back(cast<Instruction>(U));
406     }
407   }
408 
409   return UnwindDestToken;
410 }
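
// For example (illustrative): if a cleanuppad %cp has a cleanupret that
// unwinds to a block whose first non-PHI instruction is a catchswitch %cs,
// getUnwindDestToken(%cp, ...) returns %cs; if that cleanupret instead
// "unwinds to caller", it returns ConstantTokenNone; and if neither %cp, its
// descendants, nor its ancestors pin down a destination, it returns nullptr.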
411 
412 /// When we inline a basic block into an invoke,
413 /// we have to turn all of the calls that can throw into invokes.
/// This function analyzes BB to see if there are any calls, and if so,
/// it rewrites them to be invokes that jump to UnwindEdge, returning the block
/// that now ends in the new invoke so the caller can update the PHI nodes in
/// the unwind destination for the new predecessor.
417 static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
418     BasicBlock *BB, BasicBlock *UnwindEdge,
419     UnwindDestMemoTy *FuncletUnwindMap = nullptr) {
420   for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
421     Instruction *I = &*BBI++;
422 
423     // We only need to check for function calls: inlined invoke
424     // instructions require no special handling.
425     CallInst *CI = dyn_cast<CallInst>(I);
426 
427     if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
428       continue;
429 
430     // We do not need to (and in fact, cannot) convert possibly throwing calls
    // to @llvm.experimental.deoptimize (resp. @llvm.experimental.guard) into
    // invokes.  The caller's "segment" of the deoptimization continuation
    // attached to the newly inlined @llvm.experimental.deoptimize
    // (resp. @llvm.experimental.guard) call should contain the exception
435     // handling logic, if any.
436     if (auto *F = CI->getCalledFunction())
437       if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
438           F->getIntrinsicID() == Intrinsic::experimental_guard)
439         continue;
440 
441     if (auto FuncletBundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
442       // This call is nested inside a funclet.  If that funclet has an unwind
443       // destination within the inlinee, then unwinding out of this call would
444       // be UB.  Rewriting this call to an invoke which targets the inlined
445       // invoke's unwind dest would give the call's parent funclet multiple
446       // unwind destinations, which is something that subsequent EH table
      // generation can't handle and that the verifier rejects.  So when we
448       // see such a call, leave it as a call.
449       auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
450       Value *UnwindDestToken =
451           getUnwindDestToken(FuncletPad, *FuncletUnwindMap);
452       if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
453         continue;
454 #ifndef NDEBUG
455       Instruction *MemoKey;
456       if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
457         MemoKey = CatchPad->getCatchSwitch();
458       else
459         MemoKey = FuncletPad;
460       assert(FuncletUnwindMap->count(MemoKey) &&
461              (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
462              "must get memoized to avoid confusing later searches");
463 #endif // NDEBUG
464     }
465 
466     // Convert this function call into an invoke instruction.  First, split the
467     // basic block.
468     BasicBlock *Split =
469         BB->splitBasicBlock(CI->getIterator(), CI->getName() + ".noexc");
470 
471     // Delete the unconditional branch inserted by splitBasicBlock
472     BB->getInstList().pop_back();
473 
474     // Create the new invoke instruction.
475     SmallVector<Value*, 8> InvokeArgs(CI->arg_begin(), CI->arg_end());
476     SmallVector<OperandBundleDef, 1> OpBundles;
477 
478     CI->getOperandBundlesAsDefs(OpBundles);
479 
480     // Note: we're round tripping operand bundles through memory here, and that
481     // can potentially be avoided with a cleverer API design that we do not have
482     // as of this time.
483 
484     InvokeInst *II =
485         InvokeInst::Create(CI->getCalledValue(), Split, UnwindEdge, InvokeArgs,
486                            OpBundles, CI->getName(), BB);
487     II->setDebugLoc(CI->getDebugLoc());
488     II->setCallingConv(CI->getCallingConv());
489     II->setAttributes(CI->getAttributes());
490 
491     // Make sure that anything using the call now uses the invoke!  This also
492     // updates the CallGraph if present, because it uses a WeakVH.
493     CI->replaceAllUsesWith(II);
494 
495     // Delete the original call
496     Split->getInstList().pop_front();
497     return BB;
498   }
499   return nullptr;
500 }
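
// Rough illustration of the rewrite above (value and block names are
// illustrative): a throwing call in an inlined block
//
//     %r = call i32 @f(i32 %x)
//     <rest of block>
//
// becomes
//
//     %r = invoke i32 @f(i32 %x)
//             to label %r.noexc unwind label %unwind.dest
//   r.noexc:
//     <rest of block>
//
// where %unwind.dest stands for the UnwindEdge block supplied by the caller of
// this helper, which is then responsible for adding the new predecessor to
// that block's PHI nodes.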
501 
502 /// If we inlined an invoke site, we need to convert calls
503 /// in the body of the inlined function into invokes.
504 ///
505 /// II is the invoke instruction being inlined.  FirstNewBlock is the first
506 /// block of the inlined code (the last block is the end of the function),
507 /// and InlineCodeInfo is information about the code that got inlined.
508 static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock,
509                                     ClonedCodeInfo &InlinedCodeInfo) {
510   BasicBlock *InvokeDest = II->getUnwindDest();
511 
512   Function *Caller = FirstNewBlock->getParent();
513 
514   // The inlined code is currently at the end of the function, scan from the
515   // start of the inlined code to its end, checking for stuff we need to
516   // rewrite.
517   LandingPadInliningInfo Invoke(II);
518 
519   // Get all of the inlined landing pad instructions.
520   SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
521   for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end();
522        I != E; ++I)
523     if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
524       InlinedLPads.insert(II->getLandingPadInst());
525 
526   // Append the clauses from the outer landing pad instruction into the inlined
527   // landing pad instructions.
528   LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
529   for (LandingPadInst *InlinedLPad : InlinedLPads) {
530     unsigned OuterNum = OuterLPad->getNumClauses();
531     InlinedLPad->reserveClauses(OuterNum);
532     for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
533       InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
534     if (OuterLPad->isCleanup())
535       InlinedLPad->setCleanup(true);
536   }
537 
538   for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
539        BB != E; ++BB) {
540     if (InlinedCodeInfo.ContainsCalls)
541       if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
542               &*BB, Invoke.getOuterResumeDest()))
543         // Update any PHI nodes in the exceptional block to indicate that there
544         // is now a new entry in them.
545         Invoke.addIncomingPHIValuesFor(NewBB);
546 
547     // Forward any resumes that are remaining here.
548     if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
549       Invoke.forwardResume(RI, InlinedLPads);
550   }
551 
552   // Now that everything is happy, we have one final detail.  The PHI nodes in
553   // the exception destination block still have entries due to the original
554   // invoke instruction. Eliminate these entries (which might even delete the
555   // PHI node) now.
556   InvokeDest->removePredecessor(II->getParent());
557 }
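
// For example (type-info name illustrative): if the caller's landingpad was
//
//   %lp = landingpad { i8*, i32 } catch i8* @TypeInfo
//
// and an inlined landingpad had only "cleanup", the clause-copying loop above
// leaves the inlined one as, roughly,
//
//   %ilp = landingpad { i8*, i32 } cleanup catch i8* @TypeInfo
//
// so exceptions the caller expected to catch are still matched before any
// remaining resumes are forwarded to the caller's handler.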
558 
559 /// If we inlined an invoke site, we need to convert calls
560 /// in the body of the inlined function into invokes.
561 ///
562 /// II is the invoke instruction being inlined.  FirstNewBlock is the first
563 /// block of the inlined code (the last block is the end of the function),
564 /// and InlineCodeInfo is information about the code that got inlined.
565 static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock,
566                                ClonedCodeInfo &InlinedCodeInfo) {
567   BasicBlock *UnwindDest = II->getUnwindDest();
568   Function *Caller = FirstNewBlock->getParent();
569 
570   assert(UnwindDest->getFirstNonPHI()->isEHPad() && "unexpected BasicBlock!");
571 
572   // If there are PHI nodes in the unwind destination block, we need to keep
573   // track of which values came into them from the invoke before removing the
574   // edge from this block.
575   SmallVector<Value *, 8> UnwindDestPHIValues;
576   llvm::BasicBlock *InvokeBB = II->getParent();
577   for (Instruction &I : *UnwindDest) {
578     // Save the value to use for this edge.
579     PHINode *PHI = dyn_cast<PHINode>(&I);
580     if (!PHI)
581       break;
582     UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
583   }
584 
585   // Add incoming-PHI values to the unwind destination block for the given basic
586   // block, using the values for the original invoke's source block.
587   auto UpdatePHINodes = [&](BasicBlock *Src) {
588     BasicBlock::iterator I = UnwindDest->begin();
589     for (Value *V : UnwindDestPHIValues) {
590       PHINode *PHI = cast<PHINode>(I);
591       PHI->addIncoming(V, Src);
592       ++I;
593     }
594   };
595 
596   // This connects all the instructions which 'unwind to caller' to the invoke
597   // destination.
598   UnwindDestMemoTy FuncletUnwindMap;
599   for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
600        BB != E; ++BB) {
601     if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
602       if (CRI->unwindsToCaller()) {
603         auto *CleanupPad = CRI->getCleanupPad();
604         CleanupReturnInst::Create(CleanupPad, UnwindDest, CRI);
605         CRI->eraseFromParent();
606         UpdatePHINodes(&*BB);
607         // Finding a cleanupret with an unwind destination would confuse
608         // subsequent calls to getUnwindDestToken, so map the cleanuppad
609         // to short-circuit any such calls and recognize this as an "unwind
610         // to caller" cleanup.
611         assert(!FuncletUnwindMap.count(CleanupPad) ||
612                isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
613         FuncletUnwindMap[CleanupPad] =
614             ConstantTokenNone::get(Caller->getContext());
615       }
616     }
617 
618     Instruction *I = BB->getFirstNonPHI();
619     if (!I->isEHPad())
620       continue;
621 
622     Instruction *Replacement = nullptr;
623     if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
624       if (CatchSwitch->unwindsToCaller()) {
625         Value *UnwindDestToken;
626         if (auto *ParentPad =
627                 dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
628           // This catchswitch is nested inside another funclet.  If that
629           // funclet has an unwind destination within the inlinee, then
630           // unwinding out of this catchswitch would be UB.  Rewriting this
631           // catchswitch to unwind to the inlined invoke's unwind dest would
632           // give the parent funclet multiple unwind destinations, which is
633           // something that subsequent EH table generation can't handle and
          // that the verifier rejects.  So when we see such a catchswitch,
          // leave it as "unwind to caller".
636           UnwindDestToken = getUnwindDestToken(ParentPad, FuncletUnwindMap);
637           if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
638             continue;
639         } else {
640           // This catchswitch has no parent to inherit constraints from, and
641           // none of its descendants can have an unwind edge that exits it and
642           // targets another funclet in the inlinee.  It may or may not have a
643           // descendant that definitively has an unwind to caller.  In either
644           // case, we'll have to assume that any unwinds out of it may need to
645           // be routed to the caller, so treat it as though it has a definitive
646           // unwind to caller.
647           UnwindDestToken = ConstantTokenNone::get(Caller->getContext());
648         }
649         auto *NewCatchSwitch = CatchSwitchInst::Create(
650             CatchSwitch->getParentPad(), UnwindDest,
651             CatchSwitch->getNumHandlers(), CatchSwitch->getName(),
652             CatchSwitch);
653         for (BasicBlock *PadBB : CatchSwitch->handlers())
654           NewCatchSwitch->addHandler(PadBB);
655         // Propagate info for the old catchswitch over to the new one in
656         // the unwind map.  This also serves to short-circuit any subsequent
657         // checks for the unwind dest of this catchswitch, which would get
658         // confused if they found the outer handler in the callee.
659         FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
660         Replacement = NewCatchSwitch;
661       }
662     } else if (!isa<FuncletPadInst>(I)) {
663       llvm_unreachable("unexpected EHPad!");
664     }
665 
666     if (Replacement) {
667       Replacement->takeName(I);
668       I->replaceAllUsesWith(Replacement);
669       I->eraseFromParent();
670       UpdatePHINodes(&*BB);
671     }
672   }
673 
674   if (InlinedCodeInfo.ContainsCalls)
675     for (Function::iterator BB = FirstNewBlock->getIterator(),
676                             E = Caller->end();
677          BB != E; ++BB)
678       if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
679               &*BB, UnwindDest, &FuncletUnwindMap))
680         // Update any PHI nodes in the exceptional block to indicate that there
681         // is now a new entry in them.
682         UpdatePHINodes(NewBB);
683 
684   // Now that everything is happy, we have one final detail.  The PHI nodes in
685   // the exception destination block still have entries due to the original
686   // invoke instruction. Eliminate these entries (which might even delete the
687   // PHI node) now.
688   UnwindDest->removePredecessor(InvokeBB);
689 }
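
// For example, a cleanupret in the inlined code that "unwinds to caller",
//
//   cleanupret from %cp unwind to caller
//
// is rewritten above to target the inlined invoke's unwind destination,
//
//   cleanupret from %cp unwind label %invoke.unwind.dest
//
// (block name illustrative), and an "unwind to caller" catchswitch is
// similarly recreated with that destination, unless its parent funclet already
// unwinds somewhere inside the inlinee, in which case it is left untouched.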
690 
691 /// When inlining a call site that has !llvm.mem.parallel_loop_access metadata,
692 /// that metadata should be propagated to all memory-accessing cloned
693 /// instructions.
694 static void PropagateParallelLoopAccessMetadata(CallSite CS,
695                                                 ValueToValueMapTy &VMap) {
696   MDNode *M =
697     CS.getInstruction()->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
698   if (!M)
699     return;
700 
701   for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
702        VMI != VMIE; ++VMI) {
703     if (!VMI->second)
704       continue;
705 
706     Instruction *NI = dyn_cast<Instruction>(VMI->second);
707     if (!NI)
708       continue;
709 
710     if (MDNode *PM = NI->getMetadata(LLVMContext::MD_mem_parallel_loop_access)) {
      M = MDNode::concatenate(PM, M);
712       NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
713     } else if (NI->mayReadOrWriteMemory()) {
714       NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
715     }
716   }
717 }
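
// Illustrative effect: if the call site carried !llvm.mem.parallel_loop_access
// metadata !0, then after this runs every cloned memory-accessing instruction
// either has its own !llvm.mem.parallel_loop_access list concatenated with !0,
// or receives !0 directly if it had none, so the caller's parallel-loop
// guarantee is not silently dropped on the inlined body.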
718 
719 /// When inlining a function that contains noalias scope metadata,
720 /// this metadata needs to be cloned so that the inlined blocks
/// have different "unique scopes" at every call site. Were this not done, then
722 /// aliasing scopes from a function inlined into a caller multiple times could
723 /// not be differentiated (and this would lead to miscompiles because the
724 /// non-aliasing property communicated by the metadata could have
725 /// call-site-specific control dependencies).
726 static void CloneAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap) {
727   const Function *CalledFunc = CS.getCalledFunction();
728   SetVector<const MDNode *> MD;
729 
730   // Note: We could only clone the metadata if it is already used in the
731   // caller. I'm omitting that check here because it might confuse
732   // inter-procedural alias analysis passes. We can revisit this if it becomes
733   // an efficiency or overhead problem.
734 
735   for (Function::const_iterator I = CalledFunc->begin(), IE = CalledFunc->end();
736        I != IE; ++I)
737     for (BasicBlock::const_iterator J = I->begin(), JE = I->end(); J != JE; ++J) {
738       if (const MDNode *M = J->getMetadata(LLVMContext::MD_alias_scope))
739         MD.insert(M);
740       if (const MDNode *M = J->getMetadata(LLVMContext::MD_noalias))
741         MD.insert(M);
742     }
743 
744   if (MD.empty())
745     return;
746 
747   // Walk the existing metadata, adding the complete (perhaps cyclic) chain to
748   // the set.
749   SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
750   while (!Queue.empty()) {
751     const MDNode *M = cast<MDNode>(Queue.pop_back_val());
752     for (unsigned i = 0, ie = M->getNumOperands(); i != ie; ++i)
753       if (const MDNode *M1 = dyn_cast<MDNode>(M->getOperand(i)))
754         if (MD.insert(M1))
755           Queue.push_back(M1);
756   }
757 
758   // Now we have a complete set of all metadata in the chains used to specify
759   // the noalias scopes and the lists of those scopes.
760   SmallVector<TempMDTuple, 16> DummyNodes;
761   DenseMap<const MDNode *, TrackingMDNodeRef> MDMap;
762   for (SetVector<const MDNode *>::iterator I = MD.begin(), IE = MD.end();
763        I != IE; ++I) {
764     DummyNodes.push_back(MDTuple::getTemporary(CalledFunc->getContext(), None));
765     MDMap[*I].reset(DummyNodes.back().get());
766   }
767 
768   // Create new metadata nodes to replace the dummy nodes, replacing old
769   // metadata references with either a dummy node or an already-created new
770   // node.
771   for (SetVector<const MDNode *>::iterator I = MD.begin(), IE = MD.end();
772        I != IE; ++I) {
773     SmallVector<Metadata *, 4> NewOps;
774     for (unsigned i = 0, ie = (*I)->getNumOperands(); i != ie; ++i) {
775       const Metadata *V = (*I)->getOperand(i);
776       if (const MDNode *M = dyn_cast<MDNode>(V))
777         NewOps.push_back(MDMap[M]);
778       else
779         NewOps.push_back(const_cast<Metadata *>(V));
780     }
781 
782     MDNode *NewM = MDNode::get(CalledFunc->getContext(), NewOps);
783     MDTuple *TempM = cast<MDTuple>(MDMap[*I]);
784     assert(TempM->isTemporary() && "Expected temporary node");
785 
786     TempM->replaceAllUsesWith(NewM);
787   }
788 
789   // Now replace the metadata in the new inlined instructions with the
  // replacements from the map.
791   for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
792        VMI != VMIE; ++VMI) {
793     if (!VMI->second)
794       continue;
795 
796     Instruction *NI = dyn_cast<Instruction>(VMI->second);
797     if (!NI)
798       continue;
799 
800     if (MDNode *M = NI->getMetadata(LLVMContext::MD_alias_scope)) {
801       MDNode *NewMD = MDMap[M];
802       // If the call site also had alias scope metadata (a list of scopes to
803       // which instructions inside it might belong), propagate those scopes to
804       // the inlined instructions.
805       if (MDNode *CSM =
806               CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
807         NewMD = MDNode::concatenate(NewMD, CSM);
808       NI->setMetadata(LLVMContext::MD_alias_scope, NewMD);
809     } else if (NI->mayReadOrWriteMemory()) {
810       if (MDNode *M =
811               CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
812         NI->setMetadata(LLVMContext::MD_alias_scope, M);
813     }
814 
815     if (MDNode *M = NI->getMetadata(LLVMContext::MD_noalias)) {
816       MDNode *NewMD = MDMap[M];
817       // If the call site also had noalias metadata (a list of scopes with
818       // which instructions inside it don't alias), propagate those scopes to
819       // the inlined instructions.
820       if (MDNode *CSM =
821               CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
822         NewMD = MDNode::concatenate(NewMD, CSM);
823       NI->setMetadata(LLVMContext::MD_noalias, NewMD);
824     } else if (NI->mayReadOrWriteMemory()) {
825       if (MDNode *M = CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
826         NI->setMetadata(LLVMContext::MD_noalias, M);
827     }
828   }
829 }
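
// Illustrative motivation for the cloning above: if the callee tags a load
// with !alias.scope !A and a store with !noalias !A, AA may treat that pair as
// non-aliasing.  If the callee were inlined into the same caller twice and !A
// reused verbatim, a load from the first inlined copy and a store from the
// second copy would also appear non-aliasing, a guarantee the original scopes
// never made; cloning the scope chain per call site keeps each inlined copy's
// scopes distinct.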
830 
831 /// If the inlined function has noalias arguments,
832 /// then add new alias scopes for each noalias argument, tag the mapped noalias
833 /// parameters with noalias metadata specifying the new scope, and tag all
834 /// non-derived loads, stores and memory intrinsics with the new alias scopes.
835 static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap,
836                                   const DataLayout &DL, AAResults *CalleeAAR) {
837   if (!EnableNoAliasConversion)
838     return;
839 
840   const Function *CalledFunc = CS.getCalledFunction();
841   SmallVector<const Argument *, 4> NoAliasArgs;
842 
843   for (const Argument &Arg : CalledFunc->args())
844     if (Arg.hasNoAliasAttr() && !Arg.use_empty())
845       NoAliasArgs.push_back(&Arg);
846 
847   if (NoAliasArgs.empty())
848     return;
849 
850   // To do a good job, if a noalias variable is captured, we need to know if
851   // the capture point dominates the particular use we're considering.
852   DominatorTree DT;
853   DT.recalculate(const_cast<Function&>(*CalledFunc));
854 
855   // noalias indicates that pointer values based on the argument do not alias
856   // pointer values which are not based on it. So we add a new "scope" for each
857   // noalias function argument. Accesses using pointers based on that argument
858   // become part of that alias scope, accesses using pointers not based on that
859   // argument are tagged as noalias with that scope.
860 
861   DenseMap<const Argument *, MDNode *> NewScopes;
862   MDBuilder MDB(CalledFunc->getContext());
863 
864   // Create a new scope domain for this function.
865   MDNode *NewDomain =
866     MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
867   for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
868     const Argument *A = NoAliasArgs[i];
869 
870     std::string Name = CalledFunc->getName();
871     if (A->hasName()) {
872       Name += ": %";
873       Name += A->getName();
874     } else {
875       Name += ": argument ";
876       Name += utostr(i);
877     }
878 
879     // Note: We always create a new anonymous root here. This is true regardless
880     // of the linkage of the callee because the aliasing "scope" is not just a
881     // property of the callee, but also all control dependencies in the caller.
882     MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
883     NewScopes.insert(std::make_pair(A, NewScope));
884   }
885 
886   // Iterate over all new instructions in the map; for all memory-access
887   // instructions, add the alias scope metadata.
888   for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
889        VMI != VMIE; ++VMI) {
890     if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
891       if (!VMI->second)
892         continue;
893 
894       Instruction *NI = dyn_cast<Instruction>(VMI->second);
895       if (!NI)
896         continue;
897 
898       bool IsArgMemOnlyCall = false, IsFuncCall = false;
899       SmallVector<const Value *, 2> PtrArgs;
900 
901       if (const LoadInst *LI = dyn_cast<LoadInst>(I))
902         PtrArgs.push_back(LI->getPointerOperand());
903       else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
904         PtrArgs.push_back(SI->getPointerOperand());
905       else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
906         PtrArgs.push_back(VAAI->getPointerOperand());
907       else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
908         PtrArgs.push_back(CXI->getPointerOperand());
909       else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
910         PtrArgs.push_back(RMWI->getPointerOperand());
911       else if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
912         // If we know that the call does not access memory, then we'll still
913         // know that about the inlined clone of this call site, and we don't
914         // need to add metadata.
915         if (ICS.doesNotAccessMemory())
916           continue;
917 
918         IsFuncCall = true;
919         if (CalleeAAR) {
920           FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(ICS);
921           if (MRB == FMRB_OnlyAccessesArgumentPointees ||
922               MRB == FMRB_OnlyReadsArgumentPointees)
923             IsArgMemOnlyCall = true;
924         }
925 
926         for (Value *Arg : ICS.args()) {
927           // We need to check the underlying objects of all arguments, not just
928           // the pointer arguments, because we might be passing pointers as
929           // integers, etc.
930           // However, if we know that the call only accesses pointer arguments,
931           // then we only need to check the pointer arguments.
932           if (IsArgMemOnlyCall && !Arg->getType()->isPointerTy())
933             continue;
934 
935           PtrArgs.push_back(Arg);
936         }
937       }
938 
939       // If we found no pointers, then this instruction is not suitable for
940       // pairing with an instruction to receive aliasing metadata.
      // However, if this is a call, then we might just alias with none of the
      // noalias arguments.
943       if (PtrArgs.empty() && !IsFuncCall)
944         continue;
945 
      // It is possible that there is only one underlying object, but that it
      // is only reachable through several PHIs, and thus could be repeated in
      // the Objects list.
949       SmallPtrSet<const Value *, 4> ObjSet;
950       SmallVector<Metadata *, 4> Scopes, NoAliases;
951 
952       SmallSetVector<const Argument *, 4> NAPtrArgs;
953       for (const Value *V : PtrArgs) {
954         SmallVector<Value *, 4> Objects;
955         GetUnderlyingObjects(const_cast<Value*>(V),
956                              Objects, DL, /* LI = */ nullptr);
957 
958         for (Value *O : Objects)
959           ObjSet.insert(O);
960       }
961 
962       // Figure out if we're derived from anything that is not a noalias
963       // argument.
964       bool CanDeriveViaCapture = false, UsesAliasingPtr = false;
965       for (const Value *V : ObjSet) {
        // Check whether this value is a constant that cannot be derived from
        // any pointer value (we need to exclude constant expressions, for
        // example, which can be formed from arithmetic on global symbols).
969         bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
970                              isa<ConstantPointerNull>(V) ||
971                              isa<ConstantDataVector>(V) || isa<UndefValue>(V);
972         if (IsNonPtrConst)
973           continue;
974 
975         // If this is anything other than a noalias argument, then we cannot
976         // completely describe the aliasing properties using alias.scope
977         // metadata (and, thus, won't add any).
978         if (const Argument *A = dyn_cast<Argument>(V)) {
979           if (!A->hasNoAliasAttr())
980             UsesAliasingPtr = true;
981         } else {
982           UsesAliasingPtr = true;
983         }
984 
        // If this is not some identified function-local object (which cannot
        // directly alias a noalias argument), or some other argument (which,
        // by definition, also cannot alias a noalias argument), then we could
        // alias a noalias argument that has been captured.
989         if (!isa<Argument>(V) &&
990             !isIdentifiedFunctionLocal(const_cast<Value*>(V)))
991           CanDeriveViaCapture = true;
992       }
993 
994       // A function call can always get captured noalias pointers (via other
995       // parameters, globals, etc.).
996       if (IsFuncCall && !IsArgMemOnlyCall)
997         CanDeriveViaCapture = true;
998 
999       // First, we want to figure out all of the sets with which we definitely
      // don't alias. Iterate over all noalias sets, and add those for which:
1001       //   1. The noalias argument is not in the set of objects from which we
1002       //      definitely derive.
1003       //   2. The noalias argument has not yet been captured.
1004       // An arbitrary function that might load pointers could see captured
1005       // noalias arguments via other noalias arguments or globals, and so we
1006       // must always check for prior capture.
1007       for (const Argument *A : NoAliasArgs) {
1008         if (!ObjSet.count(A) && (!CanDeriveViaCapture ||
1009                                  // It might be tempting to skip the
1010                                  // PointerMayBeCapturedBefore check if
1011                                  // A->hasNoCaptureAttr() is true, but this is
1012                                  // incorrect because nocapture only guarantees
1013                                  // that no copies outlive the function, not
1014                                  // that the value cannot be locally captured.
1015                                  !PointerMayBeCapturedBefore(A,
1016                                    /* ReturnCaptures */ false,
1017                                    /* StoreCaptures */ false, I, &DT)))
1018           NoAliases.push_back(NewScopes[A]);
1019       }
1020 
1021       if (!NoAliases.empty())
1022         NI->setMetadata(LLVMContext::MD_noalias,
1023                         MDNode::concatenate(
1024                             NI->getMetadata(LLVMContext::MD_noalias),
1025                             MDNode::get(CalledFunc->getContext(), NoAliases)));
1026 
1027       // Next, we want to figure out all of the sets to which we might belong.
1028       // We might belong to a set if the noalias argument is in the set of
1029       // underlying objects. If there is some non-noalias argument in our list
1030       // of underlying objects, then we cannot add a scope because the fact
1031       // that some access does not alias with any set of our noalias arguments
1032       // cannot itself guarantee that it does not alias with this access
1033       // (because there is some pointer of unknown origin involved and the
1034       // other access might also depend on this pointer). We also cannot add
1035       // scopes to arbitrary functions unless we know they don't access any
1036       // non-parameter pointer-values.
1037       bool CanAddScopes = !UsesAliasingPtr;
1038       if (CanAddScopes && IsFuncCall)
1039         CanAddScopes = IsArgMemOnlyCall;
1040 
1041       if (CanAddScopes)
1042         for (const Argument *A : NoAliasArgs) {
1043           if (ObjSet.count(A))
1044             Scopes.push_back(NewScopes[A]);
1045         }
1046 
1047       if (!Scopes.empty())
1048         NI->setMetadata(
1049             LLVMContext::MD_alias_scope,
1050             MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
1051                                 MDNode::get(CalledFunc->getContext(), Scopes)));
1052     }
1053   }
1054 }
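
// Rough example of the result (function and argument names illustrative): for
//
//   define void @f(i8* noalias %p, i8* %q)
//
// inlined at some call site, a cloned store through %p is tagged with
// !alias.scope naming the new scope created for %p above, while a cloned load
// through %q (which is not derived from %p) is tagged with !noalias naming
// that same scope, letting alias analysis conclude that the two accesses do
// not alias within this particular inlined copy.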
1055 
1056 /// If the inlined function has non-byval align arguments, then
1057 /// add @llvm.assume-based alignment assumptions to preserve this information.
1058 static void AddAlignmentAssumptions(CallSite CS, InlineFunctionInfo &IFI) {
1059   if (!PreserveAlignmentAssumptions)
1060     return;
1061   auto &DL = CS.getCaller()->getParent()->getDataLayout();
1062 
1063   // To avoid inserting redundant assumptions, we should check for assumptions
1064   // already in the caller. To do this, we might need a DT of the caller.
1065   DominatorTree DT;
1066   bool DTCalculated = false;
1067 
1068   Function *CalledFunc = CS.getCalledFunction();
1069   for (Function::arg_iterator I = CalledFunc->arg_begin(),
1070                               E = CalledFunc->arg_end();
1071        I != E; ++I) {
1072     unsigned Align = I->getType()->isPointerTy() ? I->getParamAlignment() : 0;
1073     if (Align && !I->hasByValOrInAllocaAttr() && !I->hasNUses(0)) {
1074       if (!DTCalculated) {
1075         DT.recalculate(const_cast<Function&>(*CS.getInstruction()->getParent()
1076                                                ->getParent()));
1077         DTCalculated = true;
1078       }
1079 
1080       // If we can already prove the asserted alignment in the context of the
1081       // caller, then don't bother inserting the assumption.
1082       Value *Arg = CS.getArgument(I->getArgNo());
1083       if (getKnownAlignment(Arg, DL, CS.getInstruction(),
1084                             &IFI.ACT->getAssumptionCache(*CS.getCaller()),
1085                             &DT) >= Align)
1086         continue;
1087 
1088       IRBuilder<>(CS.getInstruction())
1089           .CreateAlignmentAssumption(DL, Arg, Align);
1090     }
1091   }
1092 }
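
// The assumption emitted above is, roughly, the usual alignment-check pattern
// produced by IRBuilder::CreateAlignmentAssumption (value names illustrative):
//
//   %ptrint = ptrtoint i8* %arg to i64
//   %maskedptr = and i64 %ptrint, <Align - 1>
//   %maskcond = icmp eq i64 %maskedptr, 0
//   call void @llvm.assume(i1 %maskcond)
//
// which later passes can consult through the assumption cache to recover the
// argument's alignment in the caller.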
1093 
1094 /// Once we have cloned code over from a callee into the caller,
1095 /// update the specified callgraph to reflect the changes we made.
1096 /// Note that it's possible that not all code was copied over, so only
1097 /// some edges of the callgraph may remain.
1098 static void UpdateCallGraphAfterInlining(CallSite CS,
1099                                          Function::iterator FirstNewBlock,
1100                                          ValueToValueMapTy &VMap,
1101                                          InlineFunctionInfo &IFI) {
1102   CallGraph &CG = *IFI.CG;
1103   const Function *Caller = CS.getInstruction()->getParent()->getParent();
1104   const Function *Callee = CS.getCalledFunction();
1105   CallGraphNode *CalleeNode = CG[Callee];
1106   CallGraphNode *CallerNode = CG[Caller];
1107 
1108   // Since we inlined some uninlined call sites in the callee into the caller,
1109   // add edges from the caller to all of the callees of the callee.
1110   CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();
1111 
1112   // Consider the case where CalleeNode == CallerNode.
1113   CallGraphNode::CalledFunctionsVector CallCache;
1114   if (CalleeNode == CallerNode) {
1115     CallCache.assign(I, E);
1116     I = CallCache.begin();
1117     E = CallCache.end();
1118   }
1119 
1120   for (; I != E; ++I) {
1121     const Value *OrigCall = I->first;
1122 
1123     ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
1124     // Only copy the edge if the call was inlined!
1125     if (VMI == VMap.end() || VMI->second == nullptr)
1126       continue;
1127 
1128     // If the call was inlined, but then constant folded, there is no edge to
1129     // add.  Check for this case.
1130     Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
1131     if (!NewCall)
1132       continue;
1133 
1134     // We do not treat intrinsic calls like real function calls because we
1135     // expect them to become inline code; do not add an edge for an intrinsic.
1136     CallSite CS = CallSite(NewCall);
1137     if (CS && CS.getCalledFunction() && CS.getCalledFunction()->isIntrinsic())
1138       continue;
1139 
1140     // Remember that this call site got inlined for the client of
1141     // InlineFunction.
1142     IFI.InlinedCalls.push_back(NewCall);
1143 
1144     // It's possible that inlining the callsite will cause it to go from an
1145     // indirect to a direct call by resolving a function pointer.  If this
1146     // happens, set the callee of the new call site to a more precise
1147     // destination.  This can also happen if the call graph node of the caller
1148     // was just unnecessarily imprecise.
1149     if (!I->second->getFunction())
1150       if (Function *F = CallSite(NewCall).getCalledFunction()) {
1151         // Indirect call site resolved to direct call.
1152         CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);
1153 
1154         continue;
1155       }
1156 
1157     CallerNode->addCalledFunction(CallSite(NewCall), I->second);
1158   }
1159 
1160   // Update the call graph by deleting the edge from Callee to Caller.  We must
1161   // do this after the loop above in case Caller and Callee are the same.
1162   CallerNode->removeCallEdgeFor(CS);
1163 }
1164 
1165 static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
1166                                     BasicBlock *InsertBlock,
1167                                     InlineFunctionInfo &IFI) {
1168   Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
1169   IRBuilder<> Builder(InsertBlock, InsertBlock->begin());
1170 
1171   Value *Size = Builder.getInt64(M->getDataLayout().getTypeStoreSize(AggTy));
1172 
1173   // Always generate a memcpy of alignment 1 here because we don't know
1174   // the alignment of the src pointer.  Other optimizations can infer
1175   // better alignment.
1176   Builder.CreateMemCpy(Dst, Src, Size, /*Align=*/1);
1177 }
1178 
1179 /// When inlining a call site that has a byval argument,
1180 /// we have to make the implicit memcpy explicit by adding it.
1181 static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
1182                                   const Function *CalledFunc,
1183                                   InlineFunctionInfo &IFI,
1184                                   unsigned ByValAlignment) {
1185   PointerType *ArgTy = cast<PointerType>(Arg->getType());
1186   Type *AggTy = ArgTy->getElementType();
1187 
1188   Function *Caller = TheCall->getParent()->getParent();
1189 
1190   // If the called function is readonly, then it could not mutate the caller's
1191   // copy of the byval'd memory.  In this case, it is safe to elide the copy and
1192   // temporary.
1193   if (CalledFunc->onlyReadsMemory()) {
1194     // If the byval argument has a specified alignment that is greater than the
1195     // passed in pointer, then we either have to round up the input pointer or
1196     // give up on this transformation.
1197     if (ByValAlignment <= 1)  // 0 = unspecified, 1 = no particular alignment.
1198       return Arg;
1199 
1200     const DataLayout &DL = Caller->getParent()->getDataLayout();
1201 
1202     // If the pointer is already known to be sufficiently aligned, or if we can
1203     // round it up to a larger alignment, then we don't need a temporary.
1204     if (getOrEnforceKnownAlignment(Arg, ByValAlignment, DL, TheCall,
1205                                    &IFI.ACT->getAssumptionCache(*Caller)) >=
1206         ByValAlignment)
1207       return Arg;
1208 
1209     // Otherwise, we have to make a memcpy to get a safe alignment.  This is bad
1210     // for code quality, but rarely happens and is required for correctness.
1211   }
1212 
1213   // Create the alloca, using the module's preferred alignment for the type.
1214   unsigned Align =
1215       Caller->getParent()->getDataLayout().getPrefTypeAlignment(AggTy);
1216 
1217   // If the byval had an alignment specified, we *must* use at least that
1218   // alignment, as it is required by the byval argument (and uses of the
1219   // pointer inside the callee).
1220   Align = std::max(Align, ByValAlignment);
1221 
1222   Value *NewAlloca = new AllocaInst(AggTy, nullptr, Align, Arg->getName(),
1223                                     &*Caller->begin()->begin());
1224   IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));
1225 
1226   // Uses of the argument in the function should use our new alloca
1227   // instead.
1228   return NewAlloca;
1229 }
1230 
1231 // Check whether this Value is used by a lifetime intrinsic.
1232 static bool isUsedByLifetimeMarker(Value *V) {
1233   for (User *U : V->users()) {
1234     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
1235       switch (II->getIntrinsicID()) {
1236       default: break;
1237       case Intrinsic::lifetime_start:
1238       case Intrinsic::lifetime_end:
1239         return true;
1240       }
1241     }
1242   }
1243   return false;
1244 }
1245 
1246 // Check whether the given alloca already has
1247 // lifetime.start or lifetime.end intrinsics.
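     //
     // A typical pattern this scan recognizes (illustrative IR; the names and
     // 16-byte size are made up):
     //   %buf = alloca [16 x i8]
     //   %p = bitcast [16 x i8]* %buf to i8*
     //   call void @llvm.lifetime.start(i64 16, i8* %p)
     //   ...
     //   call void @llvm.lifetime.end(i64 16, i8* %p)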
1248 static bool hasLifetimeMarkers(AllocaInst *AI) {
1249   Type *Ty = AI->getType();
1250   Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
1251                                        Ty->getPointerAddressSpace());
1252   if (Ty == Int8PtrTy)
1253     return isUsedByLifetimeMarker(AI);
1254 
1255   // Do a scan to find all the casts to i8*.
1256   for (User *U : AI->users()) {
1257     if (U->getType() != Int8PtrTy) continue;
1258     if (U->stripPointerCasts() != AI) continue;
1259     if (isUsedByLifetimeMarker(U))
1260       return true;
1261   }
1262   return false;
1263 }
1264 
1265 /// Rebuild the entire inlined-at chain for this instruction so that the top of
1266 /// the chain now is inlined-at the new call site.
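     ///
     /// For example (illustrative): if an instruction's location is "L1
     /// inlined-at L2" and the new call site is L3, the rebuilt chain is
     /// "L1 inlined-at (L2 inlined-at L3)"; the intermediate nodes are cached
     /// in IANodes so that instructions sharing a chain reuse the same nodes.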
1267 static DebugLoc
1268 updateInlinedAtInfo(DebugLoc DL, DILocation *InlinedAtNode, LLVMContext &Ctx,
1269                     DenseMap<const DILocation *, DILocation *> &IANodes) {
1270   SmallVector<DILocation *, 3> InlinedAtLocations;
1271   DILocation *Last = InlinedAtNode;
1272   DILocation *CurInlinedAt = DL;
1273 
1274   // Gather all the inlined-at nodes
1275   while (DILocation *IA = CurInlinedAt->getInlinedAt()) {
1276     // Skip any nodes we've already built.
1277     if (DILocation *Found = IANodes[IA]) {
1278       Last = Found;
1279       break;
1280     }
1281 
1282     InlinedAtLocations.push_back(IA);
1283     CurInlinedAt = IA;
1284   }
1285 
1286   // Starting from the top, rebuild the nodes to point to the new inlined-at
1287   // location (then rebuilding the rest of the chain behind it) and update the
1288   // map of already-constructed inlined-at nodes.
1289   for (const DILocation *MD : make_range(InlinedAtLocations.rbegin(),
1290                                          InlinedAtLocations.rend())) {
1291     Last = IANodes[MD] = DILocation::getDistinct(
1292         Ctx, MD->getLine(), MD->getColumn(), MD->getScope(), Last);
1293   }
1294 
1295   // And finally create the normal location for this instruction, referring to
1296   // the new inlined-at chain.
1297   return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(), Last);
1298 }
1299 
1300 /// Update inlined instructions' line numbers to encode the location where
1301 /// these instructions are inlined.
1302 static void fixupLineNumbers(Function *Fn, Function::iterator FI,
1303                              Instruction *TheCall) {
1304   DebugLoc TheCallDL = TheCall->getDebugLoc();
1305   if (!TheCallDL)
1306     return;
1307 
1308   auto &Ctx = Fn->getContext();
1309   DILocation *InlinedAtNode = TheCallDL;
1310 
1311   // Create a unique call site, not to be confused with any other call from the
1312   // same location.
1313   InlinedAtNode = DILocation::getDistinct(
1314       Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
1315       InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());
1316 
1317   // Cache the inlined-at nodes as they're built so they are reused; without
1318   // this, every instruction's inlined-at chain would become distinct from
1319   // every other instruction's.
1320   DenseMap<const DILocation *, DILocation *> IANodes;
1321 
1322   for (; FI != Fn->end(); ++FI) {
1323     for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
1324          BI != BE; ++BI) {
1325       DebugLoc DL = BI->getDebugLoc();
1326       if (!DL) {
1327         // If the inlined instruction has no line number, make it look as if
1328         // it originates from the call location. This is important for
1329         // __attribute__((__always_inline__, __nodebug__)) functions, which
1330         // must use the caller's location for all instructions in their body.
1331 
1332         // Don't update static allocas, as they may get moved later.
1333         if (auto *AI = dyn_cast<AllocaInst>(BI))
1334           if (isa<Constant>(AI->getArraySize()))
1335             continue;
1336 
1337         BI->setDebugLoc(TheCallDL);
1338       } else {
1339         BI->setDebugLoc(updateInlinedAtInfo(DL, InlinedAtNode, Ctx, IANodes));
1340       }
1341     }
1342   }
1343 }
1344 
1345 /// This function inlines the called function into the basic block of the
1346 /// caller. This returns false if it is not possible to inline this call.
1347 /// The program is still in a well defined state if this occurs though.
1348 ///
1349 /// Note that this only does one level of inlining.  For example, if the
1350 /// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
1351 /// exists in the instruction stream.  Similarly this will inline a recursive
1352 /// function by one level.
1353 bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
1354                           AAResults *CalleeAAR, bool InsertLifetime) {
1355   Instruction *TheCall = CS.getInstruction();
1356   assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
1357          "Instruction not in function!");
1358 
1359   // If IFI has any state in it, zap it before we fill it in.
1360   IFI.reset();
1361 
1362   const Function *CalledFunc = CS.getCalledFunction();
1363   if (!CalledFunc ||              // Can't inline external function or indirect
1364       CalledFunc->isDeclaration() || // call, or call to a vararg function!
1365       CalledFunc->getFunctionType()->isVarArg()) return false;
1366 
1367   // The inliner does not know how to inline through calls with operand bundles
1368   // in general ...
1369   if (CS.hasOperandBundles()) {
1370     for (int i = 0, e = CS.getNumOperandBundles(); i != e; ++i) {
1371       uint32_t Tag = CS.getOperandBundleAt(i).getTagID();
1372       // ... but it knows how to inline through "deopt" operand bundles ...
1373       if (Tag == LLVMContext::OB_deopt)
1374         continue;
1375       // ... and "funclet" operand bundles.
1376       if (Tag == LLVMContext::OB_funclet)
1377         continue;
1378 
1379       return false;
1380     }
1381   }
1382 
1383   // If the call to the callee cannot throw, set the 'nounwind' flag on any
1384   // calls that we inline.
1385   bool MarkNoUnwind = CS.doesNotThrow();
1386 
1387   BasicBlock *OrigBB = TheCall->getParent();
1388   Function *Caller = OrigBB->getParent();
1389 
1390   // GC poses two hazards to inlining, which only occur when the callee has GC:
1391   //  1. If the caller has no GC, then the callee's GC must be propagated to the
1392   //     caller.
1393   //  2. If the caller has a differing GC, it is invalid to inline.
1394   if (CalledFunc->hasGC()) {
1395     if (!Caller->hasGC())
1396       Caller->setGC(CalledFunc->getGC());
1397     else if (CalledFunc->getGC() != Caller->getGC())
1398       return false;
1399   }
1400 
1401   // Get the personality function from the callee if it contains a landing pad.
1402   Constant *CalledPersonality =
1403       CalledFunc->hasPersonalityFn()
1404           ? CalledFunc->getPersonalityFn()->stripPointerCasts()
1405           : nullptr;
1406 
1407   // Find the personality function used by the landing pads of the caller. If it
1408   // exists, then check to see that it matches the personality function used in
1409   // the callee.
1410   Constant *CallerPersonality =
1411       Caller->hasPersonalityFn()
1412           ? Caller->getPersonalityFn()->stripPointerCasts()
1413           : nullptr;
1414   if (CalledPersonality) {
1415     if (!CallerPersonality)
1416       Caller->setPersonalityFn(CalledPersonality);
1417     // If the personality functions match, then we can perform the
1418     // inlining. Otherwise, we can't inline.
1419     // TODO: This isn't 100% true. Some personality functions are proper
1420     //       supersets of others and can be used in place of the other.
1421     else if (CalledPersonality != CallerPersonality)
1422       return false;
1423   }
1424 
1425   // We need to figure out which funclet the callsite was in so that we may
1426   // properly nest the callee.
1427   Instruction *CallSiteEHPad = nullptr;
1428   if (CallerPersonality) {
1429     EHPersonality Personality = classifyEHPersonality(CallerPersonality);
1430     if (isFuncletEHPersonality(Personality)) {
1431       Optional<OperandBundleUse> ParentFunclet =
1432           CS.getOperandBundle(LLVMContext::OB_funclet);
1433       if (ParentFunclet)
1434         CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());
1435 
1436       // OK, the inlining site is legal.  What about the target function?
1437 
1438       if (CallSiteEHPad) {
1439         if (Personality == EHPersonality::MSVC_CXX) {
1440           // The MSVC personality cannot tolerate catches getting inlined into
1441           // cleanup funclets.
1442           if (isa<CleanupPadInst>(CallSiteEHPad)) {
1443             // Ok, the call site is within a cleanuppad.  Let's check the callee
1444             // for catchpads.
1445             for (const BasicBlock &CalledBB : *CalledFunc) {
1446               if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))
1447                 return false;
1448             }
1449           }
1450         } else if (isAsynchronousEHPersonality(Personality)) {
1451           // SEH is even less tolerant; there may not be any sort of
1452           // exceptional funclet in the callee.
1453           for (const BasicBlock &CalledBB : *CalledFunc) {
1454             if (CalledBB.isEHPad())
1455               return false;
1456           }
1457         }
1458       }
1459     }
1460   }
1461 
1462   // Determine if we are dealing with a call in an EHPad which does not unwind
1463   // to caller.
1464   bool EHPadForCallUnwindsLocally = false;
1465   if (CallSiteEHPad && CS.isCall()) {
1466     UnwindDestMemoTy FuncletUnwindMap;
1467     Value *CallSiteUnwindDestToken =
1468         getUnwindDestToken(CallSiteEHPad, FuncletUnwindMap);
1469 
1470     EHPadForCallUnwindsLocally =
1471         CallSiteUnwindDestToken &&
1472         !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
1473   }
1474 
1475   // Get an iterator to the last basic block in the function, which will have
1476   // the new function inlined after it.
1477   Function::iterator LastBlock = --Caller->end();
1478 
1479   // Make sure to capture all of the return instructions from the cloned
1480   // function.
1481   SmallVector<ReturnInst*, 8> Returns;
1482   ClonedCodeInfo InlinedFunctionInfo;
1483   Function::iterator FirstNewBlock;
1484 
1485   { // Scope to destroy VMap after cloning.
1486     ValueToValueMapTy VMap;
1487     // Keep a list of (dst, src) pairs to emit byval initializations.
1488     SmallVector<std::pair<Value*, Value*>, 4> ByValInit;
1489 
1490     auto &DL = Caller->getParent()->getDataLayout();
1491 
1492     assert(CalledFunc->arg_size() == CS.arg_size() &&
1493            "No varargs calls can be inlined!");
1494 
1495     // Calculate the vector of arguments to pass into the function cloner, which
1496     // matches up the formal to the actual argument values.
1497     CallSite::arg_iterator AI = CS.arg_begin();
1498     unsigned ArgNo = 0;
1499     for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
1500          E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
1501       Value *ActualArg = *AI;
1502 
1503       // When byval arguments are actually inlined, we need to make the copy
1504       // implied by them explicit.  However, we don't do this if the callee is
1505       // readonly or readnone, because the copy would be unneeded: the callee
1506       // doesn't modify the struct.
1507       if (CS.isByValArgument(ArgNo)) {
1508         ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
1509                                         CalledFunc->getParamAlignment(ArgNo+1));
1510         if (ActualArg != *AI)
1511           ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI));
1512       }
1513 
1514       VMap[&*I] = ActualArg;
1515     }
1516 
1517     // Add alignment assumptions if necessary. We do this before the inlined
1518     // instructions are actually cloned into the caller so that we can easily
1519     // check what will be known at the start of the inlined code.
1520     AddAlignmentAssumptions(CS, IFI);
1521 
1522     // We want the inliner to prune the code as it copies.  We would LOVE to
1523     // have no dead or constant instructions left over after inlining occurs
1524     // (which can happen, e.g., because an argument was constant), but we'll
1525     // be happy with whatever the cloner can do.
1526     CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
1527                               /*ModuleLevelChanges=*/false, Returns, ".i",
1528                               &InlinedFunctionInfo, TheCall);
1529 
1530     // Remember the first block that is newly cloned over.
1531     FirstNewBlock = LastBlock; ++FirstNewBlock;
1532 
1533     // Inject byval arguments initialization.
1534     for (std::pair<Value*, Value*> &Init : ByValInit)
1535       HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(),
1536                               &*FirstNewBlock, IFI);
1537 
1538     Optional<OperandBundleUse> ParentDeopt =
1539         CS.getOperandBundle(LLVMContext::OB_deopt);
1540     if (ParentDeopt) {
1541       SmallVector<OperandBundleDef, 2> OpDefs;
1542 
1543       for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) {
1544         Instruction *I = dyn_cast_or_null<Instruction>(VH);
1545         if (!I) continue;  // instruction was DCE'd or RAUW'ed to undef
1546 
1547         OpDefs.clear();
1548 
1549         CallSite ICS(I);
1550         OpDefs.reserve(ICS.getNumOperandBundles());
1551 
1552         for (unsigned i = 0, e = ICS.getNumOperandBundles(); i < e; ++i) {
1553           auto ChildOB = ICS.getOperandBundleAt(i);
1554           if (ChildOB.getTagID() != LLVMContext::OB_deopt) {
1555             // If the inlined call has other operand bundles, let them be
1556             OpDefs.emplace_back(ChildOB);
1557             continue;
1558           }
1559 
1560           // It may be useful to separate this logic (of handling operand
1561           // bundles) out to a separate "policy" component if this gets crowded.
1562           // Prepend the parent's deoptimization continuation to the newly
1563           // inlined call's deoptimization continuation.
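               // For example (illustrative operands): if the parent call site
               // carries "deopt"(i32 1, i32 2) and the inlined call carries
               // "deopt"(i32 3), the rewritten call carries
               // "deopt"(i32 1, i32 2, i32 3).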
1564           std::vector<Value *> MergedDeoptArgs;
1565           MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
1566                                   ChildOB.Inputs.size());
1567 
1568           MergedDeoptArgs.insert(MergedDeoptArgs.end(),
1569                                  ParentDeopt->Inputs.begin(),
1570                                  ParentDeopt->Inputs.end());
1571           MergedDeoptArgs.insert(MergedDeoptArgs.end(), ChildOB.Inputs.begin(),
1572                                  ChildOB.Inputs.end());
1573 
1574           OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs));
1575         }
1576 
1577         Instruction *NewI = nullptr;
1578         if (isa<CallInst>(I))
1579           NewI = CallInst::Create(cast<CallInst>(I), OpDefs, I);
1580         else
1581           NewI = InvokeInst::Create(cast<InvokeInst>(I), OpDefs, I);
1582 
1583         // Note: the RAUW does the appropriate fixup in VMap, so we need to do
1584         // this even if the call returns void.
1585         I->replaceAllUsesWith(NewI);
1586 
1587         VH = nullptr;
1588         I->eraseFromParent();
1589       }
1590     }
1591 
1592     // Update the callgraph if requested.
1593     if (IFI.CG)
1594       UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);
1595 
1596     // Update inlined instructions' line number information.
1597     fixupLineNumbers(Caller, FirstNewBlock, TheCall);
1598 
1599     // Clone existing noalias metadata if necessary.
1600     CloneAliasScopeMetadata(CS, VMap);
1601 
1602     // Add noalias metadata if necessary.
1603     AddAliasScopeMetadata(CS, VMap, DL, CalleeAAR);
1604 
1605     // Propagate llvm.mem.parallel_loop_access if necessary.
1606     PropagateParallelLoopAccessMetadata(CS, VMap);
1607 
1608     // FIXME: We could register any cloned assumptions instead of clearing the
1609     // whole function's cache.
1610     if (IFI.ACT)
1611       IFI.ACT->getAssumptionCache(*Caller).clear();
1612   }
1613 
1614   // If there are any alloca instructions in the block that used to be the entry
1615   // block for the callee, move them to the entry block of the caller.  First
1616   // calculate which instruction they should be inserted before.  We insert the
1617   // instructions at the end of the current alloca list.
1618   {
1619     BasicBlock::iterator InsertPoint = Caller->begin()->begin();
1620     for (BasicBlock::iterator I = FirstNewBlock->begin(),
1621          E = FirstNewBlock->end(); I != E; ) {
1622       AllocaInst *AI = dyn_cast<AllocaInst>(I++);
1623       if (!AI) continue;
1624 
1625       // If the alloca is now dead, remove it.  This often occurs due to code
1626       // specialization.
1627       if (AI->use_empty()) {
1628         AI->eraseFromParent();
1629         continue;
1630       }
1631 
1632       if (!isa<Constant>(AI->getArraySize()))
1633         continue;
1634 
1635       // Keep track of the static allocas that we inline into the caller.
1636       IFI.StaticAllocas.push_back(AI);
1637 
1638       // Scan for the block of allocas that we can move over, and move them
1639       // all at once.
1640       while (isa<AllocaInst>(I) &&
1641              isa<Constant>(cast<AllocaInst>(I)->getArraySize())) {
1642         IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
1643         ++I;
1644       }
1645 
1646       // Transfer all of the allocas over in a block.  Using splice means
1647       // that the instructions aren't removed from the symbol table, then
1648       // reinserted.
1649       Caller->getEntryBlock().getInstList().splice(
1650           InsertPoint, FirstNewBlock->getInstList(), AI->getIterator(), I);
1651     }
1652     // Move any dbg.declares describing the allocas into the entry basic block.
1653     DIBuilder DIB(*Caller->getParent());
1654     for (auto &AI : IFI.StaticAllocas)
1655       replaceDbgDeclareForAlloca(AI, AI, DIB, /*Deref=*/false);
1656   }
1657 
1658   bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false;
1659   if (InlinedFunctionInfo.ContainsCalls) {
1660     CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
1661     if (CallInst *CI = dyn_cast<CallInst>(TheCall))
1662       CallSiteTailKind = CI->getTailCallKind();
1663 
1664     for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
1665          ++BB) {
1666       for (Instruction &I : *BB) {
1667         CallInst *CI = dyn_cast<CallInst>(&I);
1668         if (!CI)
1669           continue;
1670 
1671         if (Function *F = CI->getCalledFunction())
1672           InlinedDeoptimizeCalls |=
1673               F->getIntrinsicID() == Intrinsic::experimental_deoptimize;
1674 
1675         // We need to reduce the strength of any inlined tail calls.  For
1676         // musttail, we have to avoid introducing potential unbounded stack
1677         // growth.  For example, if functions 'f' and 'g' are mutually recursive
1678         // with musttail, we can inline 'g' into 'f' so long as we preserve
1679         // musttail on the cloned call to 'f'.  If either the inlined call site
1680         // or the cloned call site is *not* musttail, the program already has
1681         // one frame of stack growth, so it's safe to remove musttail.  Here is
1682         // a table of example transformations:
1683         //
1684         //    f -> musttail g -> musttail f  ==>  f -> musttail f
1685         //    f -> musttail g ->     tail f  ==>  f ->     tail f
1686         //    f ->          g -> musttail f  ==>  f ->          f
1687         //    f ->          g ->     tail f  ==>  f ->          f
1688         CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
1689         ChildTCK = std::min(CallSiteTailKind, ChildTCK);
1690         CI->setTailCallKind(ChildTCK);
1691         InlinedMustTailCalls |= CI->isMustTailCall();
1692 
1693         // Calls inlined through a 'nounwind' call site should be marked
1694         // 'nounwind'.
1695         if (MarkNoUnwind)
1696           CI->setDoesNotThrow();
1697       }
1698     }
1699   }
1700 
1701   // Leave lifetime markers for the static allocas, scoping them to the
1702   // function we just inlined.
1703   if (InsertLifetime && !IFI.StaticAllocas.empty()) {
1704     IRBuilder<> builder(&FirstNewBlock->front());
1705     for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
1706       AllocaInst *AI = IFI.StaticAllocas[ai];
1707 
1708       // If the alloca is already scoped to something smaller than the whole
1709       // function then there's no need to add redundant, less accurate markers.
1710       if (hasLifetimeMarkers(AI))
1711         continue;
1712 
1713       // Try to determine the size of the allocation.
1714       ConstantInt *AllocaSize = nullptr;
1715       if (ConstantInt *AIArraySize =
1716           dyn_cast<ConstantInt>(AI->getArraySize())) {
1717         auto &DL = Caller->getParent()->getDataLayout();
1718         Type *AllocaType = AI->getAllocatedType();
1719         uint64_t AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
1720         uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
1721 
1722         // Don't add markers for zero-sized allocas.
1723         if (AllocaArraySize == 0)
1724           continue;
1725 
1726         // Check that array size doesn't saturate uint64_t and doesn't
1727         // overflow when it's multiplied by type size.
1728         if (AllocaArraySize != ~0ULL &&
1729             UINT64_MAX / AllocaArraySize >= AllocaTypeSize) {
1730           AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
1731                                         AllocaArraySize * AllocaTypeSize);
1732         }
1733       }
1734 
1735       builder.CreateLifetimeStart(AI, AllocaSize);
1736       for (ReturnInst *RI : Returns) {
1737         // Don't insert llvm.lifetime.end calls between a musttail or deoptimize
1738         // call and a return.  The return kills all local allocas.
1739         if (InlinedMustTailCalls &&
1740             RI->getParent()->getTerminatingMustTailCall())
1741           continue;
1742         if (InlinedDeoptimizeCalls &&
1743             RI->getParent()->getTerminatingDeoptimizeCall())
1744           continue;
1745         IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
1746       }
1747     }
1748   }
1749 
1750   // If the inlined code contained dynamic alloca instructions, wrap the inlined
1751   // code with llvm.stacksave/llvm.stackrestore intrinsics.
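       //
       // Conceptually (illustrative IR; the actual calls are created below),
       // the inlined region ends up bracketed as:
       //   %savedstack = call i8* @llvm.stacksave()
       //   ... inlined body containing dynamic allocas ...
       //   call void @llvm.stackrestore(i8* %savedstack)
       // so that stack memory allocated by the inlinee is released on return.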
1752   if (InlinedFunctionInfo.ContainsDynamicAllocas) {
1753     Module *M = Caller->getParent();
1754     // Get the two intrinsics we care about.
1755     Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
1756     Function *StackRestore=Intrinsic::getDeclaration(M,Intrinsic::stackrestore);
1757 
1758     // Insert the llvm.stacksave.
1759     CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
1760                              .CreateCall(StackSave, {}, "savedstack");
1761 
1762     // Insert a call to llvm.stackrestore before any return instructions in the
1763     // inlined function.
1764     for (ReturnInst *RI : Returns) {
1765       // Don't insert llvm.stackrestore calls between a musttail or deoptimize
1766       // call and a return.  The return will restore the stack pointer.
1767       if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
1768         continue;
1769       if (InlinedDeoptimizeCalls && RI->getParent()->getTerminatingDeoptimizeCall())
1770         continue;
1771       IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
1772     }
1773   }
1774 
1775   // If we are inlining for an invoke instruction, we must make sure to rewrite
1776   // any call instructions into invoke instructions.  This is sensitive to which
1777   // funclet pads were top-level in the inlinee, so must be done before
1778   // rewriting the "parent pad" links.
1779   if (auto *II = dyn_cast<InvokeInst>(TheCall)) {
1780     BasicBlock *UnwindDest = II->getUnwindDest();
1781     Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI();
1782     if (isa<LandingPadInst>(FirstNonPHI)) {
1783       HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo);
1784     } else {
1785       HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo);
1786     }
1787   }
1788 
1789   // Update the lexical scopes of the new funclets and callsites.
1790   // Anything that had 'none' as its parent is now nested inside the callsite's
1791   // EHPad.
1792 
1793   if (CallSiteEHPad) {
1794     for (Function::iterator BB = FirstNewBlock->getIterator(),
1795                             E = Caller->end();
1796          BB != E; ++BB) {
1797       // Add bundle operands to any top-level call sites.
1798       SmallVector<OperandBundleDef, 1> OpBundles;
1799       for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E;) {
1800         Instruction *I = &*BBI++;
1801         CallSite CS(I);
1802         if (!CS)
1803           continue;
1804 
1805         // Skip call sites which are nounwind intrinsics.
1806         auto *CalledFn =
1807             dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
1808         if (CalledFn && CalledFn->isIntrinsic() && CS.doesNotThrow())
1809           continue;
1810 
1811         // Skip call sites which already have a "funclet" bundle.
1812         if (CS.getOperandBundle(LLVMContext::OB_funclet))
1813           continue;
1814 
1815         CS.getOperandBundlesAsDefs(OpBundles);
1816         OpBundles.emplace_back("funclet", CallSiteEHPad);
1817 
1818         Instruction *NewInst;
1819         if (CS.isCall())
1820           NewInst = CallInst::Create(cast<CallInst>(I), OpBundles, I);
1821         else
1822           NewInst = InvokeInst::Create(cast<InvokeInst>(I), OpBundles, I);
1823         NewInst->takeName(I);
1824         I->replaceAllUsesWith(NewInst);
1825         I->eraseFromParent();
1826 
1827         OpBundles.clear();
1828       }
1829 
1830       // It is problematic if the inlinee has a cleanupret which unwinds to
1831       // the caller and we inline it into a call site which doesn't unwind but
1832       // sits inside an EH pad that does.  Such an edge must be dynamically
1833       // unreachable, so we replace the cleanupret with unreachable.
1834       if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
1835         if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
1836           changeToUnreachable(CleanupRet, /*UseLLVMTrap=*/false);
1837 
1838       Instruction *I = BB->getFirstNonPHI();
1839       if (!I->isEHPad())
1840         continue;
1841 
1842       if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
1843         if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
1844           CatchSwitch->setParentPad(CallSiteEHPad);
1845       } else {
1846         auto *FPI = cast<FuncletPadInst>(I);
1847         if (isa<ConstantTokenNone>(FPI->getParentPad()))
1848           FPI->setParentPad(CallSiteEHPad);
1849       }
1850     }
1851   }
1852 
1853   if (InlinedDeoptimizeCalls) {
1854     // We need to at least remove the deoptimizing returns from the Returns
1855     // set, so that the control flow from those returns does not get merged
1856     // into the caller (but terminates it instead).  If the caller's return
1857     // type does not match the callee's return type, we also need to change
1858     // the return type of the intrinsic.
1859     if (Caller->getReturnType() == TheCall->getType()) {
1860       auto NewEnd = remove_if(Returns, [](ReturnInst *RI) {
1861         return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr;
1862       });
1863       Returns.erase(NewEnd, Returns.end());
1864     } else {
1865       SmallVector<ReturnInst *, 8> NormalReturns;
1866       Function *NewDeoptIntrinsic = Intrinsic::getDeclaration(
1867           Caller->getParent(), Intrinsic::experimental_deoptimize,
1868           {Caller->getReturnType()});
1869 
1870       for (ReturnInst *RI : Returns) {
1871         CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall();
1872         if (!DeoptCall) {
1873           NormalReturns.push_back(RI);
1874           continue;
1875         }
1876 
1877         // The calling convention on the deoptimize call itself may be bogus,
1878         // since the code we're inlining may have undefined behavior (and may
1879         // never actually execute at runtime); but all
1880         // @llvm.experimental.deoptimize declarations have to have the same
1881         // calling convention in a well-formed module.
1882         auto CallingConv = DeoptCall->getCalledFunction()->getCallingConv();
1883         NewDeoptIntrinsic->setCallingConv(CallingConv);
1884         auto *CurBB = RI->getParent();
1885         RI->eraseFromParent();
1886 
1887         SmallVector<Value *, 4> CallArgs(DeoptCall->arg_begin(),
1888                                          DeoptCall->arg_end());
1889 
1890         SmallVector<OperandBundleDef, 1> OpBundles;
1891         DeoptCall->getOperandBundlesAsDefs(OpBundles);
1892         DeoptCall->eraseFromParent();
1893         assert(!OpBundles.empty() &&
1894                "Expected at least the deopt operand bundle");
1895 
1896         IRBuilder<> Builder(CurBB);
1897         CallInst *NewDeoptCall =
1898             Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
1899         NewDeoptCall->setCallingConv(CallingConv);
1900         if (NewDeoptCall->getType()->isVoidTy())
1901           Builder.CreateRetVoid();
1902         else
1903           Builder.CreateRet(NewDeoptCall);
1904       }
1905 
1906       // Leave behind the normal returns so we can merge control flow.
1907       std::swap(Returns, NormalReturns);
1908     }
1909   }
1910 
1911   // Handle any inlined musttail call sites.  In order for a new call site to be
1912   // musttail, the source of the clone and the inlined call site must have been
1913   // musttail.  Therefore it's safe to return without merging control into the
1914   // phi below.
1915   if (InlinedMustTailCalls) {
1916     // Check if we need to bitcast the result of any musttail calls.
1917     Type *NewRetTy = Caller->getReturnType();
1918     bool NeedBitCast = !TheCall->use_empty() && TheCall->getType() != NewRetTy;
1919 
1920     // Handle the returns preceded by musttail calls separately.
1921     SmallVector<ReturnInst *, 8> NormalReturns;
1922     for (ReturnInst *RI : Returns) {
1923       CallInst *ReturnedMustTail =
1924           RI->getParent()->getTerminatingMustTailCall();
1925       if (!ReturnedMustTail) {
1926         NormalReturns.push_back(RI);
1927         continue;
1928       }
1929       if (!NeedBitCast)
1930         continue;
1931 
1932       // Delete the old return and any preceding bitcast.
1933       BasicBlock *CurBB = RI->getParent();
1934       auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
1935       RI->eraseFromParent();
1936       if (OldCast)
1937         OldCast->eraseFromParent();
1938 
1939       // Insert a new bitcast and return with the right type.
1940       IRBuilder<> Builder(CurBB);
1941       Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
1942     }
1943 
1944     // Leave behind the normal returns so we can merge control flow.
1945     std::swap(Returns, NormalReturns);
1946   }
1947 
1948   // If we cloned in _exactly one_ basic block, and if that block ends in a
1949   // return instruction, we splice the body of the inlined callee directly into
1950   // the calling basic block.
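       //
       // For example (illustrative), inlining "%r = call i32 @id(i32 %x)"
       // where @id's body is just "ret i32 %x" ends here: uses of %r are
       // replaced by %x and the call is removed, with no new blocks or PHIs.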
1951   if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
1952     // Move all of the instructions right before the call.
1953     OrigBB->getInstList().splice(TheCall->getIterator(),
1954                                  FirstNewBlock->getInstList(),
1955                                  FirstNewBlock->begin(), FirstNewBlock->end());
1956     // Remove the cloned basic block.
1957     Caller->getBasicBlockList().pop_back();
1958 
1959     // If the call site was an invoke instruction, add a branch to the normal
1960     // destination.
1961     if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
1962       BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);
1963       NewBr->setDebugLoc(Returns[0]->getDebugLoc());
1964     }
1965 
1966     // If the return instruction returned a value, replace uses of the call with
1967     // uses of the returned value.
1968     if (!TheCall->use_empty()) {
1969       ReturnInst *R = Returns[0];
1970       if (TheCall == R->getReturnValue())
1971         TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
1972       else
1973         TheCall->replaceAllUsesWith(R->getReturnValue());
1974     }
1975     // Since we are now done with the Call/Invoke, we can delete it.
1976     TheCall->eraseFromParent();
1977 
1978     // Since we are now done with the return instruction, delete it also.
1979     Returns[0]->eraseFromParent();
1980 
1981     // We are now done with the inlining.
1982     return true;
1983   }
1984 
1985   // Otherwise, we have the normal case, of more than one block to inline or
1986   // multiple return sites.
1987 
1988   // We want to clone the entire callee function into the hole between the
1989   // "starter" and "ender" blocks.  How we accomplish this depends on whether
1990   // this is an invoke instruction or a call instruction.
1991   BasicBlock *AfterCallBB;
1992   BranchInst *CreatedBranchToNormalDest = nullptr;
1993   if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
1994 
1995     // Add an unconditional branch to make this look like the CallInst case...
1996     CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), TheCall);
1997 
1998     // Split the basic block.  This guarantees that no PHI nodes will have to
1999     // be updated due to new incoming edges, and makes the invoke case more
2000     // symmetric to the call case.
2001     AfterCallBB =
2002         OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(),
2003                                 CalledFunc->getName() + ".exit");
2004 
2005   } else {  // It's a call
2006     // If this is a call instruction, we need to split the basic block that
2007     // the call lives in.
2008     //
2009     AfterCallBB = OrigBB->splitBasicBlock(TheCall->getIterator(),
2010                                           CalledFunc->getName() + ".exit");
2011   }
2012 
2013   // Change the branch that used to go to AfterCallBB to branch to the first
2014   // basic block of the inlined function.
2015   //
2016   TerminatorInst *Br = OrigBB->getTerminator();
2017   assert(Br && Br->getOpcode() == Instruction::Br &&
2018          "splitBasicBlock broken!");
2019   Br->setOperand(0, &*FirstNewBlock);
2020 
2021   // Now that the function is correct, make it a little bit nicer.  In
2022   // particular, move the basic blocks inserted from the end of the function
2023   // into the space made by splitting the source basic block.
2024   Caller->getBasicBlockList().splice(AfterCallBB->getIterator(),
2025                                      Caller->getBasicBlockList(), FirstNewBlock,
2026                                      Caller->end());
2027 
2028   // Handle all of the return instructions that we just cloned in, and eliminate
2029   // any users of the original call/invoke instruction.
2030   Type *RTy = CalledFunc->getReturnType();
2031 
2032   PHINode *PHI = nullptr;
2033   if (Returns.size() > 1) {
2034     // The PHI node should go at the front of the new basic block to merge all
2035     // possible incoming values.
2036     if (!TheCall->use_empty()) {
2037       PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(),
2038                             &AfterCallBB->front());
2039       // Anything that used the result of the function call should now use the
2040       // PHI node as their operand.
2041       TheCall->replaceAllUsesWith(PHI);
2042     }
2043 
2044     // Loop over all of the return instructions adding entries to the PHI node
2045     // as appropriate.
2046     if (PHI) {
2047       for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2048         ReturnInst *RI = Returns[i];
2049         assert(RI->getReturnValue()->getType() == PHI->getType() &&
2050                "Ret value not consistent in function!");
2051         PHI->addIncoming(RI->getReturnValue(), RI->getParent());
2052       }
2053     }
2054 
2055     // Add a branch to the merge points and remove return instructions.
2056     DebugLoc Loc;
2057     for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2058       ReturnInst *RI = Returns[i];
2059       BranchInst* BI = BranchInst::Create(AfterCallBB, RI);
2060       Loc = RI->getDebugLoc();
2061       BI->setDebugLoc(Loc);
2062       RI->eraseFromParent();
2063     }
2064     // We need to set the debug location to *somewhere* inside the
2065     // inlined function. The line number may be nonsensical, but the
2066     // instruction will at least be associated with the right
2067     // function.
2068     if (CreatedBranchToNormalDest)
2069       CreatedBranchToNormalDest->setDebugLoc(Loc);
2070   } else if (!Returns.empty()) {
2071     // Otherwise, if there is exactly one return value, just replace anything
2072     // using the return value of the call with the computed value.
2073     if (!TheCall->use_empty()) {
2074       if (TheCall == Returns[0]->getReturnValue())
2075         TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
2076       else
2077         TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
2078     }
2079 
2080     // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
2081     BasicBlock *ReturnBB = Returns[0]->getParent();
2082     ReturnBB->replaceAllUsesWith(AfterCallBB);
2083 
2084     // Splice the code from the return block into the block that it will return
2085     // to, which contains the code that was after the call.
2086     AfterCallBB->getInstList().splice(AfterCallBB->begin(),
2087                                       ReturnBB->getInstList());
2088 
2089     if (CreatedBranchToNormalDest)
2090       CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());
2091 
2092     // Delete the return instruction and the now-empty ReturnBB.
2093     Returns[0]->eraseFromParent();
2094     ReturnBB->eraseFromParent();
2095   } else if (!TheCall->use_empty()) {
2096     // No returns, but something is using the return value of the call.  Just
2097     // nuke the result.
2098     TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
2099   }
2100 
2101   // Since we are now done with the Call/Invoke, we can delete it.
2102   TheCall->eraseFromParent();
2103 
2104   // If we inlined any musttail calls and the original return is now
2105   // unreachable, delete it.  It can only contain a bitcast and ret.
2106   if (InlinedMustTailCalls && pred_begin(AfterCallBB) == pred_end(AfterCallBB))
2107     AfterCallBB->eraseFromParent();
2108 
2109   // We should always be able to fold the entry block of the function into the
2110   // single predecessor of the block...
2111   assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
2112   BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);
2113 
2114   // Splice the code entry block into calling block, right before the
2115   // unconditional branch.
2116   CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes
2117   OrigBB->getInstList().splice(Br->getIterator(), CalleeEntry->getInstList());
2118 
2119   // Remove the unconditional branch.
2120   OrigBB->getInstList().erase(Br);
2121 
2122   // Now we can remove the CalleeEntry block, which is now empty.
2123   Caller->getBasicBlockList().erase(CalleeEntry);
2124 
2125   // If we inserted a phi node, check to see if it has a single value (e.g. all
2126   // the entries are the same or undef).  If so, remove the PHI so it doesn't
2127   // block other optimizations.
2128   if (PHI) {
2129     auto &DL = Caller->getParent()->getDataLayout();
2130     if (Value *V = SimplifyInstruction(PHI, DL, nullptr, nullptr,
2131                                        &IFI.ACT->getAssumptionCache(*Caller))) {
2132       PHI->replaceAllUsesWith(V);
2133       PHI->eraseFromParent();
2134     }
2135   }
2136 
2137   return true;
2138 }
2139