1 //===- InlineFunction.cpp - Code to perform function inlining -------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements inlining of a function into a call site, resolving
11 // parameters and the return value as appropriate.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Transforms/Utils/Cloning.h"
16 #include "llvm/ADT/SetVector.h"
17 #include "llvm/ADT/SmallPtrSet.h"
18 #include "llvm/ADT/SmallSet.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/ADT/StringExtras.h"
21 #include "llvm/Analysis/AliasAnalysis.h"
22 #include "llvm/Analysis/AssumptionCache.h"
23 #include "llvm/Analysis/CallGraph.h"
24 #include "llvm/Analysis/CaptureTracking.h"
25 #include "llvm/Analysis/EHPersonalities.h"
26 #include "llvm/Analysis/InstructionSimplify.h"
27 #include "llvm/Analysis/ValueTracking.h"
28 #include "llvm/IR/Attributes.h"
29 #include "llvm/IR/CallSite.h"
30 #include "llvm/IR/CFG.h"
31 #include "llvm/IR/Constants.h"
32 #include "llvm/IR/DataLayout.h"
33 #include "llvm/IR/DebugInfo.h"
34 #include "llvm/IR/DerivedTypes.h"
35 #include "llvm/IR/DIBuilder.h"
36 #include "llvm/IR/Dominators.h"
37 #include "llvm/IR/IRBuilder.h"
38 #include "llvm/IR/Instructions.h"
39 #include "llvm/IR/IntrinsicInst.h"
40 #include "llvm/IR/Intrinsics.h"
41 #include "llvm/IR/MDBuilder.h"
42 #include "llvm/IR/Module.h"
43 #include "llvm/Transforms/Utils/Local.h"
44 #include "llvm/Support/CommandLine.h"
45 #include <algorithm>
46 
47 using namespace llvm;
48 
49 static cl::opt<bool>
50 EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
51   cl::Hidden,
52   cl::desc("Convert noalias attributes to metadata during inlining."));
53 
54 static cl::opt<bool>
55 PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining",
56   cl::init(true), cl::Hidden,
57   cl::desc("Convert align attributes to assumptions during inlining."));
58 
59 bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI,
60                           AAResults *CalleeAAR, bool InsertLifetime) {
61   return InlineFunction(CallSite(CI), IFI, CalleeAAR, InsertLifetime);
62 }
63 bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI,
64                           AAResults *CalleeAAR, bool InsertLifetime) {
65   return InlineFunction(CallSite(II), IFI, CalleeAAR, InsertLifetime);
66 }
67 
68 namespace {
69   /// A class for recording information about inlining a landing pad.
70   class LandingPadInliningInfo {
71     BasicBlock *OuterResumeDest; ///< Destination of the invoke's unwind.
72     BasicBlock *InnerResumeDest; ///< Destination for the callee's resume.
73     LandingPadInst *CallerLPad;  ///< LandingPadInst associated with the invoke.
74     PHINode *InnerEHValuesPHI;   ///< PHI for EH values from landingpad insts.
75     SmallVector<Value*, 8> UnwindDestPHIValues;
76 
77   public:
78     LandingPadInliningInfo(InvokeInst *II)
79       : OuterResumeDest(II->getUnwindDest()), InnerResumeDest(nullptr),
80         CallerLPad(nullptr), InnerEHValuesPHI(nullptr) {
81       // If there are PHI nodes in the unwind destination block, we need to keep
82       // track of which values came into them from the invoke before removing
83       // the edge from this block.
84       llvm::BasicBlock *InvokeBB = II->getParent();
85       BasicBlock::iterator I = OuterResumeDest->begin();
86       for (; isa<PHINode>(I); ++I) {
87         // Save the value to use for this edge.
88         PHINode *PHI = cast<PHINode>(I);
89         UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
90       }
91 
92       CallerLPad = cast<LandingPadInst>(I);
93     }
94 
95     /// The outer unwind destination is the target of
96     /// unwind edges introduced for calls within the inlined function.
97     BasicBlock *getOuterResumeDest() const {
98       return OuterResumeDest;
99     }
100 
101     BasicBlock *getInnerResumeDest();
102 
103     LandingPadInst *getLandingPadInst() const { return CallerLPad; }
104 
105     /// Forward the 'resume' instruction to the caller's landing pad block.
106     /// When the landing pad block has only one predecessor, this is
107     /// a simple branch. When there is more than one predecessor, we need to
108     /// split the landing pad block after the landingpad instruction and jump
109     /// to there.
110     void forwardResume(ResumeInst *RI,
111                        SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);
112 
113     /// Add incoming-PHI values to the unwind destination block for the given
114     /// basic block, using the values for the original invoke's source block.
115     void addIncomingPHIValuesFor(BasicBlock *BB) const {
116       addIncomingPHIValuesForInto(BB, OuterResumeDest);
117     }
118 
119     void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
120       BasicBlock::iterator I = dest->begin();
121       for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
122         PHINode *phi = cast<PHINode>(I);
123         phi->addIncoming(UnwindDestPHIValues[i], src);
124       }
125     }
126   };
127 } // anonymous namespace
128 
129 /// Get or create a target for the branch from ResumeInsts.
130 BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
131   if (InnerResumeDest) return InnerResumeDest;
132 
133   // Split the landing pad.
134   BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator();
135   InnerResumeDest =
136     OuterResumeDest->splitBasicBlock(SplitPoint,
137                                      OuterResumeDest->getName() + ".body");
138 
139   // The number of incoming edges we expect to the inner landing pad.
140   const unsigned PHICapacity = 2;
141 
142   // Create corresponding new PHIs for all the PHIs in the outer landing pad.
143   Instruction *InsertPoint = &InnerResumeDest->front();
144   BasicBlock::iterator I = OuterResumeDest->begin();
145   for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
146     PHINode *OuterPHI = cast<PHINode>(I);
147     PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
148                                         OuterPHI->getName() + ".lpad-body",
149                                         InsertPoint);
150     OuterPHI->replaceAllUsesWith(InnerPHI);
151     InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
152   }
153 
154   // Create a PHI for the exception values.
155   InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
156                                      "eh.lpad-body", InsertPoint);
157   CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
158   InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);
159 
160   // All done.
161   return InnerResumeDest;
162 }
163 
164 /// Forward the 'resume' instruction to the caller's landing pad block.
165 /// When the landing pad block has only one predecessor, this is a simple
166 /// branch. When there is more than one predecessor, we need to split the
167 /// landing pad block after the landingpad instruction and jump to there.
168 void LandingPadInliningInfo::forwardResume(
169     ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) {
170   BasicBlock *Dest = getInnerResumeDest();
171   BasicBlock *Src = RI->getParent();
172 
173   BranchInst::Create(Dest, Src);
174 
175   // Update the PHIs in the destination. They were inserted in an order which
176   // makes this work.
177   addIncomingPHIValuesForInto(Src, Dest);
178 
179   InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
180   RI->eraseFromParent();
181 }
182 
183 /// Helper for getUnwindDestToken/getUnwindDestTokenHelper.
184 static Value *getParentPad(Value *EHPad) {
185   if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
186     return FPI->getParentPad();
187   return cast<CatchSwitchInst>(EHPad)->getParentPad();
188 }
189 
190 typedef DenseMap<Instruction *, Value *> UnwindDestMemoTy;
191 
192 /// Helper for getUnwindDestToken that does the descendant-ward part of
193 /// the search.
194 static Value *getUnwindDestTokenHelper(Instruction *EHPad,
195                                        UnwindDestMemoTy &MemoMap) {
196   SmallVector<Instruction *, 8> Worklist(1, EHPad);
197 
198   while (!Worklist.empty()) {
199     Instruction *CurrentPad = Worklist.pop_back_val();
200     // We only put pads on the worklist that aren't in the MemoMap.  When
201     // we find an unwind dest for a pad we may update its ancestors, but
202     // the queue only ever contains uncles/great-uncles/etc. of CurrentPad,
203     // so they should never get updated while queued on the worklist.
204     assert(!MemoMap.count(CurrentPad));
205     Value *UnwindDestToken = nullptr;
206     if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
207       if (CatchSwitch->hasUnwindDest()) {
208         UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI();
209       } else {
210         // Catchswitch doesn't have a 'nounwind' variant, and one might be
211         // annotated as "unwinds to caller" when really it's nounwind (see
212         // e.g. SimplifyCFGOpt::SimplifyUnreachable), so we can't infer the
213         // parent's unwind dest from this.  We can check its catchpads'
214         // descendants, since they might include a cleanuppad with an
215         // "unwinds to caller" cleanupret, which can be trusted.
216         for (auto HI = CatchSwitch->handler_begin(),
217                   HE = CatchSwitch->handler_end();
218              HI != HE && !UnwindDestToken; ++HI) {
219           BasicBlock *HandlerBlock = *HI;
220           auto *CatchPad = cast<CatchPadInst>(HandlerBlock->getFirstNonPHI());
221           for (User *Child : CatchPad->users()) {
222             // Intentionally ignore invokes here -- since the catchswitch is
223             // marked "unwind to caller", it would be a verifier error if it
224             // contained an invoke which unwinds out of it, so any invoke we'd
225             // encounter must unwind to some child of the catch.
226             if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
227               continue;
228 
229             Instruction *ChildPad = cast<Instruction>(Child);
230             auto Memo = MemoMap.find(ChildPad);
231             if (Memo == MemoMap.end()) {
232               // Haven't figured out this child pad yet; queue it.
233               Worklist.push_back(ChildPad);
234               continue;
235             }
236             // We've already checked this child, but might have found that
237             // it offers no proof either way.
238             Value *ChildUnwindDestToken = Memo->second;
239             if (!ChildUnwindDestToken)
240               continue;
241             // We already know the child's unwind dest, which can either
242             // be ConstantTokenNone to indicate unwind to caller, or can
243             // be another child of the catchpad.  Only the former indicates
244             // the unwind dest of the catchswitch.
245             if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
246               UnwindDestToken = ChildUnwindDestToken;
247               break;
248             }
249             assert(getParentPad(ChildUnwindDestToken) == CatchPad);
250           }
251         }
252       }
253     } else {
254       auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
255       for (User *U : CleanupPad->users()) {
256         if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
257           if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
258             UnwindDestToken = RetUnwindDest->getFirstNonPHI();
259           else
260             UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext());
261           break;
262         }
263         Value *ChildUnwindDestToken;
264         if (auto *Invoke = dyn_cast<InvokeInst>(U)) {
265           ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI();
266         } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
267           Instruction *ChildPad = cast<Instruction>(U);
268           auto Memo = MemoMap.find(ChildPad);
269           if (Memo == MemoMap.end()) {
270             // Haven't resolved this child yet; queue it and keep searching.
271             Worklist.push_back(ChildPad);
272             continue;
273           }
274           // We've checked this child, but still need to ignore it if it
275           // had no proof either way.
276           ChildUnwindDestToken = Memo->second;
277           if (!ChildUnwindDestToken)
278             continue;
279         } else {
280           // Not a relevant user of the cleanuppad
281           continue;
282         }
283         // In a well-formed program, the child/invoke must either unwind to
284         // an(other) child of the cleanup, or exit the cleanup.  In the
285         // first case, continue searching.
286         if (isa<Instruction>(ChildUnwindDestToken) &&
287             getParentPad(ChildUnwindDestToken) == CleanupPad)
288           continue;
289         UnwindDestToken = ChildUnwindDestToken;
290         break;
291       }
292     }
293     // If we haven't found an unwind dest for CurrentPad, we may have queued its
294     // children, so move on to the next in the worklist.
295     if (!UnwindDestToken)
296       continue;
297 
298     // Now we know that CurrentPad unwinds to UnwindDestToken.  It also exits
299     // any ancestors of CurrentPad up to but not including UnwindDestToken's
300     // parent pad.  Record this in the memo map, and check to see if the
301     // original EHPad being queried is one of the ones exited.
302     Value *UnwindParent;
303     if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
304       UnwindParent = getParentPad(UnwindPad);
305     else
306       UnwindParent = nullptr;
307     bool ExitedOriginalPad = false;
308     for (Instruction *ExitedPad = CurrentPad;
309          ExitedPad && ExitedPad != UnwindParent;
310          ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) {
311       // Skip over catchpads since they just follow their catchswitches.
312       if (isa<CatchPadInst>(ExitedPad))
313         continue;
314       MemoMap[ExitedPad] = UnwindDestToken;
315       ExitedOriginalPad |= (ExitedPad == EHPad);
316     }
317 
318     if (ExitedOriginalPad)
319       return UnwindDestToken;
320 
321     // Continue the search.
322   }
323 
324   // No definitive information is contained within this funclet.
325   return nullptr;
326 }
327 
328 /// Given an EH pad, find where it unwinds.  If it unwinds to an EH pad,
329 /// return that pad instruction.  If it unwinds to caller, return
330 /// ConstantTokenNone.  If it does not have a definitive unwind destination,
331 /// return nullptr.
332 ///
333 /// This routine gets invoked for calls in funclets in inlinees when inlining
334 /// an invoke.  Since many funclets don't have calls inside them, it's queried
335 /// on-demand rather than building a map of pads to unwind dests up front.
336 /// Determining a funclet's unwind dest may require recursively searching its
337 /// descendants, and also ancestors and cousins if the descendants don't provide
338 /// an answer.  Since most funclets will have their unwind dest immediately
339 /// available as the unwind dest of a catchswitch or cleanupret, this routine
340 /// searches top-down from the given pad and then up. To avoid worst-case
341 /// quadratic run-time given that approach, it uses a memo map to avoid
342 /// re-processing funclet trees.  The callers that rewrite the IR as they go
343 /// take advantage of this, for correctness, by checking/forcing rewritten
344 /// pads' entries to match the original callee view.
345 static Value *getUnwindDestToken(Instruction *EHPad,
346                                  UnwindDestMemoTy &MemoMap) {
347   // Catchpads unwind to the same place as their catchswitch;
348   // redirct any queries on catchpads so the code below can
349   // deal with just catchswitches and cleanuppads.
350   if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))
351     EHPad = CPI->getCatchSwitch();
352 
353   // Check if we've already determined the unwind dest for this pad.
354   auto Memo = MemoMap.find(EHPad);
355   if (Memo != MemoMap.end())
356     return Memo->second;
357 
358   // Search EHPad and, if necessary, its descendants.
359   Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
360   assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));
361   if (UnwindDestToken)
362     return UnwindDestToken;
363 
364   // No information is available for this EHPad from itself or any of its
365   // descendants.  An unwind all the way out to a pad in the caller would
366   // need also to agree with the unwind dest of the parent funclet, so
367   // search up the chain to try to find a funclet with information.  Put
368   // null entries in the memo map to avoid re-processing as we go up.
369   MemoMap[EHPad] = nullptr;
370 #ifndef NDEBUG
371   SmallPtrSet<Instruction *, 4> TempMemos;
372   TempMemos.insert(EHPad);
373 #endif
374   Instruction *LastUselessPad = EHPad;
375   Value *AncestorToken;
376   for (AncestorToken = getParentPad(EHPad);
377        auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
378        AncestorToken = getParentPad(AncestorToken)) {
379     // Skip over catchpads since they just follow their catchswitches.
380     if (isa<CatchPadInst>(AncestorPad))
381       continue;
382     // If the MemoMap had an entry mapping AncestorPad to nullptr, since we
383     // haven't yet called getUnwindDestTokenHelper for AncestorPad in this
384     // call to getUnwindDestToken, that would mean that AncestorPad had no
385     // information in itself, its descendants, or its ancestors.  If that
386     // were the case, then we should also have recorded the lack of information
387     // for the descendant that we're coming from.  So assert that we don't
388     // find a null entry in the MemoMap for AncestorPad.
389     assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);
390     auto AncestorMemo = MemoMap.find(AncestorPad);
391     if (AncestorMemo == MemoMap.end()) {
392       UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
393     } else {
394       UnwindDestToken = AncestorMemo->second;
395     }
396     if (UnwindDestToken)
397       break;
398     LastUselessPad = AncestorPad;
399     MemoMap[LastUselessPad] = nullptr;
400 #ifndef NDEBUG
401     TempMemos.insert(LastUselessPad);
402 #endif
403   }
404 
405   // We know that getUnwindDestTokenHelper was called on LastUselessPad and
406   // returned nullptr (and likewise for EHPad and any of its ancestors up to
407   // LastUselessPad), so LastUselessPad has no information from below.  Since
408   // getUnwindDestTokenHelper must investigate all downward paths through
409   // no-information nodes to prove that a node has no information like this,
410   // and since any time it finds information it records it in the MemoMap for
411   // not just the immediately-containing funclet but also any ancestors also
412   // exited, it must be the case that, walking downward from LastUselessPad,
413   // visiting just those nodes which have not been mapped to an unwind dest
414   // by getUnwindDestTokenHelper (the nullptr TempMemos notwithstanding, since
415   // they are just used to keep getUnwindDestTokenHelper from repeating work),
416   // any node visited must have been exhaustively searched with no information
417   // for it found.
418   SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
419   while (!Worklist.empty()) {
420     Instruction *UselessPad = Worklist.pop_back_val();
421     auto Memo = MemoMap.find(UselessPad);
422     if (Memo != MemoMap.end() && Memo->second) {
423       // Here the name 'UselessPad' is a bit of a misnomer, because we've found
424       // that it is a funclet that does have information about unwinding to
425       // a particular destination; its parent was a useless pad.
426       // Since its parent has no information, the unwind edge must not escape
427       // the parent, and must target a sibling of this pad.  This local unwind
428       // gives us no information about EHPad.  Leave it and the subtree rooted
429       // at it alone.
430       assert(getParentPad(Memo->second) == getParentPad(UselessPad));
431       continue;
432     }
433     // We know we don't have information for UselesPad.  If it has an entry in
434     // the MemoMap (mapping it to nullptr), it must be one of the TempMemos
435     // added on this invocation of getUnwindDestToken; if a previous invocation
436     // recorded nullptr, it would have had to prove that the ancestors of
437     // UselessPad, which include LastUselessPad, had no information, and that
438     // in turn would have required proving that the descendants of
439     // LastUselesPad, which include EHPad, have no information about
440     // LastUselessPad, which would imply that EHPad was mapped to nullptr in
441     // the MemoMap on that invocation, which isn't the case if we got here.
442     assert(!MemoMap.count(UselessPad) || TempMemos.count(UselessPad));
443     // Assert as we enumerate users that 'UselessPad' doesn't have any unwind
444     // information that we'd be contradicting by making a map entry for it
445     // (which is something that getUnwindDestTokenHelper must have proved for
446     // us to get here).  Just assert on is direct users here; the checks in
447     // this downward walk at its descendants will verify that they don't have
448     // any unwind edges that exit 'UselessPad' either (i.e. they either have no
449     // unwind edges or unwind to a sibling).
450     MemoMap[UselessPad] = UnwindDestToken;
451     if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
452       assert(CatchSwitch->getUnwindDest() == nullptr && "Expected useless pad");
453       for (BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
454         auto *CatchPad = HandlerBlock->getFirstNonPHI();
455         for (User *U : CatchPad->users()) {
456           assert(
457               (!isa<InvokeInst>(U) ||
458                (getParentPad(
459                     cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
460                 CatchPad)) &&
461               "Expected useless pad");
462           if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
463             Worklist.push_back(cast<Instruction>(U));
464         }
465       }
466     } else {
467       assert(isa<CleanupPadInst>(UselessPad));
468       for (User *U : UselessPad->users()) {
469         assert(!isa<CleanupReturnInst>(U) && "Expected useless pad");
470         assert((!isa<InvokeInst>(U) ||
471                 (getParentPad(
472                      cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
473                  UselessPad)) &&
474                "Expected useless pad");
475         if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
476           Worklist.push_back(cast<Instruction>(U));
477       }
478     }
479   }
480 
481   return UnwindDestToken;
482 }
483 
484 /// When we inline a basic block into an invoke,
485 /// we have to turn all of the calls that can throw into invokes.
486 /// This function analyze BB to see if there are any calls, and if so,
487 /// it rewrites them to be invokes that jump to InvokeDest and fills in the PHI
488 /// nodes in that block with the values specified in InvokeDestPHIValues.
489 static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
490     BasicBlock *BB, BasicBlock *UnwindEdge,
491     UnwindDestMemoTy *FuncletUnwindMap = nullptr) {
492   for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
493     Instruction *I = &*BBI++;
494 
495     // We only need to check for function calls: inlined invoke
496     // instructions require no special handling.
497     CallInst *CI = dyn_cast<CallInst>(I);
498 
499     if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
500       continue;
501 
502     // We do not need to (and in fact, cannot) convert possibly throwing calls
503     // to @llvm.experimental_deoptimize (resp. @llvm.experimental.guard) into
504     // invokes.  The caller's "segment" of the deoptimization continuation
505     // attached to the newly inlined @llvm.experimental_deoptimize
506     // (resp. @llvm.experimental.guard) call should contain the exception
507     // handling logic, if any.
508     if (auto *F = CI->getCalledFunction())
509       if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
510           F->getIntrinsicID() == Intrinsic::experimental_guard)
511         continue;
512 
513     if (auto FuncletBundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
514       // This call is nested inside a funclet.  If that funclet has an unwind
515       // destination within the inlinee, then unwinding out of this call would
516       // be UB.  Rewriting this call to an invoke which targets the inlined
517       // invoke's unwind dest would give the call's parent funclet multiple
518       // unwind destinations, which is something that subsequent EH table
519       // generation can't handle and that the veirifer rejects.  So when we
520       // see such a call, leave it as a call.
521       auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
522       Value *UnwindDestToken =
523           getUnwindDestToken(FuncletPad, *FuncletUnwindMap);
524       if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
525         continue;
526 #ifndef NDEBUG
527       Instruction *MemoKey;
528       if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
529         MemoKey = CatchPad->getCatchSwitch();
530       else
531         MemoKey = FuncletPad;
532       assert(FuncletUnwindMap->count(MemoKey) &&
533              (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
534              "must get memoized to avoid confusing later searches");
535 #endif // NDEBUG
536     }
537 
538     changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);
539     return BB;
540   }
541   return nullptr;
542 }
543 
544 /// If we inlined an invoke site, we need to convert calls
545 /// in the body of the inlined function into invokes.
546 ///
547 /// II is the invoke instruction being inlined.  FirstNewBlock is the first
548 /// block of the inlined code (the last block is the end of the function),
549 /// and InlineCodeInfo is information about the code that got inlined.
550 static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock,
551                                     ClonedCodeInfo &InlinedCodeInfo) {
552   BasicBlock *InvokeDest = II->getUnwindDest();
553 
554   Function *Caller = FirstNewBlock->getParent();
555 
556   // The inlined code is currently at the end of the function, scan from the
557   // start of the inlined code to its end, checking for stuff we need to
558   // rewrite.
559   LandingPadInliningInfo Invoke(II);
560 
561   // Get all of the inlined landing pad instructions.
562   SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
563   for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end();
564        I != E; ++I)
565     if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
566       InlinedLPads.insert(II->getLandingPadInst());
567 
568   // Append the clauses from the outer landing pad instruction into the inlined
569   // landing pad instructions.
570   LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
571   for (LandingPadInst *InlinedLPad : InlinedLPads) {
572     unsigned OuterNum = OuterLPad->getNumClauses();
573     InlinedLPad->reserveClauses(OuterNum);
574     for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
575       InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
576     if (OuterLPad->isCleanup())
577       InlinedLPad->setCleanup(true);
578   }
579 
580   for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
581        BB != E; ++BB) {
582     if (InlinedCodeInfo.ContainsCalls)
583       if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
584               &*BB, Invoke.getOuterResumeDest()))
585         // Update any PHI nodes in the exceptional block to indicate that there
586         // is now a new entry in them.
587         Invoke.addIncomingPHIValuesFor(NewBB);
588 
589     // Forward any resumes that are remaining here.
590     if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
591       Invoke.forwardResume(RI, InlinedLPads);
592   }
593 
594   // Now that everything is happy, we have one final detail.  The PHI nodes in
595   // the exception destination block still have entries due to the original
596   // invoke instruction. Eliminate these entries (which might even delete the
597   // PHI node) now.
598   InvokeDest->removePredecessor(II->getParent());
599 }
600 
601 /// If we inlined an invoke site, we need to convert calls
602 /// in the body of the inlined function into invokes.
603 ///
604 /// II is the invoke instruction being inlined.  FirstNewBlock is the first
605 /// block of the inlined code (the last block is the end of the function),
606 /// and InlineCodeInfo is information about the code that got inlined.
607 static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock,
608                                ClonedCodeInfo &InlinedCodeInfo) {
609   BasicBlock *UnwindDest = II->getUnwindDest();
610   Function *Caller = FirstNewBlock->getParent();
611 
612   assert(UnwindDest->getFirstNonPHI()->isEHPad() && "unexpected BasicBlock!");
613 
614   // If there are PHI nodes in the unwind destination block, we need to keep
615   // track of which values came into them from the invoke before removing the
616   // edge from this block.
617   SmallVector<Value *, 8> UnwindDestPHIValues;
618   llvm::BasicBlock *InvokeBB = II->getParent();
619   for (Instruction &I : *UnwindDest) {
620     // Save the value to use for this edge.
621     PHINode *PHI = dyn_cast<PHINode>(&I);
622     if (!PHI)
623       break;
624     UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
625   }
626 
627   // Add incoming-PHI values to the unwind destination block for the given basic
628   // block, using the values for the original invoke's source block.
629   auto UpdatePHINodes = [&](BasicBlock *Src) {
630     BasicBlock::iterator I = UnwindDest->begin();
631     for (Value *V : UnwindDestPHIValues) {
632       PHINode *PHI = cast<PHINode>(I);
633       PHI->addIncoming(V, Src);
634       ++I;
635     }
636   };
637 
638   // This connects all the instructions which 'unwind to caller' to the invoke
639   // destination.
640   UnwindDestMemoTy FuncletUnwindMap;
641   for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
642        BB != E; ++BB) {
643     if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
644       if (CRI->unwindsToCaller()) {
645         auto *CleanupPad = CRI->getCleanupPad();
646         CleanupReturnInst::Create(CleanupPad, UnwindDest, CRI);
647         CRI->eraseFromParent();
648         UpdatePHINodes(&*BB);
649         // Finding a cleanupret with an unwind destination would confuse
650         // subsequent calls to getUnwindDestToken, so map the cleanuppad
651         // to short-circuit any such calls and recognize this as an "unwind
652         // to caller" cleanup.
653         assert(!FuncletUnwindMap.count(CleanupPad) ||
654                isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
655         FuncletUnwindMap[CleanupPad] =
656             ConstantTokenNone::get(Caller->getContext());
657       }
658     }
659 
660     Instruction *I = BB->getFirstNonPHI();
661     if (!I->isEHPad())
662       continue;
663 
664     Instruction *Replacement = nullptr;
665     if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
666       if (CatchSwitch->unwindsToCaller()) {
667         Value *UnwindDestToken;
668         if (auto *ParentPad =
669                 dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
670           // This catchswitch is nested inside another funclet.  If that
671           // funclet has an unwind destination within the inlinee, then
672           // unwinding out of this catchswitch would be UB.  Rewriting this
673           // catchswitch to unwind to the inlined invoke's unwind dest would
674           // give the parent funclet multiple unwind destinations, which is
675           // something that subsequent EH table generation can't handle and
676           // that the veirifer rejects.  So when we see such a call, leave it
677           // as "unwind to caller".
678           UnwindDestToken = getUnwindDestToken(ParentPad, FuncletUnwindMap);
679           if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
680             continue;
681         } else {
682           // This catchswitch has no parent to inherit constraints from, and
683           // none of its descendants can have an unwind edge that exits it and
684           // targets another funclet in the inlinee.  It may or may not have a
685           // descendant that definitively has an unwind to caller.  In either
686           // case, we'll have to assume that any unwinds out of it may need to
687           // be routed to the caller, so treat it as though it has a definitive
688           // unwind to caller.
689           UnwindDestToken = ConstantTokenNone::get(Caller->getContext());
690         }
691         auto *NewCatchSwitch = CatchSwitchInst::Create(
692             CatchSwitch->getParentPad(), UnwindDest,
693             CatchSwitch->getNumHandlers(), CatchSwitch->getName(),
694             CatchSwitch);
695         for (BasicBlock *PadBB : CatchSwitch->handlers())
696           NewCatchSwitch->addHandler(PadBB);
697         // Propagate info for the old catchswitch over to the new one in
698         // the unwind map.  This also serves to short-circuit any subsequent
699         // checks for the unwind dest of this catchswitch, which would get
700         // confused if they found the outer handler in the callee.
701         FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
702         Replacement = NewCatchSwitch;
703       }
704     } else if (!isa<FuncletPadInst>(I)) {
705       llvm_unreachable("unexpected EHPad!");
706     }
707 
708     if (Replacement) {
709       Replacement->takeName(I);
710       I->replaceAllUsesWith(Replacement);
711       I->eraseFromParent();
712       UpdatePHINodes(&*BB);
713     }
714   }
715 
716   if (InlinedCodeInfo.ContainsCalls)
717     for (Function::iterator BB = FirstNewBlock->getIterator(),
718                             E = Caller->end();
719          BB != E; ++BB)
720       if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
721               &*BB, UnwindDest, &FuncletUnwindMap))
722         // Update any PHI nodes in the exceptional block to indicate that there
723         // is now a new entry in them.
724         UpdatePHINodes(NewBB);
725 
726   // Now that everything is happy, we have one final detail.  The PHI nodes in
727   // the exception destination block still have entries due to the original
728   // invoke instruction. Eliminate these entries (which might even delete the
729   // PHI node) now.
730   UnwindDest->removePredecessor(InvokeBB);
731 }
732 
733 /// When inlining a call site that has !llvm.mem.parallel_loop_access metadata,
734 /// that metadata should be propagated to all memory-accessing cloned
735 /// instructions.
736 static void PropagateParallelLoopAccessMetadata(CallSite CS,
737                                                 ValueToValueMapTy &VMap) {
738   MDNode *M =
739     CS.getInstruction()->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
740   if (!M)
741     return;
742 
743   for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
744        VMI != VMIE; ++VMI) {
745     if (!VMI->second)
746       continue;
747 
748     Instruction *NI = dyn_cast<Instruction>(VMI->second);
749     if (!NI)
750       continue;
751 
752     if (MDNode *PM = NI->getMetadata(LLVMContext::MD_mem_parallel_loop_access)) {
753         M = MDNode::concatenate(PM, M);
754       NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
755     } else if (NI->mayReadOrWriteMemory()) {
756       NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
757     }
758   }
759 }
760 
761 /// When inlining a function that contains noalias scope metadata,
762 /// this metadata needs to be cloned so that the inlined blocks
763 /// have different "unqiue scopes" at every call site. Were this not done, then
764 /// aliasing scopes from a function inlined into a caller multiple times could
765 /// not be differentiated (and this would lead to miscompiles because the
766 /// non-aliasing property communicated by the metadata could have
767 /// call-site-specific control dependencies).
768 static void CloneAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap) {
769   const Function *CalledFunc = CS.getCalledFunction();
770   SetVector<const MDNode *> MD;
771 
772   // Note: We could only clone the metadata if it is already used in the
773   // caller. I'm omitting that check here because it might confuse
774   // inter-procedural alias analysis passes. We can revisit this if it becomes
775   // an efficiency or overhead problem.
776 
777   for (const BasicBlock &I : *CalledFunc)
778     for (const Instruction &J : I) {
779       if (const MDNode *M = J.getMetadata(LLVMContext::MD_alias_scope))
780         MD.insert(M);
781       if (const MDNode *M = J.getMetadata(LLVMContext::MD_noalias))
782         MD.insert(M);
783     }
784 
785   if (MD.empty())
786     return;
787 
788   // Walk the existing metadata, adding the complete (perhaps cyclic) chain to
789   // the set.
790   SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
791   while (!Queue.empty()) {
792     const MDNode *M = cast<MDNode>(Queue.pop_back_val());
793     for (unsigned i = 0, ie = M->getNumOperands(); i != ie; ++i)
794       if (const MDNode *M1 = dyn_cast<MDNode>(M->getOperand(i)))
795         if (MD.insert(M1))
796           Queue.push_back(M1);
797   }
798 
799   // Now we have a complete set of all metadata in the chains used to specify
800   // the noalias scopes and the lists of those scopes.
801   SmallVector<TempMDTuple, 16> DummyNodes;
802   DenseMap<const MDNode *, TrackingMDNodeRef> MDMap;
803   for (const MDNode *I : MD) {
804     DummyNodes.push_back(MDTuple::getTemporary(CalledFunc->getContext(), None));
805     MDMap[I].reset(DummyNodes.back().get());
806   }
807 
808   // Create new metadata nodes to replace the dummy nodes, replacing old
809   // metadata references with either a dummy node or an already-created new
810   // node.
811   for (const MDNode *I : MD) {
812     SmallVector<Metadata *, 4> NewOps;
813     for (unsigned i = 0, ie = I->getNumOperands(); i != ie; ++i) {
814       const Metadata *V = I->getOperand(i);
815       if (const MDNode *M = dyn_cast<MDNode>(V))
816         NewOps.push_back(MDMap[M]);
817       else
818         NewOps.push_back(const_cast<Metadata *>(V));
819     }
820 
821     MDNode *NewM = MDNode::get(CalledFunc->getContext(), NewOps);
822     MDTuple *TempM = cast<MDTuple>(MDMap[I]);
823     assert(TempM->isTemporary() && "Expected temporary node");
824 
825     TempM->replaceAllUsesWith(NewM);
826   }
827 
828   // Now replace the metadata in the new inlined instructions with the
829   // repacements from the map.
830   for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
831        VMI != VMIE; ++VMI) {
832     if (!VMI->second)
833       continue;
834 
835     Instruction *NI = dyn_cast<Instruction>(VMI->second);
836     if (!NI)
837       continue;
838 
839     if (MDNode *M = NI->getMetadata(LLVMContext::MD_alias_scope)) {
840       MDNode *NewMD = MDMap[M];
841       // If the call site also had alias scope metadata (a list of scopes to
842       // which instructions inside it might belong), propagate those scopes to
843       // the inlined instructions.
844       if (MDNode *CSM =
845               CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
846         NewMD = MDNode::concatenate(NewMD, CSM);
847       NI->setMetadata(LLVMContext::MD_alias_scope, NewMD);
848     } else if (NI->mayReadOrWriteMemory()) {
849       if (MDNode *M =
850               CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
851         NI->setMetadata(LLVMContext::MD_alias_scope, M);
852     }
853 
854     if (MDNode *M = NI->getMetadata(LLVMContext::MD_noalias)) {
855       MDNode *NewMD = MDMap[M];
856       // If the call site also had noalias metadata (a list of scopes with
857       // which instructions inside it don't alias), propagate those scopes to
858       // the inlined instructions.
859       if (MDNode *CSM =
860               CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
861         NewMD = MDNode::concatenate(NewMD, CSM);
862       NI->setMetadata(LLVMContext::MD_noalias, NewMD);
863     } else if (NI->mayReadOrWriteMemory()) {
864       if (MDNode *M = CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
865         NI->setMetadata(LLVMContext::MD_noalias, M);
866     }
867   }
868 }
869 
870 /// If the inlined function has noalias arguments,
871 /// then add new alias scopes for each noalias argument, tag the mapped noalias
872 /// parameters with noalias metadata specifying the new scope, and tag all
873 /// non-derived loads, stores and memory intrinsics with the new alias scopes.
874 static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap,
875                                   const DataLayout &DL, AAResults *CalleeAAR) {
876   if (!EnableNoAliasConversion)
877     return;
878 
879   const Function *CalledFunc = CS.getCalledFunction();
880   SmallVector<const Argument *, 4> NoAliasArgs;
881 
882   for (const Argument &Arg : CalledFunc->args())
883     if (Arg.hasNoAliasAttr() && !Arg.use_empty())
884       NoAliasArgs.push_back(&Arg);
885 
886   if (NoAliasArgs.empty())
887     return;
888 
889   // To do a good job, if a noalias variable is captured, we need to know if
890   // the capture point dominates the particular use we're considering.
891   DominatorTree DT;
892   DT.recalculate(const_cast<Function&>(*CalledFunc));
893 
894   // noalias indicates that pointer values based on the argument do not alias
895   // pointer values which are not based on it. So we add a new "scope" for each
896   // noalias function argument. Accesses using pointers based on that argument
897   // become part of that alias scope, accesses using pointers not based on that
898   // argument are tagged as noalias with that scope.
899 
900   DenseMap<const Argument *, MDNode *> NewScopes;
901   MDBuilder MDB(CalledFunc->getContext());
902 
903   // Create a new scope domain for this function.
904   MDNode *NewDomain =
905     MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
906   for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
907     const Argument *A = NoAliasArgs[i];
908 
909     std::string Name = CalledFunc->getName();
910     if (A->hasName()) {
911       Name += ": %";
912       Name += A->getName();
913     } else {
914       Name += ": argument ";
915       Name += utostr(i);
916     }
917 
918     // Note: We always create a new anonymous root here. This is true regardless
919     // of the linkage of the callee because the aliasing "scope" is not just a
920     // property of the callee, but also all control dependencies in the caller.
921     MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
922     NewScopes.insert(std::make_pair(A, NewScope));
923   }
924 
925   // Iterate over all new instructions in the map; for all memory-access
926   // instructions, add the alias scope metadata.
927   for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
928        VMI != VMIE; ++VMI) {
929     if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
930       if (!VMI->second)
931         continue;
932 
933       Instruction *NI = dyn_cast<Instruction>(VMI->second);
934       if (!NI)
935         continue;
936 
937       bool IsArgMemOnlyCall = false, IsFuncCall = false;
938       SmallVector<const Value *, 2> PtrArgs;
939 
940       if (const LoadInst *LI = dyn_cast<LoadInst>(I))
941         PtrArgs.push_back(LI->getPointerOperand());
942       else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
943         PtrArgs.push_back(SI->getPointerOperand());
944       else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
945         PtrArgs.push_back(VAAI->getPointerOperand());
946       else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
947         PtrArgs.push_back(CXI->getPointerOperand());
948       else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
949         PtrArgs.push_back(RMWI->getPointerOperand());
950       else if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
951         // If we know that the call does not access memory, then we'll still
952         // know that about the inlined clone of this call site, and we don't
953         // need to add metadata.
954         if (ICS.doesNotAccessMemory())
955           continue;
956 
957         IsFuncCall = true;
958         if (CalleeAAR) {
959           FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(ICS);
960           if (MRB == FMRB_OnlyAccessesArgumentPointees ||
961               MRB == FMRB_OnlyReadsArgumentPointees)
962             IsArgMemOnlyCall = true;
963         }
964 
965         for (Value *Arg : ICS.args()) {
966           // We need to check the underlying objects of all arguments, not just
967           // the pointer arguments, because we might be passing pointers as
968           // integers, etc.
969           // However, if we know that the call only accesses pointer arguments,
970           // then we only need to check the pointer arguments.
971           if (IsArgMemOnlyCall && !Arg->getType()->isPointerTy())
972             continue;
973 
974           PtrArgs.push_back(Arg);
975         }
976       }
977 
978       // If we found no pointers, then this instruction is not suitable for
979       // pairing with an instruction to receive aliasing metadata.
980       // However, if this is a call, this we might just alias with none of the
981       // noalias arguments.
982       if (PtrArgs.empty() && !IsFuncCall)
983         continue;
984 
985       // It is possible that there is only one underlying object, but you
986       // need to go through several PHIs to see it, and thus could be
987       // repeated in the Objects list.
988       SmallPtrSet<const Value *, 4> ObjSet;
989       SmallVector<Metadata *, 4> Scopes, NoAliases;
990 
991       SmallSetVector<const Argument *, 4> NAPtrArgs;
992       for (const Value *V : PtrArgs) {
993         SmallVector<Value *, 4> Objects;
994         GetUnderlyingObjects(const_cast<Value*>(V),
995                              Objects, DL, /* LI = */ nullptr);
996 
997         for (Value *O : Objects)
998           ObjSet.insert(O);
999       }
1000 
1001       // Figure out if we're derived from anything that is not a noalias
1002       // argument.
1003       bool CanDeriveViaCapture = false, UsesAliasingPtr = false;
1004       for (const Value *V : ObjSet) {
1005         // Is this value a constant that cannot be derived from any pointer
1006         // value (we need to exclude constant expressions, for example, that
1007         // are formed from arithmetic on global symbols).
1008         bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
1009                              isa<ConstantPointerNull>(V) ||
1010                              isa<ConstantDataVector>(V) || isa<UndefValue>(V);
1011         if (IsNonPtrConst)
1012           continue;
1013 
1014         // If this is anything other than a noalias argument, then we cannot
1015         // completely describe the aliasing properties using alias.scope
1016         // metadata (and, thus, won't add any).
1017         if (const Argument *A = dyn_cast<Argument>(V)) {
1018           if (!A->hasNoAliasAttr())
1019             UsesAliasingPtr = true;
1020         } else {
1021           UsesAliasingPtr = true;
1022         }
1023 
1024         // If this is not some identified function-local object (which cannot
1025         // directly alias a noalias argument), or some other argument (which,
1026         // by definition, also cannot alias a noalias argument), then we could
1027         // alias a noalias argument that has been captured).
1028         if (!isa<Argument>(V) &&
1029             !isIdentifiedFunctionLocal(const_cast<Value*>(V)))
1030           CanDeriveViaCapture = true;
1031       }
1032 
1033       // A function call can always get captured noalias pointers (via other
1034       // parameters, globals, etc.).
1035       if (IsFuncCall && !IsArgMemOnlyCall)
1036         CanDeriveViaCapture = true;
1037 
1038       // First, we want to figure out all of the sets with which we definitely
1039       // don't alias. Iterate over all noalias set, and add those for which:
1040       //   1. The noalias argument is not in the set of objects from which we
1041       //      definitely derive.
1042       //   2. The noalias argument has not yet been captured.
1043       // An arbitrary function that might load pointers could see captured
1044       // noalias arguments via other noalias arguments or globals, and so we
1045       // must always check for prior capture.
1046       for (const Argument *A : NoAliasArgs) {
1047         if (!ObjSet.count(A) && (!CanDeriveViaCapture ||
1048                                  // It might be tempting to skip the
1049                                  // PointerMayBeCapturedBefore check if
1050                                  // A->hasNoCaptureAttr() is true, but this is
1051                                  // incorrect because nocapture only guarantees
1052                                  // that no copies outlive the function, not
1053                                  // that the value cannot be locally captured.
1054                                  !PointerMayBeCapturedBefore(A,
1055                                    /* ReturnCaptures */ false,
1056                                    /* StoreCaptures */ false, I, &DT)))
1057           NoAliases.push_back(NewScopes[A]);
1058       }
1059 
1060       if (!NoAliases.empty())
1061         NI->setMetadata(LLVMContext::MD_noalias,
1062                         MDNode::concatenate(
1063                             NI->getMetadata(LLVMContext::MD_noalias),
1064                             MDNode::get(CalledFunc->getContext(), NoAliases)));
1065 
1066       // Next, we want to figure out all of the sets to which we might belong.
1067       // We might belong to a set if the noalias argument is in the set of
1068       // underlying objects. If there is some non-noalias argument in our list
1069       // of underlying objects, then we cannot add a scope because the fact
1070       // that some access does not alias with any set of our noalias arguments
1071       // cannot itself guarantee that it does not alias with this access
1072       // (because there is some pointer of unknown origin involved and the
1073       // other access might also depend on this pointer). We also cannot add
1074       // scopes to arbitrary functions unless we know they don't access any
1075       // non-parameter pointer-values.
1076       bool CanAddScopes = !UsesAliasingPtr;
1077       if (CanAddScopes && IsFuncCall)
1078         CanAddScopes = IsArgMemOnlyCall;
1079 
1080       if (CanAddScopes)
1081         for (const Argument *A : NoAliasArgs) {
1082           if (ObjSet.count(A))
1083             Scopes.push_back(NewScopes[A]);
1084         }
1085 
1086       if (!Scopes.empty())
1087         NI->setMetadata(
1088             LLVMContext::MD_alias_scope,
1089             MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
1090                                 MDNode::get(CalledFunc->getContext(), Scopes)));
1091     }
1092   }
1093 }
1094 
1095 /// If the inlined function has non-byval align arguments, then
1096 /// add @llvm.assume-based alignment assumptions to preserve this information.
1097 static void AddAlignmentAssumptions(CallSite CS, InlineFunctionInfo &IFI) {
1098   if (!PreserveAlignmentAssumptions || !IFI.GetAssumptionCache)
1099     return;
1100   AssumptionCache *AC = IFI.GetAssumptionCache
1101                             ? &(*IFI.GetAssumptionCache)(*CS.getCaller())
1102                             : nullptr;
1103   auto &DL = CS.getCaller()->getParent()->getDataLayout();
1104 
1105   // To avoid inserting redundant assumptions, we should check for assumptions
1106   // already in the caller. To do this, we might need a DT of the caller.
1107   DominatorTree DT;
1108   bool DTCalculated = false;
1109 
1110   Function *CalledFunc = CS.getCalledFunction();
1111   for (Function::arg_iterator I = CalledFunc->arg_begin(),
1112                               E = CalledFunc->arg_end();
1113        I != E; ++I) {
1114     unsigned Align = I->getType()->isPointerTy() ? I->getParamAlignment() : 0;
1115     if (Align && !I->hasByValOrInAllocaAttr() && !I->hasNUses(0)) {
1116       if (!DTCalculated) {
1117         DT.recalculate(const_cast<Function&>(*CS.getInstruction()->getParent()
1118                                                ->getParent()));
1119         DTCalculated = true;
1120       }
1121 
1122       // If we can already prove the asserted alignment in the context of the
1123       // caller, then don't bother inserting the assumption.
1124       Value *Arg = CS.getArgument(I->getArgNo());
1125       if (getKnownAlignment(Arg, DL, CS.getInstruction(), AC, &DT) >= Align)
1126         continue;
1127 
1128       CallInst *NewAssumption = IRBuilder<>(CS.getInstruction())
1129                                     .CreateAlignmentAssumption(DL, Arg, Align);
1130       if (AC)
1131         AC->registerAssumption(NewAssumption);
1132     }
1133   }
1134 }
1135 
1136 /// Once we have cloned code over from a callee into the caller,
1137 /// update the specified callgraph to reflect the changes we made.
1138 /// Note that it's possible that not all code was copied over, so only
1139 /// some edges of the callgraph may remain.
1140 static void UpdateCallGraphAfterInlining(CallSite CS,
1141                                          Function::iterator FirstNewBlock,
1142                                          ValueToValueMapTy &VMap,
1143                                          InlineFunctionInfo &IFI) {
1144   CallGraph &CG = *IFI.CG;
1145   const Function *Caller = CS.getInstruction()->getParent()->getParent();
1146   const Function *Callee = CS.getCalledFunction();
1147   CallGraphNode *CalleeNode = CG[Callee];
1148   CallGraphNode *CallerNode = CG[Caller];
1149 
1150   // Since we inlined some uninlined call sites in the callee into the caller,
1151   // add edges from the caller to all of the callees of the callee.
1152   CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();
1153 
1154   // Consider the case where CalleeNode == CallerNode.
1155   CallGraphNode::CalledFunctionsVector CallCache;
1156   if (CalleeNode == CallerNode) {
1157     CallCache.assign(I, E);
1158     I = CallCache.begin();
1159     E = CallCache.end();
1160   }
1161 
1162   for (; I != E; ++I) {
1163     const Value *OrigCall = I->first;
1164 
1165     ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
1166     // Only copy the edge if the call was inlined!
1167     if (VMI == VMap.end() || VMI->second == nullptr)
1168       continue;
1169 
1170     // If the call was inlined, but then constant folded, there is no edge to
1171     // add.  Check for this case.
1172     Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
1173     if (!NewCall)
1174       continue;
1175 
1176     // We do not treat intrinsic calls like real function calls because we
1177     // expect them to become inline code; do not add an edge for an intrinsic.
1178     CallSite CS = CallSite(NewCall);
1179     if (CS && CS.getCalledFunction() && CS.getCalledFunction()->isIntrinsic())
1180       continue;
1181 
1182     // Remember that this call site got inlined for the client of
1183     // InlineFunction.
1184     IFI.InlinedCalls.push_back(NewCall);
1185 
1186     // It's possible that inlining the callsite will cause it to go from an
1187     // indirect to a direct call by resolving a function pointer.  If this
1188     // happens, set the callee of the new call site to a more precise
1189     // destination.  This can also happen if the call graph node of the caller
1190     // was just unnecessarily imprecise.
1191     if (!I->second->getFunction())
1192       if (Function *F = CallSite(NewCall).getCalledFunction()) {
1193         // Indirect call site resolved to direct call.
1194         CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);
1195 
1196         continue;
1197       }
1198 
1199     CallerNode->addCalledFunction(CallSite(NewCall), I->second);
1200   }
1201 
1202   // Update the call graph by deleting the edge from Callee to Caller.  We must
1203   // do this after the loop above in case Caller and Callee are the same.
1204   CallerNode->removeCallEdgeFor(CS);
1205 }
1206 
1207 static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
1208                                     BasicBlock *InsertBlock,
1209                                     InlineFunctionInfo &IFI) {
1210   Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
1211   IRBuilder<> Builder(InsertBlock, InsertBlock->begin());
1212 
1213   Value *Size = Builder.getInt64(M->getDataLayout().getTypeStoreSize(AggTy));
1214 
1215   // Always generate a memcpy of alignment 1 here because we don't know
1216   // the alignment of the src pointer.  Other optimizations can infer
1217   // better alignment.
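  // The emitted call looks roughly like this (illustrative; the builder also
  // inserts the i8* bitcasts it needs):
  //   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src,
  //                                        i64 <store size>, i32 1, i1 false)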
1218   Builder.CreateMemCpy(Dst, Src, Size, /*Align=*/1);
1219 }
1220 
1221 /// When inlining a call site that has a byval argument,
1222 /// we have to make the implicit memcpy explicit by adding it.
1223 static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
1224                                   const Function *CalledFunc,
1225                                   InlineFunctionInfo &IFI,
1226                                   unsigned ByValAlignment) {
1227   PointerType *ArgTy = cast<PointerType>(Arg->getType());
1228   Type *AggTy = ArgTy->getElementType();
1229 
1230   Function *Caller = TheCall->getParent()->getParent();
1231 
1232   // If the called function is readonly, then it could not mutate the caller's
1233   // copy of the byval'd memory.  In this case, it is safe to elide the copy and
1234   // temporary.
1235   if (CalledFunc->onlyReadsMemory()) {
    // If the byval argument has a specified alignment that is greater than the
    // known alignment of the passed-in pointer, then we either have to round
    // up the input pointer's alignment or give up on this transformation.
1239     if (ByValAlignment <= 1)  // 0 = unspecified, 1 = no particular alignment.
1240       return Arg;
1241 
1242     AssumptionCache *AC =
1243         IFI.GetAssumptionCache ? &(*IFI.GetAssumptionCache)(*Caller) : nullptr;
1244     const DataLayout &DL = Caller->getParent()->getDataLayout();
1245 
1246     // If the pointer is already known to be sufficiently aligned, or if we can
1247     // round it up to a larger alignment, then we don't need a temporary.
1248     if (getOrEnforceKnownAlignment(Arg, ByValAlignment, DL, TheCall, AC) >=
1249         ByValAlignment)
1250       return Arg;
1251 
1252     // Otherwise, we have to make a memcpy to get a safe alignment.  This is bad
1253     // for code quality, but rarely happens and is required for correctness.
1254   }
1255 
  // Create the alloca, using the module DataLayout's preferred alignment for
  // the aggregate type.
1257   unsigned Align =
1258       Caller->getParent()->getDataLayout().getPrefTypeAlignment(AggTy);
1259 
1260   // If the byval had an alignment specified, we *must* use at least that
1261   // alignment, as it is required by the byval argument (and uses of the
1262   // pointer inside the callee).
1263   Align = std::max(Align, ByValAlignment);
1264 
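  // Note that only the alloca is created here; the memcpy that fills it in is
  // emitted later by HandleByValArgumentInit, once the inlined blocks exist.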
1265   Value *NewAlloca = new AllocaInst(AggTy, nullptr, Align, Arg->getName(),
1266                                     &*Caller->begin()->begin());
1267   IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));
1268 
1269   // Uses of the argument in the function should use our new alloca
1270   // instead.
1271   return NewAlloca;
1272 }
1273 
1274 // Check whether this Value is used by a lifetime intrinsic.
1275 static bool isUsedByLifetimeMarker(Value *V) {
1276   for (User *U : V->users()) {
1277     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
1278       switch (II->getIntrinsicID()) {
1279       default: break;
1280       case Intrinsic::lifetime_start:
1281       case Intrinsic::lifetime_end:
1282         return true;
1283       }
1284     }
1285   }
1286   return false;
1287 }
1288 
1289 // Check whether the given alloca already has
1290 // lifetime.start or lifetime.end intrinsics.
1291 static bool hasLifetimeMarkers(AllocaInst *AI) {
1292   Type *Ty = AI->getType();
1293   Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
1294                                        Ty->getPointerAddressSpace());
1295   if (Ty == Int8PtrTy)
1296     return isUsedByLifetimeMarker(AI);
1297 
1298   // Do a scan to find all the casts to i8*.
1299   for (User *U : AI->users()) {
1300     if (U->getType() != Int8PtrTy) continue;
1301     if (U->stripPointerCasts() != AI) continue;
1302     if (isUsedByLifetimeMarker(U))
1303       return true;
1304   }
1305   return false;
1306 }
1307 
1308 /// Rebuild the entire inlined-at chain for this instruction so that the top of
1309 /// the chain now is inlined-at the new call site.
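///
/// For example (illustrative): if an instruction's location is "a, inlined at
/// b", and the new call site is "c", the rebuilt location becomes "a, inlined
/// at b, inlined at c".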
1310 static DebugLoc
1311 updateInlinedAtInfo(const DebugLoc &DL, DILocation *InlinedAtNode,
1312                     LLVMContext &Ctx,
1313                     DenseMap<const DILocation *, DILocation *> &IANodes) {
1314   SmallVector<DILocation *, 3> InlinedAtLocations;
1315   DILocation *Last = InlinedAtNode;
1316   DILocation *CurInlinedAt = DL;
1317 
1318   // Gather all the inlined-at nodes
1319   while (DILocation *IA = CurInlinedAt->getInlinedAt()) {
    // Stop at the first node we've already rebuilt; everything behind it in
    // the chain is already cached in IANodes.
1321     if (DILocation *Found = IANodes[IA]) {
1322       Last = Found;
1323       break;
1324     }
1325 
1326     InlinedAtLocations.push_back(IA);
1327     CurInlinedAt = IA;
1328   }
1329 
1330   // Starting from the top, rebuild the nodes to point to the new inlined-at
1331   // location (then rebuilding the rest of the chain behind it) and update the
1332   // map of already-constructed inlined-at nodes.
1333   for (const DILocation *MD : reverse(InlinedAtLocations)) {
1334     Last = IANodes[MD] = DILocation::getDistinct(
1335         Ctx, MD->getLine(), MD->getColumn(), MD->getScope(), Last);
1336   }
1337 
1338   // And finally create the normal location for this instruction, referring to
1339   // the new inlined-at chain.
1340   return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(), Last);
1341 }
1342 
1343 /// Return the result of AI->isStaticAlloca() if AI were moved to the entry
1344 /// block. Allocas used in inalloca calls and allocas of dynamic array size
1345 /// cannot be static.
static bool allocaWouldBeStaticInEntry(const AllocaInst *AI) {
1347   return isa<Constant>(AI->getArraySize()) && !AI->isUsedWithInAlloca();
1348 }
1349 
/// Update inlined instructions' line numbers to encode the location where
/// these instructions are inlined.
1352 static void fixupLineNumbers(Function *Fn, Function::iterator FI,
1353                              Instruction *TheCall, bool CalleeHasDebugInfo) {
1354   const DebugLoc &TheCallDL = TheCall->getDebugLoc();
1355   if (!TheCallDL)
1356     return;
1357 
1358   auto &Ctx = Fn->getContext();
1359   DILocation *InlinedAtNode = TheCallDL;
1360 
1361   // Create a unique call site, not to be confused with any other call from the
1362   // same location.
1363   InlinedAtNode = DILocation::getDistinct(
1364       Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
1365       InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());
1366 
  // Cache the inlined-at nodes as they're built so they are reused; without
  // this, every instruction's inlined-at chain would be distinct from every
  // other instruction's.
1370   DenseMap<const DILocation *, DILocation *> IANodes;
1371 
1372   for (; FI != Fn->end(); ++FI) {
1373     for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
1374          BI != BE; ++BI) {
1375       if (DebugLoc DL = BI->getDebugLoc()) {
1376         BI->setDebugLoc(
1377             updateInlinedAtInfo(DL, InlinedAtNode, BI->getContext(), IANodes));
1378         continue;
1379       }
1380 
1381       if (CalleeHasDebugInfo)
1382         continue;
1383 
      // If the inlined instruction has no line number, make it look as if it
      // originates from the call location. This is important for
      // __attribute__((__always_inline__, __nodebug__)) functions which must
      // use the caller's location for all instructions in their function body.
1388 
1389       // Don't update static allocas, as they may get moved later.
1390       if (auto *AI = dyn_cast<AllocaInst>(BI))
1391         if (allocaWouldBeStaticInEntry(AI))
1392           continue;
1393 
1394       BI->setDebugLoc(TheCallDL);
1395     }
1396   }
1397 }
1398 
1399 /// This function inlines the called function into the basic block of the
1400 /// caller. This returns false if it is not possible to inline this call.
1401 /// The program is still in a well defined state if this occurs though.
1402 ///
1403 /// Note that this only does one level of inlining.  For example, if the
1404 /// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
1405 /// exists in the instruction stream.  Similarly this will inline a recursive
1406 /// function by one level.
1407 bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
1408                           AAResults *CalleeAAR, bool InsertLifetime) {
1409   Instruction *TheCall = CS.getInstruction();
1410   assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
1411          "Instruction not in function!");
1412 
1413   // If IFI has any state in it, zap it before we fill it in.
1414   IFI.reset();
1415 
1416   const Function *CalledFunc = CS.getCalledFunction();
1417   if (!CalledFunc ||              // Can't inline external function or indirect
1418       CalledFunc->isDeclaration() || // call, or call to a vararg function!
1419       CalledFunc->getFunctionType()->isVarArg()) return false;
1420 
1421   // The inliner does not know how to inline through calls with operand bundles
1422   // in general ...
1423   if (CS.hasOperandBundles()) {
1424     for (int i = 0, e = CS.getNumOperandBundles(); i != e; ++i) {
1425       uint32_t Tag = CS.getOperandBundleAt(i).getTagID();
1426       // ... but it knows how to inline through "deopt" operand bundles ...
1427       if (Tag == LLVMContext::OB_deopt)
1428         continue;
1429       // ... and "funclet" operand bundles.
1430       if (Tag == LLVMContext::OB_funclet)
1431         continue;
1432 
1433       return false;
1434     }
1435   }
1436 
1437   // If the call to the callee cannot throw, set the 'nounwind' flag on any
1438   // calls that we inline.
1439   bool MarkNoUnwind = CS.doesNotThrow();
1440 
1441   BasicBlock *OrigBB = TheCall->getParent();
1442   Function *Caller = OrigBB->getParent();
1443 
1444   // GC poses two hazards to inlining, which only occur when the callee has GC:
1445   //  1. If the caller has no GC, then the callee's GC must be propagated to the
1446   //     caller.
1447   //  2. If the caller has a differing GC, it is invalid to inline.
1448   if (CalledFunc->hasGC()) {
1449     if (!Caller->hasGC())
1450       Caller->setGC(CalledFunc->getGC());
1451     else if (CalledFunc->getGC() != Caller->getGC())
1452       return false;
1453   }
1454 
1455   // Get the personality function from the callee if it contains a landing pad.
1456   Constant *CalledPersonality =
1457       CalledFunc->hasPersonalityFn()
1458           ? CalledFunc->getPersonalityFn()->stripPointerCasts()
1459           : nullptr;
1460 
1461   // Find the personality function used by the landing pads of the caller. If it
1462   // exists, then check to see that it matches the personality function used in
1463   // the callee.
1464   Constant *CallerPersonality =
1465       Caller->hasPersonalityFn()
1466           ? Caller->getPersonalityFn()->stripPointerCasts()
1467           : nullptr;
1468   if (CalledPersonality) {
1469     if (!CallerPersonality)
1470       Caller->setPersonalityFn(CalledPersonality);
1471     // If the personality functions match, then we can perform the
1472     // inlining. Otherwise, we can't inline.
1473     // TODO: This isn't 100% true. Some personality functions are proper
1474     //       supersets of others and can be used in place of the other.
1475     else if (CalledPersonality != CallerPersonality)
1476       return false;
1477   }
1478 
1479   // We need to figure out which funclet the callsite was in so that we may
1480   // properly nest the callee.
1481   Instruction *CallSiteEHPad = nullptr;
1482   if (CallerPersonality) {
1483     EHPersonality Personality = classifyEHPersonality(CallerPersonality);
1484     if (isFuncletEHPersonality(Personality)) {
1485       Optional<OperandBundleUse> ParentFunclet =
1486           CS.getOperandBundle(LLVMContext::OB_funclet);
1487       if (ParentFunclet)
1488         CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());
1489 
1490       // OK, the inlining site is legal.  What about the target function?
1491 
1492       if (CallSiteEHPad) {
1493         if (Personality == EHPersonality::MSVC_CXX) {
1494           // The MSVC personality cannot tolerate catches getting inlined into
1495           // cleanup funclets.
1496           if (isa<CleanupPadInst>(CallSiteEHPad)) {
1497             // Ok, the call site is within a cleanuppad.  Let's check the callee
1498             // for catchpads.
1499             for (const BasicBlock &CalledBB : *CalledFunc) {
1500               if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))
1501                 return false;
1502             }
1503           }
1504         } else if (isAsynchronousEHPersonality(Personality)) {
          // SEH is even less tolerant; there may not be any sort of
          // exceptional funclet in the callee.
1507           for (const BasicBlock &CalledBB : *CalledFunc) {
1508             if (CalledBB.isEHPad())
1509               return false;
1510           }
1511         }
1512       }
1513     }
1514   }
1515 
1516   // Determine if we are dealing with a call in an EHPad which does not unwind
1517   // to caller.
1518   bool EHPadForCallUnwindsLocally = false;
1519   if (CallSiteEHPad && CS.isCall()) {
1520     UnwindDestMemoTy FuncletUnwindMap;
1521     Value *CallSiteUnwindDestToken =
1522         getUnwindDestToken(CallSiteEHPad, FuncletUnwindMap);
1523 
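    // A ConstantTokenNone result means the pad unwinds to the caller; any
    // other (non-null) token means the unwind stays within this function.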
1524     EHPadForCallUnwindsLocally =
1525         CallSiteUnwindDestToken &&
1526         !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
1527   }
1528 
1529   // Get an iterator to the last basic block in the function, which will have
1530   // the new function inlined after it.
1531   Function::iterator LastBlock = --Caller->end();
1532 
1533   // Make sure to capture all of the return instructions from the cloned
1534   // function.
1535   SmallVector<ReturnInst*, 8> Returns;
1536   ClonedCodeInfo InlinedFunctionInfo;
1537   Function::iterator FirstNewBlock;
1538 
1539   { // Scope to destroy VMap after cloning.
1540     ValueToValueMapTy VMap;
1541     // Keep a list of pair (dst, src) to emit byval initializations.
1542     SmallVector<std::pair<Value*, Value*>, 4> ByValInit;
1543 
1544     auto &DL = Caller->getParent()->getDataLayout();
1545 
1546     assert(CalledFunc->arg_size() == CS.arg_size() &&
1547            "No varargs calls can be inlined!");
1548 
1549     // Calculate the vector of arguments to pass into the function cloner, which
1550     // matches up the formal to the actual argument values.
1551     CallSite::arg_iterator AI = CS.arg_begin();
1552     unsigned ArgNo = 0;
1553     for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
1554          E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
1555       Value *ActualArg = *AI;
1556 
      // When byval arguments are actually inlined, we need to make the copy
      // implied by them explicit.  However, we don't do this if the callee is
      // readonly or readnone, because then the copy would be unneeded: the
      // callee doesn't modify the struct.
1561       if (CS.isByValArgument(ArgNo)) {
1562         ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
1563                                         CalledFunc->getParamAlignment(ArgNo+1));
1564         if (ActualArg != *AI)
1565           ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI));
1566       }
1567 
1568       VMap[&*I] = ActualArg;
1569     }
1570 
1571     // Add alignment assumptions if necessary. We do this before the inlined
1572     // instructions are actually cloned into the caller so that we can easily
1573     // check what will be known at the start of the inlined code.
1574     AddAlignmentAssumptions(CS, IFI);
1575 
1576     // We want the inliner to prune the code as it copies.  We would LOVE to
1577     // have no dead or constant instructions leftover after inlining occurs
1578     // (which can happen, e.g., because an argument was constant), but we'll be
1579     // happy with whatever the cloner can do.
1580     CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
1581                               /*ModuleLevelChanges=*/false, Returns, ".i",
1582                               &InlinedFunctionInfo, TheCall);
1583 
1584     // Remember the first block that is newly cloned over.
1585     FirstNewBlock = LastBlock; ++FirstNewBlock;
1586 
1587     // Inject byval arguments initialization.
1588     for (std::pair<Value*, Value*> &Init : ByValInit)
1589       HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(),
1590                               &*FirstNewBlock, IFI);
1591 
1592     Optional<OperandBundleUse> ParentDeopt =
1593         CS.getOperandBundle(LLVMContext::OB_deopt);
1594     if (ParentDeopt) {
1595       SmallVector<OperandBundleDef, 2> OpDefs;
1596 
1597       for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) {
1598         Instruction *I = dyn_cast_or_null<Instruction>(VH);
1599         if (!I) continue;  // instruction was DCE'd or RAUW'ed to undef
1600 
1601         OpDefs.clear();
1602 
1603         CallSite ICS(I);
1604         OpDefs.reserve(ICS.getNumOperandBundles());
1605 
1606         for (unsigned i = 0, e = ICS.getNumOperandBundles(); i < e; ++i) {
1607           auto ChildOB = ICS.getOperandBundleAt(i);
1608           if (ChildOB.getTagID() != LLVMContext::OB_deopt) {
1609             // If the inlined call has other operand bundles, let them be
1610             OpDefs.emplace_back(ChildOB);
1611             continue;
1612           }
1613 
1614           // It may be useful to separate this logic (of handling operand
1615           // bundles) out to a separate "policy" component if this gets crowded.
1616           // Prepend the parent's deoptimization continuation to the newly
1617           // inlined call's deoptimization continuation.
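          // For example (illustrative operands): a parent bundle
          // "deopt"(i32 1, i32 2) and a child bundle "deopt"(i32 3) merge into
          // "deopt"(i32 1, i32 2, i32 3) on the rewritten call.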
1618           std::vector<Value *> MergedDeoptArgs;
1619           MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
1620                                   ChildOB.Inputs.size());
1621 
1622           MergedDeoptArgs.insert(MergedDeoptArgs.end(),
1623                                  ParentDeopt->Inputs.begin(),
1624                                  ParentDeopt->Inputs.end());
1625           MergedDeoptArgs.insert(MergedDeoptArgs.end(), ChildOB.Inputs.begin(),
1626                                  ChildOB.Inputs.end());
1627 
1628           OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs));
1629         }
1630 
1631         Instruction *NewI = nullptr;
1632         if (isa<CallInst>(I))
1633           NewI = CallInst::Create(cast<CallInst>(I), OpDefs, I);
1634         else
1635           NewI = InvokeInst::Create(cast<InvokeInst>(I), OpDefs, I);
1636 
1637         // Note: the RAUW does the appropriate fixup in VMap, so we need to do
1638         // this even if the call returns void.
1639         I->replaceAllUsesWith(NewI);
1640 
1641         VH = nullptr;
1642         I->eraseFromParent();
1643       }
1644     }
1645 
1646     // Update the callgraph if requested.
1647     if (IFI.CG)
1648       UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);
1649 
1650     // For 'nodebug' functions, the associated DISubprogram is always null.
1651     // Conservatively avoid propagating the callsite debug location to
1652     // instructions inlined from a function whose DISubprogram is not null.
1653     fixupLineNumbers(Caller, FirstNewBlock, TheCall,
1654                      CalledFunc->getSubprogram() != nullptr);
1655 
1656     // Clone existing noalias metadata if necessary.
1657     CloneAliasScopeMetadata(CS, VMap);
1658 
1659     // Add noalias metadata if necessary.
1660     AddAliasScopeMetadata(CS, VMap, DL, CalleeAAR);
1661 
1662     // Propagate llvm.mem.parallel_loop_access if necessary.
1663     PropagateParallelLoopAccessMetadata(CS, VMap);
1664 
1665     // Register any cloned assumptions.
1666     if (IFI.GetAssumptionCache)
1667       for (BasicBlock &NewBlock :
1668            make_range(FirstNewBlock->getIterator(), Caller->end()))
1669         for (Instruction &I : NewBlock) {
1670           if (auto *II = dyn_cast<IntrinsicInst>(&I))
1671             if (II->getIntrinsicID() == Intrinsic::assume)
1672               (*IFI.GetAssumptionCache)(*Caller).registerAssumption(II);
1673         }
1674   }
1675 
1676   // If there are any alloca instructions in the block that used to be the entry
1677   // block for the callee, move them to the entry block of the caller.  First
1678   // calculate which instruction they should be inserted before.  We insert the
1679   // instructions at the end of the current alloca list.
1680   {
1681     BasicBlock::iterator InsertPoint = Caller->begin()->begin();
1682     for (BasicBlock::iterator I = FirstNewBlock->begin(),
1683          E = FirstNewBlock->end(); I != E; ) {
1684       AllocaInst *AI = dyn_cast<AllocaInst>(I++);
1685       if (!AI) continue;
1686 
1687       // If the alloca is now dead, remove it.  This often occurs due to code
1688       // specialization.
1689       if (AI->use_empty()) {
1690         AI->eraseFromParent();
1691         continue;
1692       }
1693 
1694       if (!allocaWouldBeStaticInEntry(AI))
1695         continue;
1696 
1697       // Keep track of the static allocas that we inline into the caller.
1698       IFI.StaticAllocas.push_back(AI);
1699 
1700       // Scan for the block of allocas that we can move over, and move them
1701       // all at once.
1702       while (isa<AllocaInst>(I) &&
1703              allocaWouldBeStaticInEntry(cast<AllocaInst>(I))) {
1704         IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
1705         ++I;
1706       }
1707 
1708       // Transfer all of the allocas over in a block.  Using splice means
1709       // that the instructions aren't removed from the symbol table, then
1710       // reinserted.
1711       Caller->getEntryBlock().getInstList().splice(
1712           InsertPoint, FirstNewBlock->getInstList(), AI->getIterator(), I);
1713     }
1714     // Move any dbg.declares describing the allocas into the entry basic block.
1715     DIBuilder DIB(*Caller->getParent());
1716     for (auto &AI : IFI.StaticAllocas)
1717       replaceDbgDeclareForAlloca(AI, AI, DIB, /*Deref=*/false);
1718   }
1719 
1720   bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false;
1721   if (InlinedFunctionInfo.ContainsCalls) {
1722     CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
1723     if (CallInst *CI = dyn_cast<CallInst>(TheCall))
1724       CallSiteTailKind = CI->getTailCallKind();
1725 
1726     for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
1727          ++BB) {
1728       for (Instruction &I : *BB) {
1729         CallInst *CI = dyn_cast<CallInst>(&I);
1730         if (!CI)
1731           continue;
1732 
1733         if (Function *F = CI->getCalledFunction())
1734           InlinedDeoptimizeCalls |=
1735               F->getIntrinsicID() == Intrinsic::experimental_deoptimize;
1736 
1737         // We need to reduce the strength of any inlined tail calls.  For
1738         // musttail, we have to avoid introducing potential unbounded stack
1739         // growth.  For example, if functions 'f' and 'g' are mutually recursive
1740         // with musttail, we can inline 'g' into 'f' so long as we preserve
1741         // musttail on the cloned call to 'f'.  If either the inlined call site
1742         // or the cloned call site is *not* musttail, the program already has
1743         // one frame of stack growth, so it's safe to remove musttail.  Here is
1744         // a table of example transformations:
1745         //
1746         //    f -> musttail g -> musttail f  ==>  f -> musttail f
1747         //    f -> musttail g ->     tail f  ==>  f ->     tail f
1748         //    f ->          g -> musttail f  ==>  f ->          f
1749         //    f ->          g ->     tail f  ==>  f ->          f
1750         CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
1751         ChildTCK = std::min(CallSiteTailKind, ChildTCK);
1752         CI->setTailCallKind(ChildTCK);
1753         InlinedMustTailCalls |= CI->isMustTailCall();
1754 
1755         // Calls inlined through a 'nounwind' call site should be marked
1756         // 'nounwind'.
1757         if (MarkNoUnwind)
1758           CI->setDoesNotThrow();
1759       }
1760     }
1761   }
1762 
  // Leave lifetime markers for the static allocas, scoping them to the
  // function we just inlined.
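  // This brackets each alloca with markers roughly like (illustrative; the
  // size operand is -1 when it cannot be computed):
  //   call void @llvm.lifetime.start(i64 <size>, i8* <alloca>)
  //     ...
  //   call void @llvm.lifetime.end(i64 <size>, i8* <alloca>)   ; before each return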
1765   if (InsertLifetime && !IFI.StaticAllocas.empty()) {
1766     IRBuilder<> builder(&FirstNewBlock->front());
1767     for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
1768       AllocaInst *AI = IFI.StaticAllocas[ai];
1769       // Don't mark swifterror allocas. They can't have bitcast uses.
1770       if (AI->isSwiftError())
1771         continue;
1772 
1773       // If the alloca is already scoped to something smaller than the whole
1774       // function then there's no need to add redundant, less accurate markers.
1775       if (hasLifetimeMarkers(AI))
1776         continue;
1777 
1778       // Try to determine the size of the allocation.
1779       ConstantInt *AllocaSize = nullptr;
1780       if (ConstantInt *AIArraySize =
1781           dyn_cast<ConstantInt>(AI->getArraySize())) {
1782         auto &DL = Caller->getParent()->getDataLayout();
1783         Type *AllocaType = AI->getAllocatedType();
1784         uint64_t AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
1785         uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
1786 
1787         // Don't add markers for zero-sized allocas.
1788         if (AllocaArraySize == 0)
1789           continue;
1790 
1791         // Check that array size doesn't saturate uint64_t and doesn't
1792         // overflow when it's multiplied by type size.
1793         if (AllocaArraySize != ~0ULL &&
1794             UINT64_MAX / AllocaArraySize >= AllocaTypeSize) {
1795           AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
1796                                         AllocaArraySize * AllocaTypeSize);
1797         }
1798       }
1799 
1800       builder.CreateLifetimeStart(AI, AllocaSize);
1801       for (ReturnInst *RI : Returns) {
1802         // Don't insert llvm.lifetime.end calls between a musttail or deoptimize
1803         // call and a return.  The return kills all local allocas.
1804         if (InlinedMustTailCalls &&
1805             RI->getParent()->getTerminatingMustTailCall())
1806           continue;
1807         if (InlinedDeoptimizeCalls &&
1808             RI->getParent()->getTerminatingDeoptimizeCall())
1809           continue;
1810         IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
1811       }
1812     }
1813   }
1814 
1815   // If the inlined code contained dynamic alloca instructions, wrap the inlined
1816   // code with llvm.stacksave/llvm.stackrestore intrinsics.
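  // The net effect is roughly (illustrative):
  //   %savedstack = call i8* @llvm.stacksave()
  //     ...inlined code containing dynamic allocas...
  //   call void @llvm.stackrestore(i8* %savedstack)   ; before each return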
1817   if (InlinedFunctionInfo.ContainsDynamicAllocas) {
1818     Module *M = Caller->getParent();
1819     // Get the two intrinsics we care about.
1820     Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
1821     Function *StackRestore=Intrinsic::getDeclaration(M,Intrinsic::stackrestore);
1822 
1823     // Insert the llvm.stacksave.
1824     CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
1825                              .CreateCall(StackSave, {}, "savedstack");
1826 
1827     // Insert a call to llvm.stackrestore before any return instructions in the
1828     // inlined function.
1829     for (ReturnInst *RI : Returns) {
1830       // Don't insert llvm.stackrestore calls between a musttail or deoptimize
1831       // call and a return.  The return will restore the stack pointer.
1832       if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
1833         continue;
1834       if (InlinedDeoptimizeCalls && RI->getParent()->getTerminatingDeoptimizeCall())
1835         continue;
1836       IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
1837     }
1838   }
1839 
1840   // If we are inlining for an invoke instruction, we must make sure to rewrite
1841   // any call instructions into invoke instructions.  This is sensitive to which
1842   // funclet pads were top-level in the inlinee, so must be done before
1843   // rewriting the "parent pad" links.
1844   if (auto *II = dyn_cast<InvokeInst>(TheCall)) {
1845     BasicBlock *UnwindDest = II->getUnwindDest();
1846     Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI();
1847     if (isa<LandingPadInst>(FirstNonPHI)) {
1848       HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo);
1849     } else {
1850       HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo);
1851     }
1852   }
1853 
1854   // Update the lexical scopes of the new funclets and callsites.
1855   // Anything that had 'none' as its parent is now nested inside the callsite's
1856   // EHPad.
1857 
1858   if (CallSiteEHPad) {
1859     for (Function::iterator BB = FirstNewBlock->getIterator(),
1860                             E = Caller->end();
1861          BB != E; ++BB) {
1862       // Add bundle operands to any top-level call sites.
1863       SmallVector<OperandBundleDef, 1> OpBundles;
1864       for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E;) {
1865         Instruction *I = &*BBI++;
1866         CallSite CS(I);
1867         if (!CS)
1868           continue;
1869 
1870         // Skip call sites which are nounwind intrinsics.
1871         auto *CalledFn =
1872             dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
1873         if (CalledFn && CalledFn->isIntrinsic() && CS.doesNotThrow())
1874           continue;
1875 
1876         // Skip call sites which already have a "funclet" bundle.
1877         if (CS.getOperandBundle(LLVMContext::OB_funclet))
1878           continue;
1879 
1880         CS.getOperandBundlesAsDefs(OpBundles);
1881         OpBundles.emplace_back("funclet", CallSiteEHPad);
1882 
1883         Instruction *NewInst;
1884         if (CS.isCall())
1885           NewInst = CallInst::Create(cast<CallInst>(I), OpBundles, I);
1886         else
1887           NewInst = InvokeInst::Create(cast<InvokeInst>(I), OpBundles, I);
1888         NewInst->takeName(I);
1889         I->replaceAllUsesWith(NewInst);
1890         I->eraseFromParent();
1891 
1892         OpBundles.clear();
1893       }
1894 
      // It is problematic if the inlinee has a cleanupret which unwinds to the
      // caller and we inline it into a call site which doesn't unwind itself,
      // but which sits inside an EH pad that does unwind locally.  Such an
      // edge must be dynamically unreachable.  As such, we replace the
      // cleanupret with unreachable.
1899       if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
1900         if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
1901           changeToUnreachable(CleanupRet, /*UseLLVMTrap=*/false);
1902 
1903       Instruction *I = BB->getFirstNonPHI();
1904       if (!I->isEHPad())
1905         continue;
1906 
1907       if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
1908         if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
1909           CatchSwitch->setParentPad(CallSiteEHPad);
1910       } else {
1911         auto *FPI = cast<FuncletPadInst>(I);
1912         if (isa<ConstantTokenNone>(FPI->getParentPad()))
1913           FPI->setParentPad(CallSiteEHPad);
1914       }
1915     }
1916   }
1917 
1918   if (InlinedDeoptimizeCalls) {
1919     // We need to at least remove the deoptimizing returns from the Return set,
1920     // so that the control flow from those returns does not get merged into the
1921     // caller (but terminate it instead).  If the caller's return type does not
1922     // match the callee's return type, we also need to change the return type of
1923     // the intrinsic.
1924     if (Caller->getReturnType() == TheCall->getType()) {
1925       auto NewEnd = remove_if(Returns, [](ReturnInst *RI) {
1926         return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr;
1927       });
1928       Returns.erase(NewEnd, Returns.end());
1929     } else {
1930       SmallVector<ReturnInst *, 8> NormalReturns;
1931       Function *NewDeoptIntrinsic = Intrinsic::getDeclaration(
1932           Caller->getParent(), Intrinsic::experimental_deoptimize,
1933           {Caller->getReturnType()});
1934 
1935       for (ReturnInst *RI : Returns) {
1936         CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall();
1937         if (!DeoptCall) {
1938           NormalReturns.push_back(RI);
1939           continue;
1940         }
1941 
1942         // The calling convention on the deoptimize call itself may be bogus,
1943         // since the code we're inlining may have undefined behavior (and may
1944         // never actually execute at runtime); but all
1945         // @llvm.experimental.deoptimize declarations have to have the same
1946         // calling convention in a well-formed module.
1947         auto CallingConv = DeoptCall->getCalledFunction()->getCallingConv();
1948         NewDeoptIntrinsic->setCallingConv(CallingConv);
1949         auto *CurBB = RI->getParent();
1950         RI->eraseFromParent();
1951 
1952         SmallVector<Value *, 4> CallArgs(DeoptCall->arg_begin(),
1953                                          DeoptCall->arg_end());
1954 
1955         SmallVector<OperandBundleDef, 1> OpBundles;
1956         DeoptCall->getOperandBundlesAsDefs(OpBundles);
1957         DeoptCall->eraseFromParent();
1958         assert(!OpBundles.empty() &&
1959                "Expected at least the deopt operand bundle");
1960 
1961         IRBuilder<> Builder(CurBB);
1962         CallInst *NewDeoptCall =
1963             Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
1964         NewDeoptCall->setCallingConv(CallingConv);
1965         if (NewDeoptCall->getType()->isVoidTy())
1966           Builder.CreateRetVoid();
1967         else
1968           Builder.CreateRet(NewDeoptCall);
1969       }
1970 
1971       // Leave behind the normal returns so we can merge control flow.
1972       std::swap(Returns, NormalReturns);
1973     }
1974   }
1975 
1976   // Handle any inlined musttail call sites.  In order for a new call site to be
1977   // musttail, the source of the clone and the inlined call site must have been
1978   // musttail.  Therefore it's safe to return without merging control into the
1979   // phi below.
1980   if (InlinedMustTailCalls) {
1981     // Check if we need to bitcast the result of any musttail calls.
1982     Type *NewRetTy = Caller->getReturnType();
1983     bool NeedBitCast = !TheCall->use_empty() && TheCall->getType() != NewRetTy;
1984 
1985     // Handle the returns preceded by musttail calls separately.
1986     SmallVector<ReturnInst *, 8> NormalReturns;
1987     for (ReturnInst *RI : Returns) {
1988       CallInst *ReturnedMustTail =
1989           RI->getParent()->getTerminatingMustTailCall();
1990       if (!ReturnedMustTail) {
1991         NormalReturns.push_back(RI);
1992         continue;
1993       }
1994       if (!NeedBitCast)
1995         continue;
1996 
1997       // Delete the old return and any preceding bitcast.
1998       BasicBlock *CurBB = RI->getParent();
1999       auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
2000       RI->eraseFromParent();
2001       if (OldCast)
2002         OldCast->eraseFromParent();
2003 
2004       // Insert a new bitcast and return with the right type.
2005       IRBuilder<> Builder(CurBB);
2006       Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
2007     }
2008 
2009     // Leave behind the normal returns so we can merge control flow.
2010     std::swap(Returns, NormalReturns);
2011   }
2012 
2013   // If we cloned in _exactly one_ basic block, and if that block ends in a
2014   // return instruction, we splice the body of the inlined callee directly into
2015   // the calling basic block.
2016   if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
2017     // Move all of the instructions right before the call.
2018     OrigBB->getInstList().splice(TheCall->getIterator(),
2019                                  FirstNewBlock->getInstList(),
2020                                  FirstNewBlock->begin(), FirstNewBlock->end());
2021     // Remove the cloned basic block.
2022     Caller->getBasicBlockList().pop_back();
2023 
2024     // If the call site was an invoke instruction, add a branch to the normal
2025     // destination.
2026     if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
2027       BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);
2028       NewBr->setDebugLoc(Returns[0]->getDebugLoc());
2029     }
2030 
2031     // If the return instruction returned a value, replace uses of the call with
2032     // uses of the returned value.
2033     if (!TheCall->use_empty()) {
2034       ReturnInst *R = Returns[0];
2035       if (TheCall == R->getReturnValue())
2036         TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
2037       else
2038         TheCall->replaceAllUsesWith(R->getReturnValue());
2039     }
2040     // Since we are now done with the Call/Invoke, we can delete it.
2041     TheCall->eraseFromParent();
2042 
2043     // Since we are now done with the return instruction, delete it also.
2044     Returns[0]->eraseFromParent();
2045 
2046     // We are now done with the inlining.
2047     return true;
2048   }
2049 
  // Otherwise, we have the normal case of more than one block to inline or
  // multiple return sites.
2052 
2053   // We want to clone the entire callee function into the hole between the
2054   // "starter" and "ender" blocks.  How we accomplish this depends on whether
2055   // this is an invoke instruction or a call instruction.
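  // Roughly, the shape we are building towards is (illustrative):
  //   OrigBB:       ...code before the call site...; br <first inlined block>
  //   <inlined blocks; their returns are merged into AfterCallBB below>
  //   AfterCallBB:  [phi of returned values, if needed]; ...code after the call...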
2056   BasicBlock *AfterCallBB;
2057   BranchInst *CreatedBranchToNormalDest = nullptr;
2058   if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
2059 
2060     // Add an unconditional branch to make this look like the CallInst case...
2061     CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), TheCall);
2062 
    // Split the basic block.  This guarantees that no PHI nodes will have to
    // be updated due to new incoming edges, and makes the invoke case more
    // symmetric to the call case.
2066     AfterCallBB =
2067         OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(),
2068                                 CalledFunc->getName() + ".exit");
2069 
2070   } else {  // It's a call
2071     // If this is a call instruction, we need to split the basic block that
2072     // the call lives in.
2073     //
2074     AfterCallBB = OrigBB->splitBasicBlock(TheCall->getIterator(),
2075                                           CalledFunc->getName() + ".exit");
2076   }
2077 
2078   // Change the branch that used to go to AfterCallBB to branch to the first
2079   // basic block of the inlined function.
2080   //
2081   TerminatorInst *Br = OrigBB->getTerminator();
2082   assert(Br && Br->getOpcode() == Instruction::Br &&
2083          "splitBasicBlock broken!");
2084   Br->setOperand(0, &*FirstNewBlock);
2085 
2086   // Now that the function is correct, make it a little bit nicer.  In
2087   // particular, move the basic blocks inserted from the end of the function
2088   // into the space made by splitting the source basic block.
2089   Caller->getBasicBlockList().splice(AfterCallBB->getIterator(),
2090                                      Caller->getBasicBlockList(), FirstNewBlock,
2091                                      Caller->end());
2092 
2093   // Handle all of the return instructions that we just cloned in, and eliminate
2094   // any users of the original call/invoke instruction.
2095   Type *RTy = CalledFunc->getReturnType();
2096 
2097   PHINode *PHI = nullptr;
2098   if (Returns.size() > 1) {
2099     // The PHI node should go at the front of the new basic block to merge all
2100     // possible incoming values.
2101     if (!TheCall->use_empty()) {
2102       PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(),
2103                             &AfterCallBB->front());
2104       // Anything that used the result of the function call should now use the
2105       // PHI node as their operand.
2106       TheCall->replaceAllUsesWith(PHI);
2107     }
2108 
2109     // Loop over all of the return instructions adding entries to the PHI node
2110     // as appropriate.
2111     if (PHI) {
2112       for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2113         ReturnInst *RI = Returns[i];
2114         assert(RI->getReturnValue()->getType() == PHI->getType() &&
2115                "Ret value not consistent in function!");
2116         PHI->addIncoming(RI->getReturnValue(), RI->getParent());
2117       }
2118     }
2119 
2120     // Add a branch to the merge points and remove return instructions.
2121     DebugLoc Loc;
2122     for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2123       ReturnInst *RI = Returns[i];
2124       BranchInst* BI = BranchInst::Create(AfterCallBB, RI);
2125       Loc = RI->getDebugLoc();
2126       BI->setDebugLoc(Loc);
2127       RI->eraseFromParent();
2128     }
2129     // We need to set the debug location to *somewhere* inside the
2130     // inlined function. The line number may be nonsensical, but the
2131     // instruction will at least be associated with the right
2132     // function.
2133     if (CreatedBranchToNormalDest)
2134       CreatedBranchToNormalDest->setDebugLoc(Loc);
2135   } else if (!Returns.empty()) {
2136     // Otherwise, if there is exactly one return value, just replace anything
2137     // using the return value of the call with the computed value.
2138     if (!TheCall->use_empty()) {
2139       if (TheCall == Returns[0]->getReturnValue())
2140         TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
2141       else
2142         TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
2143     }
2144 
2145     // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
2146     BasicBlock *ReturnBB = Returns[0]->getParent();
2147     ReturnBB->replaceAllUsesWith(AfterCallBB);
2148 
2149     // Splice the code from the return block into the block that it will return
2150     // to, which contains the code that was after the call.
2151     AfterCallBB->getInstList().splice(AfterCallBB->begin(),
2152                                       ReturnBB->getInstList());
2153 
2154     if (CreatedBranchToNormalDest)
2155       CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());
2156 
    // Delete the return instruction and the now-empty ReturnBB.
2158     Returns[0]->eraseFromParent();
2159     ReturnBB->eraseFromParent();
2160   } else if (!TheCall->use_empty()) {
2161     // No returns, but something is using the return value of the call.  Just
2162     // nuke the result.
2163     TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
2164   }
2165 
2166   // Since we are now done with the Call/Invoke, we can delete it.
2167   TheCall->eraseFromParent();
2168 
2169   // If we inlined any musttail calls and the original return is now
2170   // unreachable, delete it.  It can only contain a bitcast and ret.
2171   if (InlinedMustTailCalls && pred_begin(AfterCallBB) == pred_end(AfterCallBB))
2172     AfterCallBB->eraseFromParent();
2173 
2174   // We should always be able to fold the entry block of the function into the
2175   // single predecessor of the block...
2176   assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
2177   BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);
2178 
2179   // Splice the code entry block into calling block, right before the
2180   // unconditional branch.
2181   CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes
2182   OrigBB->getInstList().splice(Br->getIterator(), CalleeEntry->getInstList());
2183 
2184   // Remove the unconditional branch.
2185   OrigBB->getInstList().erase(Br);
2186 
2187   // Now we can remove the CalleeEntry block, which is now empty.
2188   Caller->getBasicBlockList().erase(CalleeEntry);
2189 
2190   // If we inserted a phi node, check to see if it has a single value (e.g. all
2191   // the entries are the same or undef).  If so, remove the PHI so it doesn't
2192   // block other optimizations.
2193   if (PHI) {
2194     AssumptionCache *AC =
2195         IFI.GetAssumptionCache ? &(*IFI.GetAssumptionCache)(*Caller) : nullptr;
2196     auto &DL = Caller->getParent()->getDataLayout();
2197     if (Value *V = SimplifyInstruction(PHI, DL, nullptr, nullptr, AC)) {
2198       PHI->replaceAllUsesWith(V);
2199       PHI->eraseFromParent();
2200     }
2201   }
2202 
2203   return true;
2204 }
2205