1 //===- InlineFunction.cpp - Code to perform function inlining -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements inlining of a function into a call site, resolving
10 // parameters and the return value as appropriate.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/ADT/DenseMap.h"
15 #include "llvm/ADT/None.h"
16 #include "llvm/ADT/Optional.h"
17 #include "llvm/ADT/STLExtras.h"
18 #include "llvm/ADT/SetVector.h"
19 #include "llvm/ADT/SmallPtrSet.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/ADT/StringExtras.h"
22 #include "llvm/ADT/iterator_range.h"
23 #include "llvm/Analysis/AliasAnalysis.h"
24 #include "llvm/Analysis/AssumptionCache.h"
25 #include "llvm/Analysis/BlockFrequencyInfo.h"
26 #include "llvm/Analysis/CallGraph.h"
27 #include "llvm/Analysis/CaptureTracking.h"
28 #include "llvm/Analysis/EHPersonalities.h"
29 #include "llvm/Analysis/InstructionSimplify.h"
30 #include "llvm/Analysis/ProfileSummaryInfo.h"
31 #include "llvm/Transforms/Utils/Local.h"
32 #include "llvm/Analysis/ValueTracking.h"
33 #include "llvm/Analysis/VectorUtils.h"
34 #include "llvm/IR/Argument.h"
35 #include "llvm/IR/BasicBlock.h"
36 #include "llvm/IR/CFG.h"
37 #include "llvm/IR/CallSite.h"
38 #include "llvm/IR/Constant.h"
39 #include "llvm/IR/Constants.h"
40 #include "llvm/IR/DIBuilder.h"
41 #include "llvm/IR/DataLayout.h"
42 #include "llvm/IR/DebugInfoMetadata.h"
43 #include "llvm/IR/DebugLoc.h"
44 #include "llvm/IR/DerivedTypes.h"
45 #include "llvm/IR/Dominators.h"
46 #include "llvm/IR/Function.h"
47 #include "llvm/IR/IRBuilder.h"
48 #include "llvm/IR/InstrTypes.h"
49 #include "llvm/IR/Instruction.h"
50 #include "llvm/IR/Instructions.h"
51 #include "llvm/IR/IntrinsicInst.h"
52 #include "llvm/IR/Intrinsics.h"
53 #include "llvm/IR/LLVMContext.h"
54 #include "llvm/IR/MDBuilder.h"
55 #include "llvm/IR/Metadata.h"
56 #include "llvm/IR/Module.h"
57 #include "llvm/IR/Type.h"
58 #include "llvm/IR/User.h"
59 #include "llvm/IR/Value.h"
60 #include "llvm/Support/Casting.h"
61 #include "llvm/Support/CommandLine.h"
62 #include "llvm/Support/ErrorHandling.h"
63 #include "llvm/Transforms/Utils/Cloning.h"
64 #include "llvm/Transforms/Utils/ValueMapper.h"
65 #include <algorithm>
66 #include <cassert>
67 #include <cstdint>
68 #include <iterator>
69 #include <limits>
70 #include <string>
71 #include <utility>
72 #include <vector>
73 
74 using namespace llvm;
75 using ProfileCount = Function::ProfileCount;
76 
77 static cl::opt<bool>
78 EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
79   cl::Hidden,
80   cl::desc("Convert noalias attributes to metadata during inlining."));
81 
82 static cl::opt<bool>
83 PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining",
84   cl::init(true), cl::Hidden,
85   cl::desc("Convert align attributes to assumptions during inlining."));
86 
87 llvm::InlineResult llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI,
88                                         AAResults *CalleeAAR,
89                                         bool InsertLifetime) {
90   return InlineFunction(CallSite(CI), IFI, CalleeAAR, InsertLifetime);
91 }
92 
93 llvm::InlineResult llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI,
94                                         AAResults *CalleeAAR,
95                                         bool InsertLifetime) {
96   return InlineFunction(CallSite(II), IFI, CalleeAAR, InsertLifetime);
97 }
98 
99 namespace {
100 
101   /// A class for recording information about inlining a landing pad.
102   class LandingPadInliningInfo {
103     /// Destination of the invoke's unwind.
104     BasicBlock *OuterResumeDest;
105 
106     /// Destination for the callee's resume.
107     BasicBlock *InnerResumeDest = nullptr;
108 
109     /// LandingPadInst associated with the invoke.
110     LandingPadInst *CallerLPad = nullptr;
111 
112     /// PHI for EH values from landingpad insts.
113     PHINode *InnerEHValuesPHI = nullptr;
114 
115     SmallVector<Value*, 8> UnwindDestPHIValues;
116 
117   public:
118     LandingPadInliningInfo(InvokeInst *II)
119         : OuterResumeDest(II->getUnwindDest()) {
120       // If there are PHI nodes in the unwind destination block, we need to keep
121       // track of which values came into them from the invoke before removing
122       // the edge from this block.
123       BasicBlock *InvokeBB = II->getParent();
124       BasicBlock::iterator I = OuterResumeDest->begin();
125       for (; isa<PHINode>(I); ++I) {
126         // Save the value to use for this edge.
127         PHINode *PHI = cast<PHINode>(I);
128         UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
129       }
130 
131       CallerLPad = cast<LandingPadInst>(I);
132     }
133 
134     /// The outer unwind destination is the target of
135     /// unwind edges introduced for calls within the inlined function.
136     BasicBlock *getOuterResumeDest() const {
137       return OuterResumeDest;
138     }
139 
140     BasicBlock *getInnerResumeDest();
141 
142     LandingPadInst *getLandingPadInst() const { return CallerLPad; }
143 
144     /// Forward the 'resume' instruction to the caller's landing pad block.
145     /// When the landing pad block has only one predecessor, this is
146     /// a simple branch. When there is more than one predecessor, we need to
147     /// split the landing pad block after the landingpad instruction and jump
148     /// to there.
149     void forwardResume(ResumeInst *RI,
150                        SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);
151 
152     /// Add incoming-PHI values to the unwind destination block for the given
153     /// basic block, using the values for the original invoke's source block.
154     void addIncomingPHIValuesFor(BasicBlock *BB) const {
155       addIncomingPHIValuesForInto(BB, OuterResumeDest);
156     }
157 
158     void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
159       BasicBlock::iterator I = dest->begin();
160       for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
161         PHINode *phi = cast<PHINode>(I);
162         phi->addIncoming(UnwindDestPHIValues[i], src);
163       }
164     }
165   };
166 
167 } // end anonymous namespace
168 
169 /// Get or create a target for the branch from ResumeInsts.
170 BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
171   if (InnerResumeDest) return InnerResumeDest;
172 
173   // Split the landing pad.
174   BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator();
175   InnerResumeDest =
176     OuterResumeDest->splitBasicBlock(SplitPoint,
177                                      OuterResumeDest->getName() + ".body");
178 
179   // The number of incoming edges we expect to the inner landing pad.
180   const unsigned PHICapacity = 2;
181 
182   // Create corresponding new PHIs for all the PHIs in the outer landing pad.
183   Instruction *InsertPoint = &InnerResumeDest->front();
184   BasicBlock::iterator I = OuterResumeDest->begin();
185   for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
186     PHINode *OuterPHI = cast<PHINode>(I);
187     PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
188                                         OuterPHI->getName() + ".lpad-body",
189                                         InsertPoint);
190     OuterPHI->replaceAllUsesWith(InnerPHI);
191     InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
192   }
193 
194   // Create a PHI for the exception values.
195   InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
196                                      "eh.lpad-body", InsertPoint);
197   CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
198   InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);
199 
200   // All done.
201   return InnerResumeDest;
202 }
203 
204 /// Forward the 'resume' instruction to the caller's landing pad block.
205 /// When the landing pad block has only one predecessor, this is a simple
206 /// branch. When there is more than one predecessor, we need to split the
207 /// landing pad block after the landingpad instruction and jump to there.
208 void LandingPadInliningInfo::forwardResume(
209     ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) {
210   BasicBlock *Dest = getInnerResumeDest();
211   BasicBlock *Src = RI->getParent();
212 
213   BranchInst::Create(Dest, Src);
214 
215   // Update the PHIs in the destination. They were inserted in an order which
216   // makes this work.
217   addIncomingPHIValuesForInto(Src, Dest);
218 
219   InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
220   RI->eraseFromParent();
221 }
222 
223 /// Helper for getUnwindDestToken/getUnwindDestTokenHelper.
224 static Value *getParentPad(Value *EHPad) {
225   if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
226     return FPI->getParentPad();
227   return cast<CatchSwitchInst>(EHPad)->getParentPad();
228 }
229 
230 using UnwindDestMemoTy = DenseMap<Instruction *, Value *>;
231 
232 /// Helper for getUnwindDestToken that does the descendant-ward part of
233 /// the search.
234 static Value *getUnwindDestTokenHelper(Instruction *EHPad,
235                                        UnwindDestMemoTy &MemoMap) {
236   SmallVector<Instruction *, 8> Worklist(1, EHPad);
237 
238   while (!Worklist.empty()) {
239     Instruction *CurrentPad = Worklist.pop_back_val();
240     // We only put pads on the worklist that aren't in the MemoMap.  When
241     // we find an unwind dest for a pad we may update its ancestors, but
242     // the queue only ever contains uncles/great-uncles/etc. of CurrentPad,
243     // so they should never get updated while queued on the worklist.
244     assert(!MemoMap.count(CurrentPad));
245     Value *UnwindDestToken = nullptr;
246     if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
247       if (CatchSwitch->hasUnwindDest()) {
248         UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI();
249       } else {
250         // Catchswitch doesn't have a 'nounwind' variant, and one might be
251         // annotated as "unwinds to caller" when really it's nounwind (see
252         // e.g. SimplifyCFGOpt::SimplifyUnreachable), so we can't infer the
253         // parent's unwind dest from this.  We can check its catchpads'
254         // descendants, since they might include a cleanuppad with an
255         // "unwinds to caller" cleanupret, which can be trusted.
256         for (auto HI = CatchSwitch->handler_begin(),
257                   HE = CatchSwitch->handler_end();
258              HI != HE && !UnwindDestToken; ++HI) {
259           BasicBlock *HandlerBlock = *HI;
260           auto *CatchPad = cast<CatchPadInst>(HandlerBlock->getFirstNonPHI());
261           for (User *Child : CatchPad->users()) {
262             // Intentionally ignore invokes here -- since the catchswitch is
263             // marked "unwind to caller", it would be a verifier error if it
264             // contained an invoke which unwinds out of it, so any invoke we'd
265             // encounter must unwind to some child of the catch.
266             if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
267               continue;
268 
269             Instruction *ChildPad = cast<Instruction>(Child);
270             auto Memo = MemoMap.find(ChildPad);
271             if (Memo == MemoMap.end()) {
272               // Haven't figured out this child pad yet; queue it.
273               Worklist.push_back(ChildPad);
274               continue;
275             }
276             // We've already checked this child, but might have found that
277             // it offers no proof either way.
278             Value *ChildUnwindDestToken = Memo->second;
279             if (!ChildUnwindDestToken)
280               continue;
281             // We already know the child's unwind dest, which can either
282             // be ConstantTokenNone to indicate unwind to caller, or can
283             // be another child of the catchpad.  Only the former indicates
284             // the unwind dest of the catchswitch.
285             if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
286               UnwindDestToken = ChildUnwindDestToken;
287               break;
288             }
289             assert(getParentPad(ChildUnwindDestToken) == CatchPad);
290           }
291         }
292       }
293     } else {
294       auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
295       for (User *U : CleanupPad->users()) {
296         if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
297           if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
298             UnwindDestToken = RetUnwindDest->getFirstNonPHI();
299           else
300             UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext());
301           break;
302         }
303         Value *ChildUnwindDestToken;
304         if (auto *Invoke = dyn_cast<InvokeInst>(U)) {
305           ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI();
306         } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
307           Instruction *ChildPad = cast<Instruction>(U);
308           auto Memo = MemoMap.find(ChildPad);
309           if (Memo == MemoMap.end()) {
310             // Haven't resolved this child yet; queue it and keep searching.
311             Worklist.push_back(ChildPad);
312             continue;
313           }
314           // We've checked this child, but still need to ignore it if it
315           // had no proof either way.
316           ChildUnwindDestToken = Memo->second;
317           if (!ChildUnwindDestToken)
318             continue;
319         } else {
320           // Not a relevant user of the cleanuppad
321           continue;
322         }
323         // In a well-formed program, the child/invoke must either unwind to
324         // an(other) child of the cleanup, or exit the cleanup.  In the
325         // first case, continue searching.
326         if (isa<Instruction>(ChildUnwindDestToken) &&
327             getParentPad(ChildUnwindDestToken) == CleanupPad)
328           continue;
329         UnwindDestToken = ChildUnwindDestToken;
330         break;
331       }
332     }
333     // If we haven't found an unwind dest for CurrentPad, we may have queued its
334     // children, so move on to the next in the worklist.
335     if (!UnwindDestToken)
336       continue;
337 
338     // Now we know that CurrentPad unwinds to UnwindDestToken.  It also exits
339     // any ancestors of CurrentPad up to but not including UnwindDestToken's
340     // parent pad.  Record this in the memo map, and check to see if the
341     // original EHPad being queried is one of the ones exited.
342     Value *UnwindParent;
343     if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
344       UnwindParent = getParentPad(UnwindPad);
345     else
346       UnwindParent = nullptr;
347     bool ExitedOriginalPad = false;
348     for (Instruction *ExitedPad = CurrentPad;
349          ExitedPad && ExitedPad != UnwindParent;
350          ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) {
351       // Skip over catchpads since they just follow their catchswitches.
352       if (isa<CatchPadInst>(ExitedPad))
353         continue;
354       MemoMap[ExitedPad] = UnwindDestToken;
355       ExitedOriginalPad |= (ExitedPad == EHPad);
356     }
357 
358     if (ExitedOriginalPad)
359       return UnwindDestToken;
360 
361     // Continue the search.
362   }
363 
364   // No definitive information is contained within this funclet.
365   return nullptr;
366 }
367 
368 /// Given an EH pad, find where it unwinds.  If it unwinds to an EH pad,
369 /// return that pad instruction.  If it unwinds to caller, return
370 /// ConstantTokenNone.  If it does not have a definitive unwind destination,
371 /// return nullptr.
372 ///
373 /// This routine gets invoked for calls in funclets in inlinees when inlining
374 /// an invoke.  Since many funclets don't have calls inside them, it's queried
375 /// on-demand rather than building a map of pads to unwind dests up front.
376 /// Determining a funclet's unwind dest may require recursively searching its
377 /// descendants, and also ancestors and cousins if the descendants don't provide
378 /// an answer.  Since most funclets will have their unwind dest immediately
379 /// available as the unwind dest of a catchswitch or cleanupret, this routine
380 /// searches top-down from the given pad and then up. To avoid worst-case
381 /// quadratic run-time given that approach, it uses a memo map to avoid
382 /// re-processing funclet trees.  The callers that rewrite the IR as they go
383 /// take advantage of this, for correctness, by checking/forcing rewritten
384 /// pads' entries to match the original callee view.
385 static Value *getUnwindDestToken(Instruction *EHPad,
386                                  UnwindDestMemoTy &MemoMap) {
387   // Catchpads unwind to the same place as their catchswitch;
388   // redirct any queries on catchpads so the code below can
389   // deal with just catchswitches and cleanuppads.
390   if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))
391     EHPad = CPI->getCatchSwitch();
392 
393   // Check if we've already determined the unwind dest for this pad.
394   auto Memo = MemoMap.find(EHPad);
395   if (Memo != MemoMap.end())
396     return Memo->second;
397 
398   // Search EHPad and, if necessary, its descendants.
399   Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
400   assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));
401   if (UnwindDestToken)
402     return UnwindDestToken;
403 
404   // No information is available for this EHPad from itself or any of its
405   // descendants.  An unwind all the way out to a pad in the caller would
406   // need also to agree with the unwind dest of the parent funclet, so
407   // search up the chain to try to find a funclet with information.  Put
408   // null entries in the memo map to avoid re-processing as we go up.
409   MemoMap[EHPad] = nullptr;
410 #ifndef NDEBUG
411   SmallPtrSet<Instruction *, 4> TempMemos;
412   TempMemos.insert(EHPad);
413 #endif
414   Instruction *LastUselessPad = EHPad;
415   Value *AncestorToken;
416   for (AncestorToken = getParentPad(EHPad);
417        auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
418        AncestorToken = getParentPad(AncestorToken)) {
419     // Skip over catchpads since they just follow their catchswitches.
420     if (isa<CatchPadInst>(AncestorPad))
421       continue;
422     // If the MemoMap had an entry mapping AncestorPad to nullptr, since we
423     // haven't yet called getUnwindDestTokenHelper for AncestorPad in this
424     // call to getUnwindDestToken, that would mean that AncestorPad had no
425     // information in itself, its descendants, or its ancestors.  If that
426     // were the case, then we should also have recorded the lack of information
427     // for the descendant that we're coming from.  So assert that we don't
428     // find a null entry in the MemoMap for AncestorPad.
429     assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);
430     auto AncestorMemo = MemoMap.find(AncestorPad);
431     if (AncestorMemo == MemoMap.end()) {
432       UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
433     } else {
434       UnwindDestToken = AncestorMemo->second;
435     }
436     if (UnwindDestToken)
437       break;
438     LastUselessPad = AncestorPad;
439     MemoMap[LastUselessPad] = nullptr;
440 #ifndef NDEBUG
441     TempMemos.insert(LastUselessPad);
442 #endif
443   }
444 
445   // We know that getUnwindDestTokenHelper was called on LastUselessPad and
446   // returned nullptr (and likewise for EHPad and any of its ancestors up to
447   // LastUselessPad), so LastUselessPad has no information from below.  Since
448   // getUnwindDestTokenHelper must investigate all downward paths through
449   // no-information nodes to prove that a node has no information like this,
450   // and since any time it finds information it records it in the MemoMap for
451   // not just the immediately-containing funclet but also any ancestors also
452   // exited, it must be the case that, walking downward from LastUselessPad,
453   // visiting just those nodes which have not been mapped to an unwind dest
454   // by getUnwindDestTokenHelper (the nullptr TempMemos notwithstanding, since
455   // they are just used to keep getUnwindDestTokenHelper from repeating work),
456   // any node visited must have been exhaustively searched with no information
457   // for it found.
458   SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
459   while (!Worklist.empty()) {
460     Instruction *UselessPad = Worklist.pop_back_val();
461     auto Memo = MemoMap.find(UselessPad);
462     if (Memo != MemoMap.end() && Memo->second) {
463       // Here the name 'UselessPad' is a bit of a misnomer, because we've found
464       // that it is a funclet that does have information about unwinding to
465       // a particular destination; its parent was a useless pad.
466       // Since its parent has no information, the unwind edge must not escape
467       // the parent, and must target a sibling of this pad.  This local unwind
468       // gives us no information about EHPad.  Leave it and the subtree rooted
469       // at it alone.
470       assert(getParentPad(Memo->second) == getParentPad(UselessPad));
471       continue;
472     }
473     // We know we don't have information for UselesPad.  If it has an entry in
474     // the MemoMap (mapping it to nullptr), it must be one of the TempMemos
475     // added on this invocation of getUnwindDestToken; if a previous invocation
476     // recorded nullptr, it would have had to prove that the ancestors of
477     // UselessPad, which include LastUselessPad, had no information, and that
478     // in turn would have required proving that the descendants of
479     // LastUselesPad, which include EHPad, have no information about
480     // LastUselessPad, which would imply that EHPad was mapped to nullptr in
481     // the MemoMap on that invocation, which isn't the case if we got here.
482     assert(!MemoMap.count(UselessPad) || TempMemos.count(UselessPad));
483     // Assert as we enumerate users that 'UselessPad' doesn't have any unwind
484     // information that we'd be contradicting by making a map entry for it
485     // (which is something that getUnwindDestTokenHelper must have proved for
486     // us to get here).  Just assert on is direct users here; the checks in
487     // this downward walk at its descendants will verify that they don't have
488     // any unwind edges that exit 'UselessPad' either (i.e. they either have no
489     // unwind edges or unwind to a sibling).
490     MemoMap[UselessPad] = UnwindDestToken;
491     if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
492       assert(CatchSwitch->getUnwindDest() == nullptr && "Expected useless pad");
493       for (BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
494         auto *CatchPad = HandlerBlock->getFirstNonPHI();
495         for (User *U : CatchPad->users()) {
496           assert(
497               (!isa<InvokeInst>(U) ||
498                (getParentPad(
499                     cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
500                 CatchPad)) &&
501               "Expected useless pad");
502           if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
503             Worklist.push_back(cast<Instruction>(U));
504         }
505       }
506     } else {
507       assert(isa<CleanupPadInst>(UselessPad));
508       for (User *U : UselessPad->users()) {
509         assert(!isa<CleanupReturnInst>(U) && "Expected useless pad");
510         assert((!isa<InvokeInst>(U) ||
511                 (getParentPad(
512                      cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
513                  UselessPad)) &&
514                "Expected useless pad");
515         if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
516           Worklist.push_back(cast<Instruction>(U));
517       }
518     }
519   }
520 
521   return UnwindDestToken;
522 }
523 
524 /// When we inline a basic block into an invoke,
525 /// we have to turn all of the calls that can throw into invokes.
526 /// This function analyze BB to see if there are any calls, and if so,
527 /// it rewrites them to be invokes that jump to InvokeDest and fills in the PHI
528 /// nodes in that block with the values specified in InvokeDestPHIValues.
529 static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
530     BasicBlock *BB, BasicBlock *UnwindEdge,
531     UnwindDestMemoTy *FuncletUnwindMap = nullptr) {
532   for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
533     Instruction *I = &*BBI++;
534 
535     // We only need to check for function calls: inlined invoke
536     // instructions require no special handling.
537     CallInst *CI = dyn_cast<CallInst>(I);
538 
539     if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
540       continue;
541 
542     // We do not need to (and in fact, cannot) convert possibly throwing calls
543     // to @llvm.experimental_deoptimize (resp. @llvm.experimental.guard) into
544     // invokes.  The caller's "segment" of the deoptimization continuation
545     // attached to the newly inlined @llvm.experimental_deoptimize
546     // (resp. @llvm.experimental.guard) call should contain the exception
547     // handling logic, if any.
548     if (auto *F = CI->getCalledFunction())
549       if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
550           F->getIntrinsicID() == Intrinsic::experimental_guard)
551         continue;
552 
553     if (auto FuncletBundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
554       // This call is nested inside a funclet.  If that funclet has an unwind
555       // destination within the inlinee, then unwinding out of this call would
556       // be UB.  Rewriting this call to an invoke which targets the inlined
557       // invoke's unwind dest would give the call's parent funclet multiple
558       // unwind destinations, which is something that subsequent EH table
559       // generation can't handle and that the veirifer rejects.  So when we
560       // see such a call, leave it as a call.
561       auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
562       Value *UnwindDestToken =
563           getUnwindDestToken(FuncletPad, *FuncletUnwindMap);
564       if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
565         continue;
566 #ifndef NDEBUG
567       Instruction *MemoKey;
568       if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
569         MemoKey = CatchPad->getCatchSwitch();
570       else
571         MemoKey = FuncletPad;
572       assert(FuncletUnwindMap->count(MemoKey) &&
573              (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
574              "must get memoized to avoid confusing later searches");
575 #endif // NDEBUG
576     }
577 
578     changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);
579     return BB;
580   }
581   return nullptr;
582 }
583 
584 /// If we inlined an invoke site, we need to convert calls
585 /// in the body of the inlined function into invokes.
586 ///
587 /// II is the invoke instruction being inlined.  FirstNewBlock is the first
588 /// block of the inlined code (the last block is the end of the function),
589 /// and InlineCodeInfo is information about the code that got inlined.
590 static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock,
591                                     ClonedCodeInfo &InlinedCodeInfo) {
592   BasicBlock *InvokeDest = II->getUnwindDest();
593 
594   Function *Caller = FirstNewBlock->getParent();
595 
596   // The inlined code is currently at the end of the function, scan from the
597   // start of the inlined code to its end, checking for stuff we need to
598   // rewrite.
599   LandingPadInliningInfo Invoke(II);
600 
601   // Get all of the inlined landing pad instructions.
602   SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
603   for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end();
604        I != E; ++I)
605     if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
606       InlinedLPads.insert(II->getLandingPadInst());
607 
608   // Append the clauses from the outer landing pad instruction into the inlined
609   // landing pad instructions.
610   LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
611   for (LandingPadInst *InlinedLPad : InlinedLPads) {
612     unsigned OuterNum = OuterLPad->getNumClauses();
613     InlinedLPad->reserveClauses(OuterNum);
614     for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
615       InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
616     if (OuterLPad->isCleanup())
617       InlinedLPad->setCleanup(true);
618   }
619 
620   for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
621        BB != E; ++BB) {
622     if (InlinedCodeInfo.ContainsCalls)
623       if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
624               &*BB, Invoke.getOuterResumeDest()))
625         // Update any PHI nodes in the exceptional block to indicate that there
626         // is now a new entry in them.
627         Invoke.addIncomingPHIValuesFor(NewBB);
628 
629     // Forward any resumes that are remaining here.
630     if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
631       Invoke.forwardResume(RI, InlinedLPads);
632   }
633 
634   // Now that everything is happy, we have one final detail.  The PHI nodes in
635   // the exception destination block still have entries due to the original
636   // invoke instruction. Eliminate these entries (which might even delete the
637   // PHI node) now.
638   InvokeDest->removePredecessor(II->getParent());
639 }
640 
641 /// If we inlined an invoke site, we need to convert calls
642 /// in the body of the inlined function into invokes.
643 ///
644 /// II is the invoke instruction being inlined.  FirstNewBlock is the first
645 /// block of the inlined code (the last block is the end of the function),
646 /// and InlineCodeInfo is information about the code that got inlined.
647 static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock,
648                                ClonedCodeInfo &InlinedCodeInfo) {
649   BasicBlock *UnwindDest = II->getUnwindDest();
650   Function *Caller = FirstNewBlock->getParent();
651 
652   assert(UnwindDest->getFirstNonPHI()->isEHPad() && "unexpected BasicBlock!");
653 
654   // If there are PHI nodes in the unwind destination block, we need to keep
655   // track of which values came into them from the invoke before removing the
656   // edge from this block.
657   SmallVector<Value *, 8> UnwindDestPHIValues;
658   BasicBlock *InvokeBB = II->getParent();
659   for (Instruction &I : *UnwindDest) {
660     // Save the value to use for this edge.
661     PHINode *PHI = dyn_cast<PHINode>(&I);
662     if (!PHI)
663       break;
664     UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
665   }
666 
667   // Add incoming-PHI values to the unwind destination block for the given basic
668   // block, using the values for the original invoke's source block.
669   auto UpdatePHINodes = [&](BasicBlock *Src) {
670     BasicBlock::iterator I = UnwindDest->begin();
671     for (Value *V : UnwindDestPHIValues) {
672       PHINode *PHI = cast<PHINode>(I);
673       PHI->addIncoming(V, Src);
674       ++I;
675     }
676   };
677 
678   // This connects all the instructions which 'unwind to caller' to the invoke
679   // destination.
680   UnwindDestMemoTy FuncletUnwindMap;
681   for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
682        BB != E; ++BB) {
683     if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
684       if (CRI->unwindsToCaller()) {
685         auto *CleanupPad = CRI->getCleanupPad();
686         CleanupReturnInst::Create(CleanupPad, UnwindDest, CRI);
687         CRI->eraseFromParent();
688         UpdatePHINodes(&*BB);
689         // Finding a cleanupret with an unwind destination would confuse
690         // subsequent calls to getUnwindDestToken, so map the cleanuppad
691         // to short-circuit any such calls and recognize this as an "unwind
692         // to caller" cleanup.
693         assert(!FuncletUnwindMap.count(CleanupPad) ||
694                isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
695         FuncletUnwindMap[CleanupPad] =
696             ConstantTokenNone::get(Caller->getContext());
697       }
698     }
699 
700     Instruction *I = BB->getFirstNonPHI();
701     if (!I->isEHPad())
702       continue;
703 
704     Instruction *Replacement = nullptr;
705     if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
706       if (CatchSwitch->unwindsToCaller()) {
707         Value *UnwindDestToken;
708         if (auto *ParentPad =
709                 dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
710           // This catchswitch is nested inside another funclet.  If that
711           // funclet has an unwind destination within the inlinee, then
712           // unwinding out of this catchswitch would be UB.  Rewriting this
713           // catchswitch to unwind to the inlined invoke's unwind dest would
714           // give the parent funclet multiple unwind destinations, which is
715           // something that subsequent EH table generation can't handle and
716           // that the veirifer rejects.  So when we see such a call, leave it
717           // as "unwind to caller".
718           UnwindDestToken = getUnwindDestToken(ParentPad, FuncletUnwindMap);
719           if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
720             continue;
721         } else {
722           // This catchswitch has no parent to inherit constraints from, and
723           // none of its descendants can have an unwind edge that exits it and
724           // targets another funclet in the inlinee.  It may or may not have a
725           // descendant that definitively has an unwind to caller.  In either
726           // case, we'll have to assume that any unwinds out of it may need to
727           // be routed to the caller, so treat it as though it has a definitive
728           // unwind to caller.
729           UnwindDestToken = ConstantTokenNone::get(Caller->getContext());
730         }
731         auto *NewCatchSwitch = CatchSwitchInst::Create(
732             CatchSwitch->getParentPad(), UnwindDest,
733             CatchSwitch->getNumHandlers(), CatchSwitch->getName(),
734             CatchSwitch);
735         for (BasicBlock *PadBB : CatchSwitch->handlers())
736           NewCatchSwitch->addHandler(PadBB);
737         // Propagate info for the old catchswitch over to the new one in
738         // the unwind map.  This also serves to short-circuit any subsequent
739         // checks for the unwind dest of this catchswitch, which would get
740         // confused if they found the outer handler in the callee.
741         FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
742         Replacement = NewCatchSwitch;
743       }
744     } else if (!isa<FuncletPadInst>(I)) {
745       llvm_unreachable("unexpected EHPad!");
746     }
747 
748     if (Replacement) {
749       Replacement->takeName(I);
750       I->replaceAllUsesWith(Replacement);
751       I->eraseFromParent();
752       UpdatePHINodes(&*BB);
753     }
754   }
755 
756   if (InlinedCodeInfo.ContainsCalls)
757     for (Function::iterator BB = FirstNewBlock->getIterator(),
758                             E = Caller->end();
759          BB != E; ++BB)
760       if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
761               &*BB, UnwindDest, &FuncletUnwindMap))
762         // Update any PHI nodes in the exceptional block to indicate that there
763         // is now a new entry in them.
764         UpdatePHINodes(NewBB);
765 
766   // Now that everything is happy, we have one final detail.  The PHI nodes in
767   // the exception destination block still have entries due to the original
768   // invoke instruction. Eliminate these entries (which might even delete the
769   // PHI node) now.
770   UnwindDest->removePredecessor(InvokeBB);
771 }
772 
773 /// When inlining a call site that has !llvm.mem.parallel_loop_access or
774 /// llvm.access.group metadata, that metadata should be propagated to all
775 /// memory-accessing cloned instructions.
776 static void PropagateParallelLoopAccessMetadata(CallSite CS,
777                                                 ValueToValueMapTy &VMap) {
778   MDNode *M =
779     CS.getInstruction()->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
780   MDNode *CallAccessGroup =
781       CS.getInstruction()->getMetadata(LLVMContext::MD_access_group);
782   if (!M && !CallAccessGroup)
783     return;
784 
785   for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
786        VMI != VMIE; ++VMI) {
787     if (!VMI->second)
788       continue;
789 
790     Instruction *NI = dyn_cast<Instruction>(VMI->second);
791     if (!NI)
792       continue;
793 
794     if (M) {
795       if (MDNode *PM =
796               NI->getMetadata(LLVMContext::MD_mem_parallel_loop_access)) {
797         M = MDNode::concatenate(PM, M);
798       NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
799       } else if (NI->mayReadOrWriteMemory()) {
800         NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
801       }
802     }
803 
804     if (NI->mayReadOrWriteMemory()) {
805       MDNode *UnitedAccGroups = uniteAccessGroups(
806           NI->getMetadata(LLVMContext::MD_access_group), CallAccessGroup);
807       NI->setMetadata(LLVMContext::MD_access_group, UnitedAccGroups);
808     }
809   }
810 }
811 
812 /// When inlining a function that contains noalias scope metadata,
813 /// this metadata needs to be cloned so that the inlined blocks
814 /// have different "unique scopes" at every call site. Were this not done, then
815 /// aliasing scopes from a function inlined into a caller multiple times could
816 /// not be differentiated (and this would lead to miscompiles because the
817 /// non-aliasing property communicated by the metadata could have
818 /// call-site-specific control dependencies).
819 static void CloneAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap) {
820   const Function *CalledFunc = CS.getCalledFunction();
821   SetVector<const MDNode *> MD;
822 
823   // Note: We could only clone the metadata if it is already used in the
824   // caller. I'm omitting that check here because it might confuse
825   // inter-procedural alias analysis passes. We can revisit this if it becomes
826   // an efficiency or overhead problem.
827 
828   for (const BasicBlock &I : *CalledFunc)
829     for (const Instruction &J : I) {
830       if (const MDNode *M = J.getMetadata(LLVMContext::MD_alias_scope))
831         MD.insert(M);
832       if (const MDNode *M = J.getMetadata(LLVMContext::MD_noalias))
833         MD.insert(M);
834     }
835 
836   if (MD.empty())
837     return;
838 
839   // Walk the existing metadata, adding the complete (perhaps cyclic) chain to
840   // the set.
841   SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
842   while (!Queue.empty()) {
843     const MDNode *M = cast<MDNode>(Queue.pop_back_val());
844     for (unsigned i = 0, ie = M->getNumOperands(); i != ie; ++i)
845       if (const MDNode *M1 = dyn_cast<MDNode>(M->getOperand(i)))
846         if (MD.insert(M1))
847           Queue.push_back(M1);
848   }
849 
850   // Now we have a complete set of all metadata in the chains used to specify
851   // the noalias scopes and the lists of those scopes.
852   SmallVector<TempMDTuple, 16> DummyNodes;
853   DenseMap<const MDNode *, TrackingMDNodeRef> MDMap;
854   for (const MDNode *I : MD) {
855     DummyNodes.push_back(MDTuple::getTemporary(CalledFunc->getContext(), None));
856     MDMap[I].reset(DummyNodes.back().get());
857   }
858 
859   // Create new metadata nodes to replace the dummy nodes, replacing old
860   // metadata references with either a dummy node or an already-created new
861   // node.
862   for (const MDNode *I : MD) {
863     SmallVector<Metadata *, 4> NewOps;
864     for (unsigned i = 0, ie = I->getNumOperands(); i != ie; ++i) {
865       const Metadata *V = I->getOperand(i);
866       if (const MDNode *M = dyn_cast<MDNode>(V))
867         NewOps.push_back(MDMap[M]);
868       else
869         NewOps.push_back(const_cast<Metadata *>(V));
870     }
871 
872     MDNode *NewM = MDNode::get(CalledFunc->getContext(), NewOps);
873     MDTuple *TempM = cast<MDTuple>(MDMap[I]);
874     assert(TempM->isTemporary() && "Expected temporary node");
875 
876     TempM->replaceAllUsesWith(NewM);
877   }
878 
879   // Now replace the metadata in the new inlined instructions with the
880   // repacements from the map.
881   for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
882        VMI != VMIE; ++VMI) {
883     if (!VMI->second)
884       continue;
885 
886     Instruction *NI = dyn_cast<Instruction>(VMI->second);
887     if (!NI)
888       continue;
889 
890     if (MDNode *M = NI->getMetadata(LLVMContext::MD_alias_scope)) {
891       MDNode *NewMD = MDMap[M];
892       // If the call site also had alias scope metadata (a list of scopes to
893       // which instructions inside it might belong), propagate those scopes to
894       // the inlined instructions.
895       if (MDNode *CSM =
896               CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
897         NewMD = MDNode::concatenate(NewMD, CSM);
898       NI->setMetadata(LLVMContext::MD_alias_scope, NewMD);
899     } else if (NI->mayReadOrWriteMemory()) {
900       if (MDNode *M =
901               CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
902         NI->setMetadata(LLVMContext::MD_alias_scope, M);
903     }
904 
905     if (MDNode *M = NI->getMetadata(LLVMContext::MD_noalias)) {
906       MDNode *NewMD = MDMap[M];
907       // If the call site also had noalias metadata (a list of scopes with
908       // which instructions inside it don't alias), propagate those scopes to
909       // the inlined instructions.
910       if (MDNode *CSM =
911               CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
912         NewMD = MDNode::concatenate(NewMD, CSM);
913       NI->setMetadata(LLVMContext::MD_noalias, NewMD);
914     } else if (NI->mayReadOrWriteMemory()) {
915       if (MDNode *M = CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
916         NI->setMetadata(LLVMContext::MD_noalias, M);
917     }
918   }
919 }
920 
921 /// If the inlined function has noalias arguments,
922 /// then add new alias scopes for each noalias argument, tag the mapped noalias
923 /// parameters with noalias metadata specifying the new scope, and tag all
924 /// non-derived loads, stores and memory intrinsics with the new alias scopes.
925 static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap,
926                                   const DataLayout &DL, AAResults *CalleeAAR) {
927   if (!EnableNoAliasConversion)
928     return;
929 
930   const Function *CalledFunc = CS.getCalledFunction();
931   SmallVector<const Argument *, 4> NoAliasArgs;
932 
933   for (const Argument &Arg : CalledFunc->args())
934     if (Arg.hasNoAliasAttr() && !Arg.use_empty())
935       NoAliasArgs.push_back(&Arg);
936 
937   if (NoAliasArgs.empty())
938     return;
939 
940   // To do a good job, if a noalias variable is captured, we need to know if
941   // the capture point dominates the particular use we're considering.
942   DominatorTree DT;
943   DT.recalculate(const_cast<Function&>(*CalledFunc));
944 
945   // noalias indicates that pointer values based on the argument do not alias
946   // pointer values which are not based on it. So we add a new "scope" for each
947   // noalias function argument. Accesses using pointers based on that argument
948   // become part of that alias scope, accesses using pointers not based on that
949   // argument are tagged as noalias with that scope.
950 
951   DenseMap<const Argument *, MDNode *> NewScopes;
952   MDBuilder MDB(CalledFunc->getContext());
953 
954   // Create a new scope domain for this function.
955   MDNode *NewDomain =
956     MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
957   for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
958     const Argument *A = NoAliasArgs[i];
959 
960     std::string Name = CalledFunc->getName();
961     if (A->hasName()) {
962       Name += ": %";
963       Name += A->getName();
964     } else {
965       Name += ": argument ";
966       Name += utostr(i);
967     }
968 
969     // Note: We always create a new anonymous root here. This is true regardless
970     // of the linkage of the callee because the aliasing "scope" is not just a
971     // property of the callee, but also all control dependencies in the caller.
972     MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
973     NewScopes.insert(std::make_pair(A, NewScope));
974   }
975 
976   // Iterate over all new instructions in the map; for all memory-access
977   // instructions, add the alias scope metadata.
978   for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
979        VMI != VMIE; ++VMI) {
980     if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
981       if (!VMI->second)
982         continue;
983 
984       Instruction *NI = dyn_cast<Instruction>(VMI->second);
985       if (!NI)
986         continue;
987 
988       bool IsArgMemOnlyCall = false, IsFuncCall = false;
989       SmallVector<const Value *, 2> PtrArgs;
990 
991       if (const LoadInst *LI = dyn_cast<LoadInst>(I))
992         PtrArgs.push_back(LI->getPointerOperand());
993       else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
994         PtrArgs.push_back(SI->getPointerOperand());
995       else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
996         PtrArgs.push_back(VAAI->getPointerOperand());
997       else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
998         PtrArgs.push_back(CXI->getPointerOperand());
999       else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
1000         PtrArgs.push_back(RMWI->getPointerOperand());
1001       else if (const auto *Call = dyn_cast<CallBase>(I)) {
1002         // If we know that the call does not access memory, then we'll still
1003         // know that about the inlined clone of this call site, and we don't
1004         // need to add metadata.
1005         if (Call->doesNotAccessMemory())
1006           continue;
1007 
1008         IsFuncCall = true;
1009         if (CalleeAAR) {
1010           FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(Call);
1011           if (MRB == FMRB_OnlyAccessesArgumentPointees ||
1012               MRB == FMRB_OnlyReadsArgumentPointees)
1013             IsArgMemOnlyCall = true;
1014         }
1015 
1016         for (Value *Arg : Call->args()) {
1017           // We need to check the underlying objects of all arguments, not just
1018           // the pointer arguments, because we might be passing pointers as
1019           // integers, etc.
1020           // However, if we know that the call only accesses pointer arguments,
1021           // then we only need to check the pointer arguments.
1022           if (IsArgMemOnlyCall && !Arg->getType()->isPointerTy())
1023             continue;
1024 
1025           PtrArgs.push_back(Arg);
1026         }
1027       }
1028 
1029       // If we found no pointers, then this instruction is not suitable for
1030       // pairing with an instruction to receive aliasing metadata.
1031       // However, if this is a call, this we might just alias with none of the
1032       // noalias arguments.
1033       if (PtrArgs.empty() && !IsFuncCall)
1034         continue;
1035 
1036       // It is possible that there is only one underlying object, but you
1037       // need to go through several PHIs to see it, and thus could be
1038       // repeated in the Objects list.
1039       SmallPtrSet<const Value *, 4> ObjSet;
1040       SmallVector<Metadata *, 4> Scopes, NoAliases;
1041 
1042       SmallSetVector<const Argument *, 4> NAPtrArgs;
1043       for (const Value *V : PtrArgs) {
1044         SmallVector<Value *, 4> Objects;
1045         GetUnderlyingObjects(const_cast<Value*>(V),
1046                              Objects, DL, /* LI = */ nullptr);
1047 
1048         for (Value *O : Objects)
1049           ObjSet.insert(O);
1050       }
1051 
1052       // Figure out if we're derived from anything that is not a noalias
1053       // argument.
1054       bool CanDeriveViaCapture = false, UsesAliasingPtr = false;
1055       for (const Value *V : ObjSet) {
1056         // Is this value a constant that cannot be derived from any pointer
1057         // value (we need to exclude constant expressions, for example, that
1058         // are formed from arithmetic on global symbols).
1059         bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
1060                              isa<ConstantPointerNull>(V) ||
1061                              isa<ConstantDataVector>(V) || isa<UndefValue>(V);
1062         if (IsNonPtrConst)
1063           continue;
1064 
1065         // If this is anything other than a noalias argument, then we cannot
1066         // completely describe the aliasing properties using alias.scope
1067         // metadata (and, thus, won't add any).
1068         if (const Argument *A = dyn_cast<Argument>(V)) {
1069           if (!A->hasNoAliasAttr())
1070             UsesAliasingPtr = true;
1071         } else {
1072           UsesAliasingPtr = true;
1073         }
1074 
1075         // If this is not some identified function-local object (which cannot
1076         // directly alias a noalias argument), or some other argument (which,
1077         // by definition, also cannot alias a noalias argument), then we could
1078         // alias a noalias argument that has been captured).
1079         if (!isa<Argument>(V) &&
1080             !isIdentifiedFunctionLocal(const_cast<Value*>(V)))
1081           CanDeriveViaCapture = true;
1082       }
1083 
1084       // A function call can always get captured noalias pointers (via other
1085       // parameters, globals, etc.).
1086       if (IsFuncCall && !IsArgMemOnlyCall)
1087         CanDeriveViaCapture = true;
1088 
1089       // First, we want to figure out all of the sets with which we definitely
1090       // don't alias. Iterate over all noalias set, and add those for which:
1091       //   1. The noalias argument is not in the set of objects from which we
1092       //      definitely derive.
1093       //   2. The noalias argument has not yet been captured.
1094       // An arbitrary function that might load pointers could see captured
1095       // noalias arguments via other noalias arguments or globals, and so we
1096       // must always check for prior capture.
1097       for (const Argument *A : NoAliasArgs) {
1098         if (!ObjSet.count(A) && (!CanDeriveViaCapture ||
1099                                  // It might be tempting to skip the
1100                                  // PointerMayBeCapturedBefore check if
1101                                  // A->hasNoCaptureAttr() is true, but this is
1102                                  // incorrect because nocapture only guarantees
1103                                  // that no copies outlive the function, not
1104                                  // that the value cannot be locally captured.
1105                                  !PointerMayBeCapturedBefore(A,
1106                                    /* ReturnCaptures */ false,
1107                                    /* StoreCaptures */ false, I, &DT)))
1108           NoAliases.push_back(NewScopes[A]);
1109       }
1110 
1111       if (!NoAliases.empty())
1112         NI->setMetadata(LLVMContext::MD_noalias,
1113                         MDNode::concatenate(
1114                             NI->getMetadata(LLVMContext::MD_noalias),
1115                             MDNode::get(CalledFunc->getContext(), NoAliases)));
1116 
1117       // Next, we want to figure out all of the sets to which we might belong.
1118       // We might belong to a set if the noalias argument is in the set of
1119       // underlying objects. If there is some non-noalias argument in our list
1120       // of underlying objects, then we cannot add a scope because the fact
1121       // that some access does not alias with any set of our noalias arguments
1122       // cannot itself guarantee that it does not alias with this access
1123       // (because there is some pointer of unknown origin involved and the
1124       // other access might also depend on this pointer). We also cannot add
1125       // scopes to arbitrary functions unless we know they don't access any
1126       // non-parameter pointer-values.
1127       bool CanAddScopes = !UsesAliasingPtr;
1128       if (CanAddScopes && IsFuncCall)
1129         CanAddScopes = IsArgMemOnlyCall;
1130 
1131       if (CanAddScopes)
1132         for (const Argument *A : NoAliasArgs) {
1133           if (ObjSet.count(A))
1134             Scopes.push_back(NewScopes[A]);
1135         }
1136 
1137       if (!Scopes.empty())
1138         NI->setMetadata(
1139             LLVMContext::MD_alias_scope,
1140             MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
1141                                 MDNode::get(CalledFunc->getContext(), Scopes)));
1142     }
1143   }
1144 }
1145 
1146 /// If the inlined function has non-byval align arguments, then
1147 /// add @llvm.assume-based alignment assumptions to preserve this information.
1148 static void AddAlignmentAssumptions(CallSite CS, InlineFunctionInfo &IFI) {
1149   if (!PreserveAlignmentAssumptions || !IFI.GetAssumptionCache)
1150     return;
1151 
1152   AssumptionCache *AC = &(*IFI.GetAssumptionCache)(*CS.getCaller());
1153   auto &DL = CS.getCaller()->getParent()->getDataLayout();
1154 
1155   // To avoid inserting redundant assumptions, we should check for assumptions
1156   // already in the caller. To do this, we might need a DT of the caller.
1157   DominatorTree DT;
1158   bool DTCalculated = false;
1159 
1160   Function *CalledFunc = CS.getCalledFunction();
1161   for (Argument &Arg : CalledFunc->args()) {
1162     unsigned Align = Arg.getType()->isPointerTy() ? Arg.getParamAlignment() : 0;
1163     if (Align && !Arg.hasByValOrInAllocaAttr() && !Arg.hasNUses(0)) {
1164       if (!DTCalculated) {
1165         DT.recalculate(*CS.getCaller());
1166         DTCalculated = true;
1167       }
1168 
1169       // If we can already prove the asserted alignment in the context of the
1170       // caller, then don't bother inserting the assumption.
1171       Value *ArgVal = CS.getArgument(Arg.getArgNo());
1172       if (getKnownAlignment(ArgVal, DL, CS.getInstruction(), AC, &DT) >= Align)
1173         continue;
1174 
1175       CallInst *NewAsmp = IRBuilder<>(CS.getInstruction())
1176                               .CreateAlignmentAssumption(DL, ArgVal, Align);
1177       AC->registerAssumption(NewAsmp);
1178     }
1179   }
1180 }
1181 
1182 /// Once we have cloned code over from a callee into the caller,
1183 /// update the specified callgraph to reflect the changes we made.
1184 /// Note that it's possible that not all code was copied over, so only
1185 /// some edges of the callgraph may remain.
1186 static void UpdateCallGraphAfterInlining(CallSite CS,
1187                                          Function::iterator FirstNewBlock,
1188                                          ValueToValueMapTy &VMap,
1189                                          InlineFunctionInfo &IFI) {
1190   CallGraph &CG = *IFI.CG;
1191   const Function *Caller = CS.getCaller();
1192   const Function *Callee = CS.getCalledFunction();
1193   CallGraphNode *CalleeNode = CG[Callee];
1194   CallGraphNode *CallerNode = CG[Caller];
1195 
1196   // Since we inlined some uninlined call sites in the callee into the caller,
1197   // add edges from the caller to all of the callees of the callee.
1198   CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();
1199 
1200   // Consider the case where CalleeNode == CallerNode.
1201   CallGraphNode::CalledFunctionsVector CallCache;
1202   if (CalleeNode == CallerNode) {
1203     CallCache.assign(I, E);
1204     I = CallCache.begin();
1205     E = CallCache.end();
1206   }
1207 
1208   for (; I != E; ++I) {
1209     const Value *OrigCall = I->first;
1210 
1211     ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
1212     // Only copy the edge if the call was inlined!
1213     if (VMI == VMap.end() || VMI->second == nullptr)
1214       continue;
1215 
1216     // If the call was inlined, but then constant folded, there is no edge to
1217     // add.  Check for this case.
1218     Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
1219     if (!NewCall)
1220       continue;
1221 
1222     // We do not treat intrinsic calls like real function calls because we
1223     // expect them to become inline code; do not add an edge for an intrinsic.
1224     CallSite CS = CallSite(NewCall);
1225     if (CS && CS.getCalledFunction() && CS.getCalledFunction()->isIntrinsic())
1226       continue;
1227 
1228     // Remember that this call site got inlined for the client of
1229     // InlineFunction.
1230     IFI.InlinedCalls.push_back(NewCall);
1231 
1232     // It's possible that inlining the callsite will cause it to go from an
1233     // indirect to a direct call by resolving a function pointer.  If this
1234     // happens, set the callee of the new call site to a more precise
1235     // destination.  This can also happen if the call graph node of the caller
1236     // was just unnecessarily imprecise.
1237     if (!I->second->getFunction())
1238       if (Function *F = CallSite(NewCall).getCalledFunction()) {
1239         // Indirect call site resolved to direct call.
1240         CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);
1241 
1242         continue;
1243       }
1244 
1245     CallerNode->addCalledFunction(CallSite(NewCall), I->second);
1246   }
1247 
1248   // Update the call graph by deleting the edge from Callee to Caller.  We must
1249   // do this after the loop above in case Caller and Callee are the same.
1250   CallerNode->removeCallEdgeFor(CS);
1251 }
1252 
1253 static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
1254                                     BasicBlock *InsertBlock,
1255                                     InlineFunctionInfo &IFI) {
1256   Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
1257   IRBuilder<> Builder(InsertBlock, InsertBlock->begin());
1258 
1259   Value *Size = Builder.getInt64(M->getDataLayout().getTypeStoreSize(AggTy));
1260 
1261   // Always generate a memcpy of alignment 1 here because we don't know
1262   // the alignment of the src pointer.  Other optimizations can infer
1263   // better alignment.
1264   Builder.CreateMemCpy(Dst, /*DstAlign*/1, Src, /*SrcAlign*/1, Size);
1265 }
1266 
1267 /// When inlining a call site that has a byval argument,
1268 /// we have to make the implicit memcpy explicit by adding it.
1269 static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
1270                                   const Function *CalledFunc,
1271                                   InlineFunctionInfo &IFI,
1272                                   unsigned ByValAlignment) {
1273   PointerType *ArgTy = cast<PointerType>(Arg->getType());
1274   Type *AggTy = ArgTy->getElementType();
1275 
1276   Function *Caller = TheCall->getFunction();
1277   const DataLayout &DL = Caller->getParent()->getDataLayout();
1278 
1279   // If the called function is readonly, then it could not mutate the caller's
1280   // copy of the byval'd memory.  In this case, it is safe to elide the copy and
1281   // temporary.
1282   if (CalledFunc->onlyReadsMemory()) {
1283     // If the byval argument has a specified alignment that is greater than the
1284     // passed in pointer, then we either have to round up the input pointer or
1285     // give up on this transformation.
1286     if (ByValAlignment <= 1)  // 0 = unspecified, 1 = no particular alignment.
1287       return Arg;
1288 
1289     AssumptionCache *AC =
1290         IFI.GetAssumptionCache ? &(*IFI.GetAssumptionCache)(*Caller) : nullptr;
1291 
1292     // If the pointer is already known to be sufficiently aligned, or if we can
1293     // round it up to a larger alignment, then we don't need a temporary.
1294     if (getOrEnforceKnownAlignment(Arg, ByValAlignment, DL, TheCall, AC) >=
1295         ByValAlignment)
1296       return Arg;
1297 
1298     // Otherwise, we have to make a memcpy to get a safe alignment.  This is bad
1299     // for code quality, but rarely happens and is required for correctness.
1300   }
1301 
1302   // Create the alloca.  If we have DataLayout, use nice alignment.
1303   unsigned Align = DL.getPrefTypeAlignment(AggTy);
1304 
1305   // If the byval had an alignment specified, we *must* use at least that
1306   // alignment, as it is required by the byval argument (and uses of the
1307   // pointer inside the callee).
1308   Align = std::max(Align, ByValAlignment);
1309 
1310   Value *NewAlloca = new AllocaInst(AggTy, DL.getAllocaAddrSpace(),
1311                                     nullptr, Align, Arg->getName(),
1312                                     &*Caller->begin()->begin());
1313   IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));
1314 
1315   // Uses of the argument in the function should use our new alloca
1316   // instead.
1317   return NewAlloca;
1318 }
1319 
1320 // Check whether this Value is used by a lifetime intrinsic.
1321 static bool isUsedByLifetimeMarker(Value *V) {
1322   for (User *U : V->users())
1323     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U))
1324       if (II->isLifetimeStartOrEnd())
1325         return true;
1326   return false;
1327 }
1328 
1329 // Check whether the given alloca already has
1330 // lifetime.start or lifetime.end intrinsics.
1331 static bool hasLifetimeMarkers(AllocaInst *AI) {
1332   Type *Ty = AI->getType();
1333   Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
1334                                        Ty->getPointerAddressSpace());
1335   if (Ty == Int8PtrTy)
1336     return isUsedByLifetimeMarker(AI);
1337 
1338   // Do a scan to find all the casts to i8*.
1339   for (User *U : AI->users()) {
1340     if (U->getType() != Int8PtrTy) continue;
1341     if (U->stripPointerCasts() != AI) continue;
1342     if (isUsedByLifetimeMarker(U))
1343       return true;
1344   }
1345   return false;
1346 }
1347 
1348 /// Return the result of AI->isStaticAlloca() if AI were moved to the entry
1349 /// block. Allocas used in inalloca calls and allocas of dynamic array size
1350 /// cannot be static.
1351 static bool allocaWouldBeStaticInEntry(const AllocaInst *AI ) {
1352   return isa<Constant>(AI->getArraySize()) && !AI->isUsedWithInAlloca();
1353 }
1354 
1355 /// Update inlined instructions' line numbers to
1356 /// to encode location where these instructions are inlined.
1357 static void fixupLineNumbers(Function *Fn, Function::iterator FI,
1358                              Instruction *TheCall, bool CalleeHasDebugInfo) {
1359   const DebugLoc &TheCallDL = TheCall->getDebugLoc();
1360   if (!TheCallDL)
1361     return;
1362 
1363   auto &Ctx = Fn->getContext();
1364   DILocation *InlinedAtNode = TheCallDL;
1365 
1366   // Create a unique call site, not to be confused with any other call from the
1367   // same location.
1368   InlinedAtNode = DILocation::getDistinct(
1369       Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
1370       InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());
1371 
1372   // Cache the inlined-at nodes as they're built so they are reused, without
1373   // this every instruction's inlined-at chain would become distinct from each
1374   // other.
1375   DenseMap<const MDNode *, MDNode *> IANodes;
1376 
1377   for (; FI != Fn->end(); ++FI) {
1378     for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
1379          BI != BE; ++BI) {
1380       if (DebugLoc DL = BI->getDebugLoc()) {
1381         auto IA = DebugLoc::appendInlinedAt(DL, InlinedAtNode, BI->getContext(),
1382                                             IANodes);
1383         auto IDL = DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(), IA);
1384         BI->setDebugLoc(IDL);
1385         continue;
1386       }
1387 
1388       if (CalleeHasDebugInfo)
1389         continue;
1390 
1391       // If the inlined instruction has no line number, make it look as if it
1392       // originates from the call location. This is important for
1393       // ((__always_inline__, __nodebug__)) functions which must use caller
1394       // location for all instructions in their function body.
1395 
1396       // Don't update static allocas, as they may get moved later.
1397       if (auto *AI = dyn_cast<AllocaInst>(BI))
1398         if (allocaWouldBeStaticInEntry(AI))
1399           continue;
1400 
1401       BI->setDebugLoc(TheCallDL);
1402     }
1403   }
1404 }
1405 
1406 /// Update the block frequencies of the caller after a callee has been inlined.
1407 ///
1408 /// Each block cloned into the caller has its block frequency scaled by the
1409 /// ratio of CallSiteFreq/CalleeEntryFreq. This ensures that the cloned copy of
1410 /// callee's entry block gets the same frequency as the callsite block and the
1411 /// relative frequencies of all cloned blocks remain the same after cloning.
1412 static void updateCallerBFI(BasicBlock *CallSiteBlock,
1413                             const ValueToValueMapTy &VMap,
1414                             BlockFrequencyInfo *CallerBFI,
1415                             BlockFrequencyInfo *CalleeBFI,
1416                             const BasicBlock &CalleeEntryBlock) {
1417   SmallPtrSet<BasicBlock *, 16> ClonedBBs;
1418   for (auto const &Entry : VMap) {
1419     if (!isa<BasicBlock>(Entry.first) || !Entry.second)
1420       continue;
1421     auto *OrigBB = cast<BasicBlock>(Entry.first);
1422     auto *ClonedBB = cast<BasicBlock>(Entry.second);
1423     uint64_t Freq = CalleeBFI->getBlockFreq(OrigBB).getFrequency();
1424     if (!ClonedBBs.insert(ClonedBB).second) {
1425       // Multiple blocks in the callee might get mapped to one cloned block in
1426       // the caller since we prune the callee as we clone it. When that happens,
1427       // we want to use the maximum among the original blocks' frequencies.
1428       uint64_t NewFreq = CallerBFI->getBlockFreq(ClonedBB).getFrequency();
1429       if (NewFreq > Freq)
1430         Freq = NewFreq;
1431     }
1432     CallerBFI->setBlockFreq(ClonedBB, Freq);
1433   }
1434   BasicBlock *EntryClone = cast<BasicBlock>(VMap.lookup(&CalleeEntryBlock));
1435   CallerBFI->setBlockFreqAndScale(
1436       EntryClone, CallerBFI->getBlockFreq(CallSiteBlock).getFrequency(),
1437       ClonedBBs);
1438 }
1439 
1440 /// Update the branch metadata for cloned call instructions.
1441 static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap,
1442                               const ProfileCount &CalleeEntryCount,
1443                               const Instruction *TheCall,
1444                               ProfileSummaryInfo *PSI,
1445                               BlockFrequencyInfo *CallerBFI) {
1446   if (!CalleeEntryCount.hasValue() || CalleeEntryCount.isSynthetic() ||
1447       CalleeEntryCount.getCount() < 1)
1448     return;
1449   auto CallSiteCount = PSI ? PSI->getProfileCount(TheCall, CallerBFI) : None;
1450   int64_t CallCount =
1451       std::min(CallSiteCount.hasValue() ? CallSiteCount.getValue() : 0,
1452                CalleeEntryCount.getCount());
1453   updateProfileCallee(Callee, -CallCount, &VMap);
1454 }
1455 
1456 void llvm::updateProfileCallee(
1457     Function *Callee, int64_t entryDelta,
1458     const ValueMap<const Value *, WeakTrackingVH> *VMap) {
1459   auto CalleeCount = Callee->getEntryCount();
1460   if (!CalleeCount.hasValue())
1461     return;
1462 
1463   uint64_t priorEntryCount = CalleeCount.getCount();
1464   uint64_t newEntryCount = priorEntryCount;
1465 
1466   // Since CallSiteCount is an estimate, it could exceed the original callee
1467   // count and has to be set to 0 so guard against underflow.
1468   if (entryDelta < 0 && static_cast<uint64_t>(-entryDelta) > priorEntryCount)
1469     newEntryCount = 0;
1470   else
1471     newEntryCount = priorEntryCount + entryDelta;
1472 
1473   Callee->setEntryCount(newEntryCount);
1474 
1475   // During inlining ?
1476   if (VMap) {
1477     uint64_t cloneEntryCount = priorEntryCount - newEntryCount;
1478     for (auto const &Entry : *VMap)
1479       if (isa<CallInst>(Entry.first))
1480         if (auto *CI = dyn_cast_or_null<CallInst>(Entry.second))
1481           CI->updateProfWeight(cloneEntryCount, priorEntryCount);
1482   }
1483   for (BasicBlock &BB : *Callee)
1484     // No need to update the callsite if it is pruned during inlining.
1485     if (!VMap || VMap->count(&BB))
1486       for (Instruction &I : BB)
1487         if (CallInst *CI = dyn_cast<CallInst>(&I))
1488           CI->updateProfWeight(newEntryCount, priorEntryCount);
1489 }
1490 
1491 /// This function inlines the called function into the basic block of the
1492 /// caller. This returns false if it is not possible to inline this call.
1493 /// The program is still in a well defined state if this occurs though.
1494 ///
1495 /// Note that this only does one level of inlining.  For example, if the
1496 /// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
1497 /// exists in the instruction stream.  Similarly this will inline a recursive
1498 /// function by one level.
1499 llvm::InlineResult llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
1500                                         AAResults *CalleeAAR,
1501                                         bool InsertLifetime,
1502                                         Function *ForwardVarArgsTo) {
1503   Instruction *TheCall = CS.getInstruction();
1504   assert(TheCall->getParent() && TheCall->getFunction()
1505          && "Instruction not in function!");
1506 
1507   // FIXME: we don't inline callbr yet.
1508   if (isa<CallBrInst>(TheCall))
1509     return false;
1510 
1511   // If IFI has any state in it, zap it before we fill it in.
1512   IFI.reset();
1513 
1514   Function *CalledFunc = CS.getCalledFunction();
1515   if (!CalledFunc ||               // Can't inline external function or indirect
1516       CalledFunc->isDeclaration()) // call!
1517     return "external or indirect";
1518 
1519   // The inliner does not know how to inline through calls with operand bundles
1520   // in general ...
1521   if (CS.hasOperandBundles()) {
1522     for (int i = 0, e = CS.getNumOperandBundles(); i != e; ++i) {
1523       uint32_t Tag = CS.getOperandBundleAt(i).getTagID();
1524       // ... but it knows how to inline through "deopt" operand bundles ...
1525       if (Tag == LLVMContext::OB_deopt)
1526         continue;
1527       // ... and "funclet" operand bundles.
1528       if (Tag == LLVMContext::OB_funclet)
1529         continue;
1530 
1531       return "unsupported operand bundle";
1532     }
1533   }
1534 
1535   // If the call to the callee cannot throw, set the 'nounwind' flag on any
1536   // calls that we inline.
1537   bool MarkNoUnwind = CS.doesNotThrow();
1538 
1539   BasicBlock *OrigBB = TheCall->getParent();
1540   Function *Caller = OrigBB->getParent();
1541 
1542   // GC poses two hazards to inlining, which only occur when the callee has GC:
1543   //  1. If the caller has no GC, then the callee's GC must be propagated to the
1544   //     caller.
1545   //  2. If the caller has a differing GC, it is invalid to inline.
1546   if (CalledFunc->hasGC()) {
1547     if (!Caller->hasGC())
1548       Caller->setGC(CalledFunc->getGC());
1549     else if (CalledFunc->getGC() != Caller->getGC())
1550       return "incompatible GC";
1551   }
1552 
1553   // Get the personality function from the callee if it contains a landing pad.
1554   Constant *CalledPersonality =
1555       CalledFunc->hasPersonalityFn()
1556           ? CalledFunc->getPersonalityFn()->stripPointerCasts()
1557           : nullptr;
1558 
1559   // Find the personality function used by the landing pads of the caller. If it
1560   // exists, then check to see that it matches the personality function used in
1561   // the callee.
1562   Constant *CallerPersonality =
1563       Caller->hasPersonalityFn()
1564           ? Caller->getPersonalityFn()->stripPointerCasts()
1565           : nullptr;
1566   if (CalledPersonality) {
1567     if (!CallerPersonality)
1568       Caller->setPersonalityFn(CalledPersonality);
1569     // If the personality functions match, then we can perform the
1570     // inlining. Otherwise, we can't inline.
1571     // TODO: This isn't 100% true. Some personality functions are proper
1572     //       supersets of others and can be used in place of the other.
1573     else if (CalledPersonality != CallerPersonality)
1574       return "incompatible personality";
1575   }
1576 
1577   // We need to figure out which funclet the callsite was in so that we may
1578   // properly nest the callee.
1579   Instruction *CallSiteEHPad = nullptr;
1580   if (CallerPersonality) {
1581     EHPersonality Personality = classifyEHPersonality(CallerPersonality);
1582     if (isScopedEHPersonality(Personality)) {
1583       Optional<OperandBundleUse> ParentFunclet =
1584           CS.getOperandBundle(LLVMContext::OB_funclet);
1585       if (ParentFunclet)
1586         CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());
1587 
1588       // OK, the inlining site is legal.  What about the target function?
1589 
1590       if (CallSiteEHPad) {
1591         if (Personality == EHPersonality::MSVC_CXX) {
1592           // The MSVC personality cannot tolerate catches getting inlined into
1593           // cleanup funclets.
1594           if (isa<CleanupPadInst>(CallSiteEHPad)) {
1595             // Ok, the call site is within a cleanuppad.  Let's check the callee
1596             // for catchpads.
1597             for (const BasicBlock &CalledBB : *CalledFunc) {
1598               if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))
1599                 return "catch in cleanup funclet";
1600             }
1601           }
1602         } else if (isAsynchronousEHPersonality(Personality)) {
1603           // SEH is even less tolerant, there may not be any sort of exceptional
1604           // funclet in the callee.
1605           for (const BasicBlock &CalledBB : *CalledFunc) {
1606             if (CalledBB.isEHPad())
1607               return "SEH in cleanup funclet";
1608           }
1609         }
1610       }
1611     }
1612   }
1613 
1614   // Determine if we are dealing with a call in an EHPad which does not unwind
1615   // to caller.
1616   bool EHPadForCallUnwindsLocally = false;
1617   if (CallSiteEHPad && CS.isCall()) {
1618     UnwindDestMemoTy FuncletUnwindMap;
1619     Value *CallSiteUnwindDestToken =
1620         getUnwindDestToken(CallSiteEHPad, FuncletUnwindMap);
1621 
1622     EHPadForCallUnwindsLocally =
1623         CallSiteUnwindDestToken &&
1624         !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
1625   }
1626 
1627   // Get an iterator to the last basic block in the function, which will have
1628   // the new function inlined after it.
1629   Function::iterator LastBlock = --Caller->end();
1630 
1631   // Make sure to capture all of the return instructions from the cloned
1632   // function.
1633   SmallVector<ReturnInst*, 8> Returns;
1634   ClonedCodeInfo InlinedFunctionInfo;
1635   Function::iterator FirstNewBlock;
1636 
1637   { // Scope to destroy VMap after cloning.
1638     ValueToValueMapTy VMap;
1639     // Keep a list of pair (dst, src) to emit byval initializations.
1640     SmallVector<std::pair<Value*, Value*>, 4> ByValInit;
1641 
1642     auto &DL = Caller->getParent()->getDataLayout();
1643 
1644     // Calculate the vector of arguments to pass into the function cloner, which
1645     // matches up the formal to the actual argument values.
1646     CallSite::arg_iterator AI = CS.arg_begin();
1647     unsigned ArgNo = 0;
1648     for (Function::arg_iterator I = CalledFunc->arg_begin(),
1649          E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
1650       Value *ActualArg = *AI;
1651 
1652       // When byval arguments actually inlined, we need to make the copy implied
1653       // by them explicit.  However, we don't do this if the callee is readonly
1654       // or readnone, because the copy would be unneeded: the callee doesn't
1655       // modify the struct.
1656       if (CS.isByValArgument(ArgNo)) {
1657         ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
1658                                         CalledFunc->getParamAlignment(ArgNo));
1659         if (ActualArg != *AI)
1660           ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI));
1661       }
1662 
1663       VMap[&*I] = ActualArg;
1664     }
1665 
1666     // Add alignment assumptions if necessary. We do this before the inlined
1667     // instructions are actually cloned into the caller so that we can easily
1668     // check what will be known at the start of the inlined code.
1669     AddAlignmentAssumptions(CS, IFI);
1670 
1671     // We want the inliner to prune the code as it copies.  We would LOVE to
1672     // have no dead or constant instructions leftover after inlining occurs
1673     // (which can happen, e.g., because an argument was constant), but we'll be
1674     // happy with whatever the cloner can do.
1675     CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
1676                               /*ModuleLevelChanges=*/false, Returns, ".i",
1677                               &InlinedFunctionInfo, TheCall);
1678     // Remember the first block that is newly cloned over.
1679     FirstNewBlock = LastBlock; ++FirstNewBlock;
1680 
1681     if (IFI.CallerBFI != nullptr && IFI.CalleeBFI != nullptr)
1682       // Update the BFI of blocks cloned into the caller.
1683       updateCallerBFI(OrigBB, VMap, IFI.CallerBFI, IFI.CalleeBFI,
1684                       CalledFunc->front());
1685 
1686     updateCallProfile(CalledFunc, VMap, CalledFunc->getEntryCount(), TheCall,
1687                       IFI.PSI, IFI.CallerBFI);
1688 
1689     // Inject byval arguments initialization.
1690     for (std::pair<Value*, Value*> &Init : ByValInit)
1691       HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(),
1692                               &*FirstNewBlock, IFI);
1693 
1694     Optional<OperandBundleUse> ParentDeopt =
1695         CS.getOperandBundle(LLVMContext::OB_deopt);
1696     if (ParentDeopt) {
1697       SmallVector<OperandBundleDef, 2> OpDefs;
1698 
1699       for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) {
1700         Instruction *I = dyn_cast_or_null<Instruction>(VH);
1701         if (!I) continue;  // instruction was DCE'd or RAUW'ed to undef
1702 
1703         OpDefs.clear();
1704 
1705         CallSite ICS(I);
1706         OpDefs.reserve(ICS.getNumOperandBundles());
1707 
1708         for (unsigned i = 0, e = ICS.getNumOperandBundles(); i < e; ++i) {
1709           auto ChildOB = ICS.getOperandBundleAt(i);
1710           if (ChildOB.getTagID() != LLVMContext::OB_deopt) {
1711             // If the inlined call has other operand bundles, let them be
1712             OpDefs.emplace_back(ChildOB);
1713             continue;
1714           }
1715 
1716           // It may be useful to separate this logic (of handling operand
1717           // bundles) out to a separate "policy" component if this gets crowded.
1718           // Prepend the parent's deoptimization continuation to the newly
1719           // inlined call's deoptimization continuation.
1720           std::vector<Value *> MergedDeoptArgs;
1721           MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
1722                                   ChildOB.Inputs.size());
1723 
1724           MergedDeoptArgs.insert(MergedDeoptArgs.end(),
1725                                  ParentDeopt->Inputs.begin(),
1726                                  ParentDeopt->Inputs.end());
1727           MergedDeoptArgs.insert(MergedDeoptArgs.end(), ChildOB.Inputs.begin(),
1728                                  ChildOB.Inputs.end());
1729 
1730           OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs));
1731         }
1732 
1733         Instruction *NewI = nullptr;
1734         if (isa<CallInst>(I))
1735           NewI = CallInst::Create(cast<CallInst>(I), OpDefs, I);
1736         else if (isa<CallBrInst>(I))
1737           NewI = CallBrInst::Create(cast<CallBrInst>(I), OpDefs, I);
1738         else
1739           NewI = InvokeInst::Create(cast<InvokeInst>(I), OpDefs, I);
1740 
1741         // Note: the RAUW does the appropriate fixup in VMap, so we need to do
1742         // this even if the call returns void.
1743         I->replaceAllUsesWith(NewI);
1744 
1745         VH = nullptr;
1746         I->eraseFromParent();
1747       }
1748     }
1749 
1750     // Update the callgraph if requested.
1751     if (IFI.CG)
1752       UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);
1753 
1754     // For 'nodebug' functions, the associated DISubprogram is always null.
1755     // Conservatively avoid propagating the callsite debug location to
1756     // instructions inlined from a function whose DISubprogram is not null.
1757     fixupLineNumbers(Caller, FirstNewBlock, TheCall,
1758                      CalledFunc->getSubprogram() != nullptr);
1759 
1760     // Clone existing noalias metadata if necessary.
1761     CloneAliasScopeMetadata(CS, VMap);
1762 
1763     // Add noalias metadata if necessary.
1764     AddAliasScopeMetadata(CS, VMap, DL, CalleeAAR);
1765 
1766     // Propagate llvm.mem.parallel_loop_access if necessary.
1767     PropagateParallelLoopAccessMetadata(CS, VMap);
1768 
1769     // Register any cloned assumptions.
1770     if (IFI.GetAssumptionCache)
1771       for (BasicBlock &NewBlock :
1772            make_range(FirstNewBlock->getIterator(), Caller->end()))
1773         for (Instruction &I : NewBlock) {
1774           if (auto *II = dyn_cast<IntrinsicInst>(&I))
1775             if (II->getIntrinsicID() == Intrinsic::assume)
1776               (*IFI.GetAssumptionCache)(*Caller).registerAssumption(II);
1777         }
1778   }
1779 
1780   // If there are any alloca instructions in the block that used to be the entry
1781   // block for the callee, move them to the entry block of the caller.  First
1782   // calculate which instruction they should be inserted before.  We insert the
1783   // instructions at the end of the current alloca list.
1784   {
1785     BasicBlock::iterator InsertPoint = Caller->begin()->begin();
1786     for (BasicBlock::iterator I = FirstNewBlock->begin(),
1787          E = FirstNewBlock->end(); I != E; ) {
1788       AllocaInst *AI = dyn_cast<AllocaInst>(I++);
1789       if (!AI) continue;
1790 
1791       // If the alloca is now dead, remove it.  This often occurs due to code
1792       // specialization.
1793       if (AI->use_empty()) {
1794         AI->eraseFromParent();
1795         continue;
1796       }
1797 
1798       if (!allocaWouldBeStaticInEntry(AI))
1799         continue;
1800 
1801       // Keep track of the static allocas that we inline into the caller.
1802       IFI.StaticAllocas.push_back(AI);
1803 
1804       // Scan for the block of allocas that we can move over, and move them
1805       // all at once.
1806       while (isa<AllocaInst>(I) &&
1807              allocaWouldBeStaticInEntry(cast<AllocaInst>(I))) {
1808         IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
1809         ++I;
1810       }
1811 
1812       // Transfer all of the allocas over in a block.  Using splice means
1813       // that the instructions aren't removed from the symbol table, then
1814       // reinserted.
1815       Caller->getEntryBlock().getInstList().splice(
1816           InsertPoint, FirstNewBlock->getInstList(), AI->getIterator(), I);
1817     }
1818     // Move any dbg.declares describing the allocas into the entry basic block.
1819     DIBuilder DIB(*Caller->getParent());
1820     for (auto &AI : IFI.StaticAllocas)
1821       replaceDbgDeclareForAlloca(AI, AI, DIB, DIExpression::NoDeref, 0,
1822                                  DIExpression::NoDeref);
1823   }
1824 
1825   SmallVector<Value*,4> VarArgsToForward;
1826   SmallVector<AttributeSet, 4> VarArgsAttrs;
1827   for (unsigned i = CalledFunc->getFunctionType()->getNumParams();
1828        i < CS.getNumArgOperands(); i++) {
1829     VarArgsToForward.push_back(CS.getArgOperand(i));
1830     VarArgsAttrs.push_back(CS.getAttributes().getParamAttributes(i));
1831   }
1832 
1833   bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false;
1834   if (InlinedFunctionInfo.ContainsCalls) {
1835     CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
1836     if (CallInst *CI = dyn_cast<CallInst>(TheCall))
1837       CallSiteTailKind = CI->getTailCallKind();
1838 
1839     // For inlining purposes, the "notail" marker is the same as no marker.
1840     if (CallSiteTailKind == CallInst::TCK_NoTail)
1841       CallSiteTailKind = CallInst::TCK_None;
1842 
1843     for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
1844          ++BB) {
1845       for (auto II = BB->begin(); II != BB->end();) {
1846         Instruction &I = *II++;
1847         CallInst *CI = dyn_cast<CallInst>(&I);
1848         if (!CI)
1849           continue;
1850 
1851         // Forward varargs from inlined call site to calls to the
1852         // ForwardVarArgsTo function, if requested, and to musttail calls.
1853         if (!VarArgsToForward.empty() &&
1854             ((ForwardVarArgsTo &&
1855               CI->getCalledFunction() == ForwardVarArgsTo) ||
1856              CI->isMustTailCall())) {
1857           // Collect attributes for non-vararg parameters.
1858           AttributeList Attrs = CI->getAttributes();
1859           SmallVector<AttributeSet, 8> ArgAttrs;
1860           if (!Attrs.isEmpty() || !VarArgsAttrs.empty()) {
1861             for (unsigned ArgNo = 0;
1862                  ArgNo < CI->getFunctionType()->getNumParams(); ++ArgNo)
1863               ArgAttrs.push_back(Attrs.getParamAttributes(ArgNo));
1864           }
1865 
1866           // Add VarArg attributes.
1867           ArgAttrs.append(VarArgsAttrs.begin(), VarArgsAttrs.end());
1868           Attrs = AttributeList::get(CI->getContext(), Attrs.getFnAttributes(),
1869                                      Attrs.getRetAttributes(), ArgAttrs);
1870           // Add VarArgs to existing parameters.
1871           SmallVector<Value *, 6> Params(CI->arg_operands());
1872           Params.append(VarArgsToForward.begin(), VarArgsToForward.end());
1873           CallInst *NewCI = CallInst::Create(
1874               CI->getFunctionType(), CI->getCalledOperand(), Params, "", CI);
1875           NewCI->setDebugLoc(CI->getDebugLoc());
1876           NewCI->setAttributes(Attrs);
1877           NewCI->setCallingConv(CI->getCallingConv());
1878           CI->replaceAllUsesWith(NewCI);
1879           CI->eraseFromParent();
1880           CI = NewCI;
1881         }
1882 
1883         if (Function *F = CI->getCalledFunction())
1884           InlinedDeoptimizeCalls |=
1885               F->getIntrinsicID() == Intrinsic::experimental_deoptimize;
1886 
1887         // We need to reduce the strength of any inlined tail calls.  For
1888         // musttail, we have to avoid introducing potential unbounded stack
1889         // growth.  For example, if functions 'f' and 'g' are mutually recursive
1890         // with musttail, we can inline 'g' into 'f' so long as we preserve
1891         // musttail on the cloned call to 'f'.  If either the inlined call site
1892         // or the cloned call site is *not* musttail, the program already has
1893         // one frame of stack growth, so it's safe to remove musttail.  Here is
1894         // a table of example transformations:
1895         //
1896         //    f -> musttail g -> musttail f  ==>  f -> musttail f
1897         //    f -> musttail g ->     tail f  ==>  f ->     tail f
1898         //    f ->          g -> musttail f  ==>  f ->          f
1899         //    f ->          g ->     tail f  ==>  f ->          f
1900         //
1901         // Inlined notail calls should remain notail calls.
1902         CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
1903         if (ChildTCK != CallInst::TCK_NoTail)
1904           ChildTCK = std::min(CallSiteTailKind, ChildTCK);
1905         CI->setTailCallKind(ChildTCK);
1906         InlinedMustTailCalls |= CI->isMustTailCall();
1907 
1908         // Calls inlined through a 'nounwind' call site should be marked
1909         // 'nounwind'.
1910         if (MarkNoUnwind)
1911           CI->setDoesNotThrow();
1912       }
1913     }
1914   }
1915 
1916   // Leave lifetime markers for the static alloca's, scoping them to the
1917   // function we just inlined.
1918   if (InsertLifetime && !IFI.StaticAllocas.empty()) {
1919     IRBuilder<> builder(&FirstNewBlock->front());
1920     for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
1921       AllocaInst *AI = IFI.StaticAllocas[ai];
1922       // Don't mark swifterror allocas. They can't have bitcast uses.
1923       if (AI->isSwiftError())
1924         continue;
1925 
1926       // If the alloca is already scoped to something smaller than the whole
1927       // function then there's no need to add redundant, less accurate markers.
1928       if (hasLifetimeMarkers(AI))
1929         continue;
1930 
1931       // Try to determine the size of the allocation.
1932       ConstantInt *AllocaSize = nullptr;
1933       if (ConstantInt *AIArraySize =
1934           dyn_cast<ConstantInt>(AI->getArraySize())) {
1935         auto &DL = Caller->getParent()->getDataLayout();
1936         Type *AllocaType = AI->getAllocatedType();
1937         uint64_t AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
1938         uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
1939 
1940         // Don't add markers for zero-sized allocas.
1941         if (AllocaArraySize == 0)
1942           continue;
1943 
1944         // Check that array size doesn't saturate uint64_t and doesn't
1945         // overflow when it's multiplied by type size.
1946         if (AllocaArraySize != std::numeric_limits<uint64_t>::max() &&
1947             std::numeric_limits<uint64_t>::max() / AllocaArraySize >=
1948                 AllocaTypeSize) {
1949           AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
1950                                         AllocaArraySize * AllocaTypeSize);
1951         }
1952       }
1953 
1954       builder.CreateLifetimeStart(AI, AllocaSize);
1955       for (ReturnInst *RI : Returns) {
1956         // Don't insert llvm.lifetime.end calls between a musttail or deoptimize
1957         // call and a return.  The return kills all local allocas.
1958         if (InlinedMustTailCalls &&
1959             RI->getParent()->getTerminatingMustTailCall())
1960           continue;
1961         if (InlinedDeoptimizeCalls &&
1962             RI->getParent()->getTerminatingDeoptimizeCall())
1963           continue;
1964         IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
1965       }
1966     }
1967   }
1968 
1969   // If the inlined code contained dynamic alloca instructions, wrap the inlined
1970   // code with llvm.stacksave/llvm.stackrestore intrinsics.
1971   if (InlinedFunctionInfo.ContainsDynamicAllocas) {
1972     Module *M = Caller->getParent();
1973     // Get the two intrinsics we care about.
1974     Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
1975     Function *StackRestore=Intrinsic::getDeclaration(M,Intrinsic::stackrestore);
1976 
1977     // Insert the llvm.stacksave.
1978     CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
1979                              .CreateCall(StackSave, {}, "savedstack");
1980 
1981     // Insert a call to llvm.stackrestore before any return instructions in the
1982     // inlined function.
1983     for (ReturnInst *RI : Returns) {
1984       // Don't insert llvm.stackrestore calls between a musttail or deoptimize
1985       // call and a return.  The return will restore the stack pointer.
1986       if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
1987         continue;
1988       if (InlinedDeoptimizeCalls && RI->getParent()->getTerminatingDeoptimizeCall())
1989         continue;
1990       IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
1991     }
1992   }
1993 
1994   // If we are inlining for an invoke instruction, we must make sure to rewrite
1995   // any call instructions into invoke instructions.  This is sensitive to which
1996   // funclet pads were top-level in the inlinee, so must be done before
1997   // rewriting the "parent pad" links.
1998   if (auto *II = dyn_cast<InvokeInst>(TheCall)) {
1999     BasicBlock *UnwindDest = II->getUnwindDest();
2000     Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI();
2001     if (isa<LandingPadInst>(FirstNonPHI)) {
2002       HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo);
2003     } else {
2004       HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo);
2005     }
2006   }
2007 
2008   // Update the lexical scopes of the new funclets and callsites.
2009   // Anything that had 'none' as its parent is now nested inside the callsite's
2010   // EHPad.
2011 
2012   if (CallSiteEHPad) {
2013     for (Function::iterator BB = FirstNewBlock->getIterator(),
2014                             E = Caller->end();
2015          BB != E; ++BB) {
2016       // Add bundle operands to any top-level call sites.
2017       SmallVector<OperandBundleDef, 1> OpBundles;
2018       for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E;) {
2019         Instruction *I = &*BBI++;
2020         CallSite CS(I);
2021         if (!CS)
2022           continue;
2023 
2024         // Skip call sites which are nounwind intrinsics.
2025         auto *CalledFn =
2026             dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
2027         if (CalledFn && CalledFn->isIntrinsic() && CS.doesNotThrow())
2028           continue;
2029 
2030         // Skip call sites which already have a "funclet" bundle.
2031         if (CS.getOperandBundle(LLVMContext::OB_funclet))
2032           continue;
2033 
2034         CS.getOperandBundlesAsDefs(OpBundles);
2035         OpBundles.emplace_back("funclet", CallSiteEHPad);
2036 
2037         Instruction *NewInst;
2038         if (CS.isCall())
2039           NewInst = CallInst::Create(cast<CallInst>(I), OpBundles, I);
2040         else if (CS.isCallBr())
2041           NewInst = CallBrInst::Create(cast<CallBrInst>(I), OpBundles, I);
2042         else
2043           NewInst = InvokeInst::Create(cast<InvokeInst>(I), OpBundles, I);
2044         NewInst->takeName(I);
2045         I->replaceAllUsesWith(NewInst);
2046         I->eraseFromParent();
2047 
2048         OpBundles.clear();
2049       }
2050 
2051       // It is problematic if the inlinee has a cleanupret which unwinds to
2052       // caller and we inline it into a call site which doesn't unwind but into
2053       // an EH pad that does.  Such an edge must be dynamically unreachable.
2054       // As such, we replace the cleanupret with unreachable.
2055       if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
2056         if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
2057           changeToUnreachable(CleanupRet, /*UseLLVMTrap=*/false);
2058 
2059       Instruction *I = BB->getFirstNonPHI();
2060       if (!I->isEHPad())
2061         continue;
2062 
2063       if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
2064         if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
2065           CatchSwitch->setParentPad(CallSiteEHPad);
2066       } else {
2067         auto *FPI = cast<FuncletPadInst>(I);
2068         if (isa<ConstantTokenNone>(FPI->getParentPad()))
2069           FPI->setParentPad(CallSiteEHPad);
2070       }
2071     }
2072   }
2073 
2074   if (InlinedDeoptimizeCalls) {
2075     // We need to at least remove the deoptimizing returns from the Return set,
2076     // so that the control flow from those returns does not get merged into the
2077     // caller (but terminate it instead).  If the caller's return type does not
2078     // match the callee's return type, we also need to change the return type of
2079     // the intrinsic.
2080     if (Caller->getReturnType() == TheCall->getType()) {
2081       auto NewEnd = llvm::remove_if(Returns, [](ReturnInst *RI) {
2082         return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr;
2083       });
2084       Returns.erase(NewEnd, Returns.end());
2085     } else {
2086       SmallVector<ReturnInst *, 8> NormalReturns;
2087       Function *NewDeoptIntrinsic = Intrinsic::getDeclaration(
2088           Caller->getParent(), Intrinsic::experimental_deoptimize,
2089           {Caller->getReturnType()});
2090 
2091       for (ReturnInst *RI : Returns) {
2092         CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall();
2093         if (!DeoptCall) {
2094           NormalReturns.push_back(RI);
2095           continue;
2096         }
2097 
2098         // The calling convention on the deoptimize call itself may be bogus,
2099         // since the code we're inlining may have undefined behavior (and may
2100         // never actually execute at runtime); but all
2101         // @llvm.experimental.deoptimize declarations have to have the same
2102         // calling convention in a well-formed module.
2103         auto CallingConv = DeoptCall->getCalledFunction()->getCallingConv();
2104         NewDeoptIntrinsic->setCallingConv(CallingConv);
2105         auto *CurBB = RI->getParent();
2106         RI->eraseFromParent();
2107 
2108         SmallVector<Value *, 4> CallArgs(DeoptCall->arg_begin(),
2109                                          DeoptCall->arg_end());
2110 
2111         SmallVector<OperandBundleDef, 1> OpBundles;
2112         DeoptCall->getOperandBundlesAsDefs(OpBundles);
2113         DeoptCall->eraseFromParent();
2114         assert(!OpBundles.empty() &&
2115                "Expected at least the deopt operand bundle");
2116 
2117         IRBuilder<> Builder(CurBB);
2118         CallInst *NewDeoptCall =
2119             Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
2120         NewDeoptCall->setCallingConv(CallingConv);
2121         if (NewDeoptCall->getType()->isVoidTy())
2122           Builder.CreateRetVoid();
2123         else
2124           Builder.CreateRet(NewDeoptCall);
2125       }
2126 
2127       // Leave behind the normal returns so we can merge control flow.
2128       std::swap(Returns, NormalReturns);
2129     }
2130   }
2131 
2132   // Handle any inlined musttail call sites.  In order for a new call site to be
2133   // musttail, the source of the clone and the inlined call site must have been
2134   // musttail.  Therefore it's safe to return without merging control into the
2135   // phi below.
2136   if (InlinedMustTailCalls) {
2137     // Check if we need to bitcast the result of any musttail calls.
2138     Type *NewRetTy = Caller->getReturnType();
2139     bool NeedBitCast = !TheCall->use_empty() && TheCall->getType() != NewRetTy;
2140 
2141     // Handle the returns preceded by musttail calls separately.
2142     SmallVector<ReturnInst *, 8> NormalReturns;
2143     for (ReturnInst *RI : Returns) {
2144       CallInst *ReturnedMustTail =
2145           RI->getParent()->getTerminatingMustTailCall();
2146       if (!ReturnedMustTail) {
2147         NormalReturns.push_back(RI);
2148         continue;
2149       }
2150       if (!NeedBitCast)
2151         continue;
2152 
2153       // Delete the old return and any preceding bitcast.
2154       BasicBlock *CurBB = RI->getParent();
2155       auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
2156       RI->eraseFromParent();
2157       if (OldCast)
2158         OldCast->eraseFromParent();
2159 
2160       // Insert a new bitcast and return with the right type.
2161       IRBuilder<> Builder(CurBB);
2162       Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
2163     }
2164 
2165     // Leave behind the normal returns so we can merge control flow.
2166     std::swap(Returns, NormalReturns);
2167   }
2168 
2169   // Now that all of the transforms on the inlined code have taken place but
2170   // before we splice the inlined code into the CFG and lose track of which
2171   // blocks were actually inlined, collect the call sites. We only do this if
2172   // call graph updates weren't requested, as those provide value handle based
2173   // tracking of inlined call sites instead.
2174   if (InlinedFunctionInfo.ContainsCalls && !IFI.CG) {
2175     // Otherwise just collect the raw call sites that were inlined.
2176     for (BasicBlock &NewBB :
2177          make_range(FirstNewBlock->getIterator(), Caller->end()))
2178       for (Instruction &I : NewBB)
2179         if (auto CS = CallSite(&I))
2180           IFI.InlinedCallSites.push_back(CS);
2181   }
2182 
2183   // If we cloned in _exactly one_ basic block, and if that block ends in a
2184   // return instruction, we splice the body of the inlined callee directly into
2185   // the calling basic block.
2186   if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
2187     // Move all of the instructions right before the call.
2188     OrigBB->getInstList().splice(TheCall->getIterator(),
2189                                  FirstNewBlock->getInstList(),
2190                                  FirstNewBlock->begin(), FirstNewBlock->end());
2191     // Remove the cloned basic block.
2192     Caller->getBasicBlockList().pop_back();
2193 
2194     // If the call site was an invoke instruction, add a branch to the normal
2195     // destination.
2196     if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
2197       BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);
2198       NewBr->setDebugLoc(Returns[0]->getDebugLoc());
2199     }
2200 
2201     // If the return instruction returned a value, replace uses of the call with
2202     // uses of the returned value.
2203     if (!TheCall->use_empty()) {
2204       ReturnInst *R = Returns[0];
2205       if (TheCall == R->getReturnValue())
2206         TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
2207       else
2208         TheCall->replaceAllUsesWith(R->getReturnValue());
2209     }
2210     // Since we are now done with the Call/Invoke, we can delete it.
2211     TheCall->eraseFromParent();
2212 
2213     // Since we are now done with the return instruction, delete it also.
2214     Returns[0]->eraseFromParent();
2215 
2216     // We are now done with the inlining.
2217     return true;
2218   }
2219 
2220   // Otherwise, we have the normal case, of more than one block to inline or
2221   // multiple return sites.
2222 
2223   // We want to clone the entire callee function into the hole between the
2224   // "starter" and "ender" blocks.  How we accomplish this depends on whether
2225   // this is an invoke instruction or a call instruction.
2226   BasicBlock *AfterCallBB;
2227   BranchInst *CreatedBranchToNormalDest = nullptr;
2228   if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
2229 
2230     // Add an unconditional branch to make this look like the CallInst case...
2231     CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), TheCall);
2232 
2233     // Split the basic block.  This guarantees that no PHI nodes will have to be
2234     // updated due to new incoming edges, and make the invoke case more
2235     // symmetric to the call case.
2236     AfterCallBB =
2237         OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(),
2238                                 CalledFunc->getName() + ".exit");
2239 
2240   } else {  // It's a call
2241     // If this is a call instruction, we need to split the basic block that
2242     // the call lives in.
2243     //
2244     AfterCallBB = OrigBB->splitBasicBlock(TheCall->getIterator(),
2245                                           CalledFunc->getName() + ".exit");
2246   }
2247 
2248   if (IFI.CallerBFI) {
2249     // Copy original BB's block frequency to AfterCallBB
2250     IFI.CallerBFI->setBlockFreq(
2251         AfterCallBB, IFI.CallerBFI->getBlockFreq(OrigBB).getFrequency());
2252   }
2253 
2254   // Change the branch that used to go to AfterCallBB to branch to the first
2255   // basic block of the inlined function.
2256   //
2257   Instruction *Br = OrigBB->getTerminator();
2258   assert(Br && Br->getOpcode() == Instruction::Br &&
2259          "splitBasicBlock broken!");
2260   Br->setOperand(0, &*FirstNewBlock);
2261 
2262   // Now that the function is correct, make it a little bit nicer.  In
2263   // particular, move the basic blocks inserted from the end of the function
2264   // into the space made by splitting the source basic block.
2265   Caller->getBasicBlockList().splice(AfterCallBB->getIterator(),
2266                                      Caller->getBasicBlockList(), FirstNewBlock,
2267                                      Caller->end());
2268 
2269   // Handle all of the return instructions that we just cloned in, and eliminate
2270   // any users of the original call/invoke instruction.
2271   Type *RTy = CalledFunc->getReturnType();
2272 
2273   PHINode *PHI = nullptr;
2274   if (Returns.size() > 1) {
2275     // The PHI node should go at the front of the new basic block to merge all
2276     // possible incoming values.
2277     if (!TheCall->use_empty()) {
2278       PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(),
2279                             &AfterCallBB->front());
2280       // Anything that used the result of the function call should now use the
2281       // PHI node as their operand.
2282       TheCall->replaceAllUsesWith(PHI);
2283     }
2284 
2285     // Loop over all of the return instructions adding entries to the PHI node
2286     // as appropriate.
2287     if (PHI) {
2288       for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2289         ReturnInst *RI = Returns[i];
2290         assert(RI->getReturnValue()->getType() == PHI->getType() &&
2291                "Ret value not consistent in function!");
2292         PHI->addIncoming(RI->getReturnValue(), RI->getParent());
2293       }
2294     }
2295 
2296     // Add a branch to the merge points and remove return instructions.
2297     DebugLoc Loc;
2298     for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2299       ReturnInst *RI = Returns[i];
2300       BranchInst* BI = BranchInst::Create(AfterCallBB, RI);
2301       Loc = RI->getDebugLoc();
2302       BI->setDebugLoc(Loc);
2303       RI->eraseFromParent();
2304     }
2305     // We need to set the debug location to *somewhere* inside the
2306     // inlined function. The line number may be nonsensical, but the
2307     // instruction will at least be associated with the right
2308     // function.
2309     if (CreatedBranchToNormalDest)
2310       CreatedBranchToNormalDest->setDebugLoc(Loc);
2311   } else if (!Returns.empty()) {
2312     // Otherwise, if there is exactly one return value, just replace anything
2313     // using the return value of the call with the computed value.
2314     if (!TheCall->use_empty()) {
2315       if (TheCall == Returns[0]->getReturnValue())
2316         TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
2317       else
2318         TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
2319     }
2320 
2321     // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
2322     BasicBlock *ReturnBB = Returns[0]->getParent();
2323     ReturnBB->replaceAllUsesWith(AfterCallBB);
2324 
2325     // Splice the code from the return block into the block that it will return
2326     // to, which contains the code that was after the call.
2327     AfterCallBB->getInstList().splice(AfterCallBB->begin(),
2328                                       ReturnBB->getInstList());
2329 
2330     if (CreatedBranchToNormalDest)
2331       CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());
2332 
2333     // Delete the return instruction now and empty ReturnBB now.
2334     Returns[0]->eraseFromParent();
2335     ReturnBB->eraseFromParent();
2336   } else if (!TheCall->use_empty()) {
2337     // No returns, but something is using the return value of the call.  Just
2338     // nuke the result.
2339     TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
2340   }
2341 
2342   // Since we are now done with the Call/Invoke, we can delete it.
2343   TheCall->eraseFromParent();
2344 
2345   // If we inlined any musttail calls and the original return is now
2346   // unreachable, delete it.  It can only contain a bitcast and ret.
2347   if (InlinedMustTailCalls && pred_begin(AfterCallBB) == pred_end(AfterCallBB))
2348     AfterCallBB->eraseFromParent();
2349 
2350   // We should always be able to fold the entry block of the function into the
2351   // single predecessor of the block...
2352   assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
2353   BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);
2354 
2355   // Splice the code entry block into calling block, right before the
2356   // unconditional branch.
2357   CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes
2358   OrigBB->getInstList().splice(Br->getIterator(), CalleeEntry->getInstList());
2359 
2360   // Remove the unconditional branch.
2361   OrigBB->getInstList().erase(Br);
2362 
2363   // Now we can remove the CalleeEntry block, which is now empty.
2364   Caller->getBasicBlockList().erase(CalleeEntry);
2365 
2366   // If we inserted a phi node, check to see if it has a single value (e.g. all
2367   // the entries are the same or undef).  If so, remove the PHI so it doesn't
2368   // block other optimizations.
2369   if (PHI) {
2370     AssumptionCache *AC =
2371         IFI.GetAssumptionCache ? &(*IFI.GetAssumptionCache)(*Caller) : nullptr;
2372     auto &DL = Caller->getParent()->getDataLayout();
2373     if (Value *V = SimplifyInstruction(PHI, {DL, nullptr, nullptr, AC})) {
2374       PHI->replaceAllUsesWith(V);
2375       PHI->eraseFromParent();
2376     }
2377   }
2378 
2379   return true;
2380 }
2381