1 //===- InlineFunction.cpp - Code to perform function inlining -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements inlining of a function into a call site, resolving
10 // parameters and the return value as appropriate.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/ADT/DenseMap.h"
15 #include "llvm/ADT/None.h"
16 #include "llvm/ADT/Optional.h"
17 #include "llvm/ADT/STLExtras.h"
18 #include "llvm/ADT/SetVector.h"
19 #include "llvm/ADT/SmallPtrSet.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/ADT/StringExtras.h"
22 #include "llvm/ADT/iterator_range.h"
23 #include "llvm/Analysis/AliasAnalysis.h"
24 #include "llvm/Analysis/AssumptionCache.h"
25 #include "llvm/Analysis/BlockFrequencyInfo.h"
26 #include "llvm/Analysis/CallGraph.h"
27 #include "llvm/Analysis/CaptureTracking.h"
28 #include "llvm/Analysis/EHPersonalities.h"
29 #include "llvm/Analysis/InstructionSimplify.h"
30 #include "llvm/Analysis/ProfileSummaryInfo.h"
31 #include "llvm/Transforms/Utils/Local.h"
32 #include "llvm/Analysis/ValueTracking.h"
33 #include "llvm/Analysis/VectorUtils.h"
34 #include "llvm/IR/Argument.h"
35 #include "llvm/IR/BasicBlock.h"
36 #include "llvm/IR/CFG.h"
37 #include "llvm/IR/CallSite.h"
38 #include "llvm/IR/Constant.h"
39 #include "llvm/IR/Constants.h"
40 #include "llvm/IR/DIBuilder.h"
41 #include "llvm/IR/DataLayout.h"
42 #include "llvm/IR/DebugInfoMetadata.h"
43 #include "llvm/IR/DebugLoc.h"
44 #include "llvm/IR/DerivedTypes.h"
45 #include "llvm/IR/Dominators.h"
46 #include "llvm/IR/Function.h"
47 #include "llvm/IR/IRBuilder.h"
48 #include "llvm/IR/InstrTypes.h"
49 #include "llvm/IR/Instruction.h"
50 #include "llvm/IR/Instructions.h"
51 #include "llvm/IR/IntrinsicInst.h"
52 #include "llvm/IR/Intrinsics.h"
53 #include "llvm/IR/LLVMContext.h"
54 #include "llvm/IR/MDBuilder.h"
55 #include "llvm/IR/Metadata.h"
56 #include "llvm/IR/Module.h"
57 #include "llvm/IR/Type.h"
58 #include "llvm/IR/User.h"
59 #include "llvm/IR/Value.h"
60 #include "llvm/Support/Casting.h"
61 #include "llvm/Support/CommandLine.h"
62 #include "llvm/Support/ErrorHandling.h"
63 #include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
64 #include "llvm/Transforms/Utils/Cloning.h"
65 #include "llvm/Transforms/Utils/ValueMapper.h"
66 #include <algorithm>
67 #include <cassert>
68 #include <cstdint>
69 #include <iterator>
70 #include <limits>
71 #include <string>
72 #include <utility>
73 #include <vector>
74 
75 using namespace llvm;
76 using ProfileCount = Function::ProfileCount;
77 
78 static cl::opt<bool>
79 EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
80   cl::Hidden,
81   cl::desc("Convert noalias attributes to metadata during inlining."));
82 
83 static cl::opt<bool>
84 PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining",
85   cl::init(true), cl::Hidden,
86   cl::desc("Convert align attributes to assumptions during inlining."));
87 
88 static cl::opt<bool> UpdateReturnAttributes(
89         "update-return-attrs", cl::init(true), cl::Hidden,
90             cl::desc("Update return attributes on calls within inlined body"));
91 
92 static cl::opt<unsigned> InlinerAttributeWindow(
93     "max-inst-checked-for-throw-during-inlining", cl::Hidden,
94     cl::desc("the maximum number of instructions analyzed for may throw during "
95              "attribute inference in inlined body"),
96     cl::init(4));
97 
98 llvm::InlineResult llvm::InlineFunction(CallBase *CB, InlineFunctionInfo &IFI,
99                                         AAResults *CalleeAAR,
100                                         bool InsertLifetime) {
101   return InlineFunction(CallSite(CB), IFI, CalleeAAR, InsertLifetime);
102 }
103 
104 namespace {
105 
106   /// A class for recording information about inlining a landing pad.
107   class LandingPadInliningInfo {
108     /// Destination of the invoke's unwind.
109     BasicBlock *OuterResumeDest;
110 
111     /// Destination for the callee's resume.
112     BasicBlock *InnerResumeDest = nullptr;
113 
114     /// LandingPadInst associated with the invoke.
115     LandingPadInst *CallerLPad = nullptr;
116 
117     /// PHI for EH values from landingpad insts.
118     PHINode *InnerEHValuesPHI = nullptr;
119 
120     SmallVector<Value*, 8> UnwindDestPHIValues;
121 
122   public:
123     LandingPadInliningInfo(InvokeInst *II)
124         : OuterResumeDest(II->getUnwindDest()) {
125       // If there are PHI nodes in the unwind destination block, we need to keep
126       // track of which values came into them from the invoke before removing
127       // the edge from this block.
128       BasicBlock *InvokeBB = II->getParent();
129       BasicBlock::iterator I = OuterResumeDest->begin();
130       for (; isa<PHINode>(I); ++I) {
131         // Save the value to use for this edge.
132         PHINode *PHI = cast<PHINode>(I);
133         UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
134       }
135 
136       CallerLPad = cast<LandingPadInst>(I);
137     }
138 
139     /// The outer unwind destination is the target of
140     /// unwind edges introduced for calls within the inlined function.
141     BasicBlock *getOuterResumeDest() const {
142       return OuterResumeDest;
143     }
144 
145     BasicBlock *getInnerResumeDest();
146 
147     LandingPadInst *getLandingPadInst() const { return CallerLPad; }
148 
149     /// Forward the 'resume' instruction to the caller's landing pad block.
150     /// When the landing pad block has only one predecessor, this is
151     /// a simple branch. When there is more than one predecessor, we need to
152     /// split the landing pad block after the landingpad instruction and jump
153     /// to there.
154     void forwardResume(ResumeInst *RI,
155                        SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);
156 
157     /// Add incoming-PHI values to the unwind destination block for the given
158     /// basic block, using the values for the original invoke's source block.
159     void addIncomingPHIValuesFor(BasicBlock *BB) const {
160       addIncomingPHIValuesForInto(BB, OuterResumeDest);
161     }
162 
163     void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
164       BasicBlock::iterator I = dest->begin();
165       for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
166         PHINode *phi = cast<PHINode>(I);
167         phi->addIncoming(UnwindDestPHIValues[i], src);
168       }
169     }
170   };
171 
172 } // end anonymous namespace
173 
174 /// Get or create a target for the branch from ResumeInsts.
175 BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
176   if (InnerResumeDest) return InnerResumeDest;
177 
178   // Split the landing pad.
179   BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator();
180   InnerResumeDest =
181     OuterResumeDest->splitBasicBlock(SplitPoint,
182                                      OuterResumeDest->getName() + ".body");
183 
184   // The number of incoming edges we expect to the inner landing pad.
185   const unsigned PHICapacity = 2;
186 
187   // Create corresponding new PHIs for all the PHIs in the outer landing pad.
188   Instruction *InsertPoint = &InnerResumeDest->front();
189   BasicBlock::iterator I = OuterResumeDest->begin();
190   for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
191     PHINode *OuterPHI = cast<PHINode>(I);
192     PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
193                                         OuterPHI->getName() + ".lpad-body",
194                                         InsertPoint);
195     OuterPHI->replaceAllUsesWith(InnerPHI);
196     InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
197   }
198 
199   // Create a PHI for the exception values.
200   InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
201                                      "eh.lpad-body", InsertPoint);
202   CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
203   InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);
204 
205   // All done.
206   return InnerResumeDest;
207 }
208 
209 /// Forward the 'resume' instruction to the caller's landing pad block.
210 /// When the landing pad block has only one predecessor, this is a simple
211 /// branch. When there is more than one predecessor, we need to split the
212 /// landing pad block after the landingpad instruction and jump to there.
213 void LandingPadInliningInfo::forwardResume(
214     ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) {
215   BasicBlock *Dest = getInnerResumeDest();
216   BasicBlock *Src = RI->getParent();
217 
218   BranchInst::Create(Dest, Src);
219 
220   // Update the PHIs in the destination. They were inserted in an order which
221   // makes this work.
222   addIncomingPHIValuesForInto(Src, Dest);
223 
224   InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
225   RI->eraseFromParent();
226 }
227 
228 /// Helper for getUnwindDestToken/getUnwindDestTokenHelper.
229 static Value *getParentPad(Value *EHPad) {
230   if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
231     return FPI->getParentPad();
232   return cast<CatchSwitchInst>(EHPad)->getParentPad();
233 }
234 
235 using UnwindDestMemoTy = DenseMap<Instruction *, Value *>;
236 
237 /// Helper for getUnwindDestToken that does the descendant-ward part of
238 /// the search.
239 static Value *getUnwindDestTokenHelper(Instruction *EHPad,
240                                        UnwindDestMemoTy &MemoMap) {
241   SmallVector<Instruction *, 8> Worklist(1, EHPad);
242 
243   while (!Worklist.empty()) {
244     Instruction *CurrentPad = Worklist.pop_back_val();
245     // We only put pads on the worklist that aren't in the MemoMap.  When
246     // we find an unwind dest for a pad we may update its ancestors, but
247     // the queue only ever contains uncles/great-uncles/etc. of CurrentPad,
248     // so they should never get updated while queued on the worklist.
249     assert(!MemoMap.count(CurrentPad));
250     Value *UnwindDestToken = nullptr;
251     if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
252       if (CatchSwitch->hasUnwindDest()) {
253         UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI();
254       } else {
255         // Catchswitch doesn't have a 'nounwind' variant, and one might be
256         // annotated as "unwinds to caller" when really it's nounwind (see
257         // e.g. SimplifyCFGOpt::SimplifyUnreachable), so we can't infer the
258         // parent's unwind dest from this.  We can check its catchpads'
259         // descendants, since they might include a cleanuppad with an
260         // "unwinds to caller" cleanupret, which can be trusted.
261         for (auto HI = CatchSwitch->handler_begin(),
262                   HE = CatchSwitch->handler_end();
263              HI != HE && !UnwindDestToken; ++HI) {
264           BasicBlock *HandlerBlock = *HI;
265           auto *CatchPad = cast<CatchPadInst>(HandlerBlock->getFirstNonPHI());
266           for (User *Child : CatchPad->users()) {
267             // Intentionally ignore invokes here -- since the catchswitch is
268             // marked "unwind to caller", it would be a verifier error if it
269             // contained an invoke which unwinds out of it, so any invoke we'd
270             // encounter must unwind to some child of the catch.
271             if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
272               continue;
273 
274             Instruction *ChildPad = cast<Instruction>(Child);
275             auto Memo = MemoMap.find(ChildPad);
276             if (Memo == MemoMap.end()) {
277               // Haven't figured out this child pad yet; queue it.
278               Worklist.push_back(ChildPad);
279               continue;
280             }
281             // We've already checked this child, but might have found that
282             // it offers no proof either way.
283             Value *ChildUnwindDestToken = Memo->second;
284             if (!ChildUnwindDestToken)
285               continue;
286             // We already know the child's unwind dest, which can either
287             // be ConstantTokenNone to indicate unwind to caller, or can
288             // be another child of the catchpad.  Only the former indicates
289             // the unwind dest of the catchswitch.
290             if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
291               UnwindDestToken = ChildUnwindDestToken;
292               break;
293             }
294             assert(getParentPad(ChildUnwindDestToken) == CatchPad);
295           }
296         }
297       }
298     } else {
299       auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
300       for (User *U : CleanupPad->users()) {
301         if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
302           if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
303             UnwindDestToken = RetUnwindDest->getFirstNonPHI();
304           else
305             UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext());
306           break;
307         }
308         Value *ChildUnwindDestToken;
309         if (auto *Invoke = dyn_cast<InvokeInst>(U)) {
310           ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI();
311         } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
312           Instruction *ChildPad = cast<Instruction>(U);
313           auto Memo = MemoMap.find(ChildPad);
314           if (Memo == MemoMap.end()) {
315             // Haven't resolved this child yet; queue it and keep searching.
316             Worklist.push_back(ChildPad);
317             continue;
318           }
319           // We've checked this child, but still need to ignore it if it
320           // had no proof either way.
321           ChildUnwindDestToken = Memo->second;
322           if (!ChildUnwindDestToken)
323             continue;
324         } else {
325           // Not a relevant user of the cleanuppad
326           continue;
327         }
328         // In a well-formed program, the child/invoke must either unwind to
329         // an(other) child of the cleanup, or exit the cleanup.  In the
330         // first case, continue searching.
331         if (isa<Instruction>(ChildUnwindDestToken) &&
332             getParentPad(ChildUnwindDestToken) == CleanupPad)
333           continue;
334         UnwindDestToken = ChildUnwindDestToken;
335         break;
336       }
337     }
338     // If we haven't found an unwind dest for CurrentPad, we may have queued its
339     // children, so move on to the next in the worklist.
340     if (!UnwindDestToken)
341       continue;
342 
343     // Now we know that CurrentPad unwinds to UnwindDestToken.  It also exits
344     // any ancestors of CurrentPad up to but not including UnwindDestToken's
345     // parent pad.  Record this in the memo map, and check to see if the
346     // original EHPad being queried is one of the ones exited.
347     Value *UnwindParent;
348     if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
349       UnwindParent = getParentPad(UnwindPad);
350     else
351       UnwindParent = nullptr;
352     bool ExitedOriginalPad = false;
353     for (Instruction *ExitedPad = CurrentPad;
354          ExitedPad && ExitedPad != UnwindParent;
355          ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) {
356       // Skip over catchpads since they just follow their catchswitches.
357       if (isa<CatchPadInst>(ExitedPad))
358         continue;
359       MemoMap[ExitedPad] = UnwindDestToken;
360       ExitedOriginalPad |= (ExitedPad == EHPad);
361     }
362 
363     if (ExitedOriginalPad)
364       return UnwindDestToken;
365 
366     // Continue the search.
367   }
368 
369   // No definitive information is contained within this funclet.
370   return nullptr;
371 }
372 
373 /// Given an EH pad, find where it unwinds.  If it unwinds to an EH pad,
374 /// return that pad instruction.  If it unwinds to caller, return
375 /// ConstantTokenNone.  If it does not have a definitive unwind destination,
376 /// return nullptr.
377 ///
378 /// This routine gets invoked for calls in funclets in inlinees when inlining
379 /// an invoke.  Since many funclets don't have calls inside them, it's queried
380 /// on-demand rather than building a map of pads to unwind dests up front.
381 /// Determining a funclet's unwind dest may require recursively searching its
382 /// descendants, and also ancestors and cousins if the descendants don't provide
383 /// an answer.  Since most funclets will have their unwind dest immediately
384 /// available as the unwind dest of a catchswitch or cleanupret, this routine
385 /// searches top-down from the given pad and then up. To avoid worst-case
386 /// quadratic run-time given that approach, it uses a memo map to avoid
387 /// re-processing funclet trees.  The callers that rewrite the IR as they go
388 /// take advantage of this, for correctness, by checking/forcing rewritten
389 /// pads' entries to match the original callee view.
390 static Value *getUnwindDestToken(Instruction *EHPad,
391                                  UnwindDestMemoTy &MemoMap) {
392   // Catchpads unwind to the same place as their catchswitch;
393   // redirct any queries on catchpads so the code below can
394   // deal with just catchswitches and cleanuppads.
395   if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))
396     EHPad = CPI->getCatchSwitch();
397 
398   // Check if we've already determined the unwind dest for this pad.
399   auto Memo = MemoMap.find(EHPad);
400   if (Memo != MemoMap.end())
401     return Memo->second;
402 
403   // Search EHPad and, if necessary, its descendants.
404   Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
405   assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));
406   if (UnwindDestToken)
407     return UnwindDestToken;
408 
409   // No information is available for this EHPad from itself or any of its
410   // descendants.  An unwind all the way out to a pad in the caller would
411   // need also to agree with the unwind dest of the parent funclet, so
412   // search up the chain to try to find a funclet with information.  Put
413   // null entries in the memo map to avoid re-processing as we go up.
414   MemoMap[EHPad] = nullptr;
415 #ifndef NDEBUG
416   SmallPtrSet<Instruction *, 4> TempMemos;
417   TempMemos.insert(EHPad);
418 #endif
419   Instruction *LastUselessPad = EHPad;
420   Value *AncestorToken;
421   for (AncestorToken = getParentPad(EHPad);
422        auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
423        AncestorToken = getParentPad(AncestorToken)) {
424     // Skip over catchpads since they just follow their catchswitches.
425     if (isa<CatchPadInst>(AncestorPad))
426       continue;
427     // If the MemoMap had an entry mapping AncestorPad to nullptr, since we
428     // haven't yet called getUnwindDestTokenHelper for AncestorPad in this
429     // call to getUnwindDestToken, that would mean that AncestorPad had no
430     // information in itself, its descendants, or its ancestors.  If that
431     // were the case, then we should also have recorded the lack of information
432     // for the descendant that we're coming from.  So assert that we don't
433     // find a null entry in the MemoMap for AncestorPad.
434     assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);
435     auto AncestorMemo = MemoMap.find(AncestorPad);
436     if (AncestorMemo == MemoMap.end()) {
437       UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
438     } else {
439       UnwindDestToken = AncestorMemo->second;
440     }
441     if (UnwindDestToken)
442       break;
443     LastUselessPad = AncestorPad;
444     MemoMap[LastUselessPad] = nullptr;
445 #ifndef NDEBUG
446     TempMemos.insert(LastUselessPad);
447 #endif
448   }
449 
450   // We know that getUnwindDestTokenHelper was called on LastUselessPad and
451   // returned nullptr (and likewise for EHPad and any of its ancestors up to
452   // LastUselessPad), so LastUselessPad has no information from below.  Since
453   // getUnwindDestTokenHelper must investigate all downward paths through
454   // no-information nodes to prove that a node has no information like this,
455   // and since any time it finds information it records it in the MemoMap for
456   // not just the immediately-containing funclet but also any ancestors also
457   // exited, it must be the case that, walking downward from LastUselessPad,
458   // visiting just those nodes which have not been mapped to an unwind dest
459   // by getUnwindDestTokenHelper (the nullptr TempMemos notwithstanding, since
460   // they are just used to keep getUnwindDestTokenHelper from repeating work),
461   // any node visited must have been exhaustively searched with no information
462   // for it found.
463   SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
464   while (!Worklist.empty()) {
465     Instruction *UselessPad = Worklist.pop_back_val();
466     auto Memo = MemoMap.find(UselessPad);
467     if (Memo != MemoMap.end() && Memo->second) {
468       // Here the name 'UselessPad' is a bit of a misnomer, because we've found
469       // that it is a funclet that does have information about unwinding to
470       // a particular destination; its parent was a useless pad.
471       // Since its parent has no information, the unwind edge must not escape
472       // the parent, and must target a sibling of this pad.  This local unwind
473       // gives us no information about EHPad.  Leave it and the subtree rooted
474       // at it alone.
475       assert(getParentPad(Memo->second) == getParentPad(UselessPad));
476       continue;
477     }
478     // We know we don't have information for UselesPad.  If it has an entry in
479     // the MemoMap (mapping it to nullptr), it must be one of the TempMemos
480     // added on this invocation of getUnwindDestToken; if a previous invocation
481     // recorded nullptr, it would have had to prove that the ancestors of
482     // UselessPad, which include LastUselessPad, had no information, and that
483     // in turn would have required proving that the descendants of
484     // LastUselesPad, which include EHPad, have no information about
485     // LastUselessPad, which would imply that EHPad was mapped to nullptr in
486     // the MemoMap on that invocation, which isn't the case if we got here.
487     assert(!MemoMap.count(UselessPad) || TempMemos.count(UselessPad));
488     // Assert as we enumerate users that 'UselessPad' doesn't have any unwind
489     // information that we'd be contradicting by making a map entry for it
490     // (which is something that getUnwindDestTokenHelper must have proved for
491     // us to get here).  Just assert on is direct users here; the checks in
492     // this downward walk at its descendants will verify that they don't have
493     // any unwind edges that exit 'UselessPad' either (i.e. they either have no
494     // unwind edges or unwind to a sibling).
495     MemoMap[UselessPad] = UnwindDestToken;
496     if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
497       assert(CatchSwitch->getUnwindDest() == nullptr && "Expected useless pad");
498       for (BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
499         auto *CatchPad = HandlerBlock->getFirstNonPHI();
500         for (User *U : CatchPad->users()) {
501           assert(
502               (!isa<InvokeInst>(U) ||
503                (getParentPad(
504                     cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
505                 CatchPad)) &&
506               "Expected useless pad");
507           if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
508             Worklist.push_back(cast<Instruction>(U));
509         }
510       }
511     } else {
512       assert(isa<CleanupPadInst>(UselessPad));
513       for (User *U : UselessPad->users()) {
514         assert(!isa<CleanupReturnInst>(U) && "Expected useless pad");
515         assert((!isa<InvokeInst>(U) ||
516                 (getParentPad(
517                      cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
518                  UselessPad)) &&
519                "Expected useless pad");
520         if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
521           Worklist.push_back(cast<Instruction>(U));
522       }
523     }
524   }
525 
526   return UnwindDestToken;
527 }
528 
529 /// When we inline a basic block into an invoke,
530 /// we have to turn all of the calls that can throw into invokes.
531 /// This function analyze BB to see if there are any calls, and if so,
532 /// it rewrites them to be invokes that jump to InvokeDest and fills in the PHI
533 /// nodes in that block with the values specified in InvokeDestPHIValues.
534 static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
535     BasicBlock *BB, BasicBlock *UnwindEdge,
536     UnwindDestMemoTy *FuncletUnwindMap = nullptr) {
537   for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
538     Instruction *I = &*BBI++;
539 
540     // We only need to check for function calls: inlined invoke
541     // instructions require no special handling.
542     CallInst *CI = dyn_cast<CallInst>(I);
543 
544     if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
545       continue;
546 
547     // We do not need to (and in fact, cannot) convert possibly throwing calls
548     // to @llvm.experimental_deoptimize (resp. @llvm.experimental.guard) into
549     // invokes.  The caller's "segment" of the deoptimization continuation
550     // attached to the newly inlined @llvm.experimental_deoptimize
551     // (resp. @llvm.experimental.guard) call should contain the exception
552     // handling logic, if any.
553     if (auto *F = CI->getCalledFunction())
554       if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
555           F->getIntrinsicID() == Intrinsic::experimental_guard)
556         continue;
557 
558     if (auto FuncletBundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
559       // This call is nested inside a funclet.  If that funclet has an unwind
560       // destination within the inlinee, then unwinding out of this call would
561       // be UB.  Rewriting this call to an invoke which targets the inlined
562       // invoke's unwind dest would give the call's parent funclet multiple
563       // unwind destinations, which is something that subsequent EH table
564       // generation can't handle and that the veirifer rejects.  So when we
565       // see such a call, leave it as a call.
566       auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
567       Value *UnwindDestToken =
568           getUnwindDestToken(FuncletPad, *FuncletUnwindMap);
569       if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
570         continue;
571 #ifndef NDEBUG
572       Instruction *MemoKey;
573       if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
574         MemoKey = CatchPad->getCatchSwitch();
575       else
576         MemoKey = FuncletPad;
577       assert(FuncletUnwindMap->count(MemoKey) &&
578              (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
579              "must get memoized to avoid confusing later searches");
580 #endif // NDEBUG
581     }
582 
583     changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);
584     return BB;
585   }
586   return nullptr;
587 }
588 
589 /// If we inlined an invoke site, we need to convert calls
590 /// in the body of the inlined function into invokes.
591 ///
592 /// II is the invoke instruction being inlined.  FirstNewBlock is the first
593 /// block of the inlined code (the last block is the end of the function),
594 /// and InlineCodeInfo is information about the code that got inlined.
595 static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock,
596                                     ClonedCodeInfo &InlinedCodeInfo) {
597   BasicBlock *InvokeDest = II->getUnwindDest();
598 
599   Function *Caller = FirstNewBlock->getParent();
600 
601   // The inlined code is currently at the end of the function, scan from the
602   // start of the inlined code to its end, checking for stuff we need to
603   // rewrite.
604   LandingPadInliningInfo Invoke(II);
605 
606   // Get all of the inlined landing pad instructions.
607   SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
608   for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end();
609        I != E; ++I)
610     if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
611       InlinedLPads.insert(II->getLandingPadInst());
612 
613   // Append the clauses from the outer landing pad instruction into the inlined
614   // landing pad instructions.
615   LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
616   for (LandingPadInst *InlinedLPad : InlinedLPads) {
617     unsigned OuterNum = OuterLPad->getNumClauses();
618     InlinedLPad->reserveClauses(OuterNum);
619     for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
620       InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
621     if (OuterLPad->isCleanup())
622       InlinedLPad->setCleanup(true);
623   }
624 
625   for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
626        BB != E; ++BB) {
627     if (InlinedCodeInfo.ContainsCalls)
628       if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
629               &*BB, Invoke.getOuterResumeDest()))
630         // Update any PHI nodes in the exceptional block to indicate that there
631         // is now a new entry in them.
632         Invoke.addIncomingPHIValuesFor(NewBB);
633 
634     // Forward any resumes that are remaining here.
635     if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
636       Invoke.forwardResume(RI, InlinedLPads);
637   }
638 
639   // Now that everything is happy, we have one final detail.  The PHI nodes in
640   // the exception destination block still have entries due to the original
641   // invoke instruction. Eliminate these entries (which might even delete the
642   // PHI node) now.
643   InvokeDest->removePredecessor(II->getParent());
644 }
645 
646 /// If we inlined an invoke site, we need to convert calls
647 /// in the body of the inlined function into invokes.
648 ///
649 /// II is the invoke instruction being inlined.  FirstNewBlock is the first
650 /// block of the inlined code (the last block is the end of the function),
651 /// and InlineCodeInfo is information about the code that got inlined.
652 static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock,
653                                ClonedCodeInfo &InlinedCodeInfo) {
654   BasicBlock *UnwindDest = II->getUnwindDest();
655   Function *Caller = FirstNewBlock->getParent();
656 
657   assert(UnwindDest->getFirstNonPHI()->isEHPad() && "unexpected BasicBlock!");
658 
659   // If there are PHI nodes in the unwind destination block, we need to keep
660   // track of which values came into them from the invoke before removing the
661   // edge from this block.
662   SmallVector<Value *, 8> UnwindDestPHIValues;
663   BasicBlock *InvokeBB = II->getParent();
664   for (Instruction &I : *UnwindDest) {
665     // Save the value to use for this edge.
666     PHINode *PHI = dyn_cast<PHINode>(&I);
667     if (!PHI)
668       break;
669     UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
670   }
671 
672   // Add incoming-PHI values to the unwind destination block for the given basic
673   // block, using the values for the original invoke's source block.
674   auto UpdatePHINodes = [&](BasicBlock *Src) {
675     BasicBlock::iterator I = UnwindDest->begin();
676     for (Value *V : UnwindDestPHIValues) {
677       PHINode *PHI = cast<PHINode>(I);
678       PHI->addIncoming(V, Src);
679       ++I;
680     }
681   };
682 
683   // This connects all the instructions which 'unwind to caller' to the invoke
684   // destination.
685   UnwindDestMemoTy FuncletUnwindMap;
686   for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
687        BB != E; ++BB) {
688     if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
689       if (CRI->unwindsToCaller()) {
690         auto *CleanupPad = CRI->getCleanupPad();
691         CleanupReturnInst::Create(CleanupPad, UnwindDest, CRI);
692         CRI->eraseFromParent();
693         UpdatePHINodes(&*BB);
694         // Finding a cleanupret with an unwind destination would confuse
695         // subsequent calls to getUnwindDestToken, so map the cleanuppad
696         // to short-circuit any such calls and recognize this as an "unwind
697         // to caller" cleanup.
698         assert(!FuncletUnwindMap.count(CleanupPad) ||
699                isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
700         FuncletUnwindMap[CleanupPad] =
701             ConstantTokenNone::get(Caller->getContext());
702       }
703     }
704 
705     Instruction *I = BB->getFirstNonPHI();
706     if (!I->isEHPad())
707       continue;
708 
709     Instruction *Replacement = nullptr;
710     if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
711       if (CatchSwitch->unwindsToCaller()) {
712         Value *UnwindDestToken;
713         if (auto *ParentPad =
714                 dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
715           // This catchswitch is nested inside another funclet.  If that
716           // funclet has an unwind destination within the inlinee, then
717           // unwinding out of this catchswitch would be UB.  Rewriting this
718           // catchswitch to unwind to the inlined invoke's unwind dest would
719           // give the parent funclet multiple unwind destinations, which is
720           // something that subsequent EH table generation can't handle and
721           // that the veirifer rejects.  So when we see such a call, leave it
722           // as "unwind to caller".
723           UnwindDestToken = getUnwindDestToken(ParentPad, FuncletUnwindMap);
724           if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
725             continue;
726         } else {
727           // This catchswitch has no parent to inherit constraints from, and
728           // none of its descendants can have an unwind edge that exits it and
729           // targets another funclet in the inlinee.  It may or may not have a
730           // descendant that definitively has an unwind to caller.  In either
731           // case, we'll have to assume that any unwinds out of it may need to
732           // be routed to the caller, so treat it as though it has a definitive
733           // unwind to caller.
734           UnwindDestToken = ConstantTokenNone::get(Caller->getContext());
735         }
736         auto *NewCatchSwitch = CatchSwitchInst::Create(
737             CatchSwitch->getParentPad(), UnwindDest,
738             CatchSwitch->getNumHandlers(), CatchSwitch->getName(),
739             CatchSwitch);
740         for (BasicBlock *PadBB : CatchSwitch->handlers())
741           NewCatchSwitch->addHandler(PadBB);
742         // Propagate info for the old catchswitch over to the new one in
743         // the unwind map.  This also serves to short-circuit any subsequent
744         // checks for the unwind dest of this catchswitch, which would get
745         // confused if they found the outer handler in the callee.
746         FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
747         Replacement = NewCatchSwitch;
748       }
749     } else if (!isa<FuncletPadInst>(I)) {
750       llvm_unreachable("unexpected EHPad!");
751     }
752 
753     if (Replacement) {
754       Replacement->takeName(I);
755       I->replaceAllUsesWith(Replacement);
756       I->eraseFromParent();
757       UpdatePHINodes(&*BB);
758     }
759   }
760 
761   if (InlinedCodeInfo.ContainsCalls)
762     for (Function::iterator BB = FirstNewBlock->getIterator(),
763                             E = Caller->end();
764          BB != E; ++BB)
765       if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
766               &*BB, UnwindDest, &FuncletUnwindMap))
767         // Update any PHI nodes in the exceptional block to indicate that there
768         // is now a new entry in them.
769         UpdatePHINodes(NewBB);
770 
771   // Now that everything is happy, we have one final detail.  The PHI nodes in
772   // the exception destination block still have entries due to the original
773   // invoke instruction. Eliminate these entries (which might even delete the
774   // PHI node) now.
775   UnwindDest->removePredecessor(InvokeBB);
776 }
777 
778 /// When inlining a call site that has !llvm.mem.parallel_loop_access or
779 /// llvm.access.group metadata, that metadata should be propagated to all
780 /// memory-accessing cloned instructions.
781 static void PropagateParallelLoopAccessMetadata(CallSite CS,
782                                                 ValueToValueMapTy &VMap) {
783   MDNode *M =
784     CS.getInstruction()->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
785   MDNode *CallAccessGroup =
786       CS.getInstruction()->getMetadata(LLVMContext::MD_access_group);
787   if (!M && !CallAccessGroup)
788     return;
789 
790   for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
791        VMI != VMIE; ++VMI) {
792     if (!VMI->second)
793       continue;
794 
795     Instruction *NI = dyn_cast<Instruction>(VMI->second);
796     if (!NI)
797       continue;
798 
799     if (M) {
800       if (MDNode *PM =
801               NI->getMetadata(LLVMContext::MD_mem_parallel_loop_access)) {
802         M = MDNode::concatenate(PM, M);
803       NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
804       } else if (NI->mayReadOrWriteMemory()) {
805         NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
806       }
807     }
808 
809     if (NI->mayReadOrWriteMemory()) {
810       MDNode *UnitedAccGroups = uniteAccessGroups(
811           NI->getMetadata(LLVMContext::MD_access_group), CallAccessGroup);
812       NI->setMetadata(LLVMContext::MD_access_group, UnitedAccGroups);
813     }
814   }
815 }
816 
817 /// When inlining a function that contains noalias scope metadata,
818 /// this metadata needs to be cloned so that the inlined blocks
819 /// have different "unique scopes" at every call site. Were this not done, then
820 /// aliasing scopes from a function inlined into a caller multiple times could
821 /// not be differentiated (and this would lead to miscompiles because the
822 /// non-aliasing property communicated by the metadata could have
823 /// call-site-specific control dependencies).
824 static void CloneAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap) {
825   const Function *CalledFunc = CS.getCalledFunction();
826   SetVector<const MDNode *> MD;
827 
828   // Note: We could only clone the metadata if it is already used in the
829   // caller. I'm omitting that check here because it might confuse
830   // inter-procedural alias analysis passes. We can revisit this if it becomes
831   // an efficiency or overhead problem.
832 
833   for (const BasicBlock &I : *CalledFunc)
834     for (const Instruction &J : I) {
835       if (const MDNode *M = J.getMetadata(LLVMContext::MD_alias_scope))
836         MD.insert(M);
837       if (const MDNode *M = J.getMetadata(LLVMContext::MD_noalias))
838         MD.insert(M);
839     }
840 
841   if (MD.empty())
842     return;
843 
844   // Walk the existing metadata, adding the complete (perhaps cyclic) chain to
845   // the set.
846   SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
847   while (!Queue.empty()) {
848     const MDNode *M = cast<MDNode>(Queue.pop_back_val());
849     for (unsigned i = 0, ie = M->getNumOperands(); i != ie; ++i)
850       if (const MDNode *M1 = dyn_cast<MDNode>(M->getOperand(i)))
851         if (MD.insert(M1))
852           Queue.push_back(M1);
853   }
854 
855   // Now we have a complete set of all metadata in the chains used to specify
856   // the noalias scopes and the lists of those scopes.
857   SmallVector<TempMDTuple, 16> DummyNodes;
858   DenseMap<const MDNode *, TrackingMDNodeRef> MDMap;
859   for (const MDNode *I : MD) {
860     DummyNodes.push_back(MDTuple::getTemporary(CalledFunc->getContext(), None));
861     MDMap[I].reset(DummyNodes.back().get());
862   }
863 
864   // Create new metadata nodes to replace the dummy nodes, replacing old
865   // metadata references with either a dummy node or an already-created new
866   // node.
867   for (const MDNode *I : MD) {
868     SmallVector<Metadata *, 4> NewOps;
869     for (unsigned i = 0, ie = I->getNumOperands(); i != ie; ++i) {
870       const Metadata *V = I->getOperand(i);
871       if (const MDNode *M = dyn_cast<MDNode>(V))
872         NewOps.push_back(MDMap[M]);
873       else
874         NewOps.push_back(const_cast<Metadata *>(V));
875     }
876 
877     MDNode *NewM = MDNode::get(CalledFunc->getContext(), NewOps);
878     MDTuple *TempM = cast<MDTuple>(MDMap[I]);
879     assert(TempM->isTemporary() && "Expected temporary node");
880 
881     TempM->replaceAllUsesWith(NewM);
882   }
883 
884   // Now replace the metadata in the new inlined instructions with the
885   // repacements from the map.
886   for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
887        VMI != VMIE; ++VMI) {
888     if (!VMI->second)
889       continue;
890 
891     Instruction *NI = dyn_cast<Instruction>(VMI->second);
892     if (!NI)
893       continue;
894 
895     if (MDNode *M = NI->getMetadata(LLVMContext::MD_alias_scope)) {
896       MDNode *NewMD = MDMap[M];
897       // If the call site also had alias scope metadata (a list of scopes to
898       // which instructions inside it might belong), propagate those scopes to
899       // the inlined instructions.
900       if (MDNode *CSM =
901               CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
902         NewMD = MDNode::concatenate(NewMD, CSM);
903       NI->setMetadata(LLVMContext::MD_alias_scope, NewMD);
904     } else if (NI->mayReadOrWriteMemory()) {
905       if (MDNode *M =
906               CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
907         NI->setMetadata(LLVMContext::MD_alias_scope, M);
908     }
909 
910     if (MDNode *M = NI->getMetadata(LLVMContext::MD_noalias)) {
911       MDNode *NewMD = MDMap[M];
912       // If the call site also had noalias metadata (a list of scopes with
913       // which instructions inside it don't alias), propagate those scopes to
914       // the inlined instructions.
915       if (MDNode *CSM =
916               CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
917         NewMD = MDNode::concatenate(NewMD, CSM);
918       NI->setMetadata(LLVMContext::MD_noalias, NewMD);
919     } else if (NI->mayReadOrWriteMemory()) {
920       if (MDNode *M = CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
921         NI->setMetadata(LLVMContext::MD_noalias, M);
922     }
923   }
924 }
925 
926 /// If the inlined function has noalias arguments,
927 /// then add new alias scopes for each noalias argument, tag the mapped noalias
928 /// parameters with noalias metadata specifying the new scope, and tag all
929 /// non-derived loads, stores and memory intrinsics with the new alias scopes.
930 static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap,
931                                   const DataLayout &DL, AAResults *CalleeAAR) {
932   if (!EnableNoAliasConversion)
933     return;
934 
935   const Function *CalledFunc = CS.getCalledFunction();
936   SmallVector<const Argument *, 4> NoAliasArgs;
937 
938   for (const Argument &Arg : CalledFunc->args())
939     if (CS.paramHasAttr(Arg.getArgNo(), Attribute::NoAlias) && !Arg.use_empty())
940       NoAliasArgs.push_back(&Arg);
941 
942   if (NoAliasArgs.empty())
943     return;
944 
945   // To do a good job, if a noalias variable is captured, we need to know if
946   // the capture point dominates the particular use we're considering.
947   DominatorTree DT;
948   DT.recalculate(const_cast<Function&>(*CalledFunc));
949 
950   // noalias indicates that pointer values based on the argument do not alias
951   // pointer values which are not based on it. So we add a new "scope" for each
952   // noalias function argument. Accesses using pointers based on that argument
953   // become part of that alias scope, accesses using pointers not based on that
954   // argument are tagged as noalias with that scope.
955 
956   DenseMap<const Argument *, MDNode *> NewScopes;
957   MDBuilder MDB(CalledFunc->getContext());
958 
959   // Create a new scope domain for this function.
960   MDNode *NewDomain =
961     MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
962   for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
963     const Argument *A = NoAliasArgs[i];
964 
965     std::string Name = std::string(CalledFunc->getName());
966     if (A->hasName()) {
967       Name += ": %";
968       Name += A->getName();
969     } else {
970       Name += ": argument ";
971       Name += utostr(i);
972     }
973 
974     // Note: We always create a new anonymous root here. This is true regardless
975     // of the linkage of the callee because the aliasing "scope" is not just a
976     // property of the callee, but also all control dependencies in the caller.
977     MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
978     NewScopes.insert(std::make_pair(A, NewScope));
979   }
980 
981   // Iterate over all new instructions in the map; for all memory-access
982   // instructions, add the alias scope metadata.
983   for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
984        VMI != VMIE; ++VMI) {
985     if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
986       if (!VMI->second)
987         continue;
988 
989       Instruction *NI = dyn_cast<Instruction>(VMI->second);
990       if (!NI)
991         continue;
992 
993       bool IsArgMemOnlyCall = false, IsFuncCall = false;
994       SmallVector<const Value *, 2> PtrArgs;
995 
996       if (const LoadInst *LI = dyn_cast<LoadInst>(I))
997         PtrArgs.push_back(LI->getPointerOperand());
998       else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
999         PtrArgs.push_back(SI->getPointerOperand());
1000       else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
1001         PtrArgs.push_back(VAAI->getPointerOperand());
1002       else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
1003         PtrArgs.push_back(CXI->getPointerOperand());
1004       else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
1005         PtrArgs.push_back(RMWI->getPointerOperand());
1006       else if (const auto *Call = dyn_cast<CallBase>(I)) {
1007         // If we know that the call does not access memory, then we'll still
1008         // know that about the inlined clone of this call site, and we don't
1009         // need to add metadata.
1010         if (Call->doesNotAccessMemory())
1011           continue;
1012 
1013         IsFuncCall = true;
1014         if (CalleeAAR) {
1015           FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(Call);
1016           if (AAResults::onlyAccessesArgPointees(MRB))
1017             IsArgMemOnlyCall = true;
1018         }
1019 
1020         for (Value *Arg : Call->args()) {
1021           // We need to check the underlying objects of all arguments, not just
1022           // the pointer arguments, because we might be passing pointers as
1023           // integers, etc.
1024           // However, if we know that the call only accesses pointer arguments,
1025           // then we only need to check the pointer arguments.
1026           if (IsArgMemOnlyCall && !Arg->getType()->isPointerTy())
1027             continue;
1028 
1029           PtrArgs.push_back(Arg);
1030         }
1031       }
1032 
1033       // If we found no pointers, then this instruction is not suitable for
1034       // pairing with an instruction to receive aliasing metadata.
1035       // However, if this is a call, this we might just alias with none of the
1036       // noalias arguments.
1037       if (PtrArgs.empty() && !IsFuncCall)
1038         continue;
1039 
1040       // It is possible that there is only one underlying object, but you
1041       // need to go through several PHIs to see it, and thus could be
1042       // repeated in the Objects list.
1043       SmallPtrSet<const Value *, 4> ObjSet;
1044       SmallVector<Metadata *, 4> Scopes, NoAliases;
1045 
1046       SmallSetVector<const Argument *, 4> NAPtrArgs;
1047       for (const Value *V : PtrArgs) {
1048         SmallVector<const Value *, 4> Objects;
1049         GetUnderlyingObjects(V, Objects, DL, /* LI = */ nullptr);
1050 
1051         for (const Value *O : Objects)
1052           ObjSet.insert(O);
1053       }
1054 
1055       // Figure out if we're derived from anything that is not a noalias
1056       // argument.
1057       bool CanDeriveViaCapture = false, UsesAliasingPtr = false;
1058       for (const Value *V : ObjSet) {
1059         // Is this value a constant that cannot be derived from any pointer
1060         // value (we need to exclude constant expressions, for example, that
1061         // are formed from arithmetic on global symbols).
1062         bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
1063                              isa<ConstantPointerNull>(V) ||
1064                              isa<ConstantDataVector>(V) || isa<UndefValue>(V);
1065         if (IsNonPtrConst)
1066           continue;
1067 
1068         // If this is anything other than a noalias argument, then we cannot
1069         // completely describe the aliasing properties using alias.scope
1070         // metadata (and, thus, won't add any).
1071         if (const Argument *A = dyn_cast<Argument>(V)) {
1072           if (!CS.paramHasAttr(A->getArgNo(), Attribute::NoAlias))
1073             UsesAliasingPtr = true;
1074         } else {
1075           UsesAliasingPtr = true;
1076         }
1077 
1078         // If this is not some identified function-local object (which cannot
1079         // directly alias a noalias argument), or some other argument (which,
1080         // by definition, also cannot alias a noalias argument), then we could
1081         // alias a noalias argument that has been captured).
1082         if (!isa<Argument>(V) &&
1083             !isIdentifiedFunctionLocal(const_cast<Value*>(V)))
1084           CanDeriveViaCapture = true;
1085       }
1086 
1087       // A function call can always get captured noalias pointers (via other
1088       // parameters, globals, etc.).
1089       if (IsFuncCall && !IsArgMemOnlyCall)
1090         CanDeriveViaCapture = true;
1091 
1092       // First, we want to figure out all of the sets with which we definitely
1093       // don't alias. Iterate over all noalias set, and add those for which:
1094       //   1. The noalias argument is not in the set of objects from which we
1095       //      definitely derive.
1096       //   2. The noalias argument has not yet been captured.
1097       // An arbitrary function that might load pointers could see captured
1098       // noalias arguments via other noalias arguments or globals, and so we
1099       // must always check for prior capture.
1100       for (const Argument *A : NoAliasArgs) {
1101         if (!ObjSet.count(A) && (!CanDeriveViaCapture ||
1102                                  // It might be tempting to skip the
1103                                  // PointerMayBeCapturedBefore check if
1104                                  // A->hasNoCaptureAttr() is true, but this is
1105                                  // incorrect because nocapture only guarantees
1106                                  // that no copies outlive the function, not
1107                                  // that the value cannot be locally captured.
1108                                  !PointerMayBeCapturedBefore(A,
1109                                    /* ReturnCaptures */ false,
1110                                    /* StoreCaptures */ false, I, &DT)))
1111           NoAliases.push_back(NewScopes[A]);
1112       }
1113 
1114       if (!NoAliases.empty())
1115         NI->setMetadata(LLVMContext::MD_noalias,
1116                         MDNode::concatenate(
1117                             NI->getMetadata(LLVMContext::MD_noalias),
1118                             MDNode::get(CalledFunc->getContext(), NoAliases)));
1119 
1120       // Next, we want to figure out all of the sets to which we might belong.
1121       // We might belong to a set if the noalias argument is in the set of
1122       // underlying objects. If there is some non-noalias argument in our list
1123       // of underlying objects, then we cannot add a scope because the fact
1124       // that some access does not alias with any set of our noalias arguments
1125       // cannot itself guarantee that it does not alias with this access
1126       // (because there is some pointer of unknown origin involved and the
1127       // other access might also depend on this pointer). We also cannot add
1128       // scopes to arbitrary functions unless we know they don't access any
1129       // non-parameter pointer-values.
1130       bool CanAddScopes = !UsesAliasingPtr;
1131       if (CanAddScopes && IsFuncCall)
1132         CanAddScopes = IsArgMemOnlyCall;
1133 
1134       if (CanAddScopes)
1135         for (const Argument *A : NoAliasArgs) {
1136           if (ObjSet.count(A))
1137             Scopes.push_back(NewScopes[A]);
1138         }
1139 
1140       if (!Scopes.empty())
1141         NI->setMetadata(
1142             LLVMContext::MD_alias_scope,
1143             MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
1144                                 MDNode::get(CalledFunc->getContext(), Scopes)));
1145     }
1146   }
1147 }
1148 
1149 static bool MayContainThrowingOrExitingCall(Instruction *Begin,
1150                                             Instruction *End) {
1151 
1152   assert(Begin->getParent() == End->getParent() &&
1153          "Expected to be in same basic block!");
1154   unsigned NumInstChecked = 0;
1155   // Check that all instructions in the range [Begin, End) are guaranteed to
1156   // transfer execution to successor.
1157   for (auto &I : make_range(Begin->getIterator(), End->getIterator()))
1158     if (NumInstChecked++ > InlinerAttributeWindow ||
1159         !isGuaranteedToTransferExecutionToSuccessor(&I))
1160       return true;
1161   return false;
1162 }
1163 
1164 static AttrBuilder IdentifyValidAttributes(CallSite CS) {
1165 
1166   AttrBuilder AB(CS.getAttributes(), AttributeList::ReturnIndex);
1167   if (AB.empty())
1168     return AB;
1169   AttrBuilder Valid;
1170   // Only allow these white listed attributes to be propagated back to the
1171   // callee. This is because other attributes may only be valid on the call
1172   // itself, i.e. attributes such as signext and zeroext.
1173   if (auto DerefBytes = AB.getDereferenceableBytes())
1174     Valid.addDereferenceableAttr(DerefBytes);
1175   if (auto DerefOrNullBytes = AB.getDereferenceableOrNullBytes())
1176     Valid.addDereferenceableOrNullAttr(DerefOrNullBytes);
1177   if (AB.contains(Attribute::NoAlias))
1178     Valid.addAttribute(Attribute::NoAlias);
1179   if (AB.contains(Attribute::NonNull))
1180     Valid.addAttribute(Attribute::NonNull);
1181   return Valid;
1182 }
1183 
1184 static void AddReturnAttributes(CallSite CS, ValueToValueMapTy &VMap) {
1185   if (!UpdateReturnAttributes)
1186     return;
1187 
1188   AttrBuilder Valid = IdentifyValidAttributes(CS);
1189   if (Valid.empty())
1190     return;
1191   auto *CalledFunction = CS.getCalledFunction();
1192   auto &Context = CalledFunction->getContext();
1193 
1194   for (auto &BB : *CalledFunction) {
1195     auto *RI = dyn_cast<ReturnInst>(BB.getTerminator());
1196     if (!RI || !isa<CallBase>(RI->getOperand(0)))
1197       continue;
1198     auto *RetVal = cast<CallBase>(RI->getOperand(0));
1199     // Sanity check that the cloned RetVal exists and is a call, otherwise we
1200     // cannot add the attributes on the cloned RetVal.
1201     // Simplification during inlining could have transformed the cloned
1202     // instruction.
1203     auto *NewRetVal = dyn_cast_or_null<CallBase>(VMap.lookup(RetVal));
1204     if (!NewRetVal)
1205       continue;
1206     // Backward propagation of attributes to the returned value may be incorrect
1207     // if it is control flow dependent.
1208     // Consider:
1209     // @callee {
1210     //  %rv = call @foo()
1211     //  %rv2 = call @bar()
1212     //  if (%rv2 != null)
1213     //    return %rv2
1214     //  if (%rv == null)
1215     //    exit()
1216     //  return %rv
1217     // }
1218     // caller() {
1219     //   %val = call nonnull @callee()
1220     // }
1221     // Here we cannot add the nonnull attribute on either foo or bar. So, we
1222     // limit the check to both RetVal and RI are in the same basic block and
1223     // there are no throwing/exiting instructions between these instructions.
1224     if (RI->getParent() != RetVal->getParent() ||
1225         MayContainThrowingOrExitingCall(RetVal, RI))
1226       continue;
1227     // Add to the existing attributes of NewRetVal, i.e. the cloned call
1228     // instruction.
1229     // NB! When we have the same attribute already existing on NewRetVal, but
1230     // with a differing value, the AttributeList's merge API honours the already
1231     // existing attribute value (i.e. attributes such as dereferenceable,
1232     // dereferenceable_or_null etc). See AttrBuilder::merge for more details.
1233     AttributeList AL = NewRetVal->getAttributes();
1234     AttributeList NewAL =
1235         AL.addAttributes(Context, AttributeList::ReturnIndex, Valid);
1236     NewRetVal->setAttributes(NewAL);
1237   }
1238 }
1239 
1240 /// If the inlined function has non-byval align arguments, then
1241 /// add @llvm.assume-based alignment assumptions to preserve this information.
1242 static void AddAlignmentAssumptions(CallSite CS, InlineFunctionInfo &IFI) {
1243   if (!PreserveAlignmentAssumptions || !IFI.GetAssumptionCache)
1244     return;
1245 
1246   AssumptionCache *AC = &(*IFI.GetAssumptionCache)(*CS.getCaller());
1247   auto &DL = CS.getCaller()->getParent()->getDataLayout();
1248 
1249   // To avoid inserting redundant assumptions, we should check for assumptions
1250   // already in the caller. To do this, we might need a DT of the caller.
1251   DominatorTree DT;
1252   bool DTCalculated = false;
1253 
1254   Function *CalledFunc = CS.getCalledFunction();
1255   for (Argument &Arg : CalledFunc->args()) {
1256     unsigned Align = Arg.getType()->isPointerTy() ? Arg.getParamAlignment() : 0;
1257     if (Align && !Arg.hasByValOrInAllocaAttr() && !Arg.hasNUses(0)) {
1258       if (!DTCalculated) {
1259         DT.recalculate(*CS.getCaller());
1260         DTCalculated = true;
1261       }
1262 
1263       // If we can already prove the asserted alignment in the context of the
1264       // caller, then don't bother inserting the assumption.
1265       Value *ArgVal = CS.getArgument(Arg.getArgNo());
1266       if (getKnownAlignment(ArgVal, DL, CS.getInstruction(), AC, &DT) >= Align)
1267         continue;
1268 
1269       CallInst *NewAsmp = IRBuilder<>(CS.getInstruction())
1270                               .CreateAlignmentAssumption(DL, ArgVal, Align);
1271       AC->registerAssumption(NewAsmp);
1272     }
1273   }
1274 }
1275 
1276 /// Once we have cloned code over from a callee into the caller,
1277 /// update the specified callgraph to reflect the changes we made.
1278 /// Note that it's possible that not all code was copied over, so only
1279 /// some edges of the callgraph may remain.
1280 static void UpdateCallGraphAfterInlining(CallSite CS,
1281                                          Function::iterator FirstNewBlock,
1282                                          ValueToValueMapTy &VMap,
1283                                          InlineFunctionInfo &IFI) {
1284   CallGraph &CG = *IFI.CG;
1285   const Function *Caller = CS.getCaller();
1286   const Function *Callee = CS.getCalledFunction();
1287   CallGraphNode *CalleeNode = CG[Callee];
1288   CallGraphNode *CallerNode = CG[Caller];
1289 
1290   // Since we inlined some uninlined call sites in the callee into the caller,
1291   // add edges from the caller to all of the callees of the callee.
1292   CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();
1293 
1294   // Consider the case where CalleeNode == CallerNode.
1295   CallGraphNode::CalledFunctionsVector CallCache;
1296   if (CalleeNode == CallerNode) {
1297     CallCache.assign(I, E);
1298     I = CallCache.begin();
1299     E = CallCache.end();
1300   }
1301 
1302   for (; I != E; ++I) {
1303     const Value *OrigCall = I->first;
1304 
1305     ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
1306     // Only copy the edge if the call was inlined!
1307     if (VMI == VMap.end() || VMI->second == nullptr)
1308       continue;
1309 
1310     // If the call was inlined, but then constant folded, there is no edge to
1311     // add.  Check for this case.
1312     auto *NewCall = dyn_cast<CallBase>(VMI->second);
1313     if (!NewCall)
1314       continue;
1315 
1316     // We do not treat intrinsic calls like real function calls because we
1317     // expect them to become inline code; do not add an edge for an intrinsic.
1318     if (NewCall->getCalledFunction() &&
1319         NewCall->getCalledFunction()->isIntrinsic())
1320       continue;
1321 
1322     // Remember that this call site got inlined for the client of
1323     // InlineFunction.
1324     IFI.InlinedCalls.push_back(NewCall);
1325 
1326     // It's possible that inlining the callsite will cause it to go from an
1327     // indirect to a direct call by resolving a function pointer.  If this
1328     // happens, set the callee of the new call site to a more precise
1329     // destination.  This can also happen if the call graph node of the caller
1330     // was just unnecessarily imprecise.
1331     if (!I->second->getFunction())
1332       if (Function *F = NewCall->getCalledFunction()) {
1333         // Indirect call site resolved to direct call.
1334         CallerNode->addCalledFunction(NewCall, CG[F]);
1335 
1336         continue;
1337       }
1338 
1339     CallerNode->addCalledFunction(NewCall, I->second);
1340   }
1341 
1342   // Update the call graph by deleting the edge from Callee to Caller.  We must
1343   // do this after the loop above in case Caller and Callee are the same.
1344   CallerNode->removeCallEdgeFor(*cast<CallBase>(CS.getInstruction()));
1345 }
1346 
1347 static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
1348                                     BasicBlock *InsertBlock,
1349                                     InlineFunctionInfo &IFI) {
1350   Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
1351   IRBuilder<> Builder(InsertBlock, InsertBlock->begin());
1352 
1353   Value *Size = Builder.getInt64(M->getDataLayout().getTypeStoreSize(AggTy));
1354 
1355   // Always generate a memcpy of alignment 1 here because we don't know
1356   // the alignment of the src pointer.  Other optimizations can infer
1357   // better alignment.
1358   Builder.CreateMemCpy(Dst, /*DstAlign*/ Align(1), Src,
1359                        /*SrcAlign*/ Align(1), Size);
1360 }
1361 
1362 /// When inlining a call site that has a byval argument,
1363 /// we have to make the implicit memcpy explicit by adding it.
1364 static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
1365                                   const Function *CalledFunc,
1366                                   InlineFunctionInfo &IFI,
1367                                   unsigned ByValAlignment) {
1368   PointerType *ArgTy = cast<PointerType>(Arg->getType());
1369   Type *AggTy = ArgTy->getElementType();
1370 
1371   Function *Caller = TheCall->getFunction();
1372   const DataLayout &DL = Caller->getParent()->getDataLayout();
1373 
1374   // If the called function is readonly, then it could not mutate the caller's
1375   // copy of the byval'd memory.  In this case, it is safe to elide the copy and
1376   // temporary.
1377   if (CalledFunc->onlyReadsMemory()) {
1378     // If the byval argument has a specified alignment that is greater than the
1379     // passed in pointer, then we either have to round up the input pointer or
1380     // give up on this transformation.
1381     if (ByValAlignment <= 1)  // 0 = unspecified, 1 = no particular alignment.
1382       return Arg;
1383 
1384     AssumptionCache *AC =
1385         IFI.GetAssumptionCache ? &(*IFI.GetAssumptionCache)(*Caller) : nullptr;
1386 
1387     // If the pointer is already known to be sufficiently aligned, or if we can
1388     // round it up to a larger alignment, then we don't need a temporary.
1389     if (getOrEnforceKnownAlignment(Arg, ByValAlignment, DL, TheCall, AC) >=
1390         ByValAlignment)
1391       return Arg;
1392 
1393     // Otherwise, we have to make a memcpy to get a safe alignment.  This is bad
1394     // for code quality, but rarely happens and is required for correctness.
1395   }
1396 
1397   // Create the alloca.  If we have DataLayout, use nice alignment.
1398   Align Alignment(DL.getPrefTypeAlignment(AggTy));
1399 
1400   // If the byval had an alignment specified, we *must* use at least that
1401   // alignment, as it is required by the byval argument (and uses of the
1402   // pointer inside the callee).
1403   Alignment = max(Alignment, MaybeAlign(ByValAlignment));
1404 
1405   Value *NewAlloca =
1406       new AllocaInst(AggTy, DL.getAllocaAddrSpace(), nullptr, Alignment,
1407                      Arg->getName(), &*Caller->begin()->begin());
1408   IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));
1409 
1410   // Uses of the argument in the function should use our new alloca
1411   // instead.
1412   return NewAlloca;
1413 }
1414 
1415 // Check whether this Value is used by a lifetime intrinsic.
1416 static bool isUsedByLifetimeMarker(Value *V) {
1417   for (User *U : V->users())
1418     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U))
1419       if (II->isLifetimeStartOrEnd())
1420         return true;
1421   return false;
1422 }
1423 
1424 // Check whether the given alloca already has
1425 // lifetime.start or lifetime.end intrinsics.
1426 static bool hasLifetimeMarkers(AllocaInst *AI) {
1427   Type *Ty = AI->getType();
1428   Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
1429                                        Ty->getPointerAddressSpace());
1430   if (Ty == Int8PtrTy)
1431     return isUsedByLifetimeMarker(AI);
1432 
1433   // Do a scan to find all the casts to i8*.
1434   for (User *U : AI->users()) {
1435     if (U->getType() != Int8PtrTy) continue;
1436     if (U->stripPointerCasts() != AI) continue;
1437     if (isUsedByLifetimeMarker(U))
1438       return true;
1439   }
1440   return false;
1441 }
1442 
1443 /// Return the result of AI->isStaticAlloca() if AI were moved to the entry
1444 /// block. Allocas used in inalloca calls and allocas of dynamic array size
1445 /// cannot be static.
1446 static bool allocaWouldBeStaticInEntry(const AllocaInst *AI ) {
1447   return isa<Constant>(AI->getArraySize()) && !AI->isUsedWithInAlloca();
1448 }
1449 
1450 /// Returns a DebugLoc for a new DILocation which is a clone of \p OrigDL
1451 /// inlined at \p InlinedAt. \p IANodes is an inlined-at cache.
1452 static DebugLoc inlineDebugLoc(DebugLoc OrigDL, DILocation *InlinedAt,
1453                                LLVMContext &Ctx,
1454                                DenseMap<const MDNode *, MDNode *> &IANodes) {
1455   auto IA = DebugLoc::appendInlinedAt(OrigDL, InlinedAt, Ctx, IANodes);
1456   return DebugLoc::get(OrigDL.getLine(), OrigDL.getCol(), OrigDL.getScope(),
1457                        IA);
1458 }
1459 
1460 /// Update inlined instructions' line numbers to
1461 /// to encode location where these instructions are inlined.
1462 static void fixupLineNumbers(Function *Fn, Function::iterator FI,
1463                              Instruction *TheCall, bool CalleeHasDebugInfo) {
1464   const DebugLoc &TheCallDL = TheCall->getDebugLoc();
1465   if (!TheCallDL)
1466     return;
1467 
1468   auto &Ctx = Fn->getContext();
1469   DILocation *InlinedAtNode = TheCallDL;
1470 
1471   // Create a unique call site, not to be confused with any other call from the
1472   // same location.
1473   InlinedAtNode = DILocation::getDistinct(
1474       Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
1475       InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());
1476 
1477   // Cache the inlined-at nodes as they're built so they are reused, without
1478   // this every instruction's inlined-at chain would become distinct from each
1479   // other.
1480   DenseMap<const MDNode *, MDNode *> IANodes;
1481 
1482   // Check if we are not generating inline line tables and want to use
1483   // the call site location instead.
1484   bool NoInlineLineTables = Fn->hasFnAttribute("no-inline-line-tables");
1485 
1486   for (; FI != Fn->end(); ++FI) {
1487     for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
1488          BI != BE; ++BI) {
1489       // Loop metadata needs to be updated so that the start and end locs
1490       // reference inlined-at locations.
1491       auto updateLoopInfoLoc = [&Ctx, &InlinedAtNode, &IANodes](
1492                                    const DILocation &Loc) -> DILocation * {
1493         return inlineDebugLoc(&Loc, InlinedAtNode, Ctx, IANodes).get();
1494       };
1495       updateLoopMetadataDebugLocations(*BI, updateLoopInfoLoc);
1496 
1497       if (!NoInlineLineTables)
1498         if (DebugLoc DL = BI->getDebugLoc()) {
1499           DebugLoc IDL =
1500               inlineDebugLoc(DL, InlinedAtNode, BI->getContext(), IANodes);
1501           BI->setDebugLoc(IDL);
1502           continue;
1503         }
1504 
1505       if (CalleeHasDebugInfo && !NoInlineLineTables)
1506         continue;
1507 
1508       // If the inlined instruction has no line number, or if inline info
1509       // is not being generated, make it look as if it originates from the call
1510       // location. This is important for ((__always_inline, __nodebug__))
1511       // functions which must use caller location for all instructions in their
1512       // function body.
1513 
1514       // Don't update static allocas, as they may get moved later.
1515       if (auto *AI = dyn_cast<AllocaInst>(BI))
1516         if (allocaWouldBeStaticInEntry(AI))
1517           continue;
1518 
1519       BI->setDebugLoc(TheCallDL);
1520     }
1521 
1522     // Remove debug info intrinsics if we're not keeping inline info.
1523     if (NoInlineLineTables) {
1524       BasicBlock::iterator BI = FI->begin();
1525       while (BI != FI->end()) {
1526         if (isa<DbgInfoIntrinsic>(BI)) {
1527           BI = BI->eraseFromParent();
1528           continue;
1529         }
1530         ++BI;
1531       }
1532     }
1533 
1534   }
1535 }
1536 
1537 /// Update the block frequencies of the caller after a callee has been inlined.
1538 ///
1539 /// Each block cloned into the caller has its block frequency scaled by the
1540 /// ratio of CallSiteFreq/CalleeEntryFreq. This ensures that the cloned copy of
1541 /// callee's entry block gets the same frequency as the callsite block and the
1542 /// relative frequencies of all cloned blocks remain the same after cloning.
1543 static void updateCallerBFI(BasicBlock *CallSiteBlock,
1544                             const ValueToValueMapTy &VMap,
1545                             BlockFrequencyInfo *CallerBFI,
1546                             BlockFrequencyInfo *CalleeBFI,
1547                             const BasicBlock &CalleeEntryBlock) {
1548   SmallPtrSet<BasicBlock *, 16> ClonedBBs;
1549   for (auto Entry : VMap) {
1550     if (!isa<BasicBlock>(Entry.first) || !Entry.second)
1551       continue;
1552     auto *OrigBB = cast<BasicBlock>(Entry.first);
1553     auto *ClonedBB = cast<BasicBlock>(Entry.second);
1554     uint64_t Freq = CalleeBFI->getBlockFreq(OrigBB).getFrequency();
1555     if (!ClonedBBs.insert(ClonedBB).second) {
1556       // Multiple blocks in the callee might get mapped to one cloned block in
1557       // the caller since we prune the callee as we clone it. When that happens,
1558       // we want to use the maximum among the original blocks' frequencies.
1559       uint64_t NewFreq = CallerBFI->getBlockFreq(ClonedBB).getFrequency();
1560       if (NewFreq > Freq)
1561         Freq = NewFreq;
1562     }
1563     CallerBFI->setBlockFreq(ClonedBB, Freq);
1564   }
1565   BasicBlock *EntryClone = cast<BasicBlock>(VMap.lookup(&CalleeEntryBlock));
1566   CallerBFI->setBlockFreqAndScale(
1567       EntryClone, CallerBFI->getBlockFreq(CallSiteBlock).getFrequency(),
1568       ClonedBBs);
1569 }
1570 
1571 /// Update the branch metadata for cloned call instructions.
1572 static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap,
1573                               const ProfileCount &CalleeEntryCount,
1574                               const Instruction *TheCall,
1575                               ProfileSummaryInfo *PSI,
1576                               BlockFrequencyInfo *CallerBFI) {
1577   if (!CalleeEntryCount.hasValue() || CalleeEntryCount.isSynthetic() ||
1578       CalleeEntryCount.getCount() < 1)
1579     return;
1580   auto CallSiteCount = PSI ? PSI->getProfileCount(TheCall, CallerBFI) : None;
1581   int64_t CallCount =
1582       std::min(CallSiteCount.hasValue() ? CallSiteCount.getValue() : 0,
1583                CalleeEntryCount.getCount());
1584   updateProfileCallee(Callee, -CallCount, &VMap);
1585 }
1586 
1587 void llvm::updateProfileCallee(
1588     Function *Callee, int64_t entryDelta,
1589     const ValueMap<const Value *, WeakTrackingVH> *VMap) {
1590   auto CalleeCount = Callee->getEntryCount();
1591   if (!CalleeCount.hasValue())
1592     return;
1593 
1594   uint64_t priorEntryCount = CalleeCount.getCount();
1595   uint64_t newEntryCount;
1596 
1597   // Since CallSiteCount is an estimate, it could exceed the original callee
1598   // count and has to be set to 0 so guard against underflow.
1599   if (entryDelta < 0 && static_cast<uint64_t>(-entryDelta) > priorEntryCount)
1600     newEntryCount = 0;
1601   else
1602     newEntryCount = priorEntryCount + entryDelta;
1603 
1604   // During inlining ?
1605   if (VMap) {
1606     uint64_t cloneEntryCount = priorEntryCount - newEntryCount;
1607     for (auto Entry : *VMap)
1608       if (isa<CallInst>(Entry.first))
1609         if (auto *CI = dyn_cast_or_null<CallInst>(Entry.second))
1610           CI->updateProfWeight(cloneEntryCount, priorEntryCount);
1611   }
1612 
1613   if (entryDelta) {
1614     Callee->setEntryCount(newEntryCount);
1615 
1616     for (BasicBlock &BB : *Callee)
1617       // No need to update the callsite if it is pruned during inlining.
1618       if (!VMap || VMap->count(&BB))
1619         for (Instruction &I : BB)
1620           if (CallInst *CI = dyn_cast<CallInst>(&I))
1621             CI->updateProfWeight(newEntryCount, priorEntryCount);
1622   }
1623 }
1624 
1625 /// This function inlines the called function into the basic block of the
1626 /// caller. This returns false if it is not possible to inline this call.
1627 /// The program is still in a well defined state if this occurs though.
1628 ///
1629 /// Note that this only does one level of inlining.  For example, if the
1630 /// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
1631 /// exists in the instruction stream.  Similarly this will inline a recursive
1632 /// function by one level.
1633 llvm::InlineResult llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
1634                                         AAResults *CalleeAAR,
1635                                         bool InsertLifetime,
1636                                         Function *ForwardVarArgsTo) {
1637   Instruction *TheCall = CS.getInstruction();
1638   assert(TheCall->getParent() && TheCall->getFunction()
1639          && "Instruction not in function!");
1640 
1641   // FIXME: we don't inline callbr yet.
1642   if (isa<CallBrInst>(TheCall))
1643     return InlineResult::failure("We don't inline callbr yet.");
1644 
1645   // If IFI has any state in it, zap it before we fill it in.
1646   IFI.reset();
1647 
1648   Function *CalledFunc = CS.getCalledFunction();
1649   if (!CalledFunc ||               // Can't inline external function or indirect
1650       CalledFunc->isDeclaration()) // call!
1651     return InlineResult::failure("external or indirect");
1652 
1653   // The inliner does not know how to inline through calls with operand bundles
1654   // in general ...
1655   if (CS.hasOperandBundles()) {
1656     for (int i = 0, e = CS.getNumOperandBundles(); i != e; ++i) {
1657       uint32_t Tag = CS.getOperandBundleAt(i).getTagID();
1658       // ... but it knows how to inline through "deopt" operand bundles ...
1659       if (Tag == LLVMContext::OB_deopt)
1660         continue;
1661       // ... and "funclet" operand bundles.
1662       if (Tag == LLVMContext::OB_funclet)
1663         continue;
1664 
1665       return InlineResult::failure("unsupported operand bundle");
1666     }
1667   }
1668 
1669   // If the call to the callee cannot throw, set the 'nounwind' flag on any
1670   // calls that we inline.
1671   bool MarkNoUnwind = CS.doesNotThrow();
1672 
1673   BasicBlock *OrigBB = TheCall->getParent();
1674   Function *Caller = OrigBB->getParent();
1675 
1676   // GC poses two hazards to inlining, which only occur when the callee has GC:
1677   //  1. If the caller has no GC, then the callee's GC must be propagated to the
1678   //     caller.
1679   //  2. If the caller has a differing GC, it is invalid to inline.
1680   if (CalledFunc->hasGC()) {
1681     if (!Caller->hasGC())
1682       Caller->setGC(CalledFunc->getGC());
1683     else if (CalledFunc->getGC() != Caller->getGC())
1684       return InlineResult::failure("incompatible GC");
1685   }
1686 
1687   // Get the personality function from the callee if it contains a landing pad.
1688   Constant *CalledPersonality =
1689       CalledFunc->hasPersonalityFn()
1690           ? CalledFunc->getPersonalityFn()->stripPointerCasts()
1691           : nullptr;
1692 
1693   // Find the personality function used by the landing pads of the caller. If it
1694   // exists, then check to see that it matches the personality function used in
1695   // the callee.
1696   Constant *CallerPersonality =
1697       Caller->hasPersonalityFn()
1698           ? Caller->getPersonalityFn()->stripPointerCasts()
1699           : nullptr;
1700   if (CalledPersonality) {
1701     if (!CallerPersonality)
1702       Caller->setPersonalityFn(CalledPersonality);
1703     // If the personality functions match, then we can perform the
1704     // inlining. Otherwise, we can't inline.
1705     // TODO: This isn't 100% true. Some personality functions are proper
1706     //       supersets of others and can be used in place of the other.
1707     else if (CalledPersonality != CallerPersonality)
1708       return InlineResult::failure("incompatible personality");
1709   }
1710 
1711   // We need to figure out which funclet the callsite was in so that we may
1712   // properly nest the callee.
1713   Instruction *CallSiteEHPad = nullptr;
1714   if (CallerPersonality) {
1715     EHPersonality Personality = classifyEHPersonality(CallerPersonality);
1716     if (isScopedEHPersonality(Personality)) {
1717       Optional<OperandBundleUse> ParentFunclet =
1718           CS.getOperandBundle(LLVMContext::OB_funclet);
1719       if (ParentFunclet)
1720         CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());
1721 
1722       // OK, the inlining site is legal.  What about the target function?
1723 
1724       if (CallSiteEHPad) {
1725         if (Personality == EHPersonality::MSVC_CXX) {
1726           // The MSVC personality cannot tolerate catches getting inlined into
1727           // cleanup funclets.
1728           if (isa<CleanupPadInst>(CallSiteEHPad)) {
1729             // Ok, the call site is within a cleanuppad.  Let's check the callee
1730             // for catchpads.
1731             for (const BasicBlock &CalledBB : *CalledFunc) {
1732               if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))
1733                 return InlineResult::failure("catch in cleanup funclet");
1734             }
1735           }
1736         } else if (isAsynchronousEHPersonality(Personality)) {
1737           // SEH is even less tolerant, there may not be any sort of exceptional
1738           // funclet in the callee.
1739           for (const BasicBlock &CalledBB : *CalledFunc) {
1740             if (CalledBB.isEHPad())
1741               return InlineResult::failure("SEH in cleanup funclet");
1742           }
1743         }
1744       }
1745     }
1746   }
1747 
1748   // Determine if we are dealing with a call in an EHPad which does not unwind
1749   // to caller.
1750   bool EHPadForCallUnwindsLocally = false;
1751   if (CallSiteEHPad && CS.isCall()) {
1752     UnwindDestMemoTy FuncletUnwindMap;
1753     Value *CallSiteUnwindDestToken =
1754         getUnwindDestToken(CallSiteEHPad, FuncletUnwindMap);
1755 
1756     EHPadForCallUnwindsLocally =
1757         CallSiteUnwindDestToken &&
1758         !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
1759   }
1760 
1761   // Get an iterator to the last basic block in the function, which will have
1762   // the new function inlined after it.
1763   Function::iterator LastBlock = --Caller->end();
1764 
1765   // Make sure to capture all of the return instructions from the cloned
1766   // function.
1767   SmallVector<ReturnInst*, 8> Returns;
1768   ClonedCodeInfo InlinedFunctionInfo;
1769   Function::iterator FirstNewBlock;
1770 
1771   { // Scope to destroy VMap after cloning.
1772     ValueToValueMapTy VMap;
1773     // Keep a list of pair (dst, src) to emit byval initializations.
1774     SmallVector<std::pair<Value*, Value*>, 4> ByValInit;
1775 
1776     auto &DL = Caller->getParent()->getDataLayout();
1777 
1778     // Calculate the vector of arguments to pass into the function cloner, which
1779     // matches up the formal to the actual argument values.
1780     CallSite::arg_iterator AI = CS.arg_begin();
1781     unsigned ArgNo = 0;
1782     for (Function::arg_iterator I = CalledFunc->arg_begin(),
1783          E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
1784       Value *ActualArg = *AI;
1785 
1786       // When byval arguments actually inlined, we need to make the copy implied
1787       // by them explicit.  However, we don't do this if the callee is readonly
1788       // or readnone, because the copy would be unneeded: the callee doesn't
1789       // modify the struct.
1790       if (CS.isByValArgument(ArgNo)) {
1791         ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
1792                                         CalledFunc->getParamAlignment(ArgNo));
1793         if (ActualArg != *AI)
1794           ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI));
1795       }
1796 
1797       VMap[&*I] = ActualArg;
1798     }
1799 
1800     // TODO: Remove this when users have been updated to the assume bundles.
1801     // Add alignment assumptions if necessary. We do this before the inlined
1802     // instructions are actually cloned into the caller so that we can easily
1803     // check what will be known at the start of the inlined code.
1804     AddAlignmentAssumptions(CS, IFI);
1805 
1806     /// Preserve all attributes on of the call and its parameters.
1807     if (Instruction *Assume = buildAssumeFromInst(CS.getInstruction()))
1808       Assume->insertBefore(CS.getInstruction());
1809 
1810     // We want the inliner to prune the code as it copies.  We would LOVE to
1811     // have no dead or constant instructions leftover after inlining occurs
1812     // (which can happen, e.g., because an argument was constant), but we'll be
1813     // happy with whatever the cloner can do.
1814     CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
1815                               /*ModuleLevelChanges=*/false, Returns, ".i",
1816                               &InlinedFunctionInfo, TheCall);
1817     // Remember the first block that is newly cloned over.
1818     FirstNewBlock = LastBlock; ++FirstNewBlock;
1819 
1820     if (IFI.CallerBFI != nullptr && IFI.CalleeBFI != nullptr)
1821       // Update the BFI of blocks cloned into the caller.
1822       updateCallerBFI(OrigBB, VMap, IFI.CallerBFI, IFI.CalleeBFI,
1823                       CalledFunc->front());
1824 
1825     updateCallProfile(CalledFunc, VMap, CalledFunc->getEntryCount(), TheCall,
1826                       IFI.PSI, IFI.CallerBFI);
1827 
1828     // Inject byval arguments initialization.
1829     for (std::pair<Value*, Value*> &Init : ByValInit)
1830       HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(),
1831                               &*FirstNewBlock, IFI);
1832 
1833     Optional<OperandBundleUse> ParentDeopt =
1834         CS.getOperandBundle(LLVMContext::OB_deopt);
1835     if (ParentDeopt) {
1836       SmallVector<OperandBundleDef, 2> OpDefs;
1837 
1838       for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) {
1839         Instruction *I = dyn_cast_or_null<Instruction>(VH);
1840         if (!I) continue;  // instruction was DCE'd or RAUW'ed to undef
1841 
1842         OpDefs.clear();
1843 
1844         CallSite ICS(I);
1845         OpDefs.reserve(ICS.getNumOperandBundles());
1846 
1847         for (unsigned i = 0, e = ICS.getNumOperandBundles(); i < e; ++i) {
1848           auto ChildOB = ICS.getOperandBundleAt(i);
1849           if (ChildOB.getTagID() != LLVMContext::OB_deopt) {
1850             // If the inlined call has other operand bundles, let them be
1851             OpDefs.emplace_back(ChildOB);
1852             continue;
1853           }
1854 
1855           // It may be useful to separate this logic (of handling operand
1856           // bundles) out to a separate "policy" component if this gets crowded.
1857           // Prepend the parent's deoptimization continuation to the newly
1858           // inlined call's deoptimization continuation.
1859           std::vector<Value *> MergedDeoptArgs;
1860           MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
1861                                   ChildOB.Inputs.size());
1862 
1863           MergedDeoptArgs.insert(MergedDeoptArgs.end(),
1864                                  ParentDeopt->Inputs.begin(),
1865                                  ParentDeopt->Inputs.end());
1866           MergedDeoptArgs.insert(MergedDeoptArgs.end(), ChildOB.Inputs.begin(),
1867                                  ChildOB.Inputs.end());
1868 
1869           OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs));
1870         }
1871 
1872         Instruction *NewI = nullptr;
1873         if (isa<CallInst>(I))
1874           NewI = CallInst::Create(cast<CallInst>(I), OpDefs, I);
1875         else if (isa<CallBrInst>(I))
1876           NewI = CallBrInst::Create(cast<CallBrInst>(I), OpDefs, I);
1877         else
1878           NewI = InvokeInst::Create(cast<InvokeInst>(I), OpDefs, I);
1879 
1880         // Note: the RAUW does the appropriate fixup in VMap, so we need to do
1881         // this even if the call returns void.
1882         I->replaceAllUsesWith(NewI);
1883 
1884         VH = nullptr;
1885         I->eraseFromParent();
1886       }
1887     }
1888 
1889     // Update the callgraph if requested.
1890     if (IFI.CG)
1891       UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);
1892 
1893     // For 'nodebug' functions, the associated DISubprogram is always null.
1894     // Conservatively avoid propagating the callsite debug location to
1895     // instructions inlined from a function whose DISubprogram is not null.
1896     fixupLineNumbers(Caller, FirstNewBlock, TheCall,
1897                      CalledFunc->getSubprogram() != nullptr);
1898 
1899     // Clone existing noalias metadata if necessary.
1900     CloneAliasScopeMetadata(CS, VMap);
1901 
1902     // Add noalias metadata if necessary.
1903     AddAliasScopeMetadata(CS, VMap, DL, CalleeAAR);
1904 
1905     // Clone return attributes on the callsite into the calls within the inlined
1906     // function which feed into its return value.
1907     AddReturnAttributes(CS, VMap);
1908 
1909     // Propagate llvm.mem.parallel_loop_access if necessary.
1910     PropagateParallelLoopAccessMetadata(CS, VMap);
1911 
1912     // Register any cloned assumptions.
1913     if (IFI.GetAssumptionCache)
1914       for (BasicBlock &NewBlock :
1915            make_range(FirstNewBlock->getIterator(), Caller->end()))
1916         for (Instruction &I : NewBlock) {
1917           if (auto *II = dyn_cast<IntrinsicInst>(&I))
1918             if (II->getIntrinsicID() == Intrinsic::assume)
1919               (*IFI.GetAssumptionCache)(*Caller).registerAssumption(II);
1920         }
1921   }
1922 
1923   // If there are any alloca instructions in the block that used to be the entry
1924   // block for the callee, move them to the entry block of the caller.  First
1925   // calculate which instruction they should be inserted before.  We insert the
1926   // instructions at the end of the current alloca list.
1927   {
1928     BasicBlock::iterator InsertPoint = Caller->begin()->begin();
1929     for (BasicBlock::iterator I = FirstNewBlock->begin(),
1930          E = FirstNewBlock->end(); I != E; ) {
1931       AllocaInst *AI = dyn_cast<AllocaInst>(I++);
1932       if (!AI) continue;
1933 
1934       // If the alloca is now dead, remove it.  This often occurs due to code
1935       // specialization.
1936       if (AI->use_empty()) {
1937         AI->eraseFromParent();
1938         continue;
1939       }
1940 
1941       if (!allocaWouldBeStaticInEntry(AI))
1942         continue;
1943 
1944       // Keep track of the static allocas that we inline into the caller.
1945       IFI.StaticAllocas.push_back(AI);
1946 
1947       // Scan for the block of allocas that we can move over, and move them
1948       // all at once.
1949       while (isa<AllocaInst>(I) &&
1950              !cast<AllocaInst>(I)->use_empty() &&
1951              allocaWouldBeStaticInEntry(cast<AllocaInst>(I))) {
1952         IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
1953         ++I;
1954       }
1955 
1956       // Transfer all of the allocas over in a block.  Using splice means
1957       // that the instructions aren't removed from the symbol table, then
1958       // reinserted.
1959       Caller->getEntryBlock().getInstList().splice(
1960           InsertPoint, FirstNewBlock->getInstList(), AI->getIterator(), I);
1961     }
1962   }
1963 
1964   SmallVector<Value*,4> VarArgsToForward;
1965   SmallVector<AttributeSet, 4> VarArgsAttrs;
1966   for (unsigned i = CalledFunc->getFunctionType()->getNumParams();
1967        i < CS.getNumArgOperands(); i++) {
1968     VarArgsToForward.push_back(CS.getArgOperand(i));
1969     VarArgsAttrs.push_back(CS.getAttributes().getParamAttributes(i));
1970   }
1971 
1972   bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false;
1973   if (InlinedFunctionInfo.ContainsCalls) {
1974     CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
1975     if (CallInst *CI = dyn_cast<CallInst>(TheCall))
1976       CallSiteTailKind = CI->getTailCallKind();
1977 
1978     // For inlining purposes, the "notail" marker is the same as no marker.
1979     if (CallSiteTailKind == CallInst::TCK_NoTail)
1980       CallSiteTailKind = CallInst::TCK_None;
1981 
1982     for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
1983          ++BB) {
1984       for (auto II = BB->begin(); II != BB->end();) {
1985         Instruction &I = *II++;
1986         CallInst *CI = dyn_cast<CallInst>(&I);
1987         if (!CI)
1988           continue;
1989 
1990         // Forward varargs from inlined call site to calls to the
1991         // ForwardVarArgsTo function, if requested, and to musttail calls.
1992         if (!VarArgsToForward.empty() &&
1993             ((ForwardVarArgsTo &&
1994               CI->getCalledFunction() == ForwardVarArgsTo) ||
1995              CI->isMustTailCall())) {
1996           // Collect attributes for non-vararg parameters.
1997           AttributeList Attrs = CI->getAttributes();
1998           SmallVector<AttributeSet, 8> ArgAttrs;
1999           if (!Attrs.isEmpty() || !VarArgsAttrs.empty()) {
2000             for (unsigned ArgNo = 0;
2001                  ArgNo < CI->getFunctionType()->getNumParams(); ++ArgNo)
2002               ArgAttrs.push_back(Attrs.getParamAttributes(ArgNo));
2003           }
2004 
2005           // Add VarArg attributes.
2006           ArgAttrs.append(VarArgsAttrs.begin(), VarArgsAttrs.end());
2007           Attrs = AttributeList::get(CI->getContext(), Attrs.getFnAttributes(),
2008                                      Attrs.getRetAttributes(), ArgAttrs);
2009           // Add VarArgs to existing parameters.
2010           SmallVector<Value *, 6> Params(CI->arg_operands());
2011           Params.append(VarArgsToForward.begin(), VarArgsToForward.end());
2012           CallInst *NewCI = CallInst::Create(
2013               CI->getFunctionType(), CI->getCalledOperand(), Params, "", CI);
2014           NewCI->setDebugLoc(CI->getDebugLoc());
2015           NewCI->setAttributes(Attrs);
2016           NewCI->setCallingConv(CI->getCallingConv());
2017           CI->replaceAllUsesWith(NewCI);
2018           CI->eraseFromParent();
2019           CI = NewCI;
2020         }
2021 
2022         if (Function *F = CI->getCalledFunction())
2023           InlinedDeoptimizeCalls |=
2024               F->getIntrinsicID() == Intrinsic::experimental_deoptimize;
2025 
2026         // We need to reduce the strength of any inlined tail calls.  For
2027         // musttail, we have to avoid introducing potential unbounded stack
2028         // growth.  For example, if functions 'f' and 'g' are mutually recursive
2029         // with musttail, we can inline 'g' into 'f' so long as we preserve
2030         // musttail on the cloned call to 'f'.  If either the inlined call site
2031         // or the cloned call site is *not* musttail, the program already has
2032         // one frame of stack growth, so it's safe to remove musttail.  Here is
2033         // a table of example transformations:
2034         //
2035         //    f -> musttail g -> musttail f  ==>  f -> musttail f
2036         //    f -> musttail g ->     tail f  ==>  f ->     tail f
2037         //    f ->          g -> musttail f  ==>  f ->          f
2038         //    f ->          g ->     tail f  ==>  f ->          f
2039         //
2040         // Inlined notail calls should remain notail calls.
2041         CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
2042         if (ChildTCK != CallInst::TCK_NoTail)
2043           ChildTCK = std::min(CallSiteTailKind, ChildTCK);
2044         CI->setTailCallKind(ChildTCK);
2045         InlinedMustTailCalls |= CI->isMustTailCall();
2046 
2047         // Calls inlined through a 'nounwind' call site should be marked
2048         // 'nounwind'.
2049         if (MarkNoUnwind)
2050           CI->setDoesNotThrow();
2051       }
2052     }
2053   }
2054 
2055   // Leave lifetime markers for the static alloca's, scoping them to the
2056   // function we just inlined.
2057   if (InsertLifetime && !IFI.StaticAllocas.empty()) {
2058     IRBuilder<> builder(&FirstNewBlock->front());
2059     for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
2060       AllocaInst *AI = IFI.StaticAllocas[ai];
2061       // Don't mark swifterror allocas. They can't have bitcast uses.
2062       if (AI->isSwiftError())
2063         continue;
2064 
2065       // If the alloca is already scoped to something smaller than the whole
2066       // function then there's no need to add redundant, less accurate markers.
2067       if (hasLifetimeMarkers(AI))
2068         continue;
2069 
2070       // Try to determine the size of the allocation.
2071       ConstantInt *AllocaSize = nullptr;
2072       if (ConstantInt *AIArraySize =
2073           dyn_cast<ConstantInt>(AI->getArraySize())) {
2074         auto &DL = Caller->getParent()->getDataLayout();
2075         Type *AllocaType = AI->getAllocatedType();
2076         uint64_t AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
2077         uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
2078 
2079         // Don't add markers for zero-sized allocas.
2080         if (AllocaArraySize == 0)
2081           continue;
2082 
2083         // Check that array size doesn't saturate uint64_t and doesn't
2084         // overflow when it's multiplied by type size.
2085         if (AllocaArraySize != std::numeric_limits<uint64_t>::max() &&
2086             std::numeric_limits<uint64_t>::max() / AllocaArraySize >=
2087                 AllocaTypeSize) {
2088           AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
2089                                         AllocaArraySize * AllocaTypeSize);
2090         }
2091       }
2092 
2093       builder.CreateLifetimeStart(AI, AllocaSize);
2094       for (ReturnInst *RI : Returns) {
2095         // Don't insert llvm.lifetime.end calls between a musttail or deoptimize
2096         // call and a return.  The return kills all local allocas.
2097         if (InlinedMustTailCalls &&
2098             RI->getParent()->getTerminatingMustTailCall())
2099           continue;
2100         if (InlinedDeoptimizeCalls &&
2101             RI->getParent()->getTerminatingDeoptimizeCall())
2102           continue;
2103         IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
2104       }
2105     }
2106   }
2107 
2108   // If the inlined code contained dynamic alloca instructions, wrap the inlined
2109   // code with llvm.stacksave/llvm.stackrestore intrinsics.
2110   if (InlinedFunctionInfo.ContainsDynamicAllocas) {
2111     Module *M = Caller->getParent();
2112     // Get the two intrinsics we care about.
2113     Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
2114     Function *StackRestore=Intrinsic::getDeclaration(M,Intrinsic::stackrestore);
2115 
2116     // Insert the llvm.stacksave.
2117     CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
2118                              .CreateCall(StackSave, {}, "savedstack");
2119 
2120     // Insert a call to llvm.stackrestore before any return instructions in the
2121     // inlined function.
2122     for (ReturnInst *RI : Returns) {
2123       // Don't insert llvm.stackrestore calls between a musttail or deoptimize
2124       // call and a return.  The return will restore the stack pointer.
2125       if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
2126         continue;
2127       if (InlinedDeoptimizeCalls && RI->getParent()->getTerminatingDeoptimizeCall())
2128         continue;
2129       IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
2130     }
2131   }
2132 
2133   // If we are inlining for an invoke instruction, we must make sure to rewrite
2134   // any call instructions into invoke instructions.  This is sensitive to which
2135   // funclet pads were top-level in the inlinee, so must be done before
2136   // rewriting the "parent pad" links.
2137   if (auto *II = dyn_cast<InvokeInst>(TheCall)) {
2138     BasicBlock *UnwindDest = II->getUnwindDest();
2139     Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI();
2140     if (isa<LandingPadInst>(FirstNonPHI)) {
2141       HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo);
2142     } else {
2143       HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo);
2144     }
2145   }
2146 
2147   // Update the lexical scopes of the new funclets and callsites.
2148   // Anything that had 'none' as its parent is now nested inside the callsite's
2149   // EHPad.
2150 
2151   if (CallSiteEHPad) {
2152     for (Function::iterator BB = FirstNewBlock->getIterator(),
2153                             E = Caller->end();
2154          BB != E; ++BB) {
2155       // Add bundle operands to any top-level call sites.
2156       SmallVector<OperandBundleDef, 1> OpBundles;
2157       for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E;) {
2158         Instruction *I = &*BBI++;
2159         CallSite CS(I);
2160         if (!CS)
2161           continue;
2162 
2163         // Skip call sites which are nounwind intrinsics.
2164         auto *CalledFn =
2165             dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
2166         if (CalledFn && CalledFn->isIntrinsic() && CS.doesNotThrow())
2167           continue;
2168 
2169         // Skip call sites which already have a "funclet" bundle.
2170         if (CS.getOperandBundle(LLVMContext::OB_funclet))
2171           continue;
2172 
2173         CS.getOperandBundlesAsDefs(OpBundles);
2174         OpBundles.emplace_back("funclet", CallSiteEHPad);
2175 
2176         Instruction *NewInst;
2177         if (CS.isCall())
2178           NewInst = CallInst::Create(cast<CallInst>(I), OpBundles, I);
2179         else if (CS.isCallBr())
2180           NewInst = CallBrInst::Create(cast<CallBrInst>(I), OpBundles, I);
2181         else
2182           NewInst = InvokeInst::Create(cast<InvokeInst>(I), OpBundles, I);
2183         NewInst->takeName(I);
2184         I->replaceAllUsesWith(NewInst);
2185         I->eraseFromParent();
2186 
2187         OpBundles.clear();
2188       }
2189 
2190       // It is problematic if the inlinee has a cleanupret which unwinds to
2191       // caller and we inline it into a call site which doesn't unwind but into
2192       // an EH pad that does.  Such an edge must be dynamically unreachable.
2193       // As such, we replace the cleanupret with unreachable.
2194       if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
2195         if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
2196           changeToUnreachable(CleanupRet, /*UseLLVMTrap=*/false);
2197 
2198       Instruction *I = BB->getFirstNonPHI();
2199       if (!I->isEHPad())
2200         continue;
2201 
2202       if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
2203         if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
2204           CatchSwitch->setParentPad(CallSiteEHPad);
2205       } else {
2206         auto *FPI = cast<FuncletPadInst>(I);
2207         if (isa<ConstantTokenNone>(FPI->getParentPad()))
2208           FPI->setParentPad(CallSiteEHPad);
2209       }
2210     }
2211   }
2212 
2213   if (InlinedDeoptimizeCalls) {
2214     // We need to at least remove the deoptimizing returns from the Return set,
2215     // so that the control flow from those returns does not get merged into the
2216     // caller (but terminate it instead).  If the caller's return type does not
2217     // match the callee's return type, we also need to change the return type of
2218     // the intrinsic.
2219     if (Caller->getReturnType() == TheCall->getType()) {
2220       auto NewEnd = llvm::remove_if(Returns, [](ReturnInst *RI) {
2221         return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr;
2222       });
2223       Returns.erase(NewEnd, Returns.end());
2224     } else {
2225       SmallVector<ReturnInst *, 8> NormalReturns;
2226       Function *NewDeoptIntrinsic = Intrinsic::getDeclaration(
2227           Caller->getParent(), Intrinsic::experimental_deoptimize,
2228           {Caller->getReturnType()});
2229 
2230       for (ReturnInst *RI : Returns) {
2231         CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall();
2232         if (!DeoptCall) {
2233           NormalReturns.push_back(RI);
2234           continue;
2235         }
2236 
2237         // The calling convention on the deoptimize call itself may be bogus,
2238         // since the code we're inlining may have undefined behavior (and may
2239         // never actually execute at runtime); but all
2240         // @llvm.experimental.deoptimize declarations have to have the same
2241         // calling convention in a well-formed module.
2242         auto CallingConv = DeoptCall->getCalledFunction()->getCallingConv();
2243         NewDeoptIntrinsic->setCallingConv(CallingConv);
2244         auto *CurBB = RI->getParent();
2245         RI->eraseFromParent();
2246 
2247         SmallVector<Value *, 4> CallArgs(DeoptCall->arg_begin(),
2248                                          DeoptCall->arg_end());
2249 
2250         SmallVector<OperandBundleDef, 1> OpBundles;
2251         DeoptCall->getOperandBundlesAsDefs(OpBundles);
2252         DeoptCall->eraseFromParent();
2253         assert(!OpBundles.empty() &&
2254                "Expected at least the deopt operand bundle");
2255 
2256         IRBuilder<> Builder(CurBB);
2257         CallInst *NewDeoptCall =
2258             Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
2259         NewDeoptCall->setCallingConv(CallingConv);
2260         if (NewDeoptCall->getType()->isVoidTy())
2261           Builder.CreateRetVoid();
2262         else
2263           Builder.CreateRet(NewDeoptCall);
2264       }
2265 
2266       // Leave behind the normal returns so we can merge control flow.
2267       std::swap(Returns, NormalReturns);
2268     }
2269   }
2270 
2271   // Handle any inlined musttail call sites.  In order for a new call site to be
2272   // musttail, the source of the clone and the inlined call site must have been
2273   // musttail.  Therefore it's safe to return without merging control into the
2274   // phi below.
2275   if (InlinedMustTailCalls) {
2276     // Check if we need to bitcast the result of any musttail calls.
2277     Type *NewRetTy = Caller->getReturnType();
2278     bool NeedBitCast = !TheCall->use_empty() && TheCall->getType() != NewRetTy;
2279 
2280     // Handle the returns preceded by musttail calls separately.
2281     SmallVector<ReturnInst *, 8> NormalReturns;
2282     for (ReturnInst *RI : Returns) {
2283       CallInst *ReturnedMustTail =
2284           RI->getParent()->getTerminatingMustTailCall();
2285       if (!ReturnedMustTail) {
2286         NormalReturns.push_back(RI);
2287         continue;
2288       }
2289       if (!NeedBitCast)
2290         continue;
2291 
2292       // Delete the old return and any preceding bitcast.
2293       BasicBlock *CurBB = RI->getParent();
2294       auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
2295       RI->eraseFromParent();
2296       if (OldCast)
2297         OldCast->eraseFromParent();
2298 
2299       // Insert a new bitcast and return with the right type.
2300       IRBuilder<> Builder(CurBB);
2301       Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
2302     }
2303 
2304     // Leave behind the normal returns so we can merge control flow.
2305     std::swap(Returns, NormalReturns);
2306   }
2307 
2308   // Now that all of the transforms on the inlined code have taken place but
2309   // before we splice the inlined code into the CFG and lose track of which
2310   // blocks were actually inlined, collect the call sites. We only do this if
2311   // call graph updates weren't requested, as those provide value handle based
2312   // tracking of inlined call sites instead.
2313   if (InlinedFunctionInfo.ContainsCalls && !IFI.CG) {
2314     // Otherwise just collect the raw call sites that were inlined.
2315     for (BasicBlock &NewBB :
2316          make_range(FirstNewBlock->getIterator(), Caller->end()))
2317       for (Instruction &I : NewBB)
2318         if (auto CS = CallSite(&I))
2319           IFI.InlinedCallSites.push_back(CS);
2320   }
2321 
2322   // If we cloned in _exactly one_ basic block, and if that block ends in a
2323   // return instruction, we splice the body of the inlined callee directly into
2324   // the calling basic block.
2325   if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
2326     // Move all of the instructions right before the call.
2327     OrigBB->getInstList().splice(TheCall->getIterator(),
2328                                  FirstNewBlock->getInstList(),
2329                                  FirstNewBlock->begin(), FirstNewBlock->end());
2330     // Remove the cloned basic block.
2331     Caller->getBasicBlockList().pop_back();
2332 
2333     // If the call site was an invoke instruction, add a branch to the normal
2334     // destination.
2335     if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
2336       BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);
2337       NewBr->setDebugLoc(Returns[0]->getDebugLoc());
2338     }
2339 
2340     // If the return instruction returned a value, replace uses of the call with
2341     // uses of the returned value.
2342     if (!TheCall->use_empty()) {
2343       ReturnInst *R = Returns[0];
2344       if (TheCall == R->getReturnValue())
2345         TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
2346       else
2347         TheCall->replaceAllUsesWith(R->getReturnValue());
2348     }
2349     // Since we are now done with the Call/Invoke, we can delete it.
2350     TheCall->eraseFromParent();
2351 
2352     // Since we are now done with the return instruction, delete it also.
2353     Returns[0]->eraseFromParent();
2354 
2355     // We are now done with the inlining.
2356     return InlineResult::success();
2357   }
2358 
2359   // Otherwise, we have the normal case, of more than one block to inline or
2360   // multiple return sites.
2361 
2362   // We want to clone the entire callee function into the hole between the
2363   // "starter" and "ender" blocks.  How we accomplish this depends on whether
2364   // this is an invoke instruction or a call instruction.
2365   BasicBlock *AfterCallBB;
2366   BranchInst *CreatedBranchToNormalDest = nullptr;
2367   if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
2368 
2369     // Add an unconditional branch to make this look like the CallInst case...
2370     CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), TheCall);
2371 
2372     // Split the basic block.  This guarantees that no PHI nodes will have to be
2373     // updated due to new incoming edges, and make the invoke case more
2374     // symmetric to the call case.
2375     AfterCallBB =
2376         OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(),
2377                                 CalledFunc->getName() + ".exit");
2378 
2379   } else {  // It's a call
2380     // If this is a call instruction, we need to split the basic block that
2381     // the call lives in.
2382     //
2383     AfterCallBB = OrigBB->splitBasicBlock(TheCall->getIterator(),
2384                                           CalledFunc->getName() + ".exit");
2385   }
2386 
2387   if (IFI.CallerBFI) {
2388     // Copy original BB's block frequency to AfterCallBB
2389     IFI.CallerBFI->setBlockFreq(
2390         AfterCallBB, IFI.CallerBFI->getBlockFreq(OrigBB).getFrequency());
2391   }
2392 
2393   // Change the branch that used to go to AfterCallBB to branch to the first
2394   // basic block of the inlined function.
2395   //
2396   Instruction *Br = OrigBB->getTerminator();
2397   assert(Br && Br->getOpcode() == Instruction::Br &&
2398          "splitBasicBlock broken!");
2399   Br->setOperand(0, &*FirstNewBlock);
2400 
2401   // Now that the function is correct, make it a little bit nicer.  In
2402   // particular, move the basic blocks inserted from the end of the function
2403   // into the space made by splitting the source basic block.
2404   Caller->getBasicBlockList().splice(AfterCallBB->getIterator(),
2405                                      Caller->getBasicBlockList(), FirstNewBlock,
2406                                      Caller->end());
2407 
2408   // Handle all of the return instructions that we just cloned in, and eliminate
2409   // any users of the original call/invoke instruction.
2410   Type *RTy = CalledFunc->getReturnType();
2411 
2412   PHINode *PHI = nullptr;
2413   if (Returns.size() > 1) {
2414     // The PHI node should go at the front of the new basic block to merge all
2415     // possible incoming values.
2416     if (!TheCall->use_empty()) {
2417       PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(),
2418                             &AfterCallBB->front());
2419       // Anything that used the result of the function call should now use the
2420       // PHI node as their operand.
2421       TheCall->replaceAllUsesWith(PHI);
2422     }
2423 
2424     // Loop over all of the return instructions adding entries to the PHI node
2425     // as appropriate.
2426     if (PHI) {
2427       for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2428         ReturnInst *RI = Returns[i];
2429         assert(RI->getReturnValue()->getType() == PHI->getType() &&
2430                "Ret value not consistent in function!");
2431         PHI->addIncoming(RI->getReturnValue(), RI->getParent());
2432       }
2433     }
2434 
2435     // Add a branch to the merge points and remove return instructions.
2436     DebugLoc Loc;
2437     for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2438       ReturnInst *RI = Returns[i];
2439       BranchInst* BI = BranchInst::Create(AfterCallBB, RI);
2440       Loc = RI->getDebugLoc();
2441       BI->setDebugLoc(Loc);
2442       RI->eraseFromParent();
2443     }
2444     // We need to set the debug location to *somewhere* inside the
2445     // inlined function. The line number may be nonsensical, but the
2446     // instruction will at least be associated with the right
2447     // function.
2448     if (CreatedBranchToNormalDest)
2449       CreatedBranchToNormalDest->setDebugLoc(Loc);
2450   } else if (!Returns.empty()) {
2451     // Otherwise, if there is exactly one return value, just replace anything
2452     // using the return value of the call with the computed value.
2453     if (!TheCall->use_empty()) {
2454       if (TheCall == Returns[0]->getReturnValue())
2455         TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
2456       else
2457         TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
2458     }
2459 
2460     // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
2461     BasicBlock *ReturnBB = Returns[0]->getParent();
2462     ReturnBB->replaceAllUsesWith(AfterCallBB);
2463 
2464     // Splice the code from the return block into the block that it will return
2465     // to, which contains the code that was after the call.
2466     AfterCallBB->getInstList().splice(AfterCallBB->begin(),
2467                                       ReturnBB->getInstList());
2468 
2469     if (CreatedBranchToNormalDest)
2470       CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());
2471 
2472     // Delete the return instruction now and empty ReturnBB now.
2473     Returns[0]->eraseFromParent();
2474     ReturnBB->eraseFromParent();
2475   } else if (!TheCall->use_empty()) {
2476     // No returns, but something is using the return value of the call.  Just
2477     // nuke the result.
2478     TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
2479   }
2480 
2481   // Since we are now done with the Call/Invoke, we can delete it.
2482   TheCall->eraseFromParent();
2483 
2484   // If we inlined any musttail calls and the original return is now
2485   // unreachable, delete it.  It can only contain a bitcast and ret.
2486   if (InlinedMustTailCalls && pred_begin(AfterCallBB) == pred_end(AfterCallBB))
2487     AfterCallBB->eraseFromParent();
2488 
2489   // We should always be able to fold the entry block of the function into the
2490   // single predecessor of the block...
2491   assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
2492   BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);
2493 
2494   // Splice the code entry block into calling block, right before the
2495   // unconditional branch.
2496   CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes
2497   OrigBB->getInstList().splice(Br->getIterator(), CalleeEntry->getInstList());
2498 
2499   // Remove the unconditional branch.
2500   OrigBB->getInstList().erase(Br);
2501 
2502   // Now we can remove the CalleeEntry block, which is now empty.
2503   Caller->getBasicBlockList().erase(CalleeEntry);
2504 
2505   // If we inserted a phi node, check to see if it has a single value (e.g. all
2506   // the entries are the same or undef).  If so, remove the PHI so it doesn't
2507   // block other optimizations.
2508   if (PHI) {
2509     AssumptionCache *AC =
2510         IFI.GetAssumptionCache ? &(*IFI.GetAssumptionCache)(*Caller) : nullptr;
2511     auto &DL = Caller->getParent()->getDataLayout();
2512     if (Value *V = SimplifyInstruction(PHI, {DL, nullptr, nullptr, AC})) {
2513       PHI->replaceAllUsesWith(V);
2514       PHI->eraseFromParent();
2515     }
2516   }
2517 
2518   return InlineResult::success();
2519 }
2520