1 //===- InlineFunction.cpp - Code to perform function inlining -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements inlining of a function into a call site, resolving
10 // parameters and the return value as appropriate.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/ADT/DenseMap.h"
15 #include "llvm/ADT/None.h"
16 #include "llvm/ADT/Optional.h"
17 #include "llvm/ADT/STLExtras.h"
18 #include "llvm/ADT/SetVector.h"
19 #include "llvm/ADT/SmallPtrSet.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/ADT/StringExtras.h"
22 #include "llvm/ADT/iterator_range.h"
23 #include "llvm/Analysis/AliasAnalysis.h"
24 #include "llvm/Analysis/AssumptionCache.h"
25 #include "llvm/Analysis/BlockFrequencyInfo.h"
26 #include "llvm/Analysis/CallGraph.h"
27 #include "llvm/Analysis/CaptureTracking.h"
28 #include "llvm/Analysis/EHPersonalities.h"
29 #include "llvm/Analysis/InstructionSimplify.h"
30 #include "llvm/Analysis/ProfileSummaryInfo.h"
31 #include "llvm/Transforms/Utils/Local.h"
32 #include "llvm/Analysis/ValueTracking.h"
33 #include "llvm/Analysis/VectorUtils.h"
34 #include "llvm/IR/Argument.h"
35 #include "llvm/IR/BasicBlock.h"
36 #include "llvm/IR/CFG.h"
37 #include "llvm/IR/Constant.h"
38 #include "llvm/IR/Constants.h"
39 #include "llvm/IR/DIBuilder.h"
40 #include "llvm/IR/DataLayout.h"
41 #include "llvm/IR/DebugInfoMetadata.h"
42 #include "llvm/IR/DebugLoc.h"
43 #include "llvm/IR/DerivedTypes.h"
44 #include "llvm/IR/Dominators.h"
45 #include "llvm/IR/Function.h"
46 #include "llvm/IR/IRBuilder.h"
47 #include "llvm/IR/InstrTypes.h"
48 #include "llvm/IR/Instruction.h"
49 #include "llvm/IR/Instructions.h"
50 #include "llvm/IR/IntrinsicInst.h"
51 #include "llvm/IR/Intrinsics.h"
52 #include "llvm/IR/LLVMContext.h"
53 #include "llvm/IR/MDBuilder.h"
54 #include "llvm/IR/Metadata.h"
55 #include "llvm/IR/Module.h"
56 #include "llvm/IR/Type.h"
57 #include "llvm/IR/User.h"
58 #include "llvm/IR/Value.h"
59 #include "llvm/Support/Casting.h"
60 #include "llvm/Support/CommandLine.h"
61 #include "llvm/Support/ErrorHandling.h"
62 #include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
63 #include "llvm/Transforms/Utils/Cloning.h"
64 #include "llvm/Transforms/Utils/ValueMapper.h"
65 #include <algorithm>
66 #include <cassert>
67 #include <cstdint>
68 #include <iterator>
69 #include <limits>
70 #include <string>
71 #include <utility>
72 #include <vector>
73 
74 using namespace llvm;
75 using ProfileCount = Function::ProfileCount;
76 
77 static cl::opt<bool>
78 EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
79   cl::Hidden,
80   cl::desc("Convert noalias attributes to metadata during inlining."));
81 
82 // Disabled by default, because the added alignment assumptions may increase
83 // compile-time and block optimizations. This option is not suitable for use
84 // with frontends that emit comprehensive parameter alignment annotations.
85 static cl::opt<bool>
86 PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining",
87   cl::init(false), cl::Hidden,
88   cl::desc("Convert align attributes to assumptions during inlining."));
89 
90 static cl::opt<bool> UpdateReturnAttributes(
91         "update-return-attrs", cl::init(true), cl::Hidden,
92             cl::desc("Update return attributes on calls within inlined body"));
93 
94 static cl::opt<unsigned> InlinerAttributeWindow(
95     "max-inst-checked-for-throw-during-inlining", cl::Hidden,
96     cl::desc("the maximum number of instructions analyzed for may throw during "
97              "attribute inference in inlined body"),
98     cl::init(4));
99 
100 namespace {
101 
102   /// A class for recording information about inlining a landing pad.
103   class LandingPadInliningInfo {
104     /// Destination of the invoke's unwind.
105     BasicBlock *OuterResumeDest;
106 
107     /// Destination for the callee's resume.
108     BasicBlock *InnerResumeDest = nullptr;
109 
110     /// LandingPadInst associated with the invoke.
111     LandingPadInst *CallerLPad = nullptr;
112 
113     /// PHI for EH values from landingpad insts.
114     PHINode *InnerEHValuesPHI = nullptr;
115 
116     SmallVector<Value*, 8> UnwindDestPHIValues;
117 
118   public:
119     LandingPadInliningInfo(InvokeInst *II)
120         : OuterResumeDest(II->getUnwindDest()) {
121       // If there are PHI nodes in the unwind destination block, we need to keep
122       // track of which values came into them from the invoke before removing
123       // the edge from this block.
124       BasicBlock *InvokeBB = II->getParent();
125       BasicBlock::iterator I = OuterResumeDest->begin();
126       for (; isa<PHINode>(I); ++I) {
127         // Save the value to use for this edge.
128         PHINode *PHI = cast<PHINode>(I);
129         UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
130       }
131 
132       CallerLPad = cast<LandingPadInst>(I);
133     }
134 
135     /// The outer unwind destination is the target of
136     /// unwind edges introduced for calls within the inlined function.
137     BasicBlock *getOuterResumeDest() const {
138       return OuterResumeDest;
139     }
140 
141     BasicBlock *getInnerResumeDest();
142 
143     LandingPadInst *getLandingPadInst() const { return CallerLPad; }
144 
145     /// Forward the 'resume' instruction to the caller's landing pad block.
146     /// When the landing pad block has only one predecessor, this is
147     /// a simple branch. When there is more than one predecessor, we need to
148     /// split the landing pad block after the landingpad instruction and jump
149     /// to there.
150     void forwardResume(ResumeInst *RI,
151                        SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);
152 
153     /// Add incoming-PHI values to the unwind destination block for the given
154     /// basic block, using the values for the original invoke's source block.
155     void addIncomingPHIValuesFor(BasicBlock *BB) const {
156       addIncomingPHIValuesForInto(BB, OuterResumeDest);
157     }
158 
159     void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
160       BasicBlock::iterator I = dest->begin();
161       for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
162         PHINode *phi = cast<PHINode>(I);
163         phi->addIncoming(UnwindDestPHIValues[i], src);
164       }
165     }
166   };
167 
168 } // end anonymous namespace
169 
170 /// Get or create a target for the branch from ResumeInsts.
171 BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
172   if (InnerResumeDest) return InnerResumeDest;
173 
174   // Split the landing pad.
175   BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator();
176   InnerResumeDest =
177     OuterResumeDest->splitBasicBlock(SplitPoint,
178                                      OuterResumeDest->getName() + ".body");
179 
180   // The number of incoming edges we expect to the inner landing pad.
181   const unsigned PHICapacity = 2;
182 
183   // Create corresponding new PHIs for all the PHIs in the outer landing pad.
184   Instruction *InsertPoint = &InnerResumeDest->front();
185   BasicBlock::iterator I = OuterResumeDest->begin();
186   for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
187     PHINode *OuterPHI = cast<PHINode>(I);
188     PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
189                                         OuterPHI->getName() + ".lpad-body",
190                                         InsertPoint);
191     OuterPHI->replaceAllUsesWith(InnerPHI);
192     InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
193   }
194 
195   // Create a PHI for the exception values.
196   InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
197                                      "eh.lpad-body", InsertPoint);
198   CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
199   InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);
200 
201   // All done.
202   return InnerResumeDest;
203 }
204 
205 /// Forward the 'resume' instruction to the caller's landing pad block.
206 /// When the landing pad block has only one predecessor, this is a simple
207 /// branch. When there is more than one predecessor, we need to split the
208 /// landing pad block after the landingpad instruction and jump to there.
209 void LandingPadInliningInfo::forwardResume(
210     ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) {
211   BasicBlock *Dest = getInnerResumeDest();
212   BasicBlock *Src = RI->getParent();
213 
214   BranchInst::Create(Dest, Src);
215 
216   // Update the PHIs in the destination. They were inserted in an order which
217   // makes this work.
218   addIncomingPHIValuesForInto(Src, Dest);
219 
220   InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
221   RI->eraseFromParent();
222 }
223 
224 /// Helper for getUnwindDestToken/getUnwindDestTokenHelper.
225 static Value *getParentPad(Value *EHPad) {
226   if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
227     return FPI->getParentPad();
228   return cast<CatchSwitchInst>(EHPad)->getParentPad();
229 }
230 
231 using UnwindDestMemoTy = DenseMap<Instruction *, Value *>;
232 
233 /// Helper for getUnwindDestToken that does the descendant-ward part of
234 /// the search.
235 static Value *getUnwindDestTokenHelper(Instruction *EHPad,
236                                        UnwindDestMemoTy &MemoMap) {
237   SmallVector<Instruction *, 8> Worklist(1, EHPad);
238 
239   while (!Worklist.empty()) {
240     Instruction *CurrentPad = Worklist.pop_back_val();
241     // We only put pads on the worklist that aren't in the MemoMap.  When
242     // we find an unwind dest for a pad we may update its ancestors, but
243     // the queue only ever contains uncles/great-uncles/etc. of CurrentPad,
244     // so they should never get updated while queued on the worklist.
245     assert(!MemoMap.count(CurrentPad));
246     Value *UnwindDestToken = nullptr;
247     if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
248       if (CatchSwitch->hasUnwindDest()) {
249         UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI();
250       } else {
251         // Catchswitch doesn't have a 'nounwind' variant, and one might be
252         // annotated as "unwinds to caller" when really it's nounwind (see
253         // e.g. SimplifyCFGOpt::SimplifyUnreachable), so we can't infer the
254         // parent's unwind dest from this.  We can check its catchpads'
255         // descendants, since they might include a cleanuppad with an
256         // "unwinds to caller" cleanupret, which can be trusted.
257         for (auto HI = CatchSwitch->handler_begin(),
258                   HE = CatchSwitch->handler_end();
259              HI != HE && !UnwindDestToken; ++HI) {
260           BasicBlock *HandlerBlock = *HI;
261           auto *CatchPad = cast<CatchPadInst>(HandlerBlock->getFirstNonPHI());
262           for (User *Child : CatchPad->users()) {
263             // Intentionally ignore invokes here -- since the catchswitch is
264             // marked "unwind to caller", it would be a verifier error if it
265             // contained an invoke which unwinds out of it, so any invoke we'd
266             // encounter must unwind to some child of the catch.
267             if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
268               continue;
269 
270             Instruction *ChildPad = cast<Instruction>(Child);
271             auto Memo = MemoMap.find(ChildPad);
272             if (Memo == MemoMap.end()) {
273               // Haven't figured out this child pad yet; queue it.
274               Worklist.push_back(ChildPad);
275               continue;
276             }
277             // We've already checked this child, but might have found that
278             // it offers no proof either way.
279             Value *ChildUnwindDestToken = Memo->second;
280             if (!ChildUnwindDestToken)
281               continue;
282             // We already know the child's unwind dest, which can either
283             // be ConstantTokenNone to indicate unwind to caller, or can
284             // be another child of the catchpad.  Only the former indicates
285             // the unwind dest of the catchswitch.
286             if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
287               UnwindDestToken = ChildUnwindDestToken;
288               break;
289             }
290             assert(getParentPad(ChildUnwindDestToken) == CatchPad);
291           }
292         }
293       }
294     } else {
295       auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
296       for (User *U : CleanupPad->users()) {
297         if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
298           if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
299             UnwindDestToken = RetUnwindDest->getFirstNonPHI();
300           else
301             UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext());
302           break;
303         }
304         Value *ChildUnwindDestToken;
305         if (auto *Invoke = dyn_cast<InvokeInst>(U)) {
306           ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI();
307         } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
308           Instruction *ChildPad = cast<Instruction>(U);
309           auto Memo = MemoMap.find(ChildPad);
310           if (Memo == MemoMap.end()) {
311             // Haven't resolved this child yet; queue it and keep searching.
312             Worklist.push_back(ChildPad);
313             continue;
314           }
315           // We've checked this child, but still need to ignore it if it
316           // had no proof either way.
317           ChildUnwindDestToken = Memo->second;
318           if (!ChildUnwindDestToken)
319             continue;
320         } else {
321           // Not a relevant user of the cleanuppad
322           continue;
323         }
324         // In a well-formed program, the child/invoke must either unwind to
325         // an(other) child of the cleanup, or exit the cleanup.  In the
326         // first case, continue searching.
327         if (isa<Instruction>(ChildUnwindDestToken) &&
328             getParentPad(ChildUnwindDestToken) == CleanupPad)
329           continue;
330         UnwindDestToken = ChildUnwindDestToken;
331         break;
332       }
333     }
334     // If we haven't found an unwind dest for CurrentPad, we may have queued its
335     // children, so move on to the next in the worklist.
336     if (!UnwindDestToken)
337       continue;
338 
339     // Now we know that CurrentPad unwinds to UnwindDestToken.  It also exits
340     // any ancestors of CurrentPad up to but not including UnwindDestToken's
341     // parent pad.  Record this in the memo map, and check to see if the
342     // original EHPad being queried is one of the ones exited.
343     Value *UnwindParent;
344     if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
345       UnwindParent = getParentPad(UnwindPad);
346     else
347       UnwindParent = nullptr;
348     bool ExitedOriginalPad = false;
349     for (Instruction *ExitedPad = CurrentPad;
350          ExitedPad && ExitedPad != UnwindParent;
351          ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) {
352       // Skip over catchpads since they just follow their catchswitches.
353       if (isa<CatchPadInst>(ExitedPad))
354         continue;
355       MemoMap[ExitedPad] = UnwindDestToken;
356       ExitedOriginalPad |= (ExitedPad == EHPad);
357     }
358 
359     if (ExitedOriginalPad)
360       return UnwindDestToken;
361 
362     // Continue the search.
363   }
364 
365   // No definitive information is contained within this funclet.
366   return nullptr;
367 }
368 
369 /// Given an EH pad, find where it unwinds.  If it unwinds to an EH pad,
370 /// return that pad instruction.  If it unwinds to caller, return
371 /// ConstantTokenNone.  If it does not have a definitive unwind destination,
372 /// return nullptr.
373 ///
374 /// This routine gets invoked for calls in funclets in inlinees when inlining
375 /// an invoke.  Since many funclets don't have calls inside them, it's queried
376 /// on-demand rather than building a map of pads to unwind dests up front.
377 /// Determining a funclet's unwind dest may require recursively searching its
378 /// descendants, and also ancestors and cousins if the descendants don't provide
379 /// an answer.  Since most funclets will have their unwind dest immediately
380 /// available as the unwind dest of a catchswitch or cleanupret, this routine
381 /// searches top-down from the given pad and then up. To avoid worst-case
382 /// quadratic run-time given that approach, it uses a memo map to avoid
383 /// re-processing funclet trees.  The callers that rewrite the IR as they go
384 /// take advantage of this, for correctness, by checking/forcing rewritten
385 /// pads' entries to match the original callee view.
386 static Value *getUnwindDestToken(Instruction *EHPad,
387                                  UnwindDestMemoTy &MemoMap) {
388   // Catchpads unwind to the same place as their catchswitch;
389   // redirct any queries on catchpads so the code below can
390   // deal with just catchswitches and cleanuppads.
391   if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))
392     EHPad = CPI->getCatchSwitch();
393 
394   // Check if we've already determined the unwind dest for this pad.
395   auto Memo = MemoMap.find(EHPad);
396   if (Memo != MemoMap.end())
397     return Memo->second;
398 
399   // Search EHPad and, if necessary, its descendants.
400   Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
401   assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));
402   if (UnwindDestToken)
403     return UnwindDestToken;
404 
405   // No information is available for this EHPad from itself or any of its
406   // descendants.  An unwind all the way out to a pad in the caller would
407   // need also to agree with the unwind dest of the parent funclet, so
408   // search up the chain to try to find a funclet with information.  Put
409   // null entries in the memo map to avoid re-processing as we go up.
410   MemoMap[EHPad] = nullptr;
411 #ifndef NDEBUG
412   SmallPtrSet<Instruction *, 4> TempMemos;
413   TempMemos.insert(EHPad);
414 #endif
415   Instruction *LastUselessPad = EHPad;
416   Value *AncestorToken;
417   for (AncestorToken = getParentPad(EHPad);
418        auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
419        AncestorToken = getParentPad(AncestorToken)) {
420     // Skip over catchpads since they just follow their catchswitches.
421     if (isa<CatchPadInst>(AncestorPad))
422       continue;
423     // If the MemoMap had an entry mapping AncestorPad to nullptr, since we
424     // haven't yet called getUnwindDestTokenHelper for AncestorPad in this
425     // call to getUnwindDestToken, that would mean that AncestorPad had no
426     // information in itself, its descendants, or its ancestors.  If that
427     // were the case, then we should also have recorded the lack of information
428     // for the descendant that we're coming from.  So assert that we don't
429     // find a null entry in the MemoMap for AncestorPad.
430     assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);
431     auto AncestorMemo = MemoMap.find(AncestorPad);
432     if (AncestorMemo == MemoMap.end()) {
433       UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
434     } else {
435       UnwindDestToken = AncestorMemo->second;
436     }
437     if (UnwindDestToken)
438       break;
439     LastUselessPad = AncestorPad;
440     MemoMap[LastUselessPad] = nullptr;
441 #ifndef NDEBUG
442     TempMemos.insert(LastUselessPad);
443 #endif
444   }
445 
446   // We know that getUnwindDestTokenHelper was called on LastUselessPad and
447   // returned nullptr (and likewise for EHPad and any of its ancestors up to
448   // LastUselessPad), so LastUselessPad has no information from below.  Since
449   // getUnwindDestTokenHelper must investigate all downward paths through
450   // no-information nodes to prove that a node has no information like this,
451   // and since any time it finds information it records it in the MemoMap for
452   // not just the immediately-containing funclet but also any ancestors also
453   // exited, it must be the case that, walking downward from LastUselessPad,
454   // visiting just those nodes which have not been mapped to an unwind dest
455   // by getUnwindDestTokenHelper (the nullptr TempMemos notwithstanding, since
456   // they are just used to keep getUnwindDestTokenHelper from repeating work),
457   // any node visited must have been exhaustively searched with no information
458   // for it found.
459   SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
460   while (!Worklist.empty()) {
461     Instruction *UselessPad = Worklist.pop_back_val();
462     auto Memo = MemoMap.find(UselessPad);
463     if (Memo != MemoMap.end() && Memo->second) {
464       // Here the name 'UselessPad' is a bit of a misnomer, because we've found
465       // that it is a funclet that does have information about unwinding to
466       // a particular destination; its parent was a useless pad.
467       // Since its parent has no information, the unwind edge must not escape
468       // the parent, and must target a sibling of this pad.  This local unwind
469       // gives us no information about EHPad.  Leave it and the subtree rooted
470       // at it alone.
471       assert(getParentPad(Memo->second) == getParentPad(UselessPad));
472       continue;
473     }
474     // We know we don't have information for UselesPad.  If it has an entry in
475     // the MemoMap (mapping it to nullptr), it must be one of the TempMemos
476     // added on this invocation of getUnwindDestToken; if a previous invocation
477     // recorded nullptr, it would have had to prove that the ancestors of
478     // UselessPad, which include LastUselessPad, had no information, and that
479     // in turn would have required proving that the descendants of
480     // LastUselesPad, which include EHPad, have no information about
481     // LastUselessPad, which would imply that EHPad was mapped to nullptr in
482     // the MemoMap on that invocation, which isn't the case if we got here.
483     assert(!MemoMap.count(UselessPad) || TempMemos.count(UselessPad));
484     // Assert as we enumerate users that 'UselessPad' doesn't have any unwind
485     // information that we'd be contradicting by making a map entry for it
486     // (which is something that getUnwindDestTokenHelper must have proved for
487     // us to get here).  Just assert on is direct users here; the checks in
488     // this downward walk at its descendants will verify that they don't have
489     // any unwind edges that exit 'UselessPad' either (i.e. they either have no
490     // unwind edges or unwind to a sibling).
491     MemoMap[UselessPad] = UnwindDestToken;
492     if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
493       assert(CatchSwitch->getUnwindDest() == nullptr && "Expected useless pad");
494       for (BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
495         auto *CatchPad = HandlerBlock->getFirstNonPHI();
496         for (User *U : CatchPad->users()) {
497           assert(
498               (!isa<InvokeInst>(U) ||
499                (getParentPad(
500                     cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
501                 CatchPad)) &&
502               "Expected useless pad");
503           if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
504             Worklist.push_back(cast<Instruction>(U));
505         }
506       }
507     } else {
508       assert(isa<CleanupPadInst>(UselessPad));
509       for (User *U : UselessPad->users()) {
510         assert(!isa<CleanupReturnInst>(U) && "Expected useless pad");
511         assert((!isa<InvokeInst>(U) ||
512                 (getParentPad(
513                      cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
514                  UselessPad)) &&
515                "Expected useless pad");
516         if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
517           Worklist.push_back(cast<Instruction>(U));
518       }
519     }
520   }
521 
522   return UnwindDestToken;
523 }
524 
525 /// When we inline a basic block into an invoke,
526 /// we have to turn all of the calls that can throw into invokes.
527 /// This function analyze BB to see if there are any calls, and if so,
528 /// it rewrites them to be invokes that jump to InvokeDest and fills in the PHI
529 /// nodes in that block with the values specified in InvokeDestPHIValues.
530 static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
531     BasicBlock *BB, BasicBlock *UnwindEdge,
532     UnwindDestMemoTy *FuncletUnwindMap = nullptr) {
533   for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
534     Instruction *I = &*BBI++;
535 
536     // We only need to check for function calls: inlined invoke
537     // instructions require no special handling.
538     CallInst *CI = dyn_cast<CallInst>(I);
539 
540     if (!CI || CI->doesNotThrow() || CI->isInlineAsm())
541       continue;
542 
543     // We do not need to (and in fact, cannot) convert possibly throwing calls
544     // to @llvm.experimental_deoptimize (resp. @llvm.experimental.guard) into
545     // invokes.  The caller's "segment" of the deoptimization continuation
546     // attached to the newly inlined @llvm.experimental_deoptimize
547     // (resp. @llvm.experimental.guard) call should contain the exception
548     // handling logic, if any.
549     if (auto *F = CI->getCalledFunction())
550       if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
551           F->getIntrinsicID() == Intrinsic::experimental_guard)
552         continue;
553 
554     if (auto FuncletBundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
555       // This call is nested inside a funclet.  If that funclet has an unwind
556       // destination within the inlinee, then unwinding out of this call would
557       // be UB.  Rewriting this call to an invoke which targets the inlined
558       // invoke's unwind dest would give the call's parent funclet multiple
559       // unwind destinations, which is something that subsequent EH table
560       // generation can't handle and that the veirifer rejects.  So when we
561       // see such a call, leave it as a call.
562       auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
563       Value *UnwindDestToken =
564           getUnwindDestToken(FuncletPad, *FuncletUnwindMap);
565       if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
566         continue;
567 #ifndef NDEBUG
568       Instruction *MemoKey;
569       if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
570         MemoKey = CatchPad->getCatchSwitch();
571       else
572         MemoKey = FuncletPad;
573       assert(FuncletUnwindMap->count(MemoKey) &&
574              (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
575              "must get memoized to avoid confusing later searches");
576 #endif // NDEBUG
577     }
578 
579     changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);
580     return BB;
581   }
582   return nullptr;
583 }
584 
585 /// If we inlined an invoke site, we need to convert calls
586 /// in the body of the inlined function into invokes.
587 ///
588 /// II is the invoke instruction being inlined.  FirstNewBlock is the first
589 /// block of the inlined code (the last block is the end of the function),
590 /// and InlineCodeInfo is information about the code that got inlined.
591 static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock,
592                                     ClonedCodeInfo &InlinedCodeInfo) {
593   BasicBlock *InvokeDest = II->getUnwindDest();
594 
595   Function *Caller = FirstNewBlock->getParent();
596 
597   // The inlined code is currently at the end of the function, scan from the
598   // start of the inlined code to its end, checking for stuff we need to
599   // rewrite.
600   LandingPadInliningInfo Invoke(II);
601 
602   // Get all of the inlined landing pad instructions.
603   SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
604   for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end();
605        I != E; ++I)
606     if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
607       InlinedLPads.insert(II->getLandingPadInst());
608 
609   // Append the clauses from the outer landing pad instruction into the inlined
610   // landing pad instructions.
611   LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
612   for (LandingPadInst *InlinedLPad : InlinedLPads) {
613     unsigned OuterNum = OuterLPad->getNumClauses();
614     InlinedLPad->reserveClauses(OuterNum);
615     for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
616       InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
617     if (OuterLPad->isCleanup())
618       InlinedLPad->setCleanup(true);
619   }
620 
621   for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
622        BB != E; ++BB) {
623     if (InlinedCodeInfo.ContainsCalls)
624       if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
625               &*BB, Invoke.getOuterResumeDest()))
626         // Update any PHI nodes in the exceptional block to indicate that there
627         // is now a new entry in them.
628         Invoke.addIncomingPHIValuesFor(NewBB);
629 
630     // Forward any resumes that are remaining here.
631     if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
632       Invoke.forwardResume(RI, InlinedLPads);
633   }
634 
635   // Now that everything is happy, we have one final detail.  The PHI nodes in
636   // the exception destination block still have entries due to the original
637   // invoke instruction. Eliminate these entries (which might even delete the
638   // PHI node) now.
639   InvokeDest->removePredecessor(II->getParent());
640 }
641 
642 /// If we inlined an invoke site, we need to convert calls
643 /// in the body of the inlined function into invokes.
644 ///
645 /// II is the invoke instruction being inlined.  FirstNewBlock is the first
646 /// block of the inlined code (the last block is the end of the function),
647 /// and InlineCodeInfo is information about the code that got inlined.
648 static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock,
649                                ClonedCodeInfo &InlinedCodeInfo) {
650   BasicBlock *UnwindDest = II->getUnwindDest();
651   Function *Caller = FirstNewBlock->getParent();
652 
653   assert(UnwindDest->getFirstNonPHI()->isEHPad() && "unexpected BasicBlock!");
654 
655   // If there are PHI nodes in the unwind destination block, we need to keep
656   // track of which values came into them from the invoke before removing the
657   // edge from this block.
658   SmallVector<Value *, 8> UnwindDestPHIValues;
659   BasicBlock *InvokeBB = II->getParent();
660   for (Instruction &I : *UnwindDest) {
661     // Save the value to use for this edge.
662     PHINode *PHI = dyn_cast<PHINode>(&I);
663     if (!PHI)
664       break;
665     UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
666   }
667 
668   // Add incoming-PHI values to the unwind destination block for the given basic
669   // block, using the values for the original invoke's source block.
670   auto UpdatePHINodes = [&](BasicBlock *Src) {
671     BasicBlock::iterator I = UnwindDest->begin();
672     for (Value *V : UnwindDestPHIValues) {
673       PHINode *PHI = cast<PHINode>(I);
674       PHI->addIncoming(V, Src);
675       ++I;
676     }
677   };
678 
679   // This connects all the instructions which 'unwind to caller' to the invoke
680   // destination.
681   UnwindDestMemoTy FuncletUnwindMap;
682   for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
683        BB != E; ++BB) {
684     if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
685       if (CRI->unwindsToCaller()) {
686         auto *CleanupPad = CRI->getCleanupPad();
687         CleanupReturnInst::Create(CleanupPad, UnwindDest, CRI);
688         CRI->eraseFromParent();
689         UpdatePHINodes(&*BB);
690         // Finding a cleanupret with an unwind destination would confuse
691         // subsequent calls to getUnwindDestToken, so map the cleanuppad
692         // to short-circuit any such calls and recognize this as an "unwind
693         // to caller" cleanup.
694         assert(!FuncletUnwindMap.count(CleanupPad) ||
695                isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
696         FuncletUnwindMap[CleanupPad] =
697             ConstantTokenNone::get(Caller->getContext());
698       }
699     }
700 
701     Instruction *I = BB->getFirstNonPHI();
702     if (!I->isEHPad())
703       continue;
704 
705     Instruction *Replacement = nullptr;
706     if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
707       if (CatchSwitch->unwindsToCaller()) {
708         Value *UnwindDestToken;
709         if (auto *ParentPad =
710                 dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
711           // This catchswitch is nested inside another funclet.  If that
712           // funclet has an unwind destination within the inlinee, then
713           // unwinding out of this catchswitch would be UB.  Rewriting this
714           // catchswitch to unwind to the inlined invoke's unwind dest would
715           // give the parent funclet multiple unwind destinations, which is
716           // something that subsequent EH table generation can't handle and
717           // that the veirifer rejects.  So when we see such a call, leave it
718           // as "unwind to caller".
719           UnwindDestToken = getUnwindDestToken(ParentPad, FuncletUnwindMap);
720           if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
721             continue;
722         } else {
723           // This catchswitch has no parent to inherit constraints from, and
724           // none of its descendants can have an unwind edge that exits it and
725           // targets another funclet in the inlinee.  It may or may not have a
726           // descendant that definitively has an unwind to caller.  In either
727           // case, we'll have to assume that any unwinds out of it may need to
728           // be routed to the caller, so treat it as though it has a definitive
729           // unwind to caller.
730           UnwindDestToken = ConstantTokenNone::get(Caller->getContext());
731         }
732         auto *NewCatchSwitch = CatchSwitchInst::Create(
733             CatchSwitch->getParentPad(), UnwindDest,
734             CatchSwitch->getNumHandlers(), CatchSwitch->getName(),
735             CatchSwitch);
736         for (BasicBlock *PadBB : CatchSwitch->handlers())
737           NewCatchSwitch->addHandler(PadBB);
738         // Propagate info for the old catchswitch over to the new one in
739         // the unwind map.  This also serves to short-circuit any subsequent
740         // checks for the unwind dest of this catchswitch, which would get
741         // confused if they found the outer handler in the callee.
742         FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
743         Replacement = NewCatchSwitch;
744       }
745     } else if (!isa<FuncletPadInst>(I)) {
746       llvm_unreachable("unexpected EHPad!");
747     }
748 
749     if (Replacement) {
750       Replacement->takeName(I);
751       I->replaceAllUsesWith(Replacement);
752       I->eraseFromParent();
753       UpdatePHINodes(&*BB);
754     }
755   }
756 
757   if (InlinedCodeInfo.ContainsCalls)
758     for (Function::iterator BB = FirstNewBlock->getIterator(),
759                             E = Caller->end();
760          BB != E; ++BB)
761       if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
762               &*BB, UnwindDest, &FuncletUnwindMap))
763         // Update any PHI nodes in the exceptional block to indicate that there
764         // is now a new entry in them.
765         UpdatePHINodes(NewBB);
766 
767   // Now that everything is happy, we have one final detail.  The PHI nodes in
768   // the exception destination block still have entries due to the original
769   // invoke instruction. Eliminate these entries (which might even delete the
770   // PHI node) now.
771   UnwindDest->removePredecessor(InvokeBB);
772 }
773 
774 /// When inlining a call site that has !llvm.mem.parallel_loop_access or
775 /// llvm.access.group metadata, that metadata should be propagated to all
776 /// memory-accessing cloned instructions.
777 static void PropagateParallelLoopAccessMetadata(CallBase &CB,
778                                                 ValueToValueMapTy &VMap) {
779   MDNode *M = CB.getMetadata(LLVMContext::MD_mem_parallel_loop_access);
780   MDNode *CallAccessGroup = CB.getMetadata(LLVMContext::MD_access_group);
781   if (!M && !CallAccessGroup)
782     return;
783 
784   for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
785        VMI != VMIE; ++VMI) {
786     if (!VMI->second)
787       continue;
788 
789     Instruction *NI = dyn_cast<Instruction>(VMI->second);
790     if (!NI)
791       continue;
792 
793     if (M) {
794       if (MDNode *PM =
795               NI->getMetadata(LLVMContext::MD_mem_parallel_loop_access)) {
796         M = MDNode::concatenate(PM, M);
797       NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
798       } else if (NI->mayReadOrWriteMemory()) {
799         NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
800       }
801     }
802 
803     if (NI->mayReadOrWriteMemory()) {
804       MDNode *UnitedAccGroups = uniteAccessGroups(
805           NI->getMetadata(LLVMContext::MD_access_group), CallAccessGroup);
806       NI->setMetadata(LLVMContext::MD_access_group, UnitedAccGroups);
807     }
808   }
809 }
810 
811 /// When inlining a function that contains noalias scope metadata,
812 /// this metadata needs to be cloned so that the inlined blocks
813 /// have different "unique scopes" at every call site. Were this not done, then
814 /// aliasing scopes from a function inlined into a caller multiple times could
815 /// not be differentiated (and this would lead to miscompiles because the
816 /// non-aliasing property communicated by the metadata could have
817 /// call-site-specific control dependencies).
818 static void CloneAliasScopeMetadata(CallBase &CB, ValueToValueMapTy &VMap) {
819   const Function *CalledFunc = CB.getCalledFunction();
820   SetVector<const MDNode *> MD;
821 
822   // Note: We could only clone the metadata if it is already used in the
823   // caller. I'm omitting that check here because it might confuse
824   // inter-procedural alias analysis passes. We can revisit this if it becomes
825   // an efficiency or overhead problem.
826 
827   for (const BasicBlock &I : *CalledFunc)
828     for (const Instruction &J : I) {
829       if (const MDNode *M = J.getMetadata(LLVMContext::MD_alias_scope))
830         MD.insert(M);
831       if (const MDNode *M = J.getMetadata(LLVMContext::MD_noalias))
832         MD.insert(M);
833     }
834 
835   if (MD.empty())
836     return;
837 
838   // Walk the existing metadata, adding the complete (perhaps cyclic) chain to
839   // the set.
840   SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
841   while (!Queue.empty()) {
842     const MDNode *M = cast<MDNode>(Queue.pop_back_val());
843     for (unsigned i = 0, ie = M->getNumOperands(); i != ie; ++i)
844       if (const MDNode *M1 = dyn_cast<MDNode>(M->getOperand(i)))
845         if (MD.insert(M1))
846           Queue.push_back(M1);
847   }
848 
849   // Now we have a complete set of all metadata in the chains used to specify
850   // the noalias scopes and the lists of those scopes.
851   SmallVector<TempMDTuple, 16> DummyNodes;
852   DenseMap<const MDNode *, TrackingMDNodeRef> MDMap;
853   for (const MDNode *I : MD) {
854     DummyNodes.push_back(MDTuple::getTemporary(CalledFunc->getContext(), None));
855     MDMap[I].reset(DummyNodes.back().get());
856   }
857 
858   // Create new metadata nodes to replace the dummy nodes, replacing old
859   // metadata references with either a dummy node or an already-created new
860   // node.
861   for (const MDNode *I : MD) {
862     SmallVector<Metadata *, 4> NewOps;
863     for (unsigned i = 0, ie = I->getNumOperands(); i != ie; ++i) {
864       const Metadata *V = I->getOperand(i);
865       if (const MDNode *M = dyn_cast<MDNode>(V))
866         NewOps.push_back(MDMap[M]);
867       else
868         NewOps.push_back(const_cast<Metadata *>(V));
869     }
870 
871     MDNode *NewM = MDNode::get(CalledFunc->getContext(), NewOps);
872     MDTuple *TempM = cast<MDTuple>(MDMap[I]);
873     assert(TempM->isTemporary() && "Expected temporary node");
874 
875     TempM->replaceAllUsesWith(NewM);
876   }
877 
878   // Now replace the metadata in the new inlined instructions with the
879   // repacements from the map.
880   for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
881        VMI != VMIE; ++VMI) {
882     if (!VMI->second)
883       continue;
884 
885     Instruction *NI = dyn_cast<Instruction>(VMI->second);
886     if (!NI)
887       continue;
888 
889     if (MDNode *M = NI->getMetadata(LLVMContext::MD_alias_scope)) {
890       MDNode *NewMD = MDMap[M];
891       // If the call site also had alias scope metadata (a list of scopes to
892       // which instructions inside it might belong), propagate those scopes to
893       // the inlined instructions.
894       if (MDNode *CSM = CB.getMetadata(LLVMContext::MD_alias_scope))
895         NewMD = MDNode::concatenate(NewMD, CSM);
896       NI->setMetadata(LLVMContext::MD_alias_scope, NewMD);
897     } else if (NI->mayReadOrWriteMemory()) {
898       if (MDNode *M = CB.getMetadata(LLVMContext::MD_alias_scope))
899         NI->setMetadata(LLVMContext::MD_alias_scope, M);
900     }
901 
902     if (MDNode *M = NI->getMetadata(LLVMContext::MD_noalias)) {
903       MDNode *NewMD = MDMap[M];
904       // If the call site also had noalias metadata (a list of scopes with
905       // which instructions inside it don't alias), propagate those scopes to
906       // the inlined instructions.
907       if (MDNode *CSM = CB.getMetadata(LLVMContext::MD_noalias))
908         NewMD = MDNode::concatenate(NewMD, CSM);
909       NI->setMetadata(LLVMContext::MD_noalias, NewMD);
910     } else if (NI->mayReadOrWriteMemory()) {
911       if (MDNode *M = CB.getMetadata(LLVMContext::MD_noalias))
912         NI->setMetadata(LLVMContext::MD_noalias, M);
913     }
914   }
915 }
916 
917 /// If the inlined function has noalias arguments,
918 /// then add new alias scopes for each noalias argument, tag the mapped noalias
919 /// parameters with noalias metadata specifying the new scope, and tag all
920 /// non-derived loads, stores and memory intrinsics with the new alias scopes.
921 static void AddAliasScopeMetadata(CallBase &CB, ValueToValueMapTy &VMap,
922                                   const DataLayout &DL, AAResults *CalleeAAR) {
923   if (!EnableNoAliasConversion)
924     return;
925 
926   const Function *CalledFunc = CB.getCalledFunction();
927   SmallVector<const Argument *, 4> NoAliasArgs;
928 
929   for (const Argument &Arg : CalledFunc->args())
930     if (CB.paramHasAttr(Arg.getArgNo(), Attribute::NoAlias) && !Arg.use_empty())
931       NoAliasArgs.push_back(&Arg);
932 
933   if (NoAliasArgs.empty())
934     return;
935 
936   // To do a good job, if a noalias variable is captured, we need to know if
937   // the capture point dominates the particular use we're considering.
938   DominatorTree DT;
939   DT.recalculate(const_cast<Function&>(*CalledFunc));
940 
941   // noalias indicates that pointer values based on the argument do not alias
942   // pointer values which are not based on it. So we add a new "scope" for each
943   // noalias function argument. Accesses using pointers based on that argument
944   // become part of that alias scope, accesses using pointers not based on that
945   // argument are tagged as noalias with that scope.
946 
947   DenseMap<const Argument *, MDNode *> NewScopes;
948   MDBuilder MDB(CalledFunc->getContext());
949 
950   // Create a new scope domain for this function.
951   MDNode *NewDomain =
952     MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
953   for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
954     const Argument *A = NoAliasArgs[i];
955 
956     std::string Name = std::string(CalledFunc->getName());
957     if (A->hasName()) {
958       Name += ": %";
959       Name += A->getName();
960     } else {
961       Name += ": argument ";
962       Name += utostr(i);
963     }
964 
965     // Note: We always create a new anonymous root here. This is true regardless
966     // of the linkage of the callee because the aliasing "scope" is not just a
967     // property of the callee, but also all control dependencies in the caller.
968     MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
969     NewScopes.insert(std::make_pair(A, NewScope));
970   }
971 
972   // Iterate over all new instructions in the map; for all memory-access
973   // instructions, add the alias scope metadata.
974   for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
975        VMI != VMIE; ++VMI) {
976     if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
977       if (!VMI->second)
978         continue;
979 
980       Instruction *NI = dyn_cast<Instruction>(VMI->second);
981       if (!NI)
982         continue;
983 
984       bool IsArgMemOnlyCall = false, IsFuncCall = false;
985       SmallVector<const Value *, 2> PtrArgs;
986 
987       if (const LoadInst *LI = dyn_cast<LoadInst>(I))
988         PtrArgs.push_back(LI->getPointerOperand());
989       else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
990         PtrArgs.push_back(SI->getPointerOperand());
991       else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
992         PtrArgs.push_back(VAAI->getPointerOperand());
993       else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
994         PtrArgs.push_back(CXI->getPointerOperand());
995       else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
996         PtrArgs.push_back(RMWI->getPointerOperand());
997       else if (const auto *Call = dyn_cast<CallBase>(I)) {
998         // If we know that the call does not access memory, then we'll still
999         // know that about the inlined clone of this call site, and we don't
1000         // need to add metadata.
1001         if (Call->doesNotAccessMemory())
1002           continue;
1003 
1004         IsFuncCall = true;
1005         if (CalleeAAR) {
1006           FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(Call);
1007           if (AAResults::onlyAccessesArgPointees(MRB))
1008             IsArgMemOnlyCall = true;
1009         }
1010 
1011         for (Value *Arg : Call->args()) {
1012           // We need to check the underlying objects of all arguments, not just
1013           // the pointer arguments, because we might be passing pointers as
1014           // integers, etc.
1015           // However, if we know that the call only accesses pointer arguments,
1016           // then we only need to check the pointer arguments.
1017           if (IsArgMemOnlyCall && !Arg->getType()->isPointerTy())
1018             continue;
1019 
1020           PtrArgs.push_back(Arg);
1021         }
1022       }
1023 
1024       // If we found no pointers, then this instruction is not suitable for
1025       // pairing with an instruction to receive aliasing metadata.
1026       // However, if this is a call, this we might just alias with none of the
1027       // noalias arguments.
1028       if (PtrArgs.empty() && !IsFuncCall)
1029         continue;
1030 
1031       // It is possible that there is only one underlying object, but you
1032       // need to go through several PHIs to see it, and thus could be
1033       // repeated in the Objects list.
1034       SmallPtrSet<const Value *, 4> ObjSet;
1035       SmallVector<Metadata *, 4> Scopes, NoAliases;
1036 
1037       SmallSetVector<const Argument *, 4> NAPtrArgs;
1038       for (const Value *V : PtrArgs) {
1039         SmallVector<const Value *, 4> Objects;
1040         getUnderlyingObjects(V, Objects, /* LI = */ nullptr);
1041 
1042         for (const Value *O : Objects)
1043           ObjSet.insert(O);
1044       }
1045 
1046       // Figure out if we're derived from anything that is not a noalias
1047       // argument.
1048       bool CanDeriveViaCapture = false, UsesAliasingPtr = false;
1049       for (const Value *V : ObjSet) {
1050         // Is this value a constant that cannot be derived from any pointer
1051         // value (we need to exclude constant expressions, for example, that
1052         // are formed from arithmetic on global symbols).
1053         bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
1054                              isa<ConstantPointerNull>(V) ||
1055                              isa<ConstantDataVector>(V) || isa<UndefValue>(V);
1056         if (IsNonPtrConst)
1057           continue;
1058 
1059         // If this is anything other than a noalias argument, then we cannot
1060         // completely describe the aliasing properties using alias.scope
1061         // metadata (and, thus, won't add any).
1062         if (const Argument *A = dyn_cast<Argument>(V)) {
1063           if (!CB.paramHasAttr(A->getArgNo(), Attribute::NoAlias))
1064             UsesAliasingPtr = true;
1065         } else {
1066           UsesAliasingPtr = true;
1067         }
1068 
1069         // If this is not some identified function-local object (which cannot
1070         // directly alias a noalias argument), or some other argument (which,
1071         // by definition, also cannot alias a noalias argument), then we could
1072         // alias a noalias argument that has been captured).
1073         if (!isa<Argument>(V) &&
1074             !isIdentifiedFunctionLocal(const_cast<Value*>(V)))
1075           CanDeriveViaCapture = true;
1076       }
1077 
1078       // A function call can always get captured noalias pointers (via other
1079       // parameters, globals, etc.).
1080       if (IsFuncCall && !IsArgMemOnlyCall)
1081         CanDeriveViaCapture = true;
1082 
1083       // First, we want to figure out all of the sets with which we definitely
1084       // don't alias. Iterate over all noalias set, and add those for which:
1085       //   1. The noalias argument is not in the set of objects from which we
1086       //      definitely derive.
1087       //   2. The noalias argument has not yet been captured.
1088       // An arbitrary function that might load pointers could see captured
1089       // noalias arguments via other noalias arguments or globals, and so we
1090       // must always check for prior capture.
1091       for (const Argument *A : NoAliasArgs) {
1092         if (!ObjSet.count(A) && (!CanDeriveViaCapture ||
1093                                  // It might be tempting to skip the
1094                                  // PointerMayBeCapturedBefore check if
1095                                  // A->hasNoCaptureAttr() is true, but this is
1096                                  // incorrect because nocapture only guarantees
1097                                  // that no copies outlive the function, not
1098                                  // that the value cannot be locally captured.
1099                                  !PointerMayBeCapturedBefore(A,
1100                                    /* ReturnCaptures */ false,
1101                                    /* StoreCaptures */ false, I, &DT)))
1102           NoAliases.push_back(NewScopes[A]);
1103       }
1104 
1105       if (!NoAliases.empty())
1106         NI->setMetadata(LLVMContext::MD_noalias,
1107                         MDNode::concatenate(
1108                             NI->getMetadata(LLVMContext::MD_noalias),
1109                             MDNode::get(CalledFunc->getContext(), NoAliases)));
1110 
1111       // Next, we want to figure out all of the sets to which we might belong.
1112       // We might belong to a set if the noalias argument is in the set of
1113       // underlying objects. If there is some non-noalias argument in our list
1114       // of underlying objects, then we cannot add a scope because the fact
1115       // that some access does not alias with any set of our noalias arguments
1116       // cannot itself guarantee that it does not alias with this access
1117       // (because there is some pointer of unknown origin involved and the
1118       // other access might also depend on this pointer). We also cannot add
1119       // scopes to arbitrary functions unless we know they don't access any
1120       // non-parameter pointer-values.
1121       bool CanAddScopes = !UsesAliasingPtr;
1122       if (CanAddScopes && IsFuncCall)
1123         CanAddScopes = IsArgMemOnlyCall;
1124 
1125       if (CanAddScopes)
1126         for (const Argument *A : NoAliasArgs) {
1127           if (ObjSet.count(A))
1128             Scopes.push_back(NewScopes[A]);
1129         }
1130 
1131       if (!Scopes.empty())
1132         NI->setMetadata(
1133             LLVMContext::MD_alias_scope,
1134             MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
1135                                 MDNode::get(CalledFunc->getContext(), Scopes)));
1136     }
1137   }
1138 }
1139 
1140 static bool MayContainThrowingOrExitingCall(Instruction *Begin,
1141                                             Instruction *End) {
1142 
1143   assert(Begin->getParent() == End->getParent() &&
1144          "Expected to be in same basic block!");
1145   unsigned NumInstChecked = 0;
1146   // Check that all instructions in the range [Begin, End) are guaranteed to
1147   // transfer execution to successor.
1148   for (auto &I : make_range(Begin->getIterator(), End->getIterator()))
1149     if (NumInstChecked++ > InlinerAttributeWindow ||
1150         !isGuaranteedToTransferExecutionToSuccessor(&I))
1151       return true;
1152   return false;
1153 }
1154 
1155 static AttrBuilder IdentifyValidAttributes(CallBase &CB) {
1156 
1157   AttrBuilder AB(CB.getAttributes(), AttributeList::ReturnIndex);
1158   if (AB.empty())
1159     return AB;
1160   AttrBuilder Valid;
1161   // Only allow these white listed attributes to be propagated back to the
1162   // callee. This is because other attributes may only be valid on the call
1163   // itself, i.e. attributes such as signext and zeroext.
1164   if (auto DerefBytes = AB.getDereferenceableBytes())
1165     Valid.addDereferenceableAttr(DerefBytes);
1166   if (auto DerefOrNullBytes = AB.getDereferenceableOrNullBytes())
1167     Valid.addDereferenceableOrNullAttr(DerefOrNullBytes);
1168   if (AB.contains(Attribute::NoAlias))
1169     Valid.addAttribute(Attribute::NoAlias);
1170   if (AB.contains(Attribute::NonNull))
1171     Valid.addAttribute(Attribute::NonNull);
1172   return Valid;
1173 }
1174 
1175 static void AddReturnAttributes(CallBase &CB, ValueToValueMapTy &VMap) {
1176   if (!UpdateReturnAttributes)
1177     return;
1178 
1179   AttrBuilder Valid = IdentifyValidAttributes(CB);
1180   if (Valid.empty())
1181     return;
1182   auto *CalledFunction = CB.getCalledFunction();
1183   auto &Context = CalledFunction->getContext();
1184 
1185   for (auto &BB : *CalledFunction) {
1186     auto *RI = dyn_cast<ReturnInst>(BB.getTerminator());
1187     if (!RI || !isa<CallBase>(RI->getOperand(0)))
1188       continue;
1189     auto *RetVal = cast<CallBase>(RI->getOperand(0));
1190     // Sanity check that the cloned RetVal exists and is a call, otherwise we
1191     // cannot add the attributes on the cloned RetVal.
1192     // Simplification during inlining could have transformed the cloned
1193     // instruction.
1194     auto *NewRetVal = dyn_cast_or_null<CallBase>(VMap.lookup(RetVal));
1195     if (!NewRetVal)
1196       continue;
1197     // Backward propagation of attributes to the returned value may be incorrect
1198     // if it is control flow dependent.
1199     // Consider:
1200     // @callee {
1201     //  %rv = call @foo()
1202     //  %rv2 = call @bar()
1203     //  if (%rv2 != null)
1204     //    return %rv2
1205     //  if (%rv == null)
1206     //    exit()
1207     //  return %rv
1208     // }
1209     // caller() {
1210     //   %val = call nonnull @callee()
1211     // }
1212     // Here we cannot add the nonnull attribute on either foo or bar. So, we
1213     // limit the check to both RetVal and RI are in the same basic block and
1214     // there are no throwing/exiting instructions between these instructions.
1215     if (RI->getParent() != RetVal->getParent() ||
1216         MayContainThrowingOrExitingCall(RetVal, RI))
1217       continue;
1218     // Add to the existing attributes of NewRetVal, i.e. the cloned call
1219     // instruction.
1220     // NB! When we have the same attribute already existing on NewRetVal, but
1221     // with a differing value, the AttributeList's merge API honours the already
1222     // existing attribute value (i.e. attributes such as dereferenceable,
1223     // dereferenceable_or_null etc). See AttrBuilder::merge for more details.
1224     AttributeList AL = NewRetVal->getAttributes();
1225     AttributeList NewAL =
1226         AL.addAttributes(Context, AttributeList::ReturnIndex, Valid);
1227     NewRetVal->setAttributes(NewAL);
1228   }
1229 }
1230 
1231 /// If the inlined function has non-byval align arguments, then
1232 /// add @llvm.assume-based alignment assumptions to preserve this information.
1233 static void AddAlignmentAssumptions(CallBase &CB, InlineFunctionInfo &IFI) {
1234   if (!PreserveAlignmentAssumptions || !IFI.GetAssumptionCache)
1235     return;
1236 
1237   AssumptionCache *AC = &IFI.GetAssumptionCache(*CB.getCaller());
1238   auto &DL = CB.getCaller()->getParent()->getDataLayout();
1239 
1240   // To avoid inserting redundant assumptions, we should check for assumptions
1241   // already in the caller. To do this, we might need a DT of the caller.
1242   DominatorTree DT;
1243   bool DTCalculated = false;
1244 
1245   Function *CalledFunc = CB.getCalledFunction();
1246   for (Argument &Arg : CalledFunc->args()) {
1247     unsigned Align = Arg.getType()->isPointerTy() ? Arg.getParamAlignment() : 0;
1248     if (Align && !Arg.hasPassPointeeByValueCopyAttr() && !Arg.hasNUses(0)) {
1249       if (!DTCalculated) {
1250         DT.recalculate(*CB.getCaller());
1251         DTCalculated = true;
1252       }
1253 
1254       // If we can already prove the asserted alignment in the context of the
1255       // caller, then don't bother inserting the assumption.
1256       Value *ArgVal = CB.getArgOperand(Arg.getArgNo());
1257       if (getKnownAlignment(ArgVal, DL, &CB, AC, &DT) >= Align)
1258         continue;
1259 
1260       CallInst *NewAsmp =
1261           IRBuilder<>(&CB).CreateAlignmentAssumption(DL, ArgVal, Align);
1262       AC->registerAssumption(NewAsmp);
1263     }
1264   }
1265 }
1266 
1267 /// Once we have cloned code over from a callee into the caller,
1268 /// update the specified callgraph to reflect the changes we made.
1269 /// Note that it's possible that not all code was copied over, so only
1270 /// some edges of the callgraph may remain.
1271 static void UpdateCallGraphAfterInlining(CallBase &CB,
1272                                          Function::iterator FirstNewBlock,
1273                                          ValueToValueMapTy &VMap,
1274                                          InlineFunctionInfo &IFI) {
1275   CallGraph &CG = *IFI.CG;
1276   const Function *Caller = CB.getCaller();
1277   const Function *Callee = CB.getCalledFunction();
1278   CallGraphNode *CalleeNode = CG[Callee];
1279   CallGraphNode *CallerNode = CG[Caller];
1280 
1281   // Since we inlined some uninlined call sites in the callee into the caller,
1282   // add edges from the caller to all of the callees of the callee.
1283   CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();
1284 
1285   // Consider the case where CalleeNode == CallerNode.
1286   CallGraphNode::CalledFunctionsVector CallCache;
1287   if (CalleeNode == CallerNode) {
1288     CallCache.assign(I, E);
1289     I = CallCache.begin();
1290     E = CallCache.end();
1291   }
1292 
1293   for (; I != E; ++I) {
1294     // Skip 'refererence' call records.
1295     if (!I->first)
1296       continue;
1297 
1298     const Value *OrigCall = *I->first;
1299 
1300     ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
1301     // Only copy the edge if the call was inlined!
1302     if (VMI == VMap.end() || VMI->second == nullptr)
1303       continue;
1304 
1305     // If the call was inlined, but then constant folded, there is no edge to
1306     // add.  Check for this case.
1307     auto *NewCall = dyn_cast<CallBase>(VMI->second);
1308     if (!NewCall)
1309       continue;
1310 
1311     // We do not treat intrinsic calls like real function calls because we
1312     // expect them to become inline code; do not add an edge for an intrinsic.
1313     if (NewCall->getCalledFunction() &&
1314         NewCall->getCalledFunction()->isIntrinsic())
1315       continue;
1316 
1317     // Remember that this call site got inlined for the client of
1318     // InlineFunction.
1319     IFI.InlinedCalls.push_back(NewCall);
1320 
1321     // It's possible that inlining the callsite will cause it to go from an
1322     // indirect to a direct call by resolving a function pointer.  If this
1323     // happens, set the callee of the new call site to a more precise
1324     // destination.  This can also happen if the call graph node of the caller
1325     // was just unnecessarily imprecise.
1326     if (!I->second->getFunction())
1327       if (Function *F = NewCall->getCalledFunction()) {
1328         // Indirect call site resolved to direct call.
1329         CallerNode->addCalledFunction(NewCall, CG[F]);
1330 
1331         continue;
1332       }
1333 
1334     CallerNode->addCalledFunction(NewCall, I->second);
1335   }
1336 
1337   // Update the call graph by deleting the edge from Callee to Caller.  We must
1338   // do this after the loop above in case Caller and Callee are the same.
1339   CallerNode->removeCallEdgeFor(*cast<CallBase>(&CB));
1340 }
1341 
1342 static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
1343                                     BasicBlock *InsertBlock,
1344                                     InlineFunctionInfo &IFI) {
1345   Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
1346   IRBuilder<> Builder(InsertBlock, InsertBlock->begin());
1347 
1348   Value *Size = Builder.getInt64(M->getDataLayout().getTypeStoreSize(AggTy));
1349 
1350   // Always generate a memcpy of alignment 1 here because we don't know
1351   // the alignment of the src pointer.  Other optimizations can infer
1352   // better alignment.
1353   Builder.CreateMemCpy(Dst, /*DstAlign*/ Align(1), Src,
1354                        /*SrcAlign*/ Align(1), Size);
1355 }
1356 
1357 /// When inlining a call site that has a byval argument,
1358 /// we have to make the implicit memcpy explicit by adding it.
1359 static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
1360                                   const Function *CalledFunc,
1361                                   InlineFunctionInfo &IFI,
1362                                   unsigned ByValAlignment) {
1363   PointerType *ArgTy = cast<PointerType>(Arg->getType());
1364   Type *AggTy = ArgTy->getElementType();
1365 
1366   Function *Caller = TheCall->getFunction();
1367   const DataLayout &DL = Caller->getParent()->getDataLayout();
1368 
1369   // If the called function is readonly, then it could not mutate the caller's
1370   // copy of the byval'd memory.  In this case, it is safe to elide the copy and
1371   // temporary.
1372   if (CalledFunc->onlyReadsMemory()) {
1373     // If the byval argument has a specified alignment that is greater than the
1374     // passed in pointer, then we either have to round up the input pointer or
1375     // give up on this transformation.
1376     if (ByValAlignment <= 1)  // 0 = unspecified, 1 = no particular alignment.
1377       return Arg;
1378 
1379     AssumptionCache *AC =
1380         IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
1381 
1382     // If the pointer is already known to be sufficiently aligned, or if we can
1383     // round it up to a larger alignment, then we don't need a temporary.
1384     if (getOrEnforceKnownAlignment(Arg, Align(ByValAlignment), DL, TheCall,
1385                                    AC) >= ByValAlignment)
1386       return Arg;
1387 
1388     // Otherwise, we have to make a memcpy to get a safe alignment.  This is bad
1389     // for code quality, but rarely happens and is required for correctness.
1390   }
1391 
1392   // Create the alloca.  If we have DataLayout, use nice alignment.
1393   Align Alignment(DL.getPrefTypeAlignment(AggTy));
1394 
1395   // If the byval had an alignment specified, we *must* use at least that
1396   // alignment, as it is required by the byval argument (and uses of the
1397   // pointer inside the callee).
1398   Alignment = max(Alignment, MaybeAlign(ByValAlignment));
1399 
1400   Value *NewAlloca =
1401       new AllocaInst(AggTy, DL.getAllocaAddrSpace(), nullptr, Alignment,
1402                      Arg->getName(), &*Caller->begin()->begin());
1403   IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));
1404 
1405   // Uses of the argument in the function should use our new alloca
1406   // instead.
1407   return NewAlloca;
1408 }
1409 
1410 // Check whether this Value is used by a lifetime intrinsic.
1411 static bool isUsedByLifetimeMarker(Value *V) {
1412   for (User *U : V->users())
1413     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U))
1414       if (II->isLifetimeStartOrEnd())
1415         return true;
1416   return false;
1417 }
1418 
1419 // Check whether the given alloca already has
1420 // lifetime.start or lifetime.end intrinsics.
1421 static bool hasLifetimeMarkers(AllocaInst *AI) {
1422   Type *Ty = AI->getType();
1423   Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
1424                                        Ty->getPointerAddressSpace());
1425   if (Ty == Int8PtrTy)
1426     return isUsedByLifetimeMarker(AI);
1427 
1428   // Do a scan to find all the casts to i8*.
1429   for (User *U : AI->users()) {
1430     if (U->getType() != Int8PtrTy) continue;
1431     if (U->stripPointerCasts() != AI) continue;
1432     if (isUsedByLifetimeMarker(U))
1433       return true;
1434   }
1435   return false;
1436 }
1437 
1438 /// Return the result of AI->isStaticAlloca() if AI were moved to the entry
1439 /// block. Allocas used in inalloca calls and allocas of dynamic array size
1440 /// cannot be static.
1441 static bool allocaWouldBeStaticInEntry(const AllocaInst *AI ) {
1442   return isa<Constant>(AI->getArraySize()) && !AI->isUsedWithInAlloca();
1443 }
1444 
1445 /// Returns a DebugLoc for a new DILocation which is a clone of \p OrigDL
1446 /// inlined at \p InlinedAt. \p IANodes is an inlined-at cache.
1447 static DebugLoc inlineDebugLoc(DebugLoc OrigDL, DILocation *InlinedAt,
1448                                LLVMContext &Ctx,
1449                                DenseMap<const MDNode *, MDNode *> &IANodes) {
1450   auto IA = DebugLoc::appendInlinedAt(OrigDL, InlinedAt, Ctx, IANodes);
1451   return DebugLoc::get(OrigDL.getLine(), OrigDL.getCol(), OrigDL.getScope(),
1452                        IA);
1453 }
1454 
1455 /// Update inlined instructions' line numbers to
1456 /// to encode location where these instructions are inlined.
1457 static void fixupLineNumbers(Function *Fn, Function::iterator FI,
1458                              Instruction *TheCall, bool CalleeHasDebugInfo) {
1459   const DebugLoc &TheCallDL = TheCall->getDebugLoc();
1460   if (!TheCallDL)
1461     return;
1462 
1463   auto &Ctx = Fn->getContext();
1464   DILocation *InlinedAtNode = TheCallDL;
1465 
1466   // Create a unique call site, not to be confused with any other call from the
1467   // same location.
1468   InlinedAtNode = DILocation::getDistinct(
1469       Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
1470       InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());
1471 
1472   // Cache the inlined-at nodes as they're built so they are reused, without
1473   // this every instruction's inlined-at chain would become distinct from each
1474   // other.
1475   DenseMap<const MDNode *, MDNode *> IANodes;
1476 
1477   // Check if we are not generating inline line tables and want to use
1478   // the call site location instead.
1479   bool NoInlineLineTables = Fn->hasFnAttribute("no-inline-line-tables");
1480 
1481   for (; FI != Fn->end(); ++FI) {
1482     for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
1483          BI != BE; ++BI) {
1484       // Loop metadata needs to be updated so that the start and end locs
1485       // reference inlined-at locations.
1486       auto updateLoopInfoLoc = [&Ctx, &InlinedAtNode, &IANodes](
1487                                    const DILocation &Loc) -> DILocation * {
1488         return inlineDebugLoc(&Loc, InlinedAtNode, Ctx, IANodes).get();
1489       };
1490       updateLoopMetadataDebugLocations(*BI, updateLoopInfoLoc);
1491 
1492       if (!NoInlineLineTables)
1493         if (DebugLoc DL = BI->getDebugLoc()) {
1494           DebugLoc IDL =
1495               inlineDebugLoc(DL, InlinedAtNode, BI->getContext(), IANodes);
1496           BI->setDebugLoc(IDL);
1497           continue;
1498         }
1499 
1500       if (CalleeHasDebugInfo && !NoInlineLineTables)
1501         continue;
1502 
1503       // If the inlined instruction has no line number, or if inline info
1504       // is not being generated, make it look as if it originates from the call
1505       // location. This is important for ((__always_inline, __nodebug__))
1506       // functions which must use caller location for all instructions in their
1507       // function body.
1508 
1509       // Don't update static allocas, as they may get moved later.
1510       if (auto *AI = dyn_cast<AllocaInst>(BI))
1511         if (allocaWouldBeStaticInEntry(AI))
1512           continue;
1513 
1514       BI->setDebugLoc(TheCallDL);
1515     }
1516 
1517     // Remove debug info intrinsics if we're not keeping inline info.
1518     if (NoInlineLineTables) {
1519       BasicBlock::iterator BI = FI->begin();
1520       while (BI != FI->end()) {
1521         if (isa<DbgInfoIntrinsic>(BI)) {
1522           BI = BI->eraseFromParent();
1523           continue;
1524         }
1525         ++BI;
1526       }
1527     }
1528 
1529   }
1530 }
1531 
1532 /// Update the block frequencies of the caller after a callee has been inlined.
1533 ///
1534 /// Each block cloned into the caller has its block frequency scaled by the
1535 /// ratio of CallSiteFreq/CalleeEntryFreq. This ensures that the cloned copy of
1536 /// callee's entry block gets the same frequency as the callsite block and the
1537 /// relative frequencies of all cloned blocks remain the same after cloning.
1538 static void updateCallerBFI(BasicBlock *CallSiteBlock,
1539                             const ValueToValueMapTy &VMap,
1540                             BlockFrequencyInfo *CallerBFI,
1541                             BlockFrequencyInfo *CalleeBFI,
1542                             const BasicBlock &CalleeEntryBlock) {
1543   SmallPtrSet<BasicBlock *, 16> ClonedBBs;
1544   for (auto Entry : VMap) {
1545     if (!isa<BasicBlock>(Entry.first) || !Entry.second)
1546       continue;
1547     auto *OrigBB = cast<BasicBlock>(Entry.first);
1548     auto *ClonedBB = cast<BasicBlock>(Entry.second);
1549     uint64_t Freq = CalleeBFI->getBlockFreq(OrigBB).getFrequency();
1550     if (!ClonedBBs.insert(ClonedBB).second) {
1551       // Multiple blocks in the callee might get mapped to one cloned block in
1552       // the caller since we prune the callee as we clone it. When that happens,
1553       // we want to use the maximum among the original blocks' frequencies.
1554       uint64_t NewFreq = CallerBFI->getBlockFreq(ClonedBB).getFrequency();
1555       if (NewFreq > Freq)
1556         Freq = NewFreq;
1557     }
1558     CallerBFI->setBlockFreq(ClonedBB, Freq);
1559   }
1560   BasicBlock *EntryClone = cast<BasicBlock>(VMap.lookup(&CalleeEntryBlock));
1561   CallerBFI->setBlockFreqAndScale(
1562       EntryClone, CallerBFI->getBlockFreq(CallSiteBlock).getFrequency(),
1563       ClonedBBs);
1564 }
1565 
1566 /// Update the branch metadata for cloned call instructions.
1567 static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap,
1568                               const ProfileCount &CalleeEntryCount,
1569                               const CallBase &TheCall, ProfileSummaryInfo *PSI,
1570                               BlockFrequencyInfo *CallerBFI) {
1571   if (!CalleeEntryCount.hasValue() || CalleeEntryCount.isSynthetic() ||
1572       CalleeEntryCount.getCount() < 1)
1573     return;
1574   auto CallSiteCount = PSI ? PSI->getProfileCount(TheCall, CallerBFI) : None;
1575   int64_t CallCount =
1576       std::min(CallSiteCount.hasValue() ? CallSiteCount.getValue() : 0,
1577                CalleeEntryCount.getCount());
1578   updateProfileCallee(Callee, -CallCount, &VMap);
1579 }
1580 
1581 void llvm::updateProfileCallee(
1582     Function *Callee, int64_t entryDelta,
1583     const ValueMap<const Value *, WeakTrackingVH> *VMap) {
1584   auto CalleeCount = Callee->getEntryCount();
1585   if (!CalleeCount.hasValue())
1586     return;
1587 
1588   uint64_t priorEntryCount = CalleeCount.getCount();
1589   uint64_t newEntryCount;
1590 
1591   // Since CallSiteCount is an estimate, it could exceed the original callee
1592   // count and has to be set to 0 so guard against underflow.
1593   if (entryDelta < 0 && static_cast<uint64_t>(-entryDelta) > priorEntryCount)
1594     newEntryCount = 0;
1595   else
1596     newEntryCount = priorEntryCount + entryDelta;
1597 
1598   // During inlining ?
1599   if (VMap) {
1600     uint64_t cloneEntryCount = priorEntryCount - newEntryCount;
1601     for (auto Entry : *VMap)
1602       if (isa<CallInst>(Entry.first))
1603         if (auto *CI = dyn_cast_or_null<CallInst>(Entry.second))
1604           CI->updateProfWeight(cloneEntryCount, priorEntryCount);
1605   }
1606 
1607   if (entryDelta) {
1608     Callee->setEntryCount(newEntryCount);
1609 
1610     for (BasicBlock &BB : *Callee)
1611       // No need to update the callsite if it is pruned during inlining.
1612       if (!VMap || VMap->count(&BB))
1613         for (Instruction &I : BB)
1614           if (CallInst *CI = dyn_cast<CallInst>(&I))
1615             CI->updateProfWeight(newEntryCount, priorEntryCount);
1616   }
1617 }
1618 
1619 /// This function inlines the called function into the basic block of the
1620 /// caller. This returns false if it is not possible to inline this call.
1621 /// The program is still in a well defined state if this occurs though.
1622 ///
1623 /// Note that this only does one level of inlining.  For example, if the
1624 /// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
1625 /// exists in the instruction stream.  Similarly this will inline a recursive
1626 /// function by one level.
1627 llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
1628                                         AAResults *CalleeAAR,
1629                                         bool InsertLifetime,
1630                                         Function *ForwardVarArgsTo) {
1631   assert(CB.getParent() && CB.getFunction() && "Instruction not in function!");
1632 
1633   // FIXME: we don't inline callbr yet.
1634   if (isa<CallBrInst>(CB))
1635     return InlineResult::failure("We don't inline callbr yet.");
1636 
1637   // If IFI has any state in it, zap it before we fill it in.
1638   IFI.reset();
1639 
1640   Function *CalledFunc = CB.getCalledFunction();
1641   if (!CalledFunc ||               // Can't inline external function or indirect
1642       CalledFunc->isDeclaration()) // call!
1643     return InlineResult::failure("external or indirect");
1644 
1645   // The inliner does not know how to inline through calls with operand bundles
1646   // in general ...
1647   if (CB.hasOperandBundles()) {
1648     for (int i = 0, e = CB.getNumOperandBundles(); i != e; ++i) {
1649       uint32_t Tag = CB.getOperandBundleAt(i).getTagID();
1650       // ... but it knows how to inline through "deopt" operand bundles ...
1651       if (Tag == LLVMContext::OB_deopt)
1652         continue;
1653       // ... and "funclet" operand bundles.
1654       if (Tag == LLVMContext::OB_funclet)
1655         continue;
1656 
1657       return InlineResult::failure("unsupported operand bundle");
1658     }
1659   }
1660 
1661   // If the call to the callee cannot throw, set the 'nounwind' flag on any
1662   // calls that we inline.
1663   bool MarkNoUnwind = CB.doesNotThrow();
1664 
1665   BasicBlock *OrigBB = CB.getParent();
1666   Function *Caller = OrigBB->getParent();
1667 
1668   // GC poses two hazards to inlining, which only occur when the callee has GC:
1669   //  1. If the caller has no GC, then the callee's GC must be propagated to the
1670   //     caller.
1671   //  2. If the caller has a differing GC, it is invalid to inline.
1672   if (CalledFunc->hasGC()) {
1673     if (!Caller->hasGC())
1674       Caller->setGC(CalledFunc->getGC());
1675     else if (CalledFunc->getGC() != Caller->getGC())
1676       return InlineResult::failure("incompatible GC");
1677   }
1678 
1679   // Inlining a function that explicitly should not have a stack protector may
1680   // break the code if inlined into a function that does have a stack
1681   // protector.
1682   if (LLVM_UNLIKELY(Caller->hasFnAttribute(Attribute::NoStackProtect)))
1683     if (CalledFunc->hasFnAttribute(Attribute::StackProtect) ||
1684         CalledFunc->hasFnAttribute(Attribute::StackProtectStrong) ||
1685         CalledFunc->hasFnAttribute(Attribute::StackProtectReq))
1686       return InlineResult::failure(
1687           "stack protected callee but caller requested no stack protector");
1688   if (LLVM_UNLIKELY(CalledFunc->hasFnAttribute(Attribute::NoStackProtect)))
1689     if (Caller->hasFnAttribute(Attribute::StackProtect) ||
1690         Caller->hasFnAttribute(Attribute::StackProtectStrong) ||
1691         Caller->hasFnAttribute(Attribute::StackProtectReq))
1692       return InlineResult::failure(
1693           "stack protected caller but callee requested no stack protector");
1694 
1695   // Get the personality function from the callee if it contains a landing pad.
1696   Constant *CalledPersonality =
1697       CalledFunc->hasPersonalityFn()
1698           ? CalledFunc->getPersonalityFn()->stripPointerCasts()
1699           : nullptr;
1700 
1701   // Find the personality function used by the landing pads of the caller. If it
1702   // exists, then check to see that it matches the personality function used in
1703   // the callee.
1704   Constant *CallerPersonality =
1705       Caller->hasPersonalityFn()
1706           ? Caller->getPersonalityFn()->stripPointerCasts()
1707           : nullptr;
1708   if (CalledPersonality) {
1709     if (!CallerPersonality)
1710       Caller->setPersonalityFn(CalledPersonality);
1711     // If the personality functions match, then we can perform the
1712     // inlining. Otherwise, we can't inline.
1713     // TODO: This isn't 100% true. Some personality functions are proper
1714     //       supersets of others and can be used in place of the other.
1715     else if (CalledPersonality != CallerPersonality)
1716       return InlineResult::failure("incompatible personality");
1717   }
1718 
1719   // We need to figure out which funclet the callsite was in so that we may
1720   // properly nest the callee.
1721   Instruction *CallSiteEHPad = nullptr;
1722   if (CallerPersonality) {
1723     EHPersonality Personality = classifyEHPersonality(CallerPersonality);
1724     if (isScopedEHPersonality(Personality)) {
1725       Optional<OperandBundleUse> ParentFunclet =
1726           CB.getOperandBundle(LLVMContext::OB_funclet);
1727       if (ParentFunclet)
1728         CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());
1729 
1730       // OK, the inlining site is legal.  What about the target function?
1731 
1732       if (CallSiteEHPad) {
1733         if (Personality == EHPersonality::MSVC_CXX) {
1734           // The MSVC personality cannot tolerate catches getting inlined into
1735           // cleanup funclets.
1736           if (isa<CleanupPadInst>(CallSiteEHPad)) {
1737             // Ok, the call site is within a cleanuppad.  Let's check the callee
1738             // for catchpads.
1739             for (const BasicBlock &CalledBB : *CalledFunc) {
1740               if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))
1741                 return InlineResult::failure("catch in cleanup funclet");
1742             }
1743           }
1744         } else if (isAsynchronousEHPersonality(Personality)) {
1745           // SEH is even less tolerant, there may not be any sort of exceptional
1746           // funclet in the callee.
1747           for (const BasicBlock &CalledBB : *CalledFunc) {
1748             if (CalledBB.isEHPad())
1749               return InlineResult::failure("SEH in cleanup funclet");
1750           }
1751         }
1752       }
1753     }
1754   }
1755 
1756   // Determine if we are dealing with a call in an EHPad which does not unwind
1757   // to caller.
1758   bool EHPadForCallUnwindsLocally = false;
1759   if (CallSiteEHPad && isa<CallInst>(CB)) {
1760     UnwindDestMemoTy FuncletUnwindMap;
1761     Value *CallSiteUnwindDestToken =
1762         getUnwindDestToken(CallSiteEHPad, FuncletUnwindMap);
1763 
1764     EHPadForCallUnwindsLocally =
1765         CallSiteUnwindDestToken &&
1766         !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
1767   }
1768 
1769   // Get an iterator to the last basic block in the function, which will have
1770   // the new function inlined after it.
1771   Function::iterator LastBlock = --Caller->end();
1772 
1773   // Make sure to capture all of the return instructions from the cloned
1774   // function.
1775   SmallVector<ReturnInst*, 8> Returns;
1776   ClonedCodeInfo InlinedFunctionInfo;
1777   Function::iterator FirstNewBlock;
1778 
1779   { // Scope to destroy VMap after cloning.
1780     ValueToValueMapTy VMap;
1781     // Keep a list of pair (dst, src) to emit byval initializations.
1782     SmallVector<std::pair<Value*, Value*>, 4> ByValInit;
1783 
1784     auto &DL = Caller->getParent()->getDataLayout();
1785 
1786     // Calculate the vector of arguments to pass into the function cloner, which
1787     // matches up the formal to the actual argument values.
1788     auto AI = CB.arg_begin();
1789     unsigned ArgNo = 0;
1790     for (Function::arg_iterator I = CalledFunc->arg_begin(),
1791          E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
1792       Value *ActualArg = *AI;
1793 
1794       // When byval arguments actually inlined, we need to make the copy implied
1795       // by them explicit.  However, we don't do this if the callee is readonly
1796       // or readnone, because the copy would be unneeded: the callee doesn't
1797       // modify the struct.
1798       if (CB.isByValArgument(ArgNo)) {
1799         ActualArg = HandleByValArgument(ActualArg, &CB, CalledFunc, IFI,
1800                                         CalledFunc->getParamAlignment(ArgNo));
1801         if (ActualArg != *AI)
1802           ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI));
1803       }
1804 
1805       VMap[&*I] = ActualArg;
1806     }
1807 
1808     // TODO: Remove this when users have been updated to the assume bundles.
1809     // Add alignment assumptions if necessary. We do this before the inlined
1810     // instructions are actually cloned into the caller so that we can easily
1811     // check what will be known at the start of the inlined code.
1812     AddAlignmentAssumptions(CB, IFI);
1813 
1814     AssumptionCache *AC =
1815         IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
1816 
1817     /// Preserve all attributes on of the call and its parameters.
1818     salvageKnowledge(&CB, AC);
1819 
1820     // We want the inliner to prune the code as it copies.  We would LOVE to
1821     // have no dead or constant instructions leftover after inlining occurs
1822     // (which can happen, e.g., because an argument was constant), but we'll be
1823     // happy with whatever the cloner can do.
1824     CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
1825                               /*ModuleLevelChanges=*/false, Returns, ".i",
1826                               &InlinedFunctionInfo, &CB);
1827     // Remember the first block that is newly cloned over.
1828     FirstNewBlock = LastBlock; ++FirstNewBlock;
1829 
1830     if (IFI.CallerBFI != nullptr && IFI.CalleeBFI != nullptr)
1831       // Update the BFI of blocks cloned into the caller.
1832       updateCallerBFI(OrigBB, VMap, IFI.CallerBFI, IFI.CalleeBFI,
1833                       CalledFunc->front());
1834 
1835     updateCallProfile(CalledFunc, VMap, CalledFunc->getEntryCount(), CB,
1836                       IFI.PSI, IFI.CallerBFI);
1837 
1838     // Inject byval arguments initialization.
1839     for (std::pair<Value*, Value*> &Init : ByValInit)
1840       HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(),
1841                               &*FirstNewBlock, IFI);
1842 
1843     Optional<OperandBundleUse> ParentDeopt =
1844         CB.getOperandBundle(LLVMContext::OB_deopt);
1845     if (ParentDeopt) {
1846       SmallVector<OperandBundleDef, 2> OpDefs;
1847 
1848       for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) {
1849         CallBase *ICS = dyn_cast_or_null<CallBase>(VH);
1850         if (!ICS)
1851           continue; // instruction was DCE'd or RAUW'ed to undef
1852 
1853         OpDefs.clear();
1854 
1855         OpDefs.reserve(ICS->getNumOperandBundles());
1856 
1857         for (unsigned COBi = 0, COBe = ICS->getNumOperandBundles(); COBi < COBe;
1858              ++COBi) {
1859           auto ChildOB = ICS->getOperandBundleAt(COBi);
1860           if (ChildOB.getTagID() != LLVMContext::OB_deopt) {
1861             // If the inlined call has other operand bundles, let them be
1862             OpDefs.emplace_back(ChildOB);
1863             continue;
1864           }
1865 
1866           // It may be useful to separate this logic (of handling operand
1867           // bundles) out to a separate "policy" component if this gets crowded.
1868           // Prepend the parent's deoptimization continuation to the newly
1869           // inlined call's deoptimization continuation.
1870           std::vector<Value *> MergedDeoptArgs;
1871           MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
1872                                   ChildOB.Inputs.size());
1873 
1874           MergedDeoptArgs.insert(MergedDeoptArgs.end(),
1875                                  ParentDeopt->Inputs.begin(),
1876                                  ParentDeopt->Inputs.end());
1877           MergedDeoptArgs.insert(MergedDeoptArgs.end(), ChildOB.Inputs.begin(),
1878                                  ChildOB.Inputs.end());
1879 
1880           OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs));
1881         }
1882 
1883         Instruction *NewI = CallBase::Create(ICS, OpDefs, ICS);
1884 
1885         // Note: the RAUW does the appropriate fixup in VMap, so we need to do
1886         // this even if the call returns void.
1887         ICS->replaceAllUsesWith(NewI);
1888 
1889         VH = nullptr;
1890         ICS->eraseFromParent();
1891       }
1892     }
1893 
1894     // Update the callgraph if requested.
1895     if (IFI.CG)
1896       UpdateCallGraphAfterInlining(CB, FirstNewBlock, VMap, IFI);
1897 
1898     // For 'nodebug' functions, the associated DISubprogram is always null.
1899     // Conservatively avoid propagating the callsite debug location to
1900     // instructions inlined from a function whose DISubprogram is not null.
1901     fixupLineNumbers(Caller, FirstNewBlock, &CB,
1902                      CalledFunc->getSubprogram() != nullptr);
1903 
1904     // Clone existing noalias metadata if necessary.
1905     CloneAliasScopeMetadata(CB, VMap);
1906 
1907     // Add noalias metadata if necessary.
1908     AddAliasScopeMetadata(CB, VMap, DL, CalleeAAR);
1909 
1910     // Clone return attributes on the callsite into the calls within the inlined
1911     // function which feed into its return value.
1912     AddReturnAttributes(CB, VMap);
1913 
1914     // Propagate llvm.mem.parallel_loop_access if necessary.
1915     PropagateParallelLoopAccessMetadata(CB, VMap);
1916 
1917     // Register any cloned assumptions.
1918     if (IFI.GetAssumptionCache)
1919       for (BasicBlock &NewBlock :
1920            make_range(FirstNewBlock->getIterator(), Caller->end()))
1921         for (Instruction &I : NewBlock)
1922           if (auto *II = dyn_cast<IntrinsicInst>(&I))
1923             if (II->getIntrinsicID() == Intrinsic::assume)
1924               IFI.GetAssumptionCache(*Caller).registerAssumption(II);
1925   }
1926 
1927   // If there are any alloca instructions in the block that used to be the entry
1928   // block for the callee, move them to the entry block of the caller.  First
1929   // calculate which instruction they should be inserted before.  We insert the
1930   // instructions at the end of the current alloca list.
1931   {
1932     BasicBlock::iterator InsertPoint = Caller->begin()->begin();
1933     for (BasicBlock::iterator I = FirstNewBlock->begin(),
1934          E = FirstNewBlock->end(); I != E; ) {
1935       AllocaInst *AI = dyn_cast<AllocaInst>(I++);
1936       if (!AI) continue;
1937 
1938       // If the alloca is now dead, remove it.  This often occurs due to code
1939       // specialization.
1940       if (AI->use_empty()) {
1941         AI->eraseFromParent();
1942         continue;
1943       }
1944 
1945       if (!allocaWouldBeStaticInEntry(AI))
1946         continue;
1947 
1948       // Keep track of the static allocas that we inline into the caller.
1949       IFI.StaticAllocas.push_back(AI);
1950 
1951       // Scan for the block of allocas that we can move over, and move them
1952       // all at once.
1953       while (isa<AllocaInst>(I) &&
1954              !cast<AllocaInst>(I)->use_empty() &&
1955              allocaWouldBeStaticInEntry(cast<AllocaInst>(I))) {
1956         IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
1957         ++I;
1958       }
1959 
1960       // Transfer all of the allocas over in a block.  Using splice means
1961       // that the instructions aren't removed from the symbol table, then
1962       // reinserted.
1963       Caller->getEntryBlock().getInstList().splice(
1964           InsertPoint, FirstNewBlock->getInstList(), AI->getIterator(), I);
1965     }
1966   }
1967 
1968   SmallVector<Value*,4> VarArgsToForward;
1969   SmallVector<AttributeSet, 4> VarArgsAttrs;
1970   for (unsigned i = CalledFunc->getFunctionType()->getNumParams();
1971        i < CB.getNumArgOperands(); i++) {
1972     VarArgsToForward.push_back(CB.getArgOperand(i));
1973     VarArgsAttrs.push_back(CB.getAttributes().getParamAttributes(i));
1974   }
1975 
1976   bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false;
1977   if (InlinedFunctionInfo.ContainsCalls) {
1978     CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
1979     if (CallInst *CI = dyn_cast<CallInst>(&CB))
1980       CallSiteTailKind = CI->getTailCallKind();
1981 
1982     // For inlining purposes, the "notail" marker is the same as no marker.
1983     if (CallSiteTailKind == CallInst::TCK_NoTail)
1984       CallSiteTailKind = CallInst::TCK_None;
1985 
1986     for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
1987          ++BB) {
1988       for (auto II = BB->begin(); II != BB->end();) {
1989         Instruction &I = *II++;
1990         CallInst *CI = dyn_cast<CallInst>(&I);
1991         if (!CI)
1992           continue;
1993 
1994         // Forward varargs from inlined call site to calls to the
1995         // ForwardVarArgsTo function, if requested, and to musttail calls.
1996         if (!VarArgsToForward.empty() &&
1997             ((ForwardVarArgsTo &&
1998               CI->getCalledFunction() == ForwardVarArgsTo) ||
1999              CI->isMustTailCall())) {
2000           // Collect attributes for non-vararg parameters.
2001           AttributeList Attrs = CI->getAttributes();
2002           SmallVector<AttributeSet, 8> ArgAttrs;
2003           if (!Attrs.isEmpty() || !VarArgsAttrs.empty()) {
2004             for (unsigned ArgNo = 0;
2005                  ArgNo < CI->getFunctionType()->getNumParams(); ++ArgNo)
2006               ArgAttrs.push_back(Attrs.getParamAttributes(ArgNo));
2007           }
2008 
2009           // Add VarArg attributes.
2010           ArgAttrs.append(VarArgsAttrs.begin(), VarArgsAttrs.end());
2011           Attrs = AttributeList::get(CI->getContext(), Attrs.getFnAttributes(),
2012                                      Attrs.getRetAttributes(), ArgAttrs);
2013           // Add VarArgs to existing parameters.
2014           SmallVector<Value *, 6> Params(CI->arg_operands());
2015           Params.append(VarArgsToForward.begin(), VarArgsToForward.end());
2016           CallInst *NewCI = CallInst::Create(
2017               CI->getFunctionType(), CI->getCalledOperand(), Params, "", CI);
2018           NewCI->setDebugLoc(CI->getDebugLoc());
2019           NewCI->setAttributes(Attrs);
2020           NewCI->setCallingConv(CI->getCallingConv());
2021           CI->replaceAllUsesWith(NewCI);
2022           CI->eraseFromParent();
2023           CI = NewCI;
2024         }
2025 
2026         if (Function *F = CI->getCalledFunction())
2027           InlinedDeoptimizeCalls |=
2028               F->getIntrinsicID() == Intrinsic::experimental_deoptimize;
2029 
2030         // We need to reduce the strength of any inlined tail calls.  For
2031         // musttail, we have to avoid introducing potential unbounded stack
2032         // growth.  For example, if functions 'f' and 'g' are mutually recursive
2033         // with musttail, we can inline 'g' into 'f' so long as we preserve
2034         // musttail on the cloned call to 'f'.  If either the inlined call site
2035         // or the cloned call site is *not* musttail, the program already has
2036         // one frame of stack growth, so it's safe to remove musttail.  Here is
2037         // a table of example transformations:
2038         //
2039         //    f -> musttail g -> musttail f  ==>  f -> musttail f
2040         //    f -> musttail g ->     tail f  ==>  f ->     tail f
2041         //    f ->          g -> musttail f  ==>  f ->          f
2042         //    f ->          g ->     tail f  ==>  f ->          f
2043         //
2044         // Inlined notail calls should remain notail calls.
2045         CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
2046         if (ChildTCK != CallInst::TCK_NoTail)
2047           ChildTCK = std::min(CallSiteTailKind, ChildTCK);
2048         CI->setTailCallKind(ChildTCK);
2049         InlinedMustTailCalls |= CI->isMustTailCall();
2050 
2051         // Calls inlined through a 'nounwind' call site should be marked
2052         // 'nounwind'.
2053         if (MarkNoUnwind)
2054           CI->setDoesNotThrow();
2055       }
2056     }
2057   }
2058 
2059   // Leave lifetime markers for the static alloca's, scoping them to the
2060   // function we just inlined.
2061   if (InsertLifetime && !IFI.StaticAllocas.empty()) {
2062     IRBuilder<> builder(&FirstNewBlock->front());
2063     for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
2064       AllocaInst *AI = IFI.StaticAllocas[ai];
2065       // Don't mark swifterror allocas. They can't have bitcast uses.
2066       if (AI->isSwiftError())
2067         continue;
2068 
2069       // If the alloca is already scoped to something smaller than the whole
2070       // function then there's no need to add redundant, less accurate markers.
2071       if (hasLifetimeMarkers(AI))
2072         continue;
2073 
2074       // Try to determine the size of the allocation.
2075       ConstantInt *AllocaSize = nullptr;
2076       if (ConstantInt *AIArraySize =
2077           dyn_cast<ConstantInt>(AI->getArraySize())) {
2078         auto &DL = Caller->getParent()->getDataLayout();
2079         Type *AllocaType = AI->getAllocatedType();
2080         TypeSize AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
2081         uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
2082 
2083         // Don't add markers for zero-sized allocas.
2084         if (AllocaArraySize == 0)
2085           continue;
2086 
2087         // Check that array size doesn't saturate uint64_t and doesn't
2088         // overflow when it's multiplied by type size.
2089         if (!AllocaTypeSize.isScalable() &&
2090             AllocaArraySize != std::numeric_limits<uint64_t>::max() &&
2091             std::numeric_limits<uint64_t>::max() / AllocaArraySize >=
2092                 AllocaTypeSize.getFixedSize()) {
2093           AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
2094                                         AllocaArraySize * AllocaTypeSize);
2095         }
2096       }
2097 
2098       builder.CreateLifetimeStart(AI, AllocaSize);
2099       for (ReturnInst *RI : Returns) {
2100         // Don't insert llvm.lifetime.end calls between a musttail or deoptimize
2101         // call and a return.  The return kills all local allocas.
2102         if (InlinedMustTailCalls &&
2103             RI->getParent()->getTerminatingMustTailCall())
2104           continue;
2105         if (InlinedDeoptimizeCalls &&
2106             RI->getParent()->getTerminatingDeoptimizeCall())
2107           continue;
2108         IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
2109       }
2110     }
2111   }
2112 
2113   // If the inlined code contained dynamic alloca instructions, wrap the inlined
2114   // code with llvm.stacksave/llvm.stackrestore intrinsics.
2115   if (InlinedFunctionInfo.ContainsDynamicAllocas) {
2116     Module *M = Caller->getParent();
2117     // Get the two intrinsics we care about.
2118     Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
2119     Function *StackRestore=Intrinsic::getDeclaration(M,Intrinsic::stackrestore);
2120 
2121     // Insert the llvm.stacksave.
2122     CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
2123                              .CreateCall(StackSave, {}, "savedstack");
2124 
2125     // Insert a call to llvm.stackrestore before any return instructions in the
2126     // inlined function.
2127     for (ReturnInst *RI : Returns) {
2128       // Don't insert llvm.stackrestore calls between a musttail or deoptimize
2129       // call and a return.  The return will restore the stack pointer.
2130       if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
2131         continue;
2132       if (InlinedDeoptimizeCalls && RI->getParent()->getTerminatingDeoptimizeCall())
2133         continue;
2134       IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
2135     }
2136   }
2137 
2138   // If we are inlining for an invoke instruction, we must make sure to rewrite
2139   // any call instructions into invoke instructions.  This is sensitive to which
2140   // funclet pads were top-level in the inlinee, so must be done before
2141   // rewriting the "parent pad" links.
2142   if (auto *II = dyn_cast<InvokeInst>(&CB)) {
2143     BasicBlock *UnwindDest = II->getUnwindDest();
2144     Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI();
2145     if (isa<LandingPadInst>(FirstNonPHI)) {
2146       HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo);
2147     } else {
2148       HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo);
2149     }
2150   }
2151 
2152   // Update the lexical scopes of the new funclets and callsites.
2153   // Anything that had 'none' as its parent is now nested inside the callsite's
2154   // EHPad.
2155 
2156   if (CallSiteEHPad) {
2157     for (Function::iterator BB = FirstNewBlock->getIterator(),
2158                             E = Caller->end();
2159          BB != E; ++BB) {
2160       // Add bundle operands to any top-level call sites.
2161       SmallVector<OperandBundleDef, 1> OpBundles;
2162       for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E;) {
2163         CallBase *I = dyn_cast<CallBase>(&*BBI++);
2164         if (!I)
2165           continue;
2166 
2167         // Skip call sites which are nounwind intrinsics.
2168         auto *CalledFn =
2169             dyn_cast<Function>(I->getCalledOperand()->stripPointerCasts());
2170         if (CalledFn && CalledFn->isIntrinsic() && I->doesNotThrow())
2171           continue;
2172 
2173         // Skip call sites which already have a "funclet" bundle.
2174         if (I->getOperandBundle(LLVMContext::OB_funclet))
2175           continue;
2176 
2177         I->getOperandBundlesAsDefs(OpBundles);
2178         OpBundles.emplace_back("funclet", CallSiteEHPad);
2179 
2180         Instruction *NewInst = CallBase::Create(I, OpBundles, I);
2181         NewInst->takeName(I);
2182         I->replaceAllUsesWith(NewInst);
2183         I->eraseFromParent();
2184 
2185         OpBundles.clear();
2186       }
2187 
2188       // It is problematic if the inlinee has a cleanupret which unwinds to
2189       // caller and we inline it into a call site which doesn't unwind but into
2190       // an EH pad that does.  Such an edge must be dynamically unreachable.
2191       // As such, we replace the cleanupret with unreachable.
2192       if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
2193         if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
2194           changeToUnreachable(CleanupRet, /*UseLLVMTrap=*/false);
2195 
2196       Instruction *I = BB->getFirstNonPHI();
2197       if (!I->isEHPad())
2198         continue;
2199 
2200       if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
2201         if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
2202           CatchSwitch->setParentPad(CallSiteEHPad);
2203       } else {
2204         auto *FPI = cast<FuncletPadInst>(I);
2205         if (isa<ConstantTokenNone>(FPI->getParentPad()))
2206           FPI->setParentPad(CallSiteEHPad);
2207       }
2208     }
2209   }
2210 
2211   if (InlinedDeoptimizeCalls) {
2212     // We need to at least remove the deoptimizing returns from the Return set,
2213     // so that the control flow from those returns does not get merged into the
2214     // caller (but terminate it instead).  If the caller's return type does not
2215     // match the callee's return type, we also need to change the return type of
2216     // the intrinsic.
2217     if (Caller->getReturnType() == CB.getType()) {
2218       auto NewEnd = llvm::remove_if(Returns, [](ReturnInst *RI) {
2219         return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr;
2220       });
2221       Returns.erase(NewEnd, Returns.end());
2222     } else {
2223       SmallVector<ReturnInst *, 8> NormalReturns;
2224       Function *NewDeoptIntrinsic = Intrinsic::getDeclaration(
2225           Caller->getParent(), Intrinsic::experimental_deoptimize,
2226           {Caller->getReturnType()});
2227 
2228       for (ReturnInst *RI : Returns) {
2229         CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall();
2230         if (!DeoptCall) {
2231           NormalReturns.push_back(RI);
2232           continue;
2233         }
2234 
2235         // The calling convention on the deoptimize call itself may be bogus,
2236         // since the code we're inlining may have undefined behavior (and may
2237         // never actually execute at runtime); but all
2238         // @llvm.experimental.deoptimize declarations have to have the same
2239         // calling convention in a well-formed module.
2240         auto CallingConv = DeoptCall->getCalledFunction()->getCallingConv();
2241         NewDeoptIntrinsic->setCallingConv(CallingConv);
2242         auto *CurBB = RI->getParent();
2243         RI->eraseFromParent();
2244 
2245         SmallVector<Value *, 4> CallArgs(DeoptCall->arg_begin(),
2246                                          DeoptCall->arg_end());
2247 
2248         SmallVector<OperandBundleDef, 1> OpBundles;
2249         DeoptCall->getOperandBundlesAsDefs(OpBundles);
2250         DeoptCall->eraseFromParent();
2251         assert(!OpBundles.empty() &&
2252                "Expected at least the deopt operand bundle");
2253 
2254         IRBuilder<> Builder(CurBB);
2255         CallInst *NewDeoptCall =
2256             Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
2257         NewDeoptCall->setCallingConv(CallingConv);
2258         if (NewDeoptCall->getType()->isVoidTy())
2259           Builder.CreateRetVoid();
2260         else
2261           Builder.CreateRet(NewDeoptCall);
2262       }
2263 
2264       // Leave behind the normal returns so we can merge control flow.
2265       std::swap(Returns, NormalReturns);
2266     }
2267   }
2268 
2269   // Handle any inlined musttail call sites.  In order for a new call site to be
2270   // musttail, the source of the clone and the inlined call site must have been
2271   // musttail.  Therefore it's safe to return without merging control into the
2272   // phi below.
2273   if (InlinedMustTailCalls) {
2274     // Check if we need to bitcast the result of any musttail calls.
2275     Type *NewRetTy = Caller->getReturnType();
2276     bool NeedBitCast = !CB.use_empty() && CB.getType() != NewRetTy;
2277 
2278     // Handle the returns preceded by musttail calls separately.
2279     SmallVector<ReturnInst *, 8> NormalReturns;
2280     for (ReturnInst *RI : Returns) {
2281       CallInst *ReturnedMustTail =
2282           RI->getParent()->getTerminatingMustTailCall();
2283       if (!ReturnedMustTail) {
2284         NormalReturns.push_back(RI);
2285         continue;
2286       }
2287       if (!NeedBitCast)
2288         continue;
2289 
2290       // Delete the old return and any preceding bitcast.
2291       BasicBlock *CurBB = RI->getParent();
2292       auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
2293       RI->eraseFromParent();
2294       if (OldCast)
2295         OldCast->eraseFromParent();
2296 
2297       // Insert a new bitcast and return with the right type.
2298       IRBuilder<> Builder(CurBB);
2299       Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
2300     }
2301 
2302     // Leave behind the normal returns so we can merge control flow.
2303     std::swap(Returns, NormalReturns);
2304   }
2305 
2306   // Now that all of the transforms on the inlined code have taken place but
2307   // before we splice the inlined code into the CFG and lose track of which
2308   // blocks were actually inlined, collect the call sites. We only do this if
2309   // call graph updates weren't requested, as those provide value handle based
2310   // tracking of inlined call sites instead.
2311   if (InlinedFunctionInfo.ContainsCalls && !IFI.CG) {
2312     // Otherwise just collect the raw call sites that were inlined.
2313     for (BasicBlock &NewBB :
2314          make_range(FirstNewBlock->getIterator(), Caller->end()))
2315       for (Instruction &I : NewBB)
2316         if (auto *CB = dyn_cast<CallBase>(&I))
2317           IFI.InlinedCallSites.push_back(CB);
2318   }
2319 
2320   // If we cloned in _exactly one_ basic block, and if that block ends in a
2321   // return instruction, we splice the body of the inlined callee directly into
2322   // the calling basic block.
2323   if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
2324     // Move all of the instructions right before the call.
2325     OrigBB->getInstList().splice(CB.getIterator(), FirstNewBlock->getInstList(),
2326                                  FirstNewBlock->begin(), FirstNewBlock->end());
2327     // Remove the cloned basic block.
2328     Caller->getBasicBlockList().pop_back();
2329 
2330     // If the call site was an invoke instruction, add a branch to the normal
2331     // destination.
2332     if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
2333       BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), &CB);
2334       NewBr->setDebugLoc(Returns[0]->getDebugLoc());
2335     }
2336 
2337     // If the return instruction returned a value, replace uses of the call with
2338     // uses of the returned value.
2339     if (!CB.use_empty()) {
2340       ReturnInst *R = Returns[0];
2341       if (&CB == R->getReturnValue())
2342         CB.replaceAllUsesWith(UndefValue::get(CB.getType()));
2343       else
2344         CB.replaceAllUsesWith(R->getReturnValue());
2345     }
2346     // Since we are now done with the Call/Invoke, we can delete it.
2347     CB.eraseFromParent();
2348 
2349     // Since we are now done with the return instruction, delete it also.
2350     Returns[0]->eraseFromParent();
2351 
2352     // We are now done with the inlining.
2353     return InlineResult::success();
2354   }
2355 
2356   // Otherwise, we have the normal case, of more than one block to inline or
2357   // multiple return sites.
2358 
2359   // We want to clone the entire callee function into the hole between the
2360   // "starter" and "ender" blocks.  How we accomplish this depends on whether
2361   // this is an invoke instruction or a call instruction.
2362   BasicBlock *AfterCallBB;
2363   BranchInst *CreatedBranchToNormalDest = nullptr;
2364   if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
2365 
2366     // Add an unconditional branch to make this look like the CallInst case...
2367     CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), &CB);
2368 
2369     // Split the basic block.  This guarantees that no PHI nodes will have to be
2370     // updated due to new incoming edges, and make the invoke case more
2371     // symmetric to the call case.
2372     AfterCallBB =
2373         OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(),
2374                                 CalledFunc->getName() + ".exit");
2375 
2376   } else { // It's a call
2377     // If this is a call instruction, we need to split the basic block that
2378     // the call lives in.
2379     //
2380     AfterCallBB = OrigBB->splitBasicBlock(CB.getIterator(),
2381                                           CalledFunc->getName() + ".exit");
2382   }
2383 
2384   if (IFI.CallerBFI) {
2385     // Copy original BB's block frequency to AfterCallBB
2386     IFI.CallerBFI->setBlockFreq(
2387         AfterCallBB, IFI.CallerBFI->getBlockFreq(OrigBB).getFrequency());
2388   }
2389 
2390   // Change the branch that used to go to AfterCallBB to branch to the first
2391   // basic block of the inlined function.
2392   //
2393   Instruction *Br = OrigBB->getTerminator();
2394   assert(Br && Br->getOpcode() == Instruction::Br &&
2395          "splitBasicBlock broken!");
2396   Br->setOperand(0, &*FirstNewBlock);
2397 
2398   // Now that the function is correct, make it a little bit nicer.  In
2399   // particular, move the basic blocks inserted from the end of the function
2400   // into the space made by splitting the source basic block.
2401   Caller->getBasicBlockList().splice(AfterCallBB->getIterator(),
2402                                      Caller->getBasicBlockList(), FirstNewBlock,
2403                                      Caller->end());
2404 
2405   // Handle all of the return instructions that we just cloned in, and eliminate
2406   // any users of the original call/invoke instruction.
2407   Type *RTy = CalledFunc->getReturnType();
2408 
2409   PHINode *PHI = nullptr;
2410   if (Returns.size() > 1) {
2411     // The PHI node should go at the front of the new basic block to merge all
2412     // possible incoming values.
2413     if (!CB.use_empty()) {
2414       PHI = PHINode::Create(RTy, Returns.size(), CB.getName(),
2415                             &AfterCallBB->front());
2416       // Anything that used the result of the function call should now use the
2417       // PHI node as their operand.
2418       CB.replaceAllUsesWith(PHI);
2419     }
2420 
2421     // Loop over all of the return instructions adding entries to the PHI node
2422     // as appropriate.
2423     if (PHI) {
2424       for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2425         ReturnInst *RI = Returns[i];
2426         assert(RI->getReturnValue()->getType() == PHI->getType() &&
2427                "Ret value not consistent in function!");
2428         PHI->addIncoming(RI->getReturnValue(), RI->getParent());
2429       }
2430     }
2431 
2432     // Add a branch to the merge points and remove return instructions.
2433     DebugLoc Loc;
2434     for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2435       ReturnInst *RI = Returns[i];
2436       BranchInst* BI = BranchInst::Create(AfterCallBB, RI);
2437       Loc = RI->getDebugLoc();
2438       BI->setDebugLoc(Loc);
2439       RI->eraseFromParent();
2440     }
2441     // We need to set the debug location to *somewhere* inside the
2442     // inlined function. The line number may be nonsensical, but the
2443     // instruction will at least be associated with the right
2444     // function.
2445     if (CreatedBranchToNormalDest)
2446       CreatedBranchToNormalDest->setDebugLoc(Loc);
2447   } else if (!Returns.empty()) {
2448     // Otherwise, if there is exactly one return value, just replace anything
2449     // using the return value of the call with the computed value.
2450     if (!CB.use_empty()) {
2451       if (&CB == Returns[0]->getReturnValue())
2452         CB.replaceAllUsesWith(UndefValue::get(CB.getType()));
2453       else
2454         CB.replaceAllUsesWith(Returns[0]->getReturnValue());
2455     }
2456 
2457     // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
2458     BasicBlock *ReturnBB = Returns[0]->getParent();
2459     ReturnBB->replaceAllUsesWith(AfterCallBB);
2460 
2461     // Splice the code from the return block into the block that it will return
2462     // to, which contains the code that was after the call.
2463     AfterCallBB->getInstList().splice(AfterCallBB->begin(),
2464                                       ReturnBB->getInstList());
2465 
2466     if (CreatedBranchToNormalDest)
2467       CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());
2468 
2469     // Delete the return instruction now and empty ReturnBB now.
2470     Returns[0]->eraseFromParent();
2471     ReturnBB->eraseFromParent();
2472   } else if (!CB.use_empty()) {
2473     // No returns, but something is using the return value of the call.  Just
2474     // nuke the result.
2475     CB.replaceAllUsesWith(UndefValue::get(CB.getType()));
2476   }
2477 
2478   // Since we are now done with the Call/Invoke, we can delete it.
2479   CB.eraseFromParent();
2480 
2481   // If we inlined any musttail calls and the original return is now
2482   // unreachable, delete it.  It can only contain a bitcast and ret.
2483   if (InlinedMustTailCalls && pred_begin(AfterCallBB) == pred_end(AfterCallBB))
2484     AfterCallBB->eraseFromParent();
2485 
2486   // We should always be able to fold the entry block of the function into the
2487   // single predecessor of the block...
2488   assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
2489   BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);
2490 
2491   // Splice the code entry block into calling block, right before the
2492   // unconditional branch.
2493   CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes
2494   OrigBB->getInstList().splice(Br->getIterator(), CalleeEntry->getInstList());
2495 
2496   // Remove the unconditional branch.
2497   OrigBB->getInstList().erase(Br);
2498 
2499   // Now we can remove the CalleeEntry block, which is now empty.
2500   Caller->getBasicBlockList().erase(CalleeEntry);
2501 
2502   // If we inserted a phi node, check to see if it has a single value (e.g. all
2503   // the entries are the same or undef).  If so, remove the PHI so it doesn't
2504   // block other optimizations.
2505   if (PHI) {
2506     AssumptionCache *AC =
2507         IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
2508     auto &DL = Caller->getParent()->getDataLayout();
2509     if (Value *V = SimplifyInstruction(PHI, {DL, nullptr, nullptr, AC})) {
2510       PHI->replaceAllUsesWith(V);
2511       PHI->eraseFromParent();
2512     }
2513   }
2514 
2515   return InlineResult::success();
2516 }
2517