1 //===- InlineFunction.cpp - Code to perform function inlining -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements inlining of a function into a call site, resolving
10 // parameters and the return value as appropriate.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/ADT/DenseMap.h"
15 #include "llvm/ADT/None.h"
16 #include "llvm/ADT/Optional.h"
17 #include "llvm/ADT/STLExtras.h"
18 #include "llvm/ADT/SetVector.h"
19 #include "llvm/ADT/SmallPtrSet.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/ADT/StringExtras.h"
22 #include "llvm/ADT/iterator_range.h"
23 #include "llvm/Analysis/AliasAnalysis.h"
24 #include "llvm/Analysis/AssumptionCache.h"
25 #include "llvm/Analysis/BlockFrequencyInfo.h"
26 #include "llvm/Analysis/CallGraph.h"
27 #include "llvm/Analysis/CaptureTracking.h"
28 #include "llvm/Analysis/EHPersonalities.h"
29 #include "llvm/Analysis/InstructionSimplify.h"
30 #include "llvm/Analysis/ProfileSummaryInfo.h"
31 #include "llvm/Transforms/Utils/Local.h"
32 #include "llvm/Analysis/ValueTracking.h"
33 #include "llvm/Analysis/VectorUtils.h"
34 #include "llvm/IR/Argument.h"
35 #include "llvm/IR/BasicBlock.h"
36 #include "llvm/IR/CFG.h"
37 #include "llvm/IR/Constant.h"
38 #include "llvm/IR/Constants.h"
39 #include "llvm/IR/DIBuilder.h"
40 #include "llvm/IR/DataLayout.h"
41 #include "llvm/IR/DebugInfoMetadata.h"
42 #include "llvm/IR/DebugLoc.h"
43 #include "llvm/IR/DerivedTypes.h"
44 #include "llvm/IR/Dominators.h"
45 #include "llvm/IR/Function.h"
46 #include "llvm/IR/IRBuilder.h"
47 #include "llvm/IR/InstrTypes.h"
48 #include "llvm/IR/Instruction.h"
49 #include "llvm/IR/Instructions.h"
50 #include "llvm/IR/IntrinsicInst.h"
51 #include "llvm/IR/Intrinsics.h"
52 #include "llvm/IR/LLVMContext.h"
53 #include "llvm/IR/MDBuilder.h"
54 #include "llvm/IR/Metadata.h"
55 #include "llvm/IR/Module.h"
56 #include "llvm/IR/Type.h"
57 #include "llvm/IR/User.h"
58 #include "llvm/IR/Value.h"
59 #include "llvm/Support/Casting.h"
60 #include "llvm/Support/CommandLine.h"
61 #include "llvm/Support/ErrorHandling.h"
62 #include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
63 #include "llvm/Transforms/Utils/Cloning.h"
64 #include "llvm/Transforms/Utils/ValueMapper.h"
65 #include <algorithm>
66 #include <cassert>
67 #include <cstdint>
68 #include <iterator>
69 #include <limits>
70 #include <string>
71 #include <utility>
72 #include <vector>
73 
74 using namespace llvm;
75 using ProfileCount = Function::ProfileCount;
76 
77 static cl::opt<bool>
78 EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
79   cl::Hidden,
80   cl::desc("Convert noalias attributes to metadata during inlining."));
81 
82 static cl::opt<bool>
83 PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining",
84   cl::init(true), cl::Hidden,
85   cl::desc("Convert align attributes to assumptions during inlining."));
86 
static cl::opt<bool> UpdateReturnAttributes(
    "update-return-attrs", cl::init(true), cl::Hidden,
    cl::desc("Update return attributes on calls within inlined body"));
90 
91 static cl::opt<unsigned> InlinerAttributeWindow(
92     "max-inst-checked-for-throw-during-inlining", cl::Hidden,
93     cl::desc("the maximum number of instructions analyzed for may throw during "
94              "attribute inference in inlined body"),
95     cl::init(4));
96 
97 namespace {
98 
99   /// A class for recording information about inlining a landing pad.
100   class LandingPadInliningInfo {
101     /// Destination of the invoke's unwind.
102     BasicBlock *OuterResumeDest;
103 
104     /// Destination for the callee's resume.
105     BasicBlock *InnerResumeDest = nullptr;
106 
107     /// LandingPadInst associated with the invoke.
108     LandingPadInst *CallerLPad = nullptr;
109 
110     /// PHI for EH values from landingpad insts.
111     PHINode *InnerEHValuesPHI = nullptr;
112 
113     SmallVector<Value*, 8> UnwindDestPHIValues;
114 
115   public:
116     LandingPadInliningInfo(InvokeInst *II)
117         : OuterResumeDest(II->getUnwindDest()) {
118       // If there are PHI nodes in the unwind destination block, we need to keep
119       // track of which values came into them from the invoke before removing
120       // the edge from this block.
121       BasicBlock *InvokeBB = II->getParent();
122       BasicBlock::iterator I = OuterResumeDest->begin();
123       for (; isa<PHINode>(I); ++I) {
124         // Save the value to use for this edge.
125         PHINode *PHI = cast<PHINode>(I);
126         UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
127       }
128 
129       CallerLPad = cast<LandingPadInst>(I);
130     }
131 
132     /// The outer unwind destination is the target of
133     /// unwind edges introduced for calls within the inlined function.
134     BasicBlock *getOuterResumeDest() const {
135       return OuterResumeDest;
136     }
137 
138     BasicBlock *getInnerResumeDest();
139 
140     LandingPadInst *getLandingPadInst() const { return CallerLPad; }
141 
142     /// Forward the 'resume' instruction to the caller's landing pad block.
143     /// When the landing pad block has only one predecessor, this is
144     /// a simple branch. When there is more than one predecessor, we need to
145     /// split the landing pad block after the landingpad instruction and jump
146     /// to there.
147     void forwardResume(ResumeInst *RI,
148                        SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);
149 
150     /// Add incoming-PHI values to the unwind destination block for the given
151     /// basic block, using the values for the original invoke's source block.
152     void addIncomingPHIValuesFor(BasicBlock *BB) const {
153       addIncomingPHIValuesForInto(BB, OuterResumeDest);
154     }
155 
156     void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
157       BasicBlock::iterator I = dest->begin();
158       for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
159         PHINode *phi = cast<PHINode>(I);
160         phi->addIncoming(UnwindDestPHIValues[i], src);
161       }
162     }
163   };
164 
165 } // end anonymous namespace
166 
167 /// Get or create a target for the branch from ResumeInsts.
168 BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
169   if (InnerResumeDest) return InnerResumeDest;
170 
171   // Split the landing pad.
172   BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator();
173   InnerResumeDest =
174     OuterResumeDest->splitBasicBlock(SplitPoint,
175                                      OuterResumeDest->getName() + ".body");
176 
177   // The number of incoming edges we expect to the inner landing pad.
178   const unsigned PHICapacity = 2;
179 
180   // Create corresponding new PHIs for all the PHIs in the outer landing pad.
181   Instruction *InsertPoint = &InnerResumeDest->front();
182   BasicBlock::iterator I = OuterResumeDest->begin();
183   for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
184     PHINode *OuterPHI = cast<PHINode>(I);
185     PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
186                                         OuterPHI->getName() + ".lpad-body",
187                                         InsertPoint);
188     OuterPHI->replaceAllUsesWith(InnerPHI);
189     InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
190   }
191 
192   // Create a PHI for the exception values.
193   InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
194                                      "eh.lpad-body", InsertPoint);
195   CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
196   InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);
197 
198   // All done.
199   return InnerResumeDest;
200 }
201 
202 /// Forward the 'resume' instruction to the caller's landing pad block.
203 /// When the landing pad block has only one predecessor, this is a simple
204 /// branch. When there is more than one predecessor, we need to split the
205 /// landing pad block after the landingpad instruction and jump to there.
206 void LandingPadInliningInfo::forwardResume(
207     ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) {
208   BasicBlock *Dest = getInnerResumeDest();
209   BasicBlock *Src = RI->getParent();
210 
211   BranchInst::Create(Dest, Src);
212 
213   // Update the PHIs in the destination. They were inserted in an order which
214   // makes this work.
215   addIncomingPHIValuesForInto(Src, Dest);
216 
217   InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
218   RI->eraseFromParent();
219 }
220 
221 /// Helper for getUnwindDestToken/getUnwindDestTokenHelper.
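/// As a rough illustration (hypothetical IR, not from any particular test):
/// for a catchswitch such as
///   %cs = catchswitch within %outer [label %handler] unwind to caller
/// this returns %outer, and it returns the 'none' token when the pad was
/// created "within none".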
222 static Value *getParentPad(Value *EHPad) {
223   if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
224     return FPI->getParentPad();
225   return cast<CatchSwitchInst>(EHPad)->getParentPad();
226 }
227 
228 using UnwindDestMemoTy = DenseMap<Instruction *, Value *>;
229 
230 /// Helper for getUnwindDestToken that does the descendant-ward part of
231 /// the search.
232 static Value *getUnwindDestTokenHelper(Instruction *EHPad,
233                                        UnwindDestMemoTy &MemoMap) {
234   SmallVector<Instruction *, 8> Worklist(1, EHPad);
235 
236   while (!Worklist.empty()) {
237     Instruction *CurrentPad = Worklist.pop_back_val();
238     // We only put pads on the worklist that aren't in the MemoMap.  When
239     // we find an unwind dest for a pad we may update its ancestors, but
240     // the queue only ever contains uncles/great-uncles/etc. of CurrentPad,
241     // so they should never get updated while queued on the worklist.
242     assert(!MemoMap.count(CurrentPad));
243     Value *UnwindDestToken = nullptr;
244     if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
245       if (CatchSwitch->hasUnwindDest()) {
246         UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI();
247       } else {
248         // Catchswitch doesn't have a 'nounwind' variant, and one might be
249         // annotated as "unwinds to caller" when really it's nounwind (see
250         // e.g. SimplifyCFGOpt::SimplifyUnreachable), so we can't infer the
251         // parent's unwind dest from this.  We can check its catchpads'
252         // descendants, since they might include a cleanuppad with an
253         // "unwinds to caller" cleanupret, which can be trusted.
254         for (auto HI = CatchSwitch->handler_begin(),
255                   HE = CatchSwitch->handler_end();
256              HI != HE && !UnwindDestToken; ++HI) {
257           BasicBlock *HandlerBlock = *HI;
258           auto *CatchPad = cast<CatchPadInst>(HandlerBlock->getFirstNonPHI());
259           for (User *Child : CatchPad->users()) {
260             // Intentionally ignore invokes here -- since the catchswitch is
261             // marked "unwind to caller", it would be a verifier error if it
262             // contained an invoke which unwinds out of it, so any invoke we'd
263             // encounter must unwind to some child of the catch.
264             if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
265               continue;
266 
267             Instruction *ChildPad = cast<Instruction>(Child);
268             auto Memo = MemoMap.find(ChildPad);
269             if (Memo == MemoMap.end()) {
270               // Haven't figured out this child pad yet; queue it.
271               Worklist.push_back(ChildPad);
272               continue;
273             }
274             // We've already checked this child, but might have found that
275             // it offers no proof either way.
276             Value *ChildUnwindDestToken = Memo->second;
277             if (!ChildUnwindDestToken)
278               continue;
279             // We already know the child's unwind dest, which can either
280             // be ConstantTokenNone to indicate unwind to caller, or can
281             // be another child of the catchpad.  Only the former indicates
282             // the unwind dest of the catchswitch.
283             if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
284               UnwindDestToken = ChildUnwindDestToken;
285               break;
286             }
287             assert(getParentPad(ChildUnwindDestToken) == CatchPad);
288           }
289         }
290       }
291     } else {
292       auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
293       for (User *U : CleanupPad->users()) {
294         if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
295           if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
296             UnwindDestToken = RetUnwindDest->getFirstNonPHI();
297           else
298             UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext());
299           break;
300         }
301         Value *ChildUnwindDestToken;
302         if (auto *Invoke = dyn_cast<InvokeInst>(U)) {
303           ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI();
304         } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
305           Instruction *ChildPad = cast<Instruction>(U);
306           auto Memo = MemoMap.find(ChildPad);
307           if (Memo == MemoMap.end()) {
308             // Haven't resolved this child yet; queue it and keep searching.
309             Worklist.push_back(ChildPad);
310             continue;
311           }
312           // We've checked this child, but still need to ignore it if it
313           // had no proof either way.
314           ChildUnwindDestToken = Memo->second;
315           if (!ChildUnwindDestToken)
316             continue;
317         } else {
318           // Not a relevant user of the cleanuppad
319           continue;
320         }
321         // In a well-formed program, the child/invoke must either unwind to
322         // an(other) child of the cleanup, or exit the cleanup.  In the
323         // first case, continue searching.
324         if (isa<Instruction>(ChildUnwindDestToken) &&
325             getParentPad(ChildUnwindDestToken) == CleanupPad)
326           continue;
327         UnwindDestToken = ChildUnwindDestToken;
328         break;
329       }
330     }
331     // If we haven't found an unwind dest for CurrentPad, we may have queued its
332     // children, so move on to the next in the worklist.
333     if (!UnwindDestToken)
334       continue;
335 
336     // Now we know that CurrentPad unwinds to UnwindDestToken.  It also exits
337     // any ancestors of CurrentPad up to but not including UnwindDestToken's
338     // parent pad.  Record this in the memo map, and check to see if the
339     // original EHPad being queried is one of the ones exited.
340     Value *UnwindParent;
341     if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
342       UnwindParent = getParentPad(UnwindPad);
343     else
344       UnwindParent = nullptr;
345     bool ExitedOriginalPad = false;
346     for (Instruction *ExitedPad = CurrentPad;
347          ExitedPad && ExitedPad != UnwindParent;
348          ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) {
349       // Skip over catchpads since they just follow their catchswitches.
350       if (isa<CatchPadInst>(ExitedPad))
351         continue;
352       MemoMap[ExitedPad] = UnwindDestToken;
353       ExitedOriginalPad |= (ExitedPad == EHPad);
354     }
355 
356     if (ExitedOriginalPad)
357       return UnwindDestToken;
358 
359     // Continue the search.
360   }
361 
362   // No definitive information is contained within this funclet.
363   return nullptr;
364 }
365 
366 /// Given an EH pad, find where it unwinds.  If it unwinds to an EH pad,
367 /// return that pad instruction.  If it unwinds to caller, return
368 /// ConstantTokenNone.  If it does not have a definitive unwind destination,
369 /// return nullptr.
370 ///
371 /// This routine gets invoked for calls in funclets in inlinees when inlining
372 /// an invoke.  Since many funclets don't have calls inside them, it's queried
373 /// on-demand rather than building a map of pads to unwind dests up front.
374 /// Determining a funclet's unwind dest may require recursively searching its
375 /// descendants, and also ancestors and cousins if the descendants don't provide
376 /// an answer.  Since most funclets will have their unwind dest immediately
377 /// available as the unwind dest of a catchswitch or cleanupret, this routine
378 /// searches top-down from the given pad and then up. To avoid worst-case
379 /// quadratic run-time given that approach, it uses a memo map to avoid
380 /// re-processing funclet trees.  The callers that rewrite the IR as they go
381 /// take advantage of this, for correctness, by checking/forcing rewritten
382 /// pads' entries to match the original callee view.
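///
/// Rough intuition, as a hedged sketch (hypothetical IR): for
///   %cp = cleanuppad within none []
///   ...
///   cleanupret from %cp unwind label %ehdest
/// the unwind dest token of %cp is the first non-PHI instruction of %ehdest,
/// while a 'cleanupret from %cp unwind to caller' yields ConstantTokenNone,
/// and a cleanuppad whose users give no proof either way yields nullptr.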
383 static Value *getUnwindDestToken(Instruction *EHPad,
384                                  UnwindDestMemoTy &MemoMap) {
385   // Catchpads unwind to the same place as their catchswitch;
  // redirect any queries on catchpads so the code below can
387   // deal with just catchswitches and cleanuppads.
388   if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))
389     EHPad = CPI->getCatchSwitch();
390 
391   // Check if we've already determined the unwind dest for this pad.
392   auto Memo = MemoMap.find(EHPad);
393   if (Memo != MemoMap.end())
394     return Memo->second;
395 
396   // Search EHPad and, if necessary, its descendants.
397   Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
398   assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));
399   if (UnwindDestToken)
400     return UnwindDestToken;
401 
402   // No information is available for this EHPad from itself or any of its
403   // descendants.  An unwind all the way out to a pad in the caller would
404   // need also to agree with the unwind dest of the parent funclet, so
405   // search up the chain to try to find a funclet with information.  Put
406   // null entries in the memo map to avoid re-processing as we go up.
407   MemoMap[EHPad] = nullptr;
408 #ifndef NDEBUG
409   SmallPtrSet<Instruction *, 4> TempMemos;
410   TempMemos.insert(EHPad);
411 #endif
412   Instruction *LastUselessPad = EHPad;
413   Value *AncestorToken;
414   for (AncestorToken = getParentPad(EHPad);
415        auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
416        AncestorToken = getParentPad(AncestorToken)) {
417     // Skip over catchpads since they just follow their catchswitches.
418     if (isa<CatchPadInst>(AncestorPad))
419       continue;
420     // If the MemoMap had an entry mapping AncestorPad to nullptr, since we
421     // haven't yet called getUnwindDestTokenHelper for AncestorPad in this
422     // call to getUnwindDestToken, that would mean that AncestorPad had no
423     // information in itself, its descendants, or its ancestors.  If that
424     // were the case, then we should also have recorded the lack of information
425     // for the descendant that we're coming from.  So assert that we don't
426     // find a null entry in the MemoMap for AncestorPad.
427     assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);
428     auto AncestorMemo = MemoMap.find(AncestorPad);
429     if (AncestorMemo == MemoMap.end()) {
430       UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
431     } else {
432       UnwindDestToken = AncestorMemo->second;
433     }
434     if (UnwindDestToken)
435       break;
436     LastUselessPad = AncestorPad;
437     MemoMap[LastUselessPad] = nullptr;
438 #ifndef NDEBUG
439     TempMemos.insert(LastUselessPad);
440 #endif
441   }
442 
443   // We know that getUnwindDestTokenHelper was called on LastUselessPad and
444   // returned nullptr (and likewise for EHPad and any of its ancestors up to
445   // LastUselessPad), so LastUselessPad has no information from below.  Since
446   // getUnwindDestTokenHelper must investigate all downward paths through
447   // no-information nodes to prove that a node has no information like this,
448   // and since any time it finds information it records it in the MemoMap for
449   // not just the immediately-containing funclet but also any ancestors also
450   // exited, it must be the case that, walking downward from LastUselessPad,
451   // visiting just those nodes which have not been mapped to an unwind dest
452   // by getUnwindDestTokenHelper (the nullptr TempMemos notwithstanding, since
453   // they are just used to keep getUnwindDestTokenHelper from repeating work),
454   // any node visited must have been exhaustively searched with no information
455   // for it found.
456   SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
457   while (!Worklist.empty()) {
458     Instruction *UselessPad = Worklist.pop_back_val();
459     auto Memo = MemoMap.find(UselessPad);
460     if (Memo != MemoMap.end() && Memo->second) {
461       // Here the name 'UselessPad' is a bit of a misnomer, because we've found
462       // that it is a funclet that does have information about unwinding to
463       // a particular destination; its parent was a useless pad.
464       // Since its parent has no information, the unwind edge must not escape
465       // the parent, and must target a sibling of this pad.  This local unwind
466       // gives us no information about EHPad.  Leave it and the subtree rooted
467       // at it alone.
468       assert(getParentPad(Memo->second) == getParentPad(UselessPad));
469       continue;
470     }
    // We know we don't have information for UselessPad.  If it has an entry in
472     // the MemoMap (mapping it to nullptr), it must be one of the TempMemos
473     // added on this invocation of getUnwindDestToken; if a previous invocation
474     // recorded nullptr, it would have had to prove that the ancestors of
475     // UselessPad, which include LastUselessPad, had no information, and that
476     // in turn would have required proving that the descendants of
    // LastUselessPad, which include EHPad, have no information about
478     // LastUselessPad, which would imply that EHPad was mapped to nullptr in
479     // the MemoMap on that invocation, which isn't the case if we got here.
480     assert(!MemoMap.count(UselessPad) || TempMemos.count(UselessPad));
481     // Assert as we enumerate users that 'UselessPad' doesn't have any unwind
482     // information that we'd be contradicting by making a map entry for it
483     // (which is something that getUnwindDestTokenHelper must have proved for
    // us to get here).  Just assert on its direct users here; the checks in
485     // this downward walk at its descendants will verify that they don't have
486     // any unwind edges that exit 'UselessPad' either (i.e. they either have no
487     // unwind edges or unwind to a sibling).
488     MemoMap[UselessPad] = UnwindDestToken;
489     if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
490       assert(CatchSwitch->getUnwindDest() == nullptr && "Expected useless pad");
491       for (BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
492         auto *CatchPad = HandlerBlock->getFirstNonPHI();
493         for (User *U : CatchPad->users()) {
494           assert(
495               (!isa<InvokeInst>(U) ||
496                (getParentPad(
497                     cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
498                 CatchPad)) &&
499               "Expected useless pad");
500           if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
501             Worklist.push_back(cast<Instruction>(U));
502         }
503       }
504     } else {
505       assert(isa<CleanupPadInst>(UselessPad));
506       for (User *U : UselessPad->users()) {
507         assert(!isa<CleanupReturnInst>(U) && "Expected useless pad");
508         assert((!isa<InvokeInst>(U) ||
509                 (getParentPad(
510                      cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
511                  UselessPad)) &&
512                "Expected useless pad");
513         if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
514           Worklist.push_back(cast<Instruction>(U));
515       }
516     }
517   }
518 
519   return UnwindDestToken;
520 }
521 
522 /// When we inline a basic block into an invoke,
523 /// we have to turn all of the calls that can throw into invokes.
/// This function analyzes BB to see if there are any calls, and if so,
/// it rewrites them to be invokes that unwind to UnwindEdge and returns the
/// block containing the new invoke so the caller can update the PHI nodes in
/// the unwind destination.
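///
/// For illustration, a hedged sketch (hypothetical IR, not from any test): a
/// potentially-throwing call in an inlined block such as
///   %r = call i32 @may_throw()
/// is rewritten by changeToInvokeAndSplitBasicBlock into roughly
///   %r = invoke i32 @may_throw()
///           to label %bb.split unwind label %unwind.edge
/// where %unwind.edge is the UnwindEdge block passed in and %bb.split holds
/// the remainder of the original block.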
527 static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
528     BasicBlock *BB, BasicBlock *UnwindEdge,
529     UnwindDestMemoTy *FuncletUnwindMap = nullptr) {
530   for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
531     Instruction *I = &*BBI++;
532 
533     // We only need to check for function calls: inlined invoke
534     // instructions require no special handling.
535     CallInst *CI = dyn_cast<CallInst>(I);
536 
537     if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
538       continue;
539 
540     // We do not need to (and in fact, cannot) convert possibly throwing calls
541     // to @llvm.experimental_deoptimize (resp. @llvm.experimental.guard) into
542     // invokes.  The caller's "segment" of the deoptimization continuation
543     // attached to the newly inlined @llvm.experimental_deoptimize
544     // (resp. @llvm.experimental.guard) call should contain the exception
545     // handling logic, if any.
546     if (auto *F = CI->getCalledFunction())
547       if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
548           F->getIntrinsicID() == Intrinsic::experimental_guard)
549         continue;
550 
551     if (auto FuncletBundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
552       // This call is nested inside a funclet.  If that funclet has an unwind
553       // destination within the inlinee, then unwinding out of this call would
554       // be UB.  Rewriting this call to an invoke which targets the inlined
555       // invoke's unwind dest would give the call's parent funclet multiple
556       // unwind destinations, which is something that subsequent EH table
      // generation can't handle and that the verifier rejects.  So when we
558       // see such a call, leave it as a call.
559       auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
560       Value *UnwindDestToken =
561           getUnwindDestToken(FuncletPad, *FuncletUnwindMap);
562       if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
563         continue;
564 #ifndef NDEBUG
565       Instruction *MemoKey;
566       if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
567         MemoKey = CatchPad->getCatchSwitch();
568       else
569         MemoKey = FuncletPad;
570       assert(FuncletUnwindMap->count(MemoKey) &&
571              (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
572              "must get memoized to avoid confusing later searches");
573 #endif // NDEBUG
574     }
575 
576     changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);
577     return BB;
578   }
579   return nullptr;
580 }
581 
582 /// If we inlined an invoke site, we need to convert calls
583 /// in the body of the inlined function into invokes.
584 ///
585 /// II is the invoke instruction being inlined.  FirstNewBlock is the first
586 /// block of the inlined code (the last block is the end of the function),
587 /// and InlineCodeInfo is information about the code that got inlined.
588 static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock,
589                                     ClonedCodeInfo &InlinedCodeInfo) {
590   BasicBlock *InvokeDest = II->getUnwindDest();
591 
592   Function *Caller = FirstNewBlock->getParent();
593 
594   // The inlined code is currently at the end of the function, scan from the
595   // start of the inlined code to its end, checking for stuff we need to
596   // rewrite.
597   LandingPadInliningInfo Invoke(II);
598 
599   // Get all of the inlined landing pad instructions.
600   SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
601   for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end();
602        I != E; ++I)
603     if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
604       InlinedLPads.insert(II->getLandingPadInst());
605 
606   // Append the clauses from the outer landing pad instruction into the inlined
607   // landing pad instructions.
608   LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
609   for (LandingPadInst *InlinedLPad : InlinedLPads) {
610     unsigned OuterNum = OuterLPad->getNumClauses();
611     InlinedLPad->reserveClauses(OuterNum);
612     for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
613       InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
614     if (OuterLPad->isCleanup())
615       InlinedLPad->setCleanup(true);
616   }
617 
618   for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
619        BB != E; ++BB) {
620     if (InlinedCodeInfo.ContainsCalls)
621       if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
622               &*BB, Invoke.getOuterResumeDest()))
623         // Update any PHI nodes in the exceptional block to indicate that there
624         // is now a new entry in them.
625         Invoke.addIncomingPHIValuesFor(NewBB);
626 
627     // Forward any resumes that are remaining here.
628     if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
629       Invoke.forwardResume(RI, InlinedLPads);
630   }
631 
632   // Now that everything is happy, we have one final detail.  The PHI nodes in
633   // the exception destination block still have entries due to the original
634   // invoke instruction. Eliminate these entries (which might even delete the
635   // PHI node) now.
636   InvokeDest->removePredecessor(II->getParent());
637 }
638 
639 /// If we inlined an invoke site, we need to convert calls
640 /// in the body of the inlined function into invokes.
641 ///
642 /// II is the invoke instruction being inlined.  FirstNewBlock is the first
643 /// block of the inlined code (the last block is the end of the function),
644 /// and InlineCodeInfo is information about the code that got inlined.
645 static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock,
646                                ClonedCodeInfo &InlinedCodeInfo) {
647   BasicBlock *UnwindDest = II->getUnwindDest();
648   Function *Caller = FirstNewBlock->getParent();
649 
650   assert(UnwindDest->getFirstNonPHI()->isEHPad() && "unexpected BasicBlock!");
651 
652   // If there are PHI nodes in the unwind destination block, we need to keep
653   // track of which values came into them from the invoke before removing the
654   // edge from this block.
655   SmallVector<Value *, 8> UnwindDestPHIValues;
656   BasicBlock *InvokeBB = II->getParent();
657   for (Instruction &I : *UnwindDest) {
658     // Save the value to use for this edge.
659     PHINode *PHI = dyn_cast<PHINode>(&I);
660     if (!PHI)
661       break;
662     UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
663   }
664 
665   // Add incoming-PHI values to the unwind destination block for the given basic
666   // block, using the values for the original invoke's source block.
667   auto UpdatePHINodes = [&](BasicBlock *Src) {
668     BasicBlock::iterator I = UnwindDest->begin();
669     for (Value *V : UnwindDestPHIValues) {
670       PHINode *PHI = cast<PHINode>(I);
671       PHI->addIncoming(V, Src);
672       ++I;
673     }
674   };
675 
676   // This connects all the instructions which 'unwind to caller' to the invoke
677   // destination.
678   UnwindDestMemoTy FuncletUnwindMap;
679   for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
680        BB != E; ++BB) {
681     if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
682       if (CRI->unwindsToCaller()) {
683         auto *CleanupPad = CRI->getCleanupPad();
684         CleanupReturnInst::Create(CleanupPad, UnwindDest, CRI);
685         CRI->eraseFromParent();
686         UpdatePHINodes(&*BB);
687         // Finding a cleanupret with an unwind destination would confuse
688         // subsequent calls to getUnwindDestToken, so map the cleanuppad
689         // to short-circuit any such calls and recognize this as an "unwind
690         // to caller" cleanup.
691         assert(!FuncletUnwindMap.count(CleanupPad) ||
692                isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
693         FuncletUnwindMap[CleanupPad] =
694             ConstantTokenNone::get(Caller->getContext());
695       }
696     }
697 
698     Instruction *I = BB->getFirstNonPHI();
699     if (!I->isEHPad())
700       continue;
701 
702     Instruction *Replacement = nullptr;
703     if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
704       if (CatchSwitch->unwindsToCaller()) {
705         Value *UnwindDestToken;
706         if (auto *ParentPad =
707                 dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
708           // This catchswitch is nested inside another funclet.  If that
709           // funclet has an unwind destination within the inlinee, then
710           // unwinding out of this catchswitch would be UB.  Rewriting this
711           // catchswitch to unwind to the inlined invoke's unwind dest would
712           // give the parent funclet multiple unwind destinations, which is
713           // something that subsequent EH table generation can't handle and
          // that the verifier rejects.  So when we see such a catchswitch,
          // leave it as "unwind to caller".
716           UnwindDestToken = getUnwindDestToken(ParentPad, FuncletUnwindMap);
717           if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
718             continue;
719         } else {
720           // This catchswitch has no parent to inherit constraints from, and
721           // none of its descendants can have an unwind edge that exits it and
722           // targets another funclet in the inlinee.  It may or may not have a
723           // descendant that definitively has an unwind to caller.  In either
724           // case, we'll have to assume that any unwinds out of it may need to
725           // be routed to the caller, so treat it as though it has a definitive
726           // unwind to caller.
727           UnwindDestToken = ConstantTokenNone::get(Caller->getContext());
728         }
729         auto *NewCatchSwitch = CatchSwitchInst::Create(
730             CatchSwitch->getParentPad(), UnwindDest,
731             CatchSwitch->getNumHandlers(), CatchSwitch->getName(),
732             CatchSwitch);
733         for (BasicBlock *PadBB : CatchSwitch->handlers())
734           NewCatchSwitch->addHandler(PadBB);
735         // Propagate info for the old catchswitch over to the new one in
736         // the unwind map.  This also serves to short-circuit any subsequent
737         // checks for the unwind dest of this catchswitch, which would get
738         // confused if they found the outer handler in the callee.
739         FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
740         Replacement = NewCatchSwitch;
741       }
742     } else if (!isa<FuncletPadInst>(I)) {
743       llvm_unreachable("unexpected EHPad!");
744     }
745 
746     if (Replacement) {
747       Replacement->takeName(I);
748       I->replaceAllUsesWith(Replacement);
749       I->eraseFromParent();
750       UpdatePHINodes(&*BB);
751     }
752   }
753 
754   if (InlinedCodeInfo.ContainsCalls)
755     for (Function::iterator BB = FirstNewBlock->getIterator(),
756                             E = Caller->end();
757          BB != E; ++BB)
758       if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
759               &*BB, UnwindDest, &FuncletUnwindMap))
760         // Update any PHI nodes in the exceptional block to indicate that there
761         // is now a new entry in them.
762         UpdatePHINodes(NewBB);
763 
764   // Now that everything is happy, we have one final detail.  The PHI nodes in
765   // the exception destination block still have entries due to the original
766   // invoke instruction. Eliminate these entries (which might even delete the
767   // PHI node) now.
768   UnwindDest->removePredecessor(InvokeBB);
769 }
770 
/// When inlining a call site that has !llvm.mem.parallel_loop_access or
/// !llvm.access.group metadata, that metadata should be propagated to all
773 /// memory-accessing cloned instructions.
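///
/// A rough sketch of the effect (hypothetical IR): if the call site carries
///   call void @callee(), !llvm.access.group !3
/// then a memory-accessing instruction cloned from @callee ends up as, e.g.,
///   %v = load i32, i32* %p, !llvm.access.group !3
/// with !3 united with any access group the cloned instruction already had.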
774 static void PropagateParallelLoopAccessMetadata(CallBase &CB,
775                                                 ValueToValueMapTy &VMap) {
776   MDNode *M = CB.getMetadata(LLVMContext::MD_mem_parallel_loop_access);
777   MDNode *CallAccessGroup = CB.getMetadata(LLVMContext::MD_access_group);
778   if (!M && !CallAccessGroup)
779     return;
780 
781   for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
782        VMI != VMIE; ++VMI) {
783     if (!VMI->second)
784       continue;
785 
786     Instruction *NI = dyn_cast<Instruction>(VMI->second);
787     if (!NI)
788       continue;
789 
790     if (M) {
791       if (MDNode *PM =
792               NI->getMetadata(LLVMContext::MD_mem_parallel_loop_access)) {
793         M = MDNode::concatenate(PM, M);
        NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
795       } else if (NI->mayReadOrWriteMemory()) {
796         NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
797       }
798     }
799 
800     if (NI->mayReadOrWriteMemory()) {
801       MDNode *UnitedAccGroups = uniteAccessGroups(
802           NI->getMetadata(LLVMContext::MD_access_group), CallAccessGroup);
803       NI->setMetadata(LLVMContext::MD_access_group, UnitedAccGroups);
804     }
805   }
806 }
807 
808 /// When inlining a function that contains noalias scope metadata,
809 /// this metadata needs to be cloned so that the inlined blocks
810 /// have different "unique scopes" at every call site. Were this not done, then
811 /// aliasing scopes from a function inlined into a caller multiple times could
812 /// not be differentiated (and this would lead to miscompiles because the
813 /// non-aliasing property communicated by the metadata could have
814 /// call-site-specific control dependencies).
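///
/// As a rough example (hypothetical IR, for illustration only): if @callee
/// carries scope metadata !0 and is inlined twice into the same caller, the
/// two inlined bodies receive distinct clones of !0, say !1 and !2, so that
///   store i32 0, i32* %p, !alias.scope !1   ; from the first inlined copy
///   %v = load i32, i32* %q, !noalias !2     ; from the second inlined copy
/// are not mistakenly treated as referring to the same scope.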
815 static void CloneAliasScopeMetadata(CallBase &CB, ValueToValueMapTy &VMap) {
816   const Function *CalledFunc = CB.getCalledFunction();
817   SetVector<const MDNode *> MD;
818 
  // Note: We could clone the metadata only when it is already used in the
  // caller. I'm omitting that check here because it might confuse
821   // inter-procedural alias analysis passes. We can revisit this if it becomes
822   // an efficiency or overhead problem.
823 
824   for (const BasicBlock &I : *CalledFunc)
825     for (const Instruction &J : I) {
826       if (const MDNode *M = J.getMetadata(LLVMContext::MD_alias_scope))
827         MD.insert(M);
828       if (const MDNode *M = J.getMetadata(LLVMContext::MD_noalias))
829         MD.insert(M);
830     }
831 
832   if (MD.empty())
833     return;
834 
835   // Walk the existing metadata, adding the complete (perhaps cyclic) chain to
836   // the set.
837   SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
838   while (!Queue.empty()) {
839     const MDNode *M = cast<MDNode>(Queue.pop_back_val());
840     for (unsigned i = 0, ie = M->getNumOperands(); i != ie; ++i)
841       if (const MDNode *M1 = dyn_cast<MDNode>(M->getOperand(i)))
842         if (MD.insert(M1))
843           Queue.push_back(M1);
844   }
845 
846   // Now we have a complete set of all metadata in the chains used to specify
847   // the noalias scopes and the lists of those scopes.
848   SmallVector<TempMDTuple, 16> DummyNodes;
849   DenseMap<const MDNode *, TrackingMDNodeRef> MDMap;
850   for (const MDNode *I : MD) {
851     DummyNodes.push_back(MDTuple::getTemporary(CalledFunc->getContext(), None));
852     MDMap[I].reset(DummyNodes.back().get());
853   }
854 
855   // Create new metadata nodes to replace the dummy nodes, replacing old
856   // metadata references with either a dummy node or an already-created new
857   // node.
858   for (const MDNode *I : MD) {
859     SmallVector<Metadata *, 4> NewOps;
860     for (unsigned i = 0, ie = I->getNumOperands(); i != ie; ++i) {
861       const Metadata *V = I->getOperand(i);
862       if (const MDNode *M = dyn_cast<MDNode>(V))
863         NewOps.push_back(MDMap[M]);
864       else
865         NewOps.push_back(const_cast<Metadata *>(V));
866     }
867 
868     MDNode *NewM = MDNode::get(CalledFunc->getContext(), NewOps);
869     MDTuple *TempM = cast<MDTuple>(MDMap[I]);
870     assert(TempM->isTemporary() && "Expected temporary node");
871 
872     TempM->replaceAllUsesWith(NewM);
873   }
874 
875   // Now replace the metadata in the new inlined instructions with the
  // replacements from the map.
877   for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
878        VMI != VMIE; ++VMI) {
879     if (!VMI->second)
880       continue;
881 
882     Instruction *NI = dyn_cast<Instruction>(VMI->second);
883     if (!NI)
884       continue;
885 
886     if (MDNode *M = NI->getMetadata(LLVMContext::MD_alias_scope)) {
887       MDNode *NewMD = MDMap[M];
888       // If the call site also had alias scope metadata (a list of scopes to
889       // which instructions inside it might belong), propagate those scopes to
890       // the inlined instructions.
891       if (MDNode *CSM = CB.getMetadata(LLVMContext::MD_alias_scope))
892         NewMD = MDNode::concatenate(NewMD, CSM);
893       NI->setMetadata(LLVMContext::MD_alias_scope, NewMD);
894     } else if (NI->mayReadOrWriteMemory()) {
895       if (MDNode *M = CB.getMetadata(LLVMContext::MD_alias_scope))
896         NI->setMetadata(LLVMContext::MD_alias_scope, M);
897     }
898 
899     if (MDNode *M = NI->getMetadata(LLVMContext::MD_noalias)) {
900       MDNode *NewMD = MDMap[M];
901       // If the call site also had noalias metadata (a list of scopes with
902       // which instructions inside it don't alias), propagate those scopes to
903       // the inlined instructions.
904       if (MDNode *CSM = CB.getMetadata(LLVMContext::MD_noalias))
905         NewMD = MDNode::concatenate(NewMD, CSM);
906       NI->setMetadata(LLVMContext::MD_noalias, NewMD);
907     } else if (NI->mayReadOrWriteMemory()) {
908       if (MDNode *M = CB.getMetadata(LLVMContext::MD_noalias))
909         NI->setMetadata(LLVMContext::MD_noalias, M);
910     }
911   }
912 }
913 
914 /// If the inlined function has noalias arguments,
915 /// then add new alias scopes for each noalias argument, tag the mapped noalias
916 /// parameters with noalias metadata specifying the new scope, and tag all
917 /// non-derived loads, stores and memory intrinsics with the new alias scopes.
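///
/// A hedged sketch of the effect (hypothetical IR): when inlining
///   define void @callee(i8* noalias %p, i8* %q) { ... }
/// a fresh scope, say !1, is created for %p. A cloned access through a pointer
/// based on %p is tagged
///   store i8 0, i8* %p.i, !alias.scope !1
/// while a cloned access known not to be based on %p is tagged
///   store i8 0, i8* %q.i, !noalias !1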
918 static void AddAliasScopeMetadata(CallBase &CB, ValueToValueMapTy &VMap,
919                                   const DataLayout &DL, AAResults *CalleeAAR) {
920   if (!EnableNoAliasConversion)
921     return;
922 
923   const Function *CalledFunc = CB.getCalledFunction();
924   SmallVector<const Argument *, 4> NoAliasArgs;
925 
926   for (const Argument &Arg : CalledFunc->args())
927     if (CB.paramHasAttr(Arg.getArgNo(), Attribute::NoAlias) && !Arg.use_empty())
928       NoAliasArgs.push_back(&Arg);
929 
930   if (NoAliasArgs.empty())
931     return;
932 
933   // To do a good job, if a noalias variable is captured, we need to know if
934   // the capture point dominates the particular use we're considering.
935   DominatorTree DT;
936   DT.recalculate(const_cast<Function&>(*CalledFunc));
937 
938   // noalias indicates that pointer values based on the argument do not alias
939   // pointer values which are not based on it. So we add a new "scope" for each
940   // noalias function argument. Accesses using pointers based on that argument
941   // become part of that alias scope, accesses using pointers not based on that
942   // argument are tagged as noalias with that scope.
943 
944   DenseMap<const Argument *, MDNode *> NewScopes;
945   MDBuilder MDB(CalledFunc->getContext());
946 
947   // Create a new scope domain for this function.
948   MDNode *NewDomain =
949     MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
950   for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
951     const Argument *A = NoAliasArgs[i];
952 
953     std::string Name = std::string(CalledFunc->getName());
954     if (A->hasName()) {
955       Name += ": %";
956       Name += A->getName();
957     } else {
958       Name += ": argument ";
959       Name += utostr(i);
960     }
961 
    // Note: We always create a new anonymous root here. This is done
    // regardless of the linkage of the callee because the aliasing "scope" is
    // not just a property of the callee, but also of all control dependencies
    // in the caller.
965     MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
966     NewScopes.insert(std::make_pair(A, NewScope));
967   }
968 
969   // Iterate over all new instructions in the map; for all memory-access
970   // instructions, add the alias scope metadata.
971   for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
972        VMI != VMIE; ++VMI) {
973     if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
974       if (!VMI->second)
975         continue;
976 
977       Instruction *NI = dyn_cast<Instruction>(VMI->second);
978       if (!NI)
979         continue;
980 
981       bool IsArgMemOnlyCall = false, IsFuncCall = false;
982       SmallVector<const Value *, 2> PtrArgs;
983 
984       if (const LoadInst *LI = dyn_cast<LoadInst>(I))
985         PtrArgs.push_back(LI->getPointerOperand());
986       else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
987         PtrArgs.push_back(SI->getPointerOperand());
988       else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
989         PtrArgs.push_back(VAAI->getPointerOperand());
990       else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
991         PtrArgs.push_back(CXI->getPointerOperand());
992       else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
993         PtrArgs.push_back(RMWI->getPointerOperand());
994       else if (const auto *Call = dyn_cast<CallBase>(I)) {
995         // If we know that the call does not access memory, then we'll still
996         // know that about the inlined clone of this call site, and we don't
997         // need to add metadata.
998         if (Call->doesNotAccessMemory())
999           continue;
1000 
1001         IsFuncCall = true;
1002         if (CalleeAAR) {
1003           FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(Call);
1004           if (AAResults::onlyAccessesArgPointees(MRB))
1005             IsArgMemOnlyCall = true;
1006         }
1007 
1008         for (Value *Arg : Call->args()) {
1009           // We need to check the underlying objects of all arguments, not just
1010           // the pointer arguments, because we might be passing pointers as
1011           // integers, etc.
1012           // However, if we know that the call only accesses pointer arguments,
1013           // then we only need to check the pointer arguments.
1014           if (IsArgMemOnlyCall && !Arg->getType()->isPointerTy())
1015             continue;
1016 
1017           PtrArgs.push_back(Arg);
1018         }
1019       }
1020 
1021       // If we found no pointers, then this instruction is not suitable for
1022       // pairing with an instruction to receive aliasing metadata.
      // However, if this is a call, we might just alias with none of the
      // noalias arguments.
1025       if (PtrArgs.empty() && !IsFuncCall)
1026         continue;
1027 
      // It is possible that there is only one underlying object, but that it
      // is reachable only through several PHIs and thus appears multiple times
      // in the Objects list.
1031       SmallPtrSet<const Value *, 4> ObjSet;
1032       SmallVector<Metadata *, 4> Scopes, NoAliases;
1033 
1034       SmallSetVector<const Argument *, 4> NAPtrArgs;
1035       for (const Value *V : PtrArgs) {
1036         SmallVector<const Value *, 4> Objects;
1037         GetUnderlyingObjects(V, Objects, DL, /* LI = */ nullptr);
1038 
1039         for (const Value *O : Objects)
1040           ObjSet.insert(O);
1041       }
1042 
1043       // Figure out if we're derived from anything that is not a noalias
1044       // argument.
1045       bool CanDeriveViaCapture = false, UsesAliasingPtr = false;
1046       for (const Value *V : ObjSet) {
1047         // Is this value a constant that cannot be derived from any pointer
1048         // value (we need to exclude constant expressions, for example, that
        // are formed from arithmetic on global symbols)?
1050         bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
1051                              isa<ConstantPointerNull>(V) ||
1052                              isa<ConstantDataVector>(V) || isa<UndefValue>(V);
1053         if (IsNonPtrConst)
1054           continue;
1055 
1056         // If this is anything other than a noalias argument, then we cannot
1057         // completely describe the aliasing properties using alias.scope
1058         // metadata (and, thus, won't add any).
1059         if (const Argument *A = dyn_cast<Argument>(V)) {
1060           if (!CB.paramHasAttr(A->getArgNo(), Attribute::NoAlias))
1061             UsesAliasingPtr = true;
1062         } else {
1063           UsesAliasingPtr = true;
1064         }
1065 
1066         // If this is not some identified function-local object (which cannot
1067         // directly alias a noalias argument), or some other argument (which,
1068         // by definition, also cannot alias a noalias argument), then we could
        // alias a noalias argument that has been captured.
1070         if (!isa<Argument>(V) &&
1071             !isIdentifiedFunctionLocal(const_cast<Value*>(V)))
1072           CanDeriveViaCapture = true;
1073       }
1074 
1075       // A function call can always get captured noalias pointers (via other
1076       // parameters, globals, etc.).
1077       if (IsFuncCall && !IsArgMemOnlyCall)
1078         CanDeriveViaCapture = true;
1079 
1080       // First, we want to figure out all of the sets with which we definitely
      // don't alias. Iterate over all noalias sets, and add those for which:
1082       //   1. The noalias argument is not in the set of objects from which we
1083       //      definitely derive.
1084       //   2. The noalias argument has not yet been captured.
1085       // An arbitrary function that might load pointers could see captured
1086       // noalias arguments via other noalias arguments or globals, and so we
1087       // must always check for prior capture.
1088       for (const Argument *A : NoAliasArgs) {
1089         if (!ObjSet.count(A) && (!CanDeriveViaCapture ||
1090                                  // It might be tempting to skip the
1091                                  // PointerMayBeCapturedBefore check if
1092                                  // A->hasNoCaptureAttr() is true, but this is
1093                                  // incorrect because nocapture only guarantees
1094                                  // that no copies outlive the function, not
1095                                  // that the value cannot be locally captured.
1096                                  !PointerMayBeCapturedBefore(A,
1097                                    /* ReturnCaptures */ false,
1098                                    /* StoreCaptures */ false, I, &DT)))
1099           NoAliases.push_back(NewScopes[A]);
1100       }
1101 
1102       if (!NoAliases.empty())
1103         NI->setMetadata(LLVMContext::MD_noalias,
1104                         MDNode::concatenate(
1105                             NI->getMetadata(LLVMContext::MD_noalias),
1106                             MDNode::get(CalledFunc->getContext(), NoAliases)));
1107 
1108       // Next, we want to figure out all of the sets to which we might belong.
1109       // We might belong to a set if the noalias argument is in the set of
1110       // underlying objects. If there is some non-noalias argument in our list
1111       // of underlying objects, then we cannot add a scope because the fact
1112       // that some access does not alias with any set of our noalias arguments
1113       // cannot itself guarantee that it does not alias with this access
1114       // (because there is some pointer of unknown origin involved and the
1115       // other access might also depend on this pointer). We also cannot add
1116       // scopes to arbitrary functions unless we know they don't access any
1117       // non-parameter pointer-values.
1118       bool CanAddScopes = !UsesAliasingPtr;
1119       if (CanAddScopes && IsFuncCall)
1120         CanAddScopes = IsArgMemOnlyCall;
1121 
1122       if (CanAddScopes)
1123         for (const Argument *A : NoAliasArgs) {
1124           if (ObjSet.count(A))
1125             Scopes.push_back(NewScopes[A]);
1126         }
1127 
1128       if (!Scopes.empty())
1129         NI->setMetadata(
1130             LLVMContext::MD_alias_scope,
1131             MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
1132                                 MDNode::get(CalledFunc->getContext(), Scopes)));
1133     }
1134   }
1135 }
1136 
1137 static bool MayContainThrowingOrExitingCall(Instruction *Begin,
1138                                             Instruction *End) {
1139 
1140   assert(Begin->getParent() == End->getParent() &&
1141          "Expected to be in same basic block!");
1142   unsigned NumInstChecked = 0;
1143   // Check that all instructions in the range [Begin, End) are guaranteed to
1144   // transfer execution to successor.
1145   for (auto &I : make_range(Begin->getIterator(), End->getIterator()))
1146     if (NumInstChecked++ > InlinerAttributeWindow ||
1147         !isGuaranteedToTransferExecutionToSuccessor(&I))
1148       return true;
1149   return false;
1150 }
1151 
1152 static AttrBuilder IdentifyValidAttributes(CallBase &CB) {
1153 
1154   AttrBuilder AB(CB.getAttributes(), AttributeList::ReturnIndex);
1155   if (AB.empty())
1156     return AB;
1157   AttrBuilder Valid;
  // Only allow these whitelisted attributes to be propagated back to the
1159   // callee. This is because other attributes may only be valid on the call
1160   // itself, i.e. attributes such as signext and zeroext.
1161   if (auto DerefBytes = AB.getDereferenceableBytes())
1162     Valid.addDereferenceableAttr(DerefBytes);
1163   if (auto DerefOrNullBytes = AB.getDereferenceableOrNullBytes())
1164     Valid.addDereferenceableOrNullAttr(DerefOrNullBytes);
1165   if (AB.contains(Attribute::NoAlias))
1166     Valid.addAttribute(Attribute::NoAlias);
1167   if (AB.contains(Attribute::NonNull))
1168     Valid.addAttribute(Attribute::NonNull);
1169   return Valid;
1170 }
1171 
1172 static void AddReturnAttributes(CallBase &CB, ValueToValueMapTy &VMap) {
1173   if (!UpdateReturnAttributes)
1174     return;
1175 
1176   AttrBuilder Valid = IdentifyValidAttributes(CB);
1177   if (Valid.empty())
1178     return;
1179   auto *CalledFunction = CB.getCalledFunction();
1180   auto &Context = CalledFunction->getContext();
1181 
1182   for (auto &BB : *CalledFunction) {
1183     auto *RI = dyn_cast<ReturnInst>(BB.getTerminator());
1184     if (!RI || !isa<CallBase>(RI->getOperand(0)))
1185       continue;
1186     auto *RetVal = cast<CallBase>(RI->getOperand(0));
1187     // Sanity check that the cloned RetVal exists and is a call, otherwise we
1188     // cannot add the attributes on the cloned RetVal.
1189     // Simplification during inlining could have transformed the cloned
1190     // instruction.
1191     auto *NewRetVal = dyn_cast_or_null<CallBase>(VMap.lookup(RetVal));
1192     if (!NewRetVal)
1193       continue;
1194     // Backward propagation of attributes to the returned value may be incorrect
1195     // if it is control flow dependent.
1196     // Consider:
1197     // @callee {
1198     //  %rv = call @foo()
1199     //  %rv2 = call @bar()
1200     //  if (%rv2 != null)
1201     //    return %rv2
1202     //  if (%rv == null)
1203     //    exit()
1204     //  return %rv
1205     // }
1206     // caller() {
1207     //   %val = call nonnull @callee()
1208     // }
    // Here we cannot add the nonnull attribute to either the call to foo or
    // the call to bar. So we require that RetVal and RI are in the same basic
    // block and that there are no throwing/exiting instructions between them.
1212     if (RI->getParent() != RetVal->getParent() ||
1213         MayContainThrowingOrExitingCall(RetVal, RI))
1214       continue;
1215     // Add to the existing attributes of NewRetVal, i.e. the cloned call
1216     // instruction.
    // NB: if the same attribute already exists on NewRetVal but with a
    // differing value, the AttributeList merge API keeps the already existing
    // value (this matters for attributes such as dereferenceable and
    // dereferenceable_or_null). See AttrBuilder::merge for more details.
1221     AttributeList AL = NewRetVal->getAttributes();
1222     AttributeList NewAL =
1223         AL.addAttributes(Context, AttributeList::ReturnIndex, Valid);
1224     NewRetVal->setAttributes(NewAL);
1225   }
1226 }
1227 
1228 /// If the inlined function has non-byval align arguments, then
1229 /// add @llvm.assume-based alignment assumptions to preserve this information.
1230 static void AddAlignmentAssumptions(CallBase &CB, InlineFunctionInfo &IFI) {
1231   if (!PreserveAlignmentAssumptions || !IFI.GetAssumptionCache)
1232     return;
1233 
1234   AssumptionCache *AC = &(*IFI.GetAssumptionCache)(*CB.getCaller());
1235   auto &DL = CB.getCaller()->getParent()->getDataLayout();
1236 
1237   // To avoid inserting redundant assumptions, we should check for assumptions
1238   // already in the caller. To do this, we might need a DT of the caller.
1239   DominatorTree DT;
1240   bool DTCalculated = false;
1241 
1242   Function *CalledFunc = CB.getCalledFunction();
1243   for (Argument &Arg : CalledFunc->args()) {
1244     unsigned Align = Arg.getType()->isPointerTy() ? Arg.getParamAlignment() : 0;
1245     if (Align && !Arg.hasByValOrInAllocaAttr() && !Arg.hasNUses(0)) {
1246       if (!DTCalculated) {
1247         DT.recalculate(*CB.getCaller());
1248         DTCalculated = true;
1249       }
1250 
1251       // If we can already prove the asserted alignment in the context of the
1252       // caller, then don't bother inserting the assumption.
1253       Value *ArgVal = CB.getArgOperand(Arg.getArgNo());
1254       if (getKnownAlignment(ArgVal, DL, &CB, AC, &DT) >= Align)
1255         continue;
1256 
1257       CallInst *NewAsmp =
1258           IRBuilder<>(&CB).CreateAlignmentAssumption(DL, ArgVal, Align);
1259       AC->registerAssumption(NewAsmp);
1260     }
1261   }
1262 }
1263 
1264 /// Once we have cloned code over from a callee into the caller,
1265 /// update the specified callgraph to reflect the changes we made.
1266 /// Note that it's possible that not all code was copied over, so only
1267 /// some edges of the callgraph may remain.
1268 static void UpdateCallGraphAfterInlining(CallBase &CB,
1269                                          Function::iterator FirstNewBlock,
1270                                          ValueToValueMapTy &VMap,
1271                                          InlineFunctionInfo &IFI) {
1272   CallGraph &CG = *IFI.CG;
1273   const Function *Caller = CB.getCaller();
1274   const Function *Callee = CB.getCalledFunction();
1275   CallGraphNode *CalleeNode = CG[Callee];
1276   CallGraphNode *CallerNode = CG[Caller];
1277 
1278   // Since we inlined some uninlined call sites in the callee into the caller,
1279   // add edges from the caller to all of the callees of the callee.
1280   CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();
1281 
  // Consider the case where CalleeNode == CallerNode: adding edges to the node
  // we are iterating over would invalidate the iterators, so take a snapshot
  // of the existing edges first.
1283   CallGraphNode::CalledFunctionsVector CallCache;
1284   if (CalleeNode == CallerNode) {
1285     CallCache.assign(I, E);
1286     I = CallCache.begin();
1287     E = CallCache.end();
1288   }
1289 
1290   for (; I != E; ++I) {
1291     const Value *OrigCall = I->first;
1292 
1293     ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
1294     // Only copy the edge if the call was inlined!
1295     if (VMI == VMap.end() || VMI->second == nullptr)
1296       continue;
1297 
1298     // If the call was inlined, but then constant folded, there is no edge to
1299     // add.  Check for this case.
1300     auto *NewCall = dyn_cast<CallBase>(VMI->second);
1301     if (!NewCall)
1302       continue;
1303 
1304     // We do not treat intrinsic calls like real function calls because we
1305     // expect them to become inline code; do not add an edge for an intrinsic.
1306     if (NewCall->getCalledFunction() &&
1307         NewCall->getCalledFunction()->isIntrinsic())
1308       continue;
1309 
1310     // Remember that this call site got inlined for the client of
1311     // InlineFunction.
1312     IFI.InlinedCalls.push_back(NewCall);
1313 
1314     // It's possible that inlining the callsite will cause it to go from an
1315     // indirect to a direct call by resolving a function pointer.  If this
1316     // happens, set the callee of the new call site to a more precise
1317     // destination.  This can also happen if the call graph node of the caller
1318     // was just unnecessarily imprecise.
1319     if (!I->second->getFunction())
1320       if (Function *F = NewCall->getCalledFunction()) {
1321         // Indirect call site resolved to direct call.
1322         CallerNode->addCalledFunction(NewCall, CG[F]);
1323 
1324         continue;
1325       }
1326 
1327     CallerNode->addCalledFunction(NewCall, I->second);
1328   }
1329 
1330   // Update the call graph by deleting the edge from Callee to Caller.  We must
1331   // do this after the loop above in case Caller and Callee are the same.
1332   CallerNode->removeCallEdgeFor(*cast<CallBase>(&CB));
1333 }
1334 
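// Emit, at the top of InsertBlock, the memcpy that initializes the temporary
// Dst created for a byval argument from the original argument value Src.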
1335 static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
1336                                     BasicBlock *InsertBlock,
1337                                     InlineFunctionInfo &IFI) {
1338   Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
1339   IRBuilder<> Builder(InsertBlock, InsertBlock->begin());
1340 
1341   Value *Size = Builder.getInt64(M->getDataLayout().getTypeStoreSize(AggTy));
1342 
1343   // Always generate a memcpy of alignment 1 here because we don't know
1344   // the alignment of the src pointer.  Other optimizations can infer
1345   // better alignment.
1346   Builder.CreateMemCpy(Dst, /*DstAlign*/ Align(1), Src,
1347                        /*SrcAlign*/ Align(1), Size);
1348 }
1349 
1350 /// When inlining a call site that has a byval argument,
1351 /// we have to make the implicit memcpy explicit by adding it.
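/// Roughly (value names here are illustrative), a byval argument %arg becomes
///   %arg.copy = alloca <type>                    ; in the caller's entry block
///   llvm.memcpy(%arg.copy, %arg, sizeof(<type>)) ; see HandleByValArgumentInit
/// and the inlined body uses %arg.copy instead of %arg, unless the callee only
/// reads memory and the pointer is already sufficiently aligned, in which case
/// the copy is elided.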
1352 static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
1353                                   const Function *CalledFunc,
1354                                   InlineFunctionInfo &IFI,
1355                                   unsigned ByValAlignment) {
1356   PointerType *ArgTy = cast<PointerType>(Arg->getType());
1357   Type *AggTy = ArgTy->getElementType();
1358 
1359   Function *Caller = TheCall->getFunction();
1360   const DataLayout &DL = Caller->getParent()->getDataLayout();
1361 
1362   // If the called function is readonly, then it could not mutate the caller's
1363   // copy of the byval'd memory.  In this case, it is safe to elide the copy and
1364   // temporary.
1365   if (CalledFunc->onlyReadsMemory()) {
    // If the byval argument has a specified alignment that is greater than the
    // alignment of the passed-in pointer, then we either have to round up the
    // input pointer or give up on this transformation.
1369     if (ByValAlignment <= 1)  // 0 = unspecified, 1 = no particular alignment.
1370       return Arg;
1371 
1372     AssumptionCache *AC =
1373         IFI.GetAssumptionCache ? &(*IFI.GetAssumptionCache)(*Caller) : nullptr;
1374 
1375     // If the pointer is already known to be sufficiently aligned, or if we can
1376     // round it up to a larger alignment, then we don't need a temporary.
1377     if (getOrEnforceKnownAlignment(Arg, Align(ByValAlignment), DL, TheCall,
1378                                    AC) >= ByValAlignment)
1379       return Arg;
1380 
1381     // Otherwise, we have to make a memcpy to get a safe alignment.  This is bad
1382     // for code quality, but rarely happens and is required for correctness.
1383   }
1384 
1385   // Create the alloca.  If we have DataLayout, use nice alignment.
1386   Align Alignment(DL.getPrefTypeAlignment(AggTy));
1387 
1388   // If the byval had an alignment specified, we *must* use at least that
1389   // alignment, as it is required by the byval argument (and uses of the
1390   // pointer inside the callee).
1391   Alignment = max(Alignment, MaybeAlign(ByValAlignment));
1392 
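  // Materialize the temporary at the top of the caller's entry block so that
  // it becomes a static alloca.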
1393   Value *NewAlloca =
1394       new AllocaInst(AggTy, DL.getAllocaAddrSpace(), nullptr, Alignment,
1395                      Arg->getName(), &*Caller->begin()->begin());
1396   IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));
1397 
1398   // Uses of the argument in the function should use our new alloca
1399   // instead.
1400   return NewAlloca;
1401 }
1402 
1403 // Check whether this Value is used by a lifetime intrinsic.
1404 static bool isUsedByLifetimeMarker(Value *V) {
1405   for (User *U : V->users())
1406     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U))
1407       if (II->isLifetimeStartOrEnd())
1408         return true;
1409   return false;
1410 }
1411 
1412 // Check whether the given alloca already has
1413 // lifetime.start or lifetime.end intrinsics.
1414 static bool hasLifetimeMarkers(AllocaInst *AI) {
1415   Type *Ty = AI->getType();
1416   Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
1417                                        Ty->getPointerAddressSpace());
1418   if (Ty == Int8PtrTy)
1419     return isUsedByLifetimeMarker(AI);
1420 
1421   // Do a scan to find all the casts to i8*.
1422   for (User *U : AI->users()) {
1423     if (U->getType() != Int8PtrTy) continue;
1424     if (U->stripPointerCasts() != AI) continue;
1425     if (isUsedByLifetimeMarker(U))
1426       return true;
1427   }
1428   return false;
1429 }
1430 
1431 /// Return the result of AI->isStaticAlloca() if AI were moved to the entry
1432 /// block. Allocas used in inalloca calls and allocas of dynamic array size
1433 /// cannot be static.
static bool allocaWouldBeStaticInEntry(const AllocaInst *AI) {
1435   return isa<Constant>(AI->getArraySize()) && !AI->isUsedWithInAlloca();
1436 }
1437 
1438 /// Returns a DebugLoc for a new DILocation which is a clone of \p OrigDL
1439 /// inlined at \p InlinedAt. \p IANodes is an inlined-at cache.
1440 static DebugLoc inlineDebugLoc(DebugLoc OrigDL, DILocation *InlinedAt,
1441                                LLVMContext &Ctx,
1442                                DenseMap<const MDNode *, MDNode *> &IANodes) {
1443   auto IA = DebugLoc::appendInlinedAt(OrigDL, InlinedAt, Ctx, IANodes);
1444   return DebugLoc::get(OrigDL.getLine(), OrigDL.getCol(), OrigDL.getScope(),
1445                        IA);
1446 }
1447 
/// Update the inlined instructions' line numbers to
/// encode the location at which these instructions are inlined.
1450 static void fixupLineNumbers(Function *Fn, Function::iterator FI,
1451                              Instruction *TheCall, bool CalleeHasDebugInfo) {
1452   const DebugLoc &TheCallDL = TheCall->getDebugLoc();
1453   if (!TheCallDL)
1454     return;
1455 
1456   auto &Ctx = Fn->getContext();
1457   DILocation *InlinedAtNode = TheCallDL;
1458 
1459   // Create a unique call site, not to be confused with any other call from the
1460   // same location.
1461   InlinedAtNode = DILocation::getDistinct(
1462       Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
1463       InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());
1464 
  // Cache the inlined-at nodes as they're built so they are reused; without
  // this, every instruction's inlined-at chain would become distinct from
  // every other.
1468   DenseMap<const MDNode *, MDNode *> IANodes;
1469 
1470   // Check if we are not generating inline line tables and want to use
1471   // the call site location instead.
1472   bool NoInlineLineTables = Fn->hasFnAttribute("no-inline-line-tables");
1473 
1474   for (; FI != Fn->end(); ++FI) {
1475     for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
1476          BI != BE; ++BI) {
1477       // Loop metadata needs to be updated so that the start and end locs
1478       // reference inlined-at locations.
1479       auto updateLoopInfoLoc = [&Ctx, &InlinedAtNode, &IANodes](
1480                                    const DILocation &Loc) -> DILocation * {
1481         return inlineDebugLoc(&Loc, InlinedAtNode, Ctx, IANodes).get();
1482       };
1483       updateLoopMetadataDebugLocations(*BI, updateLoopInfoLoc);
1484 
1485       if (!NoInlineLineTables)
1486         if (DebugLoc DL = BI->getDebugLoc()) {
1487           DebugLoc IDL =
1488               inlineDebugLoc(DL, InlinedAtNode, BI->getContext(), IANodes);
1489           BI->setDebugLoc(IDL);
1490           continue;
1491         }
1492 
1493       if (CalleeHasDebugInfo && !NoInlineLineTables)
1494         continue;
1495 
      // If the inlined instruction has no line number, or if inline info
      // is not being generated, make it look as if it originates from the call
      // location. This is important for ((__always_inline__, __nodebug__))
      // functions which must use caller location for all instructions in their
      // function body.
1501 
1502       // Don't update static allocas, as they may get moved later.
1503       if (auto *AI = dyn_cast<AllocaInst>(BI))
1504         if (allocaWouldBeStaticInEntry(AI))
1505           continue;
1506 
1507       BI->setDebugLoc(TheCallDL);
1508     }
1509 
1510     // Remove debug info intrinsics if we're not keeping inline info.
1511     if (NoInlineLineTables) {
1512       BasicBlock::iterator BI = FI->begin();
1513       while (BI != FI->end()) {
1514         if (isa<DbgInfoIntrinsic>(BI)) {
1515           BI = BI->eraseFromParent();
1516           continue;
1517         }
1518         ++BI;
1519       }
1520     }
1521 
1522   }
1523 }
1524 
1525 /// Update the block frequencies of the caller after a callee has been inlined.
1526 ///
1527 /// Each block cloned into the caller has its block frequency scaled by the
1528 /// ratio of CallSiteFreq/CalleeEntryFreq. This ensures that the cloned copy of
1529 /// callee's entry block gets the same frequency as the callsite block and the
1530 /// relative frequencies of all cloned blocks remain the same after cloning.
1531 static void updateCallerBFI(BasicBlock *CallSiteBlock,
1532                             const ValueToValueMapTy &VMap,
1533                             BlockFrequencyInfo *CallerBFI,
1534                             BlockFrequencyInfo *CalleeBFI,
1535                             const BasicBlock &CalleeEntryBlock) {
1536   SmallPtrSet<BasicBlock *, 16> ClonedBBs;
1537   for (auto Entry : VMap) {
1538     if (!isa<BasicBlock>(Entry.first) || !Entry.second)
1539       continue;
1540     auto *OrigBB = cast<BasicBlock>(Entry.first);
1541     auto *ClonedBB = cast<BasicBlock>(Entry.second);
1542     uint64_t Freq = CalleeBFI->getBlockFreq(OrigBB).getFrequency();
1543     if (!ClonedBBs.insert(ClonedBB).second) {
1544       // Multiple blocks in the callee might get mapped to one cloned block in
1545       // the caller since we prune the callee as we clone it. When that happens,
1546       // we want to use the maximum among the original blocks' frequencies.
1547       uint64_t NewFreq = CallerBFI->getBlockFreq(ClonedBB).getFrequency();
1548       if (NewFreq > Freq)
1549         Freq = NewFreq;
1550     }
1551     CallerBFI->setBlockFreq(ClonedBB, Freq);
1552   }
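  // Pin the cloned entry block to the call site block's frequency and scale
  // the remaining cloned blocks by the same factor.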
1553   BasicBlock *EntryClone = cast<BasicBlock>(VMap.lookup(&CalleeEntryBlock));
1554   CallerBFI->setBlockFreqAndScale(
1555       EntryClone, CallerBFI->getBlockFreq(CallSiteBlock).getFrequency(),
1556       ClonedBBs);
1557 }
1558 
1559 /// Update the branch metadata for cloned call instructions.
1560 static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap,
1561                               const ProfileCount &CalleeEntryCount,
1562                               const Instruction *TheCall,
1563                               ProfileSummaryInfo *PSI,
1564                               BlockFrequencyInfo *CallerBFI) {
1565   if (!CalleeEntryCount.hasValue() || CalleeEntryCount.isSynthetic() ||
1566       CalleeEntryCount.getCount() < 1)
1567     return;
1568   auto CallSiteCount = PSI ? PSI->getProfileCount(TheCall, CallerBFI) : None;
1569   int64_t CallCount =
1570       std::min(CallSiteCount.hasValue() ? CallSiteCount.getValue() : 0,
1571                CalleeEntryCount.getCount());
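  // Remove the executions that moved into the inlined copy from the callee's
  // entry count, and scale the cloned call sites' profile weights to match.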
1572   updateProfileCallee(Callee, -CallCount, &VMap);
1573 }
1574 
1575 void llvm::updateProfileCallee(
1576     Function *Callee, int64_t entryDelta,
1577     const ValueMap<const Value *, WeakTrackingVH> *VMap) {
1578   auto CalleeCount = Callee->getEntryCount();
1579   if (!CalleeCount.hasValue())
1580     return;
1581 
1582   uint64_t priorEntryCount = CalleeCount.getCount();
1583   uint64_t newEntryCount;
1584 
  // Since CallSiteCount is an estimate, it could exceed the original callee
  // count; guard against underflow by clamping the new count to zero.
1587   if (entryDelta < 0 && static_cast<uint64_t>(-entryDelta) > priorEntryCount)
1588     newEntryCount = 0;
1589   else
1590     newEntryCount = priorEntryCount + entryDelta;
1591 
  // If a VMap is provided, we are updating counts during inlining: scale the
  // profile weights of the cloned call sites by the fraction of the entry
  // count that moved into the inlined copy.
1593   if (VMap) {
1594     uint64_t cloneEntryCount = priorEntryCount - newEntryCount;
1595     for (auto Entry : *VMap)
1596       if (isa<CallInst>(Entry.first))
1597         if (auto *CI = dyn_cast_or_null<CallInst>(Entry.second))
1598           CI->updateProfWeight(cloneEntryCount, priorEntryCount);
1599   }
1600 
1601   if (entryDelta) {
1602     Callee->setEntryCount(newEntryCount);
1603 
1604     for (BasicBlock &BB : *Callee)
1605       // No need to update the callsite if it is pruned during inlining.
1606       if (!VMap || VMap->count(&BB))
1607         for (Instruction &I : BB)
1608           if (CallInst *CI = dyn_cast<CallInst>(&I))
1609             CI->updateProfWeight(newEntryCount, priorEntryCount);
1610   }
1611 }
1612 
/// This function inlines the called function into the basic block of the
/// caller. This returns a failure result if it is not possible to inline this
/// call. The program is still in a well-defined state if this occurs.
1616 ///
1617 /// Note that this only does one level of inlining.  For example, if the
1618 /// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
1619 /// exists in the instruction stream.  Similarly this will inline a recursive
1620 /// function by one level.
1621 llvm::InlineResult llvm::InlineFunction(CallBase &CB, InlineFunctionInfo &IFI,
1622                                         AAResults *CalleeAAR,
1623                                         bool InsertLifetime,
1624                                         Function *ForwardVarArgsTo) {
1625   assert(CB.getParent() && CB.getFunction() && "Instruction not in function!");
1626 
1627   // FIXME: we don't inline callbr yet.
1628   if (isa<CallBrInst>(CB))
1629     return InlineResult::failure("We don't inline callbr yet.");
1630 
1631   // If IFI has any state in it, zap it before we fill it in.
1632   IFI.reset();
1633 
1634   Function *CalledFunc = CB.getCalledFunction();
1635   if (!CalledFunc ||               // Can't inline external function or indirect
1636       CalledFunc->isDeclaration()) // call!
1637     return InlineResult::failure("external or indirect");
1638 
1639   // The inliner does not know how to inline through calls with operand bundles
1640   // in general ...
1641   if (CB.hasOperandBundles()) {
1642     for (int i = 0, e = CB.getNumOperandBundles(); i != e; ++i) {
1643       uint32_t Tag = CB.getOperandBundleAt(i).getTagID();
1644       // ... but it knows how to inline through "deopt" operand bundles ...
1645       if (Tag == LLVMContext::OB_deopt)
1646         continue;
1647       // ... and "funclet" operand bundles.
1648       if (Tag == LLVMContext::OB_funclet)
1649         continue;
1650 
1651       return InlineResult::failure("unsupported operand bundle");
1652     }
1653   }
1654 
1655   // If the call to the callee cannot throw, set the 'nounwind' flag on any
1656   // calls that we inline.
1657   bool MarkNoUnwind = CB.doesNotThrow();
1658 
1659   BasicBlock *OrigBB = CB.getParent();
1660   Function *Caller = OrigBB->getParent();
1661 
1662   // GC poses two hazards to inlining, which only occur when the callee has GC:
1663   //  1. If the caller has no GC, then the callee's GC must be propagated to the
1664   //     caller.
1665   //  2. If the caller has a differing GC, it is invalid to inline.
1666   if (CalledFunc->hasGC()) {
1667     if (!Caller->hasGC())
1668       Caller->setGC(CalledFunc->getGC());
1669     else if (CalledFunc->getGC() != Caller->getGC())
1670       return InlineResult::failure("incompatible GC");
1671   }
1672 
1673   // Get the personality function from the callee if it contains a landing pad.
1674   Constant *CalledPersonality =
1675       CalledFunc->hasPersonalityFn()
1676           ? CalledFunc->getPersonalityFn()->stripPointerCasts()
1677           : nullptr;
1678 
1679   // Find the personality function used by the landing pads of the caller. If it
1680   // exists, then check to see that it matches the personality function used in
1681   // the callee.
1682   Constant *CallerPersonality =
1683       Caller->hasPersonalityFn()
1684           ? Caller->getPersonalityFn()->stripPointerCasts()
1685           : nullptr;
1686   if (CalledPersonality) {
1687     if (!CallerPersonality)
1688       Caller->setPersonalityFn(CalledPersonality);
1689     // If the personality functions match, then we can perform the
1690     // inlining. Otherwise, we can't inline.
1691     // TODO: This isn't 100% true. Some personality functions are proper
1692     //       supersets of others and can be used in place of the other.
1693     else if (CalledPersonality != CallerPersonality)
1694       return InlineResult::failure("incompatible personality");
1695   }
1696 
1697   // We need to figure out which funclet the callsite was in so that we may
1698   // properly nest the callee.
1699   Instruction *CallSiteEHPad = nullptr;
1700   if (CallerPersonality) {
1701     EHPersonality Personality = classifyEHPersonality(CallerPersonality);
1702     if (isScopedEHPersonality(Personality)) {
1703       Optional<OperandBundleUse> ParentFunclet =
1704           CB.getOperandBundle(LLVMContext::OB_funclet);
1705       if (ParentFunclet)
1706         CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());
1707 
1708       // OK, the inlining site is legal.  What about the target function?
1709 
1710       if (CallSiteEHPad) {
1711         if (Personality == EHPersonality::MSVC_CXX) {
1712           // The MSVC personality cannot tolerate catches getting inlined into
1713           // cleanup funclets.
1714           if (isa<CleanupPadInst>(CallSiteEHPad)) {
1715             // Ok, the call site is within a cleanuppad.  Let's check the callee
1716             // for catchpads.
1717             for (const BasicBlock &CalledBB : *CalledFunc) {
1718               if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))
1719                 return InlineResult::failure("catch in cleanup funclet");
1720             }
1721           }
1722         } else if (isAsynchronousEHPersonality(Personality)) {
          // SEH is even less tolerant; there may not be any sort of
          // exceptional funclet in the callee.
1725           for (const BasicBlock &CalledBB : *CalledFunc) {
1726             if (CalledBB.isEHPad())
1727               return InlineResult::failure("SEH in cleanup funclet");
1728           }
1729         }
1730       }
1731     }
1732   }
1733 
1734   // Determine if we are dealing with a call in an EHPad which does not unwind
1735   // to caller.
1736   bool EHPadForCallUnwindsLocally = false;
1737   if (CallSiteEHPad && isa<CallInst>(CB)) {
1738     UnwindDestMemoTy FuncletUnwindMap;
1739     Value *CallSiteUnwindDestToken =
1740         getUnwindDestToken(CallSiteEHPad, FuncletUnwindMap);
1741 
1742     EHPadForCallUnwindsLocally =
1743         CallSiteUnwindDestToken &&
1744         !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
1745   }
1746 
1747   // Get an iterator to the last basic block in the function, which will have
1748   // the new function inlined after it.
1749   Function::iterator LastBlock = --Caller->end();
1750 
1751   // Make sure to capture all of the return instructions from the cloned
1752   // function.
1753   SmallVector<ReturnInst*, 8> Returns;
1754   ClonedCodeInfo InlinedFunctionInfo;
1755   Function::iterator FirstNewBlock;
1756 
1757   { // Scope to destroy VMap after cloning.
1758     ValueToValueMapTy VMap;
1759     // Keep a list of pair (dst, src) to emit byval initializations.
1760     SmallVector<std::pair<Value*, Value*>, 4> ByValInit;
1761 
1762     auto &DL = Caller->getParent()->getDataLayout();
1763 
1764     // Calculate the vector of arguments to pass into the function cloner, which
1765     // matches up the formal to the actual argument values.
1766     auto AI = CB.arg_begin();
1767     unsigned ArgNo = 0;
1768     for (Function::arg_iterator I = CalledFunc->arg_begin(),
1769          E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
1770       Value *ActualArg = *AI;
1771 
      // When byval arguments are actually inlined, we need to make the copy
      // implied by them explicit.  However, we don't do this if the callee is
      // readonly or readnone, because the copy would be unneeded: the callee
      // doesn't modify the struct.
1776       if (CB.isByValArgument(ArgNo)) {
1777         ActualArg = HandleByValArgument(ActualArg, &CB, CalledFunc, IFI,
1778                                         CalledFunc->getParamAlignment(ArgNo));
1779         if (ActualArg != *AI)
1780           ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI));
1781       }
1782 
1783       VMap[&*I] = ActualArg;
1784     }
1785 
1786     // TODO: Remove this when users have been updated to the assume bundles.
1787     // Add alignment assumptions if necessary. We do this before the inlined
1788     // instructions are actually cloned into the caller so that we can easily
1789     // check what will be known at the start of the inlined code.
1790     AddAlignmentAssumptions(CB, IFI);
1791 
1792     AssumptionCache *AC =
1793         IFI.GetAssumptionCache ? &(*IFI.GetAssumptionCache)(*Caller) : nullptr;
1794 
    /// Preserve all attributes of the call and its parameters.
1796     salvageKnowledge(&CB, AC);
1797 
1798     // We want the inliner to prune the code as it copies.  We would LOVE to
1799     // have no dead or constant instructions leftover after inlining occurs
1800     // (which can happen, e.g., because an argument was constant), but we'll be
1801     // happy with whatever the cloner can do.
1802     CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
1803                               /*ModuleLevelChanges=*/false, Returns, ".i",
1804                               &InlinedFunctionInfo, &CB);
1805     // Remember the first block that is newly cloned over.
1806     FirstNewBlock = LastBlock; ++FirstNewBlock;
1807 
1808     if (IFI.CallerBFI != nullptr && IFI.CalleeBFI != nullptr)
1809       // Update the BFI of blocks cloned into the caller.
1810       updateCallerBFI(OrigBB, VMap, IFI.CallerBFI, IFI.CalleeBFI,
1811                       CalledFunc->front());
1812 
1813     updateCallProfile(CalledFunc, VMap, CalledFunc->getEntryCount(), &CB,
1814                       IFI.PSI, IFI.CallerBFI);
1815 
1816     // Inject byval arguments initialization.
1817     for (std::pair<Value*, Value*> &Init : ByValInit)
1818       HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(),
1819                               &*FirstNewBlock, IFI);
1820 
1821     Optional<OperandBundleUse> ParentDeopt =
1822         CB.getOperandBundle(LLVMContext::OB_deopt);
1823     if (ParentDeopt) {
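      // Rewrite each cloned call site that carries operand bundles so that its
      // "deopt" bundle is prefixed with the parent call site's deopt state.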
1824       SmallVector<OperandBundleDef, 2> OpDefs;
1825 
1826       for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) {
1827         CallBase *ICS = dyn_cast_or_null<CallBase>(VH);
1828         if (!ICS)
1829           continue; // instruction was DCE'd or RAUW'ed to undef
1830 
1831         OpDefs.clear();
1832 
1833         OpDefs.reserve(ICS->getNumOperandBundles());
1834 
1835         for (unsigned COBi = 0, COBe = ICS->getNumOperandBundles(); COBi < COBe;
1836              ++COBi) {
1837           auto ChildOB = ICS->getOperandBundleAt(COBi);
1838           if (ChildOB.getTagID() != LLVMContext::OB_deopt) {
1839             // If the inlined call has other operand bundles, let them be
1840             OpDefs.emplace_back(ChildOB);
1841             continue;
1842           }
1843 
1844           // It may be useful to separate this logic (of handling operand
1845           // bundles) out to a separate "policy" component if this gets crowded.
1846           // Prepend the parent's deoptimization continuation to the newly
1847           // inlined call's deoptimization continuation.
1848           std::vector<Value *> MergedDeoptArgs;
1849           MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
1850                                   ChildOB.Inputs.size());
1851 
1852           MergedDeoptArgs.insert(MergedDeoptArgs.end(),
1853                                  ParentDeopt->Inputs.begin(),
1854                                  ParentDeopt->Inputs.end());
1855           MergedDeoptArgs.insert(MergedDeoptArgs.end(), ChildOB.Inputs.begin(),
1856                                  ChildOB.Inputs.end());
1857 
1858           OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs));
1859         }
1860 
1861         Instruction *NewI = nullptr;
1862         if (isa<CallInst>(ICS))
1863           NewI = CallInst::Create(cast<CallInst>(ICS), OpDefs, ICS);
1864         else if (isa<CallBrInst>(ICS))
1865           NewI = CallBrInst::Create(cast<CallBrInst>(ICS), OpDefs, ICS);
1866         else
1867           NewI = InvokeInst::Create(cast<InvokeInst>(ICS), OpDefs, ICS);
1868 
1869         // Note: the RAUW does the appropriate fixup in VMap, so we need to do
1870         // this even if the call returns void.
1871         ICS->replaceAllUsesWith(NewI);
1872 
1873         VH = nullptr;
1874         ICS->eraseFromParent();
1875       }
1876     }
1877 
1878     // Update the callgraph if requested.
1879     if (IFI.CG)
1880       UpdateCallGraphAfterInlining(CB, FirstNewBlock, VMap, IFI);
1881 
1882     // For 'nodebug' functions, the associated DISubprogram is always null.
1883     // Conservatively avoid propagating the callsite debug location to
1884     // instructions inlined from a function whose DISubprogram is not null.
1885     fixupLineNumbers(Caller, FirstNewBlock, &CB,
1886                      CalledFunc->getSubprogram() != nullptr);
1887 
1888     // Clone existing noalias metadata if necessary.
1889     CloneAliasScopeMetadata(CB, VMap);
1890 
1891     // Add noalias metadata if necessary.
1892     AddAliasScopeMetadata(CB, VMap, DL, CalleeAAR);
1893 
1894     // Clone return attributes on the callsite into the calls within the inlined
1895     // function which feed into its return value.
1896     AddReturnAttributes(CB, VMap);
1897 
1898     // Propagate llvm.mem.parallel_loop_access if necessary.
1899     PropagateParallelLoopAccessMetadata(CB, VMap);
1900 
1901     // Register any cloned assumptions.
1902     if (IFI.GetAssumptionCache)
1903       for (BasicBlock &NewBlock :
1904            make_range(FirstNewBlock->getIterator(), Caller->end()))
1905         for (Instruction &I : NewBlock) {
1906           if (auto *II = dyn_cast<IntrinsicInst>(&I))
1907             if (II->getIntrinsicID() == Intrinsic::assume)
1908               (*IFI.GetAssumptionCache)(*Caller).registerAssumption(II);
1909         }
1910   }
1911 
  // If there are any alloca instructions in the block that used to be the entry
  // block for the callee, move them to the entry block of the caller.  First
  // calculate which instruction they should be inserted before.  We insert the
  // instructions at the start of the caller's entry block.
1916   {
1917     BasicBlock::iterator InsertPoint = Caller->begin()->begin();
1918     for (BasicBlock::iterator I = FirstNewBlock->begin(),
1919          E = FirstNewBlock->end(); I != E; ) {
1920       AllocaInst *AI = dyn_cast<AllocaInst>(I++);
1921       if (!AI) continue;
1922 
1923       // If the alloca is now dead, remove it.  This often occurs due to code
1924       // specialization.
1925       if (AI->use_empty()) {
1926         AI->eraseFromParent();
1927         continue;
1928       }
1929 
1930       if (!allocaWouldBeStaticInEntry(AI))
1931         continue;
1932 
1933       // Keep track of the static allocas that we inline into the caller.
1934       IFI.StaticAllocas.push_back(AI);
1935 
1936       // Scan for the block of allocas that we can move over, and move them
1937       // all at once.
1938       while (isa<AllocaInst>(I) &&
1939              !cast<AllocaInst>(I)->use_empty() &&
1940              allocaWouldBeStaticInEntry(cast<AllocaInst>(I))) {
1941         IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
1942         ++I;
1943       }
1944 
1945       // Transfer all of the allocas over in a block.  Using splice means
1946       // that the instructions aren't removed from the symbol table, then
1947       // reinserted.
1948       Caller->getEntryBlock().getInstList().splice(
1949           InsertPoint, FirstNewBlock->getInstList(), AI->getIterator(), I);
1950     }
1951   }
1952 
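  // Collect the variadic arguments passed at the original call site, together
  // with their attributes, so they can be forwarded to musttail calls and to
  // calls to ForwardVarArgsTo below.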
1953   SmallVector<Value*,4> VarArgsToForward;
1954   SmallVector<AttributeSet, 4> VarArgsAttrs;
1955   for (unsigned i = CalledFunc->getFunctionType()->getNumParams();
1956        i < CB.getNumArgOperands(); i++) {
1957     VarArgsToForward.push_back(CB.getArgOperand(i));
1958     VarArgsAttrs.push_back(CB.getAttributes().getParamAttributes(i));
1959   }
1960 
1961   bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false;
1962   if (InlinedFunctionInfo.ContainsCalls) {
1963     CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
1964     if (CallInst *CI = dyn_cast<CallInst>(&CB))
1965       CallSiteTailKind = CI->getTailCallKind();
1966 
1967     // For inlining purposes, the "notail" marker is the same as no marker.
1968     if (CallSiteTailKind == CallInst::TCK_NoTail)
1969       CallSiteTailKind = CallInst::TCK_None;
1970 
1971     for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
1972          ++BB) {
1973       for (auto II = BB->begin(); II != BB->end();) {
1974         Instruction &I = *II++;
1975         CallInst *CI = dyn_cast<CallInst>(&I);
1976         if (!CI)
1977           continue;
1978 
1979         // Forward varargs from inlined call site to calls to the
1980         // ForwardVarArgsTo function, if requested, and to musttail calls.
1981         if (!VarArgsToForward.empty() &&
1982             ((ForwardVarArgsTo &&
1983               CI->getCalledFunction() == ForwardVarArgsTo) ||
1984              CI->isMustTailCall())) {
1985           // Collect attributes for non-vararg parameters.
1986           AttributeList Attrs = CI->getAttributes();
1987           SmallVector<AttributeSet, 8> ArgAttrs;
1988           if (!Attrs.isEmpty() || !VarArgsAttrs.empty()) {
1989             for (unsigned ArgNo = 0;
1990                  ArgNo < CI->getFunctionType()->getNumParams(); ++ArgNo)
1991               ArgAttrs.push_back(Attrs.getParamAttributes(ArgNo));
1992           }
1993 
1994           // Add VarArg attributes.
1995           ArgAttrs.append(VarArgsAttrs.begin(), VarArgsAttrs.end());
1996           Attrs = AttributeList::get(CI->getContext(), Attrs.getFnAttributes(),
1997                                      Attrs.getRetAttributes(), ArgAttrs);
1998           // Add VarArgs to existing parameters.
1999           SmallVector<Value *, 6> Params(CI->arg_operands());
2000           Params.append(VarArgsToForward.begin(), VarArgsToForward.end());
2001           CallInst *NewCI = CallInst::Create(
2002               CI->getFunctionType(), CI->getCalledOperand(), Params, "", CI);
2003           NewCI->setDebugLoc(CI->getDebugLoc());
2004           NewCI->setAttributes(Attrs);
2005           NewCI->setCallingConv(CI->getCallingConv());
2006           CI->replaceAllUsesWith(NewCI);
2007           CI->eraseFromParent();
2008           CI = NewCI;
2009         }
2010 
2011         if (Function *F = CI->getCalledFunction())
2012           InlinedDeoptimizeCalls |=
2013               F->getIntrinsicID() == Intrinsic::experimental_deoptimize;
2014 
2015         // We need to reduce the strength of any inlined tail calls.  For
2016         // musttail, we have to avoid introducing potential unbounded stack
2017         // growth.  For example, if functions 'f' and 'g' are mutually recursive
2018         // with musttail, we can inline 'g' into 'f' so long as we preserve
2019         // musttail on the cloned call to 'f'.  If either the inlined call site
2020         // or the cloned call site is *not* musttail, the program already has
2021         // one frame of stack growth, so it's safe to remove musttail.  Here is
2022         // a table of example transformations:
2023         //
2024         //    f -> musttail g -> musttail f  ==>  f -> musttail f
2025         //    f -> musttail g ->     tail f  ==>  f ->     tail f
2026         //    f ->          g -> musttail f  ==>  f ->          f
2027         //    f ->          g ->     tail f  ==>  f ->          f
2028         //
2029         // Inlined notail calls should remain notail calls.
2030         CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
2031         if (ChildTCK != CallInst::TCK_NoTail)
2032           ChildTCK = std::min(CallSiteTailKind, ChildTCK);
2033         CI->setTailCallKind(ChildTCK);
2034         InlinedMustTailCalls |= CI->isMustTailCall();
2035 
2036         // Calls inlined through a 'nounwind' call site should be marked
2037         // 'nounwind'.
2038         if (MarkNoUnwind)
2039           CI->setDoesNotThrow();
2040       }
2041     }
2042   }
2043 
  // Leave lifetime markers for the static allocas, scoping them to the
  // function we just inlined.
2046   if (InsertLifetime && !IFI.StaticAllocas.empty()) {
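    // The lifetime.start markers go at the top of the inlined body; matching
    // lifetime.end markers are emitted before each return below.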
2047     IRBuilder<> builder(&FirstNewBlock->front());
2048     for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
2049       AllocaInst *AI = IFI.StaticAllocas[ai];
2050       // Don't mark swifterror allocas. They can't have bitcast uses.
2051       if (AI->isSwiftError())
2052         continue;
2053 
2054       // If the alloca is already scoped to something smaller than the whole
2055       // function then there's no need to add redundant, less accurate markers.
2056       if (hasLifetimeMarkers(AI))
2057         continue;
2058 
2059       // Try to determine the size of the allocation.
2060       ConstantInt *AllocaSize = nullptr;
2061       if (ConstantInt *AIArraySize =
2062           dyn_cast<ConstantInt>(AI->getArraySize())) {
2063         auto &DL = Caller->getParent()->getDataLayout();
2064         Type *AllocaType = AI->getAllocatedType();
2065         uint64_t AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
2066         uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
2067 
2068         // Don't add markers for zero-sized allocas.
2069         if (AllocaArraySize == 0)
2070           continue;
2071 
2072         // Check that array size doesn't saturate uint64_t and doesn't
2073         // overflow when it's multiplied by type size.
2074         if (AllocaArraySize != std::numeric_limits<uint64_t>::max() &&
2075             std::numeric_limits<uint64_t>::max() / AllocaArraySize >=
2076                 AllocaTypeSize) {
2077           AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
2078                                         AllocaArraySize * AllocaTypeSize);
2079         }
2080       }
2081 
2082       builder.CreateLifetimeStart(AI, AllocaSize);
2083       for (ReturnInst *RI : Returns) {
2084         // Don't insert llvm.lifetime.end calls between a musttail or deoptimize
2085         // call and a return.  The return kills all local allocas.
2086         if (InlinedMustTailCalls &&
2087             RI->getParent()->getTerminatingMustTailCall())
2088           continue;
2089         if (InlinedDeoptimizeCalls &&
2090             RI->getParent()->getTerminatingDeoptimizeCall())
2091           continue;
2092         IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
2093       }
2094     }
2095   }
2096 
2097   // If the inlined code contained dynamic alloca instructions, wrap the inlined
2098   // code with llvm.stacksave/llvm.stackrestore intrinsics.
2099   if (InlinedFunctionInfo.ContainsDynamicAllocas) {
2100     Module *M = Caller->getParent();
2101     // Get the two intrinsics we care about.
2102     Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    Function *StackRestore =
        Intrinsic::getDeclaration(M, Intrinsic::stackrestore);
2104 
2105     // Insert the llvm.stacksave.
2106     CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
2107                              .CreateCall(StackSave, {}, "savedstack");
2108 
2109     // Insert a call to llvm.stackrestore before any return instructions in the
2110     // inlined function.
2111     for (ReturnInst *RI : Returns) {
2112       // Don't insert llvm.stackrestore calls between a musttail or deoptimize
2113       // call and a return.  The return will restore the stack pointer.
2114       if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
2115         continue;
2116       if (InlinedDeoptimizeCalls && RI->getParent()->getTerminatingDeoptimizeCall())
2117         continue;
2118       IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
2119     }
2120   }
2121 
2122   // If we are inlining for an invoke instruction, we must make sure to rewrite
2123   // any call instructions into invoke instructions.  This is sensitive to which
2124   // funclet pads were top-level in the inlinee, so must be done before
2125   // rewriting the "parent pad" links.
2126   if (auto *II = dyn_cast<InvokeInst>(&CB)) {
2127     BasicBlock *UnwindDest = II->getUnwindDest();
2128     Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI();
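    // A landingpad means the caller uses the landingpad-based EH scheme;
    // otherwise the unwind destination begins with a funclet-style EH pad.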
2129     if (isa<LandingPadInst>(FirstNonPHI)) {
2130       HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo);
2131     } else {
2132       HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo);
2133     }
2134   }
2135 
2136   // Update the lexical scopes of the new funclets and callsites.
2137   // Anything that had 'none' as its parent is now nested inside the callsite's
2138   // EHPad.
2139 
2140   if (CallSiteEHPad) {
2141     for (Function::iterator BB = FirstNewBlock->getIterator(),
2142                             E = Caller->end();
2143          BB != E; ++BB) {
2144       // Add bundle operands to any top-level call sites.
2145       SmallVector<OperandBundleDef, 1> OpBundles;
2146       for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E;) {
2147         CallBase *I = dyn_cast<CallBase>(&*BBI++);
2148         if (!I)
2149           continue;
2150 
2151         // Skip call sites which are nounwind intrinsics.
2152         auto *CalledFn =
2153             dyn_cast<Function>(I->getCalledValue()->stripPointerCasts());
2154         if (CalledFn && CalledFn->isIntrinsic() && I->doesNotThrow())
2155           continue;
2156 
2157         // Skip call sites which already have a "funclet" bundle.
2158         if (I->getOperandBundle(LLVMContext::OB_funclet))
2159           continue;
2160 
2161         I->getOperandBundlesAsDefs(OpBundles);
2162         OpBundles.emplace_back("funclet", CallSiteEHPad);
2163 
2164         Instruction *NewInst;
2165         if (auto *CallI = dyn_cast<CallInst>(I))
2166           NewInst = CallInst::Create(CallI, OpBundles, CallI);
2167         else if (auto *CallBrI = dyn_cast<CallBrInst>(I))
2168           NewInst = CallBrInst::Create(CallBrI, OpBundles, CallBrI);
2169         else
2170           NewInst = InvokeInst::Create(cast<InvokeInst>(I), OpBundles, I);
2171         NewInst->takeName(I);
2172         I->replaceAllUsesWith(NewInst);
2173         I->eraseFromParent();
2174 
2175         OpBundles.clear();
2176       }
2177 
2178       // It is problematic if the inlinee has a cleanupret which unwinds to
2179       // caller and we inline it into a call site which doesn't unwind but into
2180       // an EH pad that does.  Such an edge must be dynamically unreachable.
2181       // As such, we replace the cleanupret with unreachable.
2182       if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
2183         if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
2184           changeToUnreachable(CleanupRet, /*UseLLVMTrap=*/false);
2185 
2186       Instruction *I = BB->getFirstNonPHI();
2187       if (!I->isEHPad())
2188         continue;
2189 
2190       if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
2191         if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
2192           CatchSwitch->setParentPad(CallSiteEHPad);
2193       } else {
2194         auto *FPI = cast<FuncletPadInst>(I);
2195         if (isa<ConstantTokenNone>(FPI->getParentPad()))
2196           FPI->setParentPad(CallSiteEHPad);
2197       }
2198     }
2199   }
2200 
2201   if (InlinedDeoptimizeCalls) {
2202     // We need to at least remove the deoptimizing returns from the Return set,
2203     // so that the control flow from those returns does not get merged into the
2204     // caller (but terminate it instead).  If the caller's return type does not
2205     // match the callee's return type, we also need to change the return type of
2206     // the intrinsic.
2207     if (Caller->getReturnType() == CB.getType()) {
2208       auto NewEnd = llvm::remove_if(Returns, [](ReturnInst *RI) {
2209         return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr;
2210       });
2211       Returns.erase(NewEnd, Returns.end());
2212     } else {
2213       SmallVector<ReturnInst *, 8> NormalReturns;
2214       Function *NewDeoptIntrinsic = Intrinsic::getDeclaration(
2215           Caller->getParent(), Intrinsic::experimental_deoptimize,
2216           {Caller->getReturnType()});
2217 
2218       for (ReturnInst *RI : Returns) {
2219         CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall();
2220         if (!DeoptCall) {
2221           NormalReturns.push_back(RI);
2222           continue;
2223         }
2224 
2225         // The calling convention on the deoptimize call itself may be bogus,
2226         // since the code we're inlining may have undefined behavior (and may
2227         // never actually execute at runtime); but all
2228         // @llvm.experimental.deoptimize declarations have to have the same
2229         // calling convention in a well-formed module.
2230         auto CallingConv = DeoptCall->getCalledFunction()->getCallingConv();
2231         NewDeoptIntrinsic->setCallingConv(CallingConv);
2232         auto *CurBB = RI->getParent();
2233         RI->eraseFromParent();
2234 
2235         SmallVector<Value *, 4> CallArgs(DeoptCall->arg_begin(),
2236                                          DeoptCall->arg_end());
2237 
2238         SmallVector<OperandBundleDef, 1> OpBundles;
2239         DeoptCall->getOperandBundlesAsDefs(OpBundles);
2240         DeoptCall->eraseFromParent();
2241         assert(!OpBundles.empty() &&
2242                "Expected at least the deopt operand bundle");
2243 
2244         IRBuilder<> Builder(CurBB);
2245         CallInst *NewDeoptCall =
2246             Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
2247         NewDeoptCall->setCallingConv(CallingConv);
2248         if (NewDeoptCall->getType()->isVoidTy())
2249           Builder.CreateRetVoid();
2250         else
2251           Builder.CreateRet(NewDeoptCall);
2252       }
2253 
2254       // Leave behind the normal returns so we can merge control flow.
2255       std::swap(Returns, NormalReturns);
2256     }
2257   }
2258 
2259   // Handle any inlined musttail call sites.  In order for a new call site to be
2260   // musttail, the source of the clone and the inlined call site must have been
2261   // musttail.  Therefore it's safe to return without merging control into the
2262   // phi below.
2263   if (InlinedMustTailCalls) {
2264     // Check if we need to bitcast the result of any musttail calls.
2265     Type *NewRetTy = Caller->getReturnType();
2266     bool NeedBitCast = !CB.use_empty() && CB.getType() != NewRetTy;
2267 
2268     // Handle the returns preceded by musttail calls separately.
2269     SmallVector<ReturnInst *, 8> NormalReturns;
2270     for (ReturnInst *RI : Returns) {
2271       CallInst *ReturnedMustTail =
2272           RI->getParent()->getTerminatingMustTailCall();
2273       if (!ReturnedMustTail) {
2274         NormalReturns.push_back(RI);
2275         continue;
2276       }
2277       if (!NeedBitCast)
2278         continue;
2279 
2280       // Delete the old return and any preceding bitcast.
2281       BasicBlock *CurBB = RI->getParent();
2282       auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
2283       RI->eraseFromParent();
2284       if (OldCast)
2285         OldCast->eraseFromParent();
2286 
2287       // Insert a new bitcast and return with the right type.
2288       IRBuilder<> Builder(CurBB);
2289       Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
2290     }
2291 
2292     // Leave behind the normal returns so we can merge control flow.
2293     std::swap(Returns, NormalReturns);
2294   }
2295 
2296   // Now that all of the transforms on the inlined code have taken place but
2297   // before we splice the inlined code into the CFG and lose track of which
2298   // blocks were actually inlined, collect the call sites. We only do this if
2299   // call graph updates weren't requested, as those provide value handle based
2300   // tracking of inlined call sites instead.
2301   if (InlinedFunctionInfo.ContainsCalls && !IFI.CG) {
2302     // Otherwise just collect the raw call sites that were inlined.
2303     for (BasicBlock &NewBB :
2304          make_range(FirstNewBlock->getIterator(), Caller->end()))
2305       for (Instruction &I : NewBB)
2306         if (auto *CB = dyn_cast<CallBase>(&I))
2307           IFI.InlinedCallSites.push_back(CB);
2308   }
2309 
2310   // If we cloned in _exactly one_ basic block, and if that block ends in a
2311   // return instruction, we splice the body of the inlined callee directly into
2312   // the calling basic block.
2313   if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
2314     // Move all of the instructions right before the call.
2315     OrigBB->getInstList().splice(CB.getIterator(), FirstNewBlock->getInstList(),
2316                                  FirstNewBlock->begin(), FirstNewBlock->end());
2317     // Remove the cloned basic block.
2318     Caller->getBasicBlockList().pop_back();
2319 
2320     // If the call site was an invoke instruction, add a branch to the normal
2321     // destination.
2322     if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
2323       BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), &CB);
2324       NewBr->setDebugLoc(Returns[0]->getDebugLoc());
2325     }
2326 
2327     // If the return instruction returned a value, replace uses of the call with
2328     // uses of the returned value.
2329     if (!CB.use_empty()) {
2330       ReturnInst *R = Returns[0];
2331       if (&CB == R->getReturnValue())
2332         CB.replaceAllUsesWith(UndefValue::get(CB.getType()));
2333       else
2334         CB.replaceAllUsesWith(R->getReturnValue());
2335     }
2336     // Since we are now done with the Call/Invoke, we can delete it.
2337     CB.eraseFromParent();
2338 
2339     // Since we are now done with the return instruction, delete it also.
2340     Returns[0]->eraseFromParent();
2341 
2342     // We are now done with the inlining.
2343     return InlineResult::success();
2344   }
2345 
2346   // Otherwise, we have the normal case, of more than one block to inline or
2347   // multiple return sites.
2348 
2349   // We want to clone the entire callee function into the hole between the
2350   // "starter" and "ender" blocks.  How we accomplish this depends on whether
2351   // this is an invoke instruction or a call instruction.
2352   BasicBlock *AfterCallBB;
2353   BranchInst *CreatedBranchToNormalDest = nullptr;
2354   if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
2355 
2356     // Add an unconditional branch to make this look like the CallInst case...
2357     CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), &CB);
2358 
    // Split the basic block.  This guarantees that no PHI nodes will have to be
    // updated due to new incoming edges, and makes the invoke case more
    // symmetric to the call case.
2362     AfterCallBB =
2363         OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(),
2364                                 CalledFunc->getName() + ".exit");
2365 
2366   } else { // It's a call
2367     // If this is a call instruction, we need to split the basic block that
2368     // the call lives in.
2369     //
2370     AfterCallBB = OrigBB->splitBasicBlock(CB.getIterator(),
2371                                           CalledFunc->getName() + ".exit");
2372   }
2373 
2374   if (IFI.CallerBFI) {
2375     // Copy original BB's block frequency to AfterCallBB
2376     IFI.CallerBFI->setBlockFreq(
2377         AfterCallBB, IFI.CallerBFI->getBlockFreq(OrigBB).getFrequency());
2378   }
2379 
2380   // Change the branch that used to go to AfterCallBB to branch to the first
2381   // basic block of the inlined function.
2382   //
2383   Instruction *Br = OrigBB->getTerminator();
2384   assert(Br && Br->getOpcode() == Instruction::Br &&
2385          "splitBasicBlock broken!");
2386   Br->setOperand(0, &*FirstNewBlock);
2387 
2388   // Now that the function is correct, make it a little bit nicer.  In
2389   // particular, move the basic blocks inserted from the end of the function
2390   // into the space made by splitting the source basic block.
2391   Caller->getBasicBlockList().splice(AfterCallBB->getIterator(),
2392                                      Caller->getBasicBlockList(), FirstNewBlock,
2393                                      Caller->end());
2394 
2395   // Handle all of the return instructions that we just cloned in, and eliminate
2396   // any users of the original call/invoke instruction.
2397   Type *RTy = CalledFunc->getReturnType();
2398 
2399   PHINode *PHI = nullptr;
2400   if (Returns.size() > 1) {
2401     // The PHI node should go at the front of the new basic block to merge all
2402     // possible incoming values.
2403     if (!CB.use_empty()) {
2404       PHI = PHINode::Create(RTy, Returns.size(), CB.getName(),
2405                             &AfterCallBB->front());
2406       // Anything that used the result of the function call should now use the
2407       // PHI node as their operand.
2408       CB.replaceAllUsesWith(PHI);
2409     }
2410 
2411     // Loop over all of the return instructions adding entries to the PHI node
2412     // as appropriate.
2413     if (PHI) {
2414       for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2415         ReturnInst *RI = Returns[i];
2416         assert(RI->getReturnValue()->getType() == PHI->getType() &&
2417                "Ret value not consistent in function!");
2418         PHI->addIncoming(RI->getReturnValue(), RI->getParent());
2419       }
2420     }
2421 
2422     // Add a branch to the merge points and remove return instructions.
    DebugLoc Loc;
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      BranchInst *BI = BranchInst::Create(AfterCallBB, RI);
      Loc = RI->getDebugLoc();
      BI->setDebugLoc(Loc);
      RI->eraseFromParent();
    }
    // We need to set the debug location to *somewhere* inside the
    // inlined function. The line number may be nonsensical, but the
    // instruction will at least be associated with the right
    // function.
    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Loc);
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!CB.use_empty()) {
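      // Guard against the degenerate case where the inlined body returns the
      // call itself; replacing uses with the call would create a
      // self-reference, so use undef instead.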
      if (&CB == Returns[0]->getReturnValue())
        CB.replaceAllUsesWith(UndefValue::get(CB.getType()));
      else
        CB.replaceAllUsesWith(Returns[0]->getReturnValue());
    }

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());

    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());

    // Delete the return instruction and the now-empty ReturnBB.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!CB.use_empty()) {
    // No returns, but something is using the return value of the call.  Just
    // nuke the result.
    CB.replaceAllUsesWith(UndefValue::get(CB.getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  CB.eraseFromParent();

  // If we inlined any musttail calls and the original return is now
  // unreachable, delete it.  It can only contain a bitcast and ret.
  if (InlinedMustTailCalls && pred_begin(AfterCallBB) == pred_end(AfterCallBB))
    AfterCallBB->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the callee's entry block into the calling block, right before the
  // unconditional branch.
  CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes
  OrigBB->getInstList().splice(Br->getIterator(), CalleeEntry->getInstList());

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);

  // If we inserted a phi node, check to see if it has a single value (e.g. all
  // the entries are the same or undef).  If so, remove the PHI so it doesn't
  // block other optimizations.
  if (PHI) {
    AssumptionCache *AC =
        IFI.GetAssumptionCache ? &(*IFI.GetAssumptionCache)(*Caller) : nullptr;
    auto &DL = Caller->getParent()->getDataLayout();
    if (Value *V = SimplifyInstruction(PHI, {DL, nullptr, nullptr, AC})) {
      PHI->replaceAllUsesWith(V);
      PHI->eraseFromParent();
    }
  }

  return InlineResult::success();
}