1 //===- InlineFunction.cpp - Code to perform function inlining -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements inlining of a function into a call site, resolving
10 // parameters and the return value as appropriate.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/ADT/DenseMap.h"
15 #include "llvm/ADT/None.h"
16 #include "llvm/ADT/Optional.h"
17 #include "llvm/ADT/STLExtras.h"
18 #include "llvm/ADT/SetVector.h"
19 #include "llvm/ADT/SmallPtrSet.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/ADT/StringExtras.h"
22 #include "llvm/ADT/iterator_range.h"
23 #include "llvm/Analysis/AliasAnalysis.h"
24 #include "llvm/Analysis/AssumptionCache.h"
25 #include "llvm/Analysis/BlockFrequencyInfo.h"
26 #include "llvm/Analysis/CallGraph.h"
27 #include "llvm/Analysis/CaptureTracking.h"
28 #include "llvm/Analysis/EHPersonalities.h"
29 #include "llvm/Analysis/InstructionSimplify.h"
30 #include "llvm/Analysis/ProfileSummaryInfo.h"
31 #include "llvm/Transforms/Utils/Local.h"
32 #include "llvm/Analysis/ValueTracking.h"
33 #include "llvm/Analysis/VectorUtils.h"
34 #include "llvm/IR/Argument.h"
35 #include "llvm/IR/BasicBlock.h"
36 #include "llvm/IR/CFG.h"
37 #include "llvm/IR/CallSite.h"
38 #include "llvm/IR/Constant.h"
39 #include "llvm/IR/Constants.h"
40 #include "llvm/IR/DIBuilder.h"
41 #include "llvm/IR/DataLayout.h"
42 #include "llvm/IR/DebugInfoMetadata.h"
43 #include "llvm/IR/DebugLoc.h"
44 #include "llvm/IR/DerivedTypes.h"
45 #include "llvm/IR/Dominators.h"
46 #include "llvm/IR/Function.h"
47 #include "llvm/IR/IRBuilder.h"
48 #include "llvm/IR/InstrTypes.h"
49 #include "llvm/IR/Instruction.h"
50 #include "llvm/IR/Instructions.h"
51 #include "llvm/IR/IntrinsicInst.h"
52 #include "llvm/IR/Intrinsics.h"
53 #include "llvm/IR/LLVMContext.h"
54 #include "llvm/IR/MDBuilder.h"
55 #include "llvm/IR/Metadata.h"
56 #include "llvm/IR/Module.h"
57 #include "llvm/IR/Type.h"
58 #include "llvm/IR/User.h"
59 #include "llvm/IR/Value.h"
60 #include "llvm/Support/Casting.h"
61 #include "llvm/Support/CommandLine.h"
62 #include "llvm/Support/ErrorHandling.h"
63 #include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
64 #include "llvm/Transforms/Utils/Cloning.h"
65 #include "llvm/Transforms/Utils/ValueMapper.h"
66 #include <algorithm>
67 #include <cassert>
68 #include <cstdint>
69 #include <iterator>
70 #include <limits>
71 #include <string>
72 #include <utility>
73 #include <vector>
74 
75 using namespace llvm;
76 using ProfileCount = Function::ProfileCount;
77 
78 static cl::opt<bool>
79 EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
80   cl::Hidden,
81   cl::desc("Convert noalias attributes to metadata during inlining."));
82 
83 static cl::opt<bool>
84 PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining",
85   cl::init(true), cl::Hidden,
86   cl::desc("Convert align attributes to assumptions during inlining."));
87 
88 static cl::opt<bool> UpdateReturnAttributes(
89     "update-return-attrs", cl::init(true), cl::Hidden,
90     cl::desc("Update return attributes on calls within inlined body"));
91 
92 static cl::opt<bool> UpdateLoadMetadataDuringInlining(
93     "update-load-metadata-during-inlining", cl::init(true), cl::Hidden,
94     cl::desc("Update metadata on loads within inlined body"));
95 
96 static cl::opt<unsigned> InlinerAttributeWindow(
97     "max-inst-checked-for-throw-during-inlining", cl::Hidden,
98     cl::desc("the maximum number of instructions analyzed for may throw during "
99              "attribute inference in inlined body"),
100     cl::init(4));
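// Note: these are cl::opt flags, so they can be toggled from the command line
// when the inliner runs under 'opt', e.g. -enable-noalias-to-md-conversion=false
// or -max-inst-checked-for-throw-during-inlining=8 (illustrative invocations).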
101 
102 llvm::InlineResult llvm::InlineFunction(CallBase *CB, InlineFunctionInfo &IFI,
103                                         AAResults *CalleeAAR,
104                                         bool InsertLifetime) {
105   return InlineFunction(CallSite(CB), IFI, CalleeAAR, InsertLifetime);
106 }
107 
108 namespace {
109 
110   /// A class for recording information about inlining a landing pad.
111   class LandingPadInliningInfo {
112     /// Destination of the invoke's unwind.
113     BasicBlock *OuterResumeDest;
114 
115     /// Destination for the callee's resume.
116     BasicBlock *InnerResumeDest = nullptr;
117 
118     /// LandingPadInst associated with the invoke.
119     LandingPadInst *CallerLPad = nullptr;
120 
121     /// PHI for EH values from landingpad insts.
122     PHINode *InnerEHValuesPHI = nullptr;
123 
124     SmallVector<Value*, 8> UnwindDestPHIValues;
125 
126   public:
127     LandingPadInliningInfo(InvokeInst *II)
128         : OuterResumeDest(II->getUnwindDest()) {
129       // If there are PHI nodes in the unwind destination block, we need to keep
130       // track of which values came into them from the invoke before removing
131       // the edge from this block.
132       BasicBlock *InvokeBB = II->getParent();
133       BasicBlock::iterator I = OuterResumeDest->begin();
134       for (; isa<PHINode>(I); ++I) {
135         // Save the value to use for this edge.
136         PHINode *PHI = cast<PHINode>(I);
137         UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
138       }
139 
140       CallerLPad = cast<LandingPadInst>(I);
141     }
142 
143     /// The outer unwind destination is the target of
144     /// unwind edges introduced for calls within the inlined function.
145     BasicBlock *getOuterResumeDest() const {
146       return OuterResumeDest;
147     }
148 
149     BasicBlock *getInnerResumeDest();
150 
151     LandingPadInst *getLandingPadInst() const { return CallerLPad; }
152 
153     /// Forward the 'resume' instruction to the caller's landing pad block.
154     /// When the landing pad block has only one predecessor, this is
155     /// a simple branch. When there is more than one predecessor, we need to
156     /// split the landing pad block after the landingpad instruction and jump
157     /// to there.
158     void forwardResume(ResumeInst *RI,
159                        SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);
160 
161     /// Add incoming-PHI values to the unwind destination block for the given
162     /// basic block, using the values for the original invoke's source block.
163     void addIncomingPHIValuesFor(BasicBlock *BB) const {
164       addIncomingPHIValuesForInto(BB, OuterResumeDest);
165     }
166 
167     void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
168       BasicBlock::iterator I = dest->begin();
169       for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
170         PHINode *phi = cast<PHINode>(I);
171         phi->addIncoming(UnwindDestPHIValues[i], src);
172       }
173     }
174   };
175 
176 } // end anonymous namespace
177 
178 /// Get or create a target for the branch from ResumeInsts.
179 BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
180   if (InnerResumeDest) return InnerResumeDest;
181 
182   // Split the landing pad.
183   BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator();
184   InnerResumeDest =
185     OuterResumeDest->splitBasicBlock(SplitPoint,
186                                      OuterResumeDest->getName() + ".body");
187 
188   // The number of incoming edges we expect to the inner landing pad.
189   const unsigned PHICapacity = 2;
190 
191   // Create corresponding new PHIs for all the PHIs in the outer landing pad.
192   Instruction *InsertPoint = &InnerResumeDest->front();
193   BasicBlock::iterator I = OuterResumeDest->begin();
194   for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
195     PHINode *OuterPHI = cast<PHINode>(I);
196     PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
197                                         OuterPHI->getName() + ".lpad-body",
198                                         InsertPoint);
199     OuterPHI->replaceAllUsesWith(InnerPHI);
200     InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
201   }
202 
203   // Create a PHI for the exception values.
204   InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
205                                      "eh.lpad-body", InsertPoint);
206   CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
207   InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);
208 
209   // All done.
210   return InnerResumeDest;
211 }
212 
213 /// Forward the 'resume' instruction to the caller's landing pad block.
214 /// When the landing pad block has only one predecessor, this is a simple
215 /// branch. When there is more than one predecessor, we need to split the
216 /// landing pad block after the landingpad instruction and jump to there.
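///
/// Illustrative sketch only (hypothetical IR, names invented): if the caller's
/// landing pad block is
///
///   lpad:
///     %lp = landingpad { i8*, i32 } cleanup
///     call void @do_cleanup()
///
/// and more than one resume must reach it, the block is split after the
/// landingpad into 'lpad' and 'lpad.body'; each inlined 'resume %val' becomes
/// a branch to 'lpad.body', and %val is added as an incoming value of the PHI
/// that replaces the uses of %lp there.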
217 void LandingPadInliningInfo::forwardResume(
218     ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) {
219   BasicBlock *Dest = getInnerResumeDest();
220   BasicBlock *Src = RI->getParent();
221 
222   BranchInst::Create(Dest, Src);
223 
224   // Update the PHIs in the destination. They were inserted in an order which
225   // makes this work.
226   addIncomingPHIValuesForInto(Src, Dest);
227 
228   InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
229   RI->eraseFromParent();
230 }
231 
232 /// Helper for getUnwindDestToken/getUnwindDestTokenHelper.
233 static Value *getParentPad(Value *EHPad) {
234   if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
235     return FPI->getParentPad();
236   return cast<CatchSwitchInst>(EHPad)->getParentPad();
237 }
238 
239 using UnwindDestMemoTy = DenseMap<Instruction *, Value *>;
240 
241 /// Helper for getUnwindDestToken that does the descendant-ward part of
242 /// the search.
243 static Value *getUnwindDestTokenHelper(Instruction *EHPad,
244                                        UnwindDestMemoTy &MemoMap) {
245   SmallVector<Instruction *, 8> Worklist(1, EHPad);
246 
247   while (!Worklist.empty()) {
248     Instruction *CurrentPad = Worklist.pop_back_val();
249     // We only put pads on the worklist that aren't in the MemoMap.  When
250     // we find an unwind dest for a pad we may update its ancestors, but
251     // the queue only ever contains uncles/great-uncles/etc. of CurrentPad,
252     // so they should never get updated while queued on the worklist.
253     assert(!MemoMap.count(CurrentPad));
254     Value *UnwindDestToken = nullptr;
255     if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
256       if (CatchSwitch->hasUnwindDest()) {
257         UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI();
258       } else {
259         // Catchswitch doesn't have a 'nounwind' variant, and one might be
260         // annotated as "unwinds to caller" when really it's nounwind (see
261         // e.g. SimplifyCFGOpt::SimplifyUnreachable), so we can't infer the
262         // parent's unwind dest from this.  We can check its catchpads'
263         // descendants, since they might include a cleanuppad with an
264         // "unwinds to caller" cleanupret, which can be trusted.
265         for (auto HI = CatchSwitch->handler_begin(),
266                   HE = CatchSwitch->handler_end();
267              HI != HE && !UnwindDestToken; ++HI) {
268           BasicBlock *HandlerBlock = *HI;
269           auto *CatchPad = cast<CatchPadInst>(HandlerBlock->getFirstNonPHI());
270           for (User *Child : CatchPad->users()) {
271             // Intentionally ignore invokes here -- since the catchswitch is
272             // marked "unwind to caller", it would be a verifier error if it
273             // contained an invoke which unwinds out of it, so any invoke we'd
274             // encounter must unwind to some child of the catch.
275             if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
276               continue;
277 
278             Instruction *ChildPad = cast<Instruction>(Child);
279             auto Memo = MemoMap.find(ChildPad);
280             if (Memo == MemoMap.end()) {
281               // Haven't figured out this child pad yet; queue it.
282               Worklist.push_back(ChildPad);
283               continue;
284             }
285             // We've already checked this child, but might have found that
286             // it offers no proof either way.
287             Value *ChildUnwindDestToken = Memo->second;
288             if (!ChildUnwindDestToken)
289               continue;
290             // We already know the child's unwind dest, which can either
291             // be ConstantTokenNone to indicate unwind to caller, or can
292             // be another child of the catchpad.  Only the former indicates
293             // the unwind dest of the catchswitch.
294             if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
295               UnwindDestToken = ChildUnwindDestToken;
296               break;
297             }
298             assert(getParentPad(ChildUnwindDestToken) == CatchPad);
299           }
300         }
301       }
302     } else {
303       auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
304       for (User *U : CleanupPad->users()) {
305         if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
306           if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
307             UnwindDestToken = RetUnwindDest->getFirstNonPHI();
308           else
309             UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext());
310           break;
311         }
312         Value *ChildUnwindDestToken;
313         if (auto *Invoke = dyn_cast<InvokeInst>(U)) {
314           ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI();
315         } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
316           Instruction *ChildPad = cast<Instruction>(U);
317           auto Memo = MemoMap.find(ChildPad);
318           if (Memo == MemoMap.end()) {
319             // Haven't resolved this child yet; queue it and keep searching.
320             Worklist.push_back(ChildPad);
321             continue;
322           }
323           // We've checked this child, but still need to ignore it if it
324           // had no proof either way.
325           ChildUnwindDestToken = Memo->second;
326           if (!ChildUnwindDestToken)
327             continue;
328         } else {
329           // Not a relevant user of the cleanuppad
330           continue;
331         }
332         // In a well-formed program, the child/invoke must either unwind to
333         // an(other) child of the cleanup, or exit the cleanup.  In the
334         // first case, continue searching.
335         if (isa<Instruction>(ChildUnwindDestToken) &&
336             getParentPad(ChildUnwindDestToken) == CleanupPad)
337           continue;
338         UnwindDestToken = ChildUnwindDestToken;
339         break;
340       }
341     }
342     // If we haven't found an unwind dest for CurrentPad, we may have queued its
343     // children, so move on to the next in the worklist.
344     if (!UnwindDestToken)
345       continue;
346 
347     // Now we know that CurrentPad unwinds to UnwindDestToken.  It also exits
348     // any ancestors of CurrentPad up to but not including UnwindDestToken's
349     // parent pad.  Record this in the memo map, and check to see if the
350     // original EHPad being queried is one of the ones exited.
351     Value *UnwindParent;
352     if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
353       UnwindParent = getParentPad(UnwindPad);
354     else
355       UnwindParent = nullptr;
356     bool ExitedOriginalPad = false;
357     for (Instruction *ExitedPad = CurrentPad;
358          ExitedPad && ExitedPad != UnwindParent;
359          ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) {
360       // Skip over catchpads since they just follow their catchswitches.
361       if (isa<CatchPadInst>(ExitedPad))
362         continue;
363       MemoMap[ExitedPad] = UnwindDestToken;
364       ExitedOriginalPad |= (ExitedPad == EHPad);
365     }
366 
367     if (ExitedOriginalPad)
368       return UnwindDestToken;
369 
370     // Continue the search.
371   }
372 
373   // No definitive information is contained within this funclet.
374   return nullptr;
375 }
376 
377 /// Given an EH pad, find where it unwinds.  If it unwinds to an EH pad,
378 /// return that pad instruction.  If it unwinds to caller, return
379 /// ConstantTokenNone.  If it does not have a definitive unwind destination,
380 /// return nullptr.
381 ///
382 /// This routine gets invoked for calls in funclets in inlinees when inlining
383 /// an invoke.  Since many funclets don't have calls inside them, it's queried
384 /// on-demand rather than building a map of pads to unwind dests up front.
385 /// Determining a funclet's unwind dest may require recursively searching its
386 /// descendants, and also ancestors and cousins if the descendants don't provide
387 /// an answer.  Since most funclets will have their unwind dest immediately
388 /// available as the unwind dest of a catchswitch or cleanupret, this routine
389 /// searches top-down from the given pad and then up. To avoid worst-case
390 /// quadratic run-time given that approach, it uses a memo map to avoid
391 /// re-processing funclet trees.  The callers that rewrite the IR as they go
392 /// take advantage of this, for correctness, by checking/forcing rewritten
393 /// pads' entries to match the original callee view.
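///
/// A hedged illustration (hypothetical EH IR): for a cleanup funclet such as
///
///   %cp = cleanuppad within none []
///   ...
///   cleanupret from %cp unwind label %next.pad
///
/// the returned token is simply the first non-PHI instruction of %next.pad,
/// while 'cleanupret from %cp unwind to caller' yields ConstantTokenNone.
/// Pads with no such direct answer fall back to the descendant/ancestor
/// search below, with results memoized in MemoMap.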
394 static Value *getUnwindDestToken(Instruction *EHPad,
395                                  UnwindDestMemoTy &MemoMap) {
396   // Catchpads unwind to the same place as their catchswitch;
397   // redirect any queries on catchpads so the code below can
398   // deal with just catchswitches and cleanuppads.
399   if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))
400     EHPad = CPI->getCatchSwitch();
401 
402   // Check if we've already determined the unwind dest for this pad.
403   auto Memo = MemoMap.find(EHPad);
404   if (Memo != MemoMap.end())
405     return Memo->second;
406 
407   // Search EHPad and, if necessary, its descendants.
408   Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
409   assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));
410   if (UnwindDestToken)
411     return UnwindDestToken;
412 
413   // No information is available for this EHPad from itself or any of its
414   // descendants.  An unwind all the way out to a pad in the caller would
415   // need also to agree with the unwind dest of the parent funclet, so
416   // search up the chain to try to find a funclet with information.  Put
417   // null entries in the memo map to avoid re-processing as we go up.
418   MemoMap[EHPad] = nullptr;
419 #ifndef NDEBUG
420   SmallPtrSet<Instruction *, 4> TempMemos;
421   TempMemos.insert(EHPad);
422 #endif
423   Instruction *LastUselessPad = EHPad;
424   Value *AncestorToken;
425   for (AncestorToken = getParentPad(EHPad);
426        auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
427        AncestorToken = getParentPad(AncestorToken)) {
428     // Skip over catchpads since they just follow their catchswitches.
429     if (isa<CatchPadInst>(AncestorPad))
430       continue;
431     // If the MemoMap had an entry mapping AncestorPad to nullptr, since we
432     // haven't yet called getUnwindDestTokenHelper for AncestorPad in this
433     // call to getUnwindDestToken, that would mean that AncestorPad had no
434     // information in itself, its descendants, or its ancestors.  If that
435     // were the case, then we should also have recorded the lack of information
436     // for the descendant that we're coming from.  So assert that we don't
437     // find a null entry in the MemoMap for AncestorPad.
438     assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);
439     auto AncestorMemo = MemoMap.find(AncestorPad);
440     if (AncestorMemo == MemoMap.end()) {
441       UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
442     } else {
443       UnwindDestToken = AncestorMemo->second;
444     }
445     if (UnwindDestToken)
446       break;
447     LastUselessPad = AncestorPad;
448     MemoMap[LastUselessPad] = nullptr;
449 #ifndef NDEBUG
450     TempMemos.insert(LastUselessPad);
451 #endif
452   }
453 
454   // We know that getUnwindDestTokenHelper was called on LastUselessPad and
455   // returned nullptr (and likewise for EHPad and any of its ancestors up to
456   // LastUselessPad), so LastUselessPad has no information from below.  Since
457   // getUnwindDestTokenHelper must investigate all downward paths through
458   // no-information nodes to prove that a node has no information like this,
459   // and since any time it finds information it records it in the MemoMap for
460   // not just the immediately-containing funclet but also any ancestors also
461   // exited, it must be the case that, walking downward from LastUselessPad,
462   // visiting just those nodes which have not been mapped to an unwind dest
463   // by getUnwindDestTokenHelper (the nullptr TempMemos notwithstanding, since
464   // they are just used to keep getUnwindDestTokenHelper from repeating work),
465   // any node visited must have been exhaustively searched with no information
466   // for it found.
467   SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
468   while (!Worklist.empty()) {
469     Instruction *UselessPad = Worklist.pop_back_val();
470     auto Memo = MemoMap.find(UselessPad);
471     if (Memo != MemoMap.end() && Memo->second) {
472       // Here the name 'UselessPad' is a bit of a misnomer, because we've found
473       // that it is a funclet that does have information about unwinding to
474       // a particular destination; its parent was a useless pad.
475       // Since its parent has no information, the unwind edge must not escape
476       // the parent, and must target a sibling of this pad.  This local unwind
477       // gives us no information about EHPad.  Leave it and the subtree rooted
478       // at it alone.
479       assert(getParentPad(Memo->second) == getParentPad(UselessPad));
480       continue;
481     }
482     // We know we don't have information for UselessPad.  If it has an entry in
483     // the MemoMap (mapping it to nullptr), it must be one of the TempMemos
484     // added on this invocation of getUnwindDestToken; if a previous invocation
485     // recorded nullptr, it would have had to prove that the ancestors of
486     // UselessPad, which include LastUselessPad, had no information, and that
487     // in turn would have required proving that the descendants of
488     // LastUselessPad, which include EHPad, have no information about
489     // LastUselessPad, which would imply that EHPad was mapped to nullptr in
490     // the MemoMap on that invocation, which isn't the case if we got here.
491     assert(!MemoMap.count(UselessPad) || TempMemos.count(UselessPad));
492     // Assert as we enumerate users that 'UselessPad' doesn't have any unwind
493     // information that we'd be contradicting by making a map entry for it
494     // (which is something that getUnwindDestTokenHelper must have proved for
495     // us to get here).  Just assert on its direct users here; the checks in
496     // this downward walk at its descendants will verify that they don't have
497     // any unwind edges that exit 'UselessPad' either (i.e. they either have no
498     // unwind edges or unwind to a sibling).
499     MemoMap[UselessPad] = UnwindDestToken;
500     if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
501       assert(CatchSwitch->getUnwindDest() == nullptr && "Expected useless pad");
502       for (BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
503         auto *CatchPad = HandlerBlock->getFirstNonPHI();
504         for (User *U : CatchPad->users()) {
505           assert(
506               (!isa<InvokeInst>(U) ||
507                (getParentPad(
508                     cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
509                 CatchPad)) &&
510               "Expected useless pad");
511           if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
512             Worklist.push_back(cast<Instruction>(U));
513         }
514       }
515     } else {
516       assert(isa<CleanupPadInst>(UselessPad));
517       for (User *U : UselessPad->users()) {
518         assert(!isa<CleanupReturnInst>(U) && "Expected useless pad");
519         assert((!isa<InvokeInst>(U) ||
520                 (getParentPad(
521                      cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
522                  UselessPad)) &&
523                "Expected useless pad");
524         if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
525           Worklist.push_back(cast<Instruction>(U));
526       }
527     }
528   }
529 
530   return UnwindDestToken;
531 }
532 
533 /// When we inline a basic block into an invoke,
534 /// we have to turn all of the calls that can throw into invokes.
535 /// This function analyzes BB to see if there are any calls, and if so,
536 /// it rewrites them to be invokes that jump to UnwindEdge and returns the
537 /// rewritten block so that callers can update UnwindEdge's PHI nodes.
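///
/// Illustrative only (names invented): a cloned call such as
///
///   call void @may_throw()
///
/// is rewritten by changeToInvokeAndSplitBasicBlock into
///
///   invoke void @may_throw()
///           to label %bb.split unwind label %unwind.edge
///
/// where %bb.split holds the remainder of BB and %unwind.edge is UnwindEdge.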
538 static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
539     BasicBlock *BB, BasicBlock *UnwindEdge,
540     UnwindDestMemoTy *FuncletUnwindMap = nullptr) {
541   for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
542     Instruction *I = &*BBI++;
543 
544     // We only need to check for function calls: inlined invoke
545     // instructions require no special handling.
546     CallInst *CI = dyn_cast<CallInst>(I);
547 
548     if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
549       continue;
550 
551     // We do not need to (and in fact, cannot) convert possibly throwing calls
552     // to @llvm.experimental_deoptimize (resp. @llvm.experimental.guard) into
553     // invokes.  The caller's "segment" of the deoptimization continuation
554     // attached to the newly inlined @llvm.experimental_deoptimize
555     // (resp. @llvm.experimental.guard) call should contain the exception
556     // handling logic, if any.
557     if (auto *F = CI->getCalledFunction())
558       if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
559           F->getIntrinsicID() == Intrinsic::experimental_guard)
560         continue;
561 
562     if (auto FuncletBundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
563       // This call is nested inside a funclet.  If that funclet has an unwind
564       // destination within the inlinee, then unwinding out of this call would
565       // be UB.  Rewriting this call to an invoke which targets the inlined
566       // invoke's unwind dest would give the call's parent funclet multiple
567       // unwind destinations, which is something that subsequent EH table
568       // generation can't handle and that the verifier rejects.  So when we
569       // see such a call, leave it as a call.
570       auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
571       Value *UnwindDestToken =
572           getUnwindDestToken(FuncletPad, *FuncletUnwindMap);
573       if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
574         continue;
575 #ifndef NDEBUG
576       Instruction *MemoKey;
577       if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
578         MemoKey = CatchPad->getCatchSwitch();
579       else
580         MemoKey = FuncletPad;
581       assert(FuncletUnwindMap->count(MemoKey) &&
582              (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
583              "must get memoized to avoid confusing later searches");
584 #endif // NDEBUG
585     }
586 
587     changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);
588     return BB;
589   }
590   return nullptr;
591 }
592 
593 /// If we inlined an invoke site, we need to convert calls
594 /// in the body of the inlined function into invokes.
595 ///
596 /// II is the invoke instruction being inlined.  FirstNewBlock is the first
597 /// block of the inlined code (the last block is the end of the function),
598 /// and InlineCodeInfo is information about the code that got inlined.
599 static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock,
600                                     ClonedCodeInfo &InlinedCodeInfo) {
601   BasicBlock *InvokeDest = II->getUnwindDest();
602 
603   Function *Caller = FirstNewBlock->getParent();
604 
605   // The inlined code is currently at the end of the function, scan from the
606   // start of the inlined code to its end, checking for stuff we need to
607   // rewrite.
608   LandingPadInliningInfo Invoke(II);
609 
610   // Get all of the inlined landing pad instructions.
611   SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
612   for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end();
613        I != E; ++I)
614     if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
615       InlinedLPads.insert(II->getLandingPadInst());
616 
617   // Append the clauses from the outer landing pad instruction into the inlined
618   // landing pad instructions.
619   LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
620   for (LandingPadInst *InlinedLPad : InlinedLPads) {
621     unsigned OuterNum = OuterLPad->getNumClauses();
622     InlinedLPad->reserveClauses(OuterNum);
623     for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
624       InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
625     if (OuterLPad->isCleanup())
626       InlinedLPad->setCleanup(true);
627   }
628 
629   for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
630        BB != E; ++BB) {
631     if (InlinedCodeInfo.ContainsCalls)
632       if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
633               &*BB, Invoke.getOuterResumeDest()))
634         // Update any PHI nodes in the exceptional block to indicate that there
635         // is now a new entry in them.
636         Invoke.addIncomingPHIValuesFor(NewBB);
637 
638     // Forward any resumes that are remaining here.
639     if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
640       Invoke.forwardResume(RI, InlinedLPads);
641   }
642 
643   // Now that everything is happy, we have one final detail.  The PHI nodes in
644   // the exception destination block still have entries due to the original
645   // invoke instruction. Eliminate these entries (which might even delete the
646   // PHI node) now.
647   InvokeDest->removePredecessor(II->getParent());
648 }
649 
650 /// If we inlined an invoke site, we need to convert calls
651 /// in the body of the inlined function into invokes.
652 ///
653 /// II is the invoke instruction being inlined.  FirstNewBlock is the first
654 /// block of the inlined code (the last block is the end of the function),
655 /// and InlineCodeInfo is information about the code that got inlined.
656 static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock,
657                                ClonedCodeInfo &InlinedCodeInfo) {
658   BasicBlock *UnwindDest = II->getUnwindDest();
659   Function *Caller = FirstNewBlock->getParent();
660 
661   assert(UnwindDest->getFirstNonPHI()->isEHPad() && "unexpected BasicBlock!");
662 
663   // If there are PHI nodes in the unwind destination block, we need to keep
664   // track of which values came into them from the invoke before removing the
665   // edge from this block.
666   SmallVector<Value *, 8> UnwindDestPHIValues;
667   BasicBlock *InvokeBB = II->getParent();
668   for (Instruction &I : *UnwindDest) {
669     // Save the value to use for this edge.
670     PHINode *PHI = dyn_cast<PHINode>(&I);
671     if (!PHI)
672       break;
673     UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
674   }
675 
676   // Add incoming-PHI values to the unwind destination block for the given basic
677   // block, using the values for the original invoke's source block.
678   auto UpdatePHINodes = [&](BasicBlock *Src) {
679     BasicBlock::iterator I = UnwindDest->begin();
680     for (Value *V : UnwindDestPHIValues) {
681       PHINode *PHI = cast<PHINode>(I);
682       PHI->addIncoming(V, Src);
683       ++I;
684     }
685   };
686 
687   // This connects all the instructions which 'unwind to caller' to the invoke
688   // destination.
689   UnwindDestMemoTy FuncletUnwindMap;
690   for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
691        BB != E; ++BB) {
692     if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
693       if (CRI->unwindsToCaller()) {
694         auto *CleanupPad = CRI->getCleanupPad();
695         CleanupReturnInst::Create(CleanupPad, UnwindDest, CRI);
696         CRI->eraseFromParent();
697         UpdatePHINodes(&*BB);
698         // Finding a cleanupret with an unwind destination would confuse
699         // subsequent calls to getUnwindDestToken, so map the cleanuppad
700         // to short-circuit any such calls and recognize this as an "unwind
701         // to caller" cleanup.
702         assert(!FuncletUnwindMap.count(CleanupPad) ||
703                isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
704         FuncletUnwindMap[CleanupPad] =
705             ConstantTokenNone::get(Caller->getContext());
706       }
707     }
708 
709     Instruction *I = BB->getFirstNonPHI();
710     if (!I->isEHPad())
711       continue;
712 
713     Instruction *Replacement = nullptr;
714     if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
715       if (CatchSwitch->unwindsToCaller()) {
716         Value *UnwindDestToken;
717         if (auto *ParentPad =
718                 dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
719           // This catchswitch is nested inside another funclet.  If that
720           // funclet has an unwind destination within the inlinee, then
721           // unwinding out of this catchswitch would be UB.  Rewriting this
722           // catchswitch to unwind to the inlined invoke's unwind dest would
723           // give the parent funclet multiple unwind destinations, which is
724           // something that subsequent EH table generation can't handle and
725           // that the verifier rejects.  So when we see such a call, leave it
726           // as "unwind to caller".
727           UnwindDestToken = getUnwindDestToken(ParentPad, FuncletUnwindMap);
728           if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
729             continue;
730         } else {
731           // This catchswitch has no parent to inherit constraints from, and
732           // none of its descendants can have an unwind edge that exits it and
733           // targets another funclet in the inlinee.  It may or may not have a
734           // descendant that definitively has an unwind to caller.  In either
735           // case, we'll have to assume that any unwinds out of it may need to
736           // be routed to the caller, so treat it as though it has a definitive
737           // unwind to caller.
738           UnwindDestToken = ConstantTokenNone::get(Caller->getContext());
739         }
740         auto *NewCatchSwitch = CatchSwitchInst::Create(
741             CatchSwitch->getParentPad(), UnwindDest,
742             CatchSwitch->getNumHandlers(), CatchSwitch->getName(),
743             CatchSwitch);
744         for (BasicBlock *PadBB : CatchSwitch->handlers())
745           NewCatchSwitch->addHandler(PadBB);
746         // Propagate info for the old catchswitch over to the new one in
747         // the unwind map.  This also serves to short-circuit any subsequent
748         // checks for the unwind dest of this catchswitch, which would get
749         // confused if they found the outer handler in the callee.
750         FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
751         Replacement = NewCatchSwitch;
752       }
753     } else if (!isa<FuncletPadInst>(I)) {
754       llvm_unreachable("unexpected EHPad!");
755     }
756 
757     if (Replacement) {
758       Replacement->takeName(I);
759       I->replaceAllUsesWith(Replacement);
760       I->eraseFromParent();
761       UpdatePHINodes(&*BB);
762     }
763   }
764 
765   if (InlinedCodeInfo.ContainsCalls)
766     for (Function::iterator BB = FirstNewBlock->getIterator(),
767                             E = Caller->end();
768          BB != E; ++BB)
769       if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
770               &*BB, UnwindDest, &FuncletUnwindMap))
771         // Update any PHI nodes in the exceptional block to indicate that there
772         // is now a new entry in them.
773         UpdatePHINodes(NewBB);
774 
775   // Now that everything is happy, we have one final detail.  The PHI nodes in
776   // the exception destination block still have entries due to the original
777   // invoke instruction. Eliminate these entries (which might even delete the
778   // PHI node) now.
779   UnwindDest->removePredecessor(InvokeBB);
780 }
781 
782 /// When inlining a call site that has !llvm.mem.parallel_loop_access or
783 /// llvm.access.group metadata, that metadata should be propagated to all
784 /// memory-accessing cloned instructions.
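///
/// Sketch of the effect (hypothetical IR): if the call site carries
/// '!llvm.access.group !0', a cloned access such as
///
///   %v = load i32, i32* %p
///
/// becomes
///
///   %v = load i32, i32* %p, !llvm.access.group !0
///
/// (united with any access group the clone already had), so the surrounding
/// loop's parallel-accesses annotation continues to cover the inlined code.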
785 static void PropagateParallelLoopAccessMetadata(CallSite CS,
786                                                 ValueToValueMapTy &VMap) {
787   MDNode *M =
788     CS.getInstruction()->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
789   MDNode *CallAccessGroup =
790       CS.getInstruction()->getMetadata(LLVMContext::MD_access_group);
791   if (!M && !CallAccessGroup)
792     return;
793 
794   for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
795        VMI != VMIE; ++VMI) {
796     if (!VMI->second)
797       continue;
798 
799     Instruction *NI = dyn_cast<Instruction>(VMI->second);
800     if (!NI)
801       continue;
802 
803     if (M) {
804       if (MDNode *PM =
805               NI->getMetadata(LLVMContext::MD_mem_parallel_loop_access)) {
806         M = MDNode::concatenate(PM, M);
807         NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
808       } else if (NI->mayReadOrWriteMemory()) {
809         NI->setMetadata(LLVMContext::MD_mem_parallel_loop_access, M);
810       }
811     }
812 
813     if (NI->mayReadOrWriteMemory()) {
814       MDNode *UnitedAccGroups = uniteAccessGroups(
815           NI->getMetadata(LLVMContext::MD_access_group), CallAccessGroup);
816       NI->setMetadata(LLVMContext::MD_access_group, UnitedAccGroups);
817     }
818   }
819 }
820 
821 /// When inlining a function that contains noalias scope metadata,
822 /// this metadata needs to be cloned so that the inlined blocks
823 /// have different "unique scopes" at every call site. Were this not done, then
824 /// aliasing scopes from a function inlined into a caller multiple times could
825 /// not be differentiated (and this would lead to miscompiles because the
826 /// non-aliasing property communicated by the metadata could have
827 /// call-site-specific control dependencies).
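///
/// For example (illustrative): if @callee tags its memory accesses with scope
/// list !1 and is inlined at two call sites in the same caller, each inlined
/// copy gets a fresh clone of !1; otherwise the !noalias claims made under
/// one call site's control dependencies would incorrectly apply to accesses
/// cloned from the other call site.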
828 static void CloneAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap) {
829   const Function *CalledFunc = CS.getCalledFunction();
830   SetVector<const MDNode *> MD;
831 
832   // Note: We could only clone the metadata if it is already used in the
833   // caller. I'm omitting that check here because it might confuse
834   // inter-procedural alias analysis passes. We can revisit this if it becomes
835   // an efficiency or overhead problem.
836 
837   for (const BasicBlock &I : *CalledFunc)
838     for (const Instruction &J : I) {
839       if (const MDNode *M = J.getMetadata(LLVMContext::MD_alias_scope))
840         MD.insert(M);
841       if (const MDNode *M = J.getMetadata(LLVMContext::MD_noalias))
842         MD.insert(M);
843     }
844 
845   if (MD.empty())
846     return;
847 
848   // Walk the existing metadata, adding the complete (perhaps cyclic) chain to
849   // the set.
850   SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
851   while (!Queue.empty()) {
852     const MDNode *M = cast<MDNode>(Queue.pop_back_val());
853     for (unsigned i = 0, ie = M->getNumOperands(); i != ie; ++i)
854       if (const MDNode *M1 = dyn_cast<MDNode>(M->getOperand(i)))
855         if (MD.insert(M1))
856           Queue.push_back(M1);
857   }
858 
859   // Now we have a complete set of all metadata in the chains used to specify
860   // the noalias scopes and the lists of those scopes.
861   SmallVector<TempMDTuple, 16> DummyNodes;
862   DenseMap<const MDNode *, TrackingMDNodeRef> MDMap;
863   for (const MDNode *I : MD) {
864     DummyNodes.push_back(MDTuple::getTemporary(CalledFunc->getContext(), None));
865     MDMap[I].reset(DummyNodes.back().get());
866   }
867 
868   // Create new metadata nodes to replace the dummy nodes, replacing old
869   // metadata references with either a dummy node or an already-created new
870   // node.
871   for (const MDNode *I : MD) {
872     SmallVector<Metadata *, 4> NewOps;
873     for (unsigned i = 0, ie = I->getNumOperands(); i != ie; ++i) {
874       const Metadata *V = I->getOperand(i);
875       if (const MDNode *M = dyn_cast<MDNode>(V))
876         NewOps.push_back(MDMap[M]);
877       else
878         NewOps.push_back(const_cast<Metadata *>(V));
879     }
880 
881     MDNode *NewM = MDNode::get(CalledFunc->getContext(), NewOps);
882     MDTuple *TempM = cast<MDTuple>(MDMap[I]);
883     assert(TempM->isTemporary() && "Expected temporary node");
884 
885     TempM->replaceAllUsesWith(NewM);
886   }
887 
888   // Now replace the metadata in the new inlined instructions with the
889   // replacements from the map.
890   for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
891        VMI != VMIE; ++VMI) {
892     if (!VMI->second)
893       continue;
894 
895     Instruction *NI = dyn_cast<Instruction>(VMI->second);
896     if (!NI)
897       continue;
898 
899     if (MDNode *M = NI->getMetadata(LLVMContext::MD_alias_scope)) {
900       MDNode *NewMD = MDMap[M];
901       // If the call site also had alias scope metadata (a list of scopes to
902       // which instructions inside it might belong), propagate those scopes to
903       // the inlined instructions.
904       if (MDNode *CSM =
905               CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
906         NewMD = MDNode::concatenate(NewMD, CSM);
907       NI->setMetadata(LLVMContext::MD_alias_scope, NewMD);
908     } else if (NI->mayReadOrWriteMemory()) {
909       if (MDNode *M =
910               CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
911         NI->setMetadata(LLVMContext::MD_alias_scope, M);
912     }
913 
914     if (MDNode *M = NI->getMetadata(LLVMContext::MD_noalias)) {
915       MDNode *NewMD = MDMap[M];
916       // If the call site also had noalias metadata (a list of scopes with
917       // which instructions inside it don't alias), propagate those scopes to
918       // the inlined instructions.
919       if (MDNode *CSM =
920               CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
921         NewMD = MDNode::concatenate(NewMD, CSM);
922       NI->setMetadata(LLVMContext::MD_noalias, NewMD);
923     } else if (NI->mayReadOrWriteMemory()) {
924       if (MDNode *M = CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
925         NI->setMetadata(LLVMContext::MD_noalias, M);
926     }
927   }
928 }
929 
930 /// If the inlined function has noalias arguments,
931 /// then add new alias scopes for each noalias argument, tag the mapped noalias
932 /// parameters with noalias metadata specifying the new scope, and tag all
933 /// non-derived loads, stores and memory intrinsics with the new alias scopes.
934 static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap,
935                                   const DataLayout &DL, AAResults *CalleeAAR) {
936   if (!EnableNoAliasConversion)
937     return;
938 
939   const Function *CalledFunc = CS.getCalledFunction();
940   SmallVector<const Argument *, 4> NoAliasArgs;
941 
942   for (const Argument &Arg : CalledFunc->args())
943     if (CS.paramHasAttr(Arg.getArgNo(), Attribute::NoAlias) && !Arg.use_empty())
944       NoAliasArgs.push_back(&Arg);
945 
946   if (NoAliasArgs.empty())
947     return;
948 
949   // To do a good job, if a noalias variable is captured, we need to know if
950   // the capture point dominates the particular use we're considering.
951   DominatorTree DT;
952   DT.recalculate(const_cast<Function&>(*CalledFunc));
953 
954   // noalias indicates that pointer values based on the argument do not alias
955   // pointer values which are not based on it. So we add a new "scope" for each
956   // noalias function argument. Accesses using pointers based on that argument
957   // become part of that alias scope, accesses using pointers not based on that
958   // argument are tagged as noalias with that scope.
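  //
  // For illustration only (hypothetical callee): given
  //
  //   define void @f(i32* noalias %p, i32* %q)
  //
  // a store through %p is tagged with '!alias.scope' naming %p's new scope,
  // while a load through %q (not derived from %p) is tagged '!noalias' with
  // that same scope, so the caller's AA can keep the two accesses disjoint.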
959 
960   DenseMap<const Argument *, MDNode *> NewScopes;
961   MDBuilder MDB(CalledFunc->getContext());
962 
963   // Create a new scope domain for this function.
964   MDNode *NewDomain =
965     MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
966   for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
967     const Argument *A = NoAliasArgs[i];
968 
969     std::string Name = std::string(CalledFunc->getName());
970     if (A->hasName()) {
971       Name += ": %";
972       Name += A->getName();
973     } else {
974       Name += ": argument ";
975       Name += utostr(i);
976     }
977 
978     // Note: We always create a new anonymous root here. This is true regardless
979     // of the linkage of the callee because the aliasing "scope" is not just a
980     // property of the callee, but also all control dependencies in the caller.
981     MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
982     NewScopes.insert(std::make_pair(A, NewScope));
983   }
984 
985   // Iterate over all new instructions in the map; for all memory-access
986   // instructions, add the alias scope metadata.
987   for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
988        VMI != VMIE; ++VMI) {
989     if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
990       if (!VMI->second)
991         continue;
992 
993       Instruction *NI = dyn_cast<Instruction>(VMI->second);
994       if (!NI)
995         continue;
996 
997       bool IsArgMemOnlyCall = false, IsFuncCall = false;
998       SmallVector<const Value *, 2> PtrArgs;
999 
1000       if (const LoadInst *LI = dyn_cast<LoadInst>(I))
1001         PtrArgs.push_back(LI->getPointerOperand());
1002       else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
1003         PtrArgs.push_back(SI->getPointerOperand());
1004       else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
1005         PtrArgs.push_back(VAAI->getPointerOperand());
1006       else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
1007         PtrArgs.push_back(CXI->getPointerOperand());
1008       else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
1009         PtrArgs.push_back(RMWI->getPointerOperand());
1010       else if (const auto *Call = dyn_cast<CallBase>(I)) {
1011         // If we know that the call does not access memory, then we'll still
1012         // know that about the inlined clone of this call site, and we don't
1013         // need to add metadata.
1014         if (Call->doesNotAccessMemory())
1015           continue;
1016 
1017         IsFuncCall = true;
1018         if (CalleeAAR) {
1019           FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(Call);
1020           if (AAResults::onlyAccessesArgPointees(MRB))
1021             IsArgMemOnlyCall = true;
1022         }
1023 
1024         for (Value *Arg : Call->args()) {
1025           // We need to check the underlying objects of all arguments, not just
1026           // the pointer arguments, because we might be passing pointers as
1027           // integers, etc.
1028           // However, if we know that the call only accesses pointer arguments,
1029           // then we only need to check the pointer arguments.
1030           if (IsArgMemOnlyCall && !Arg->getType()->isPointerTy())
1031             continue;
1032 
1033           PtrArgs.push_back(Arg);
1034         }
1035       }
1036 
1037       // If we found no pointers, then this instruction is not suitable for
1038       // pairing with an instruction to receive aliasing metadata.
1039       // However, if this is a call, then we might just alias with none of the
1040       // noalias arguments.
1041       if (PtrArgs.empty() && !IsFuncCall)
1042         continue;
1043 
1044       // It is possible that there is only one underlying object, but you
1045       // need to go through several PHIs to see it, and thus it could be
1046       // repeated in the Objects list.
1047       SmallPtrSet<const Value *, 4> ObjSet;
1048       SmallVector<Metadata *, 4> Scopes, NoAliases;
1049 
1050       SmallSetVector<const Argument *, 4> NAPtrArgs;
1051       for (const Value *V : PtrArgs) {
1052         SmallVector<const Value *, 4> Objects;
1053         GetUnderlyingObjects(V, Objects, DL, /* LI = */ nullptr);
1054 
1055         for (const Value *O : Objects)
1056           ObjSet.insert(O);
1057       }
1058 
1059       // Figure out if we're derived from anything that is not a noalias
1060       // argument.
1061       bool CanDeriveViaCapture = false, UsesAliasingPtr = false;
1062       for (const Value *V : ObjSet) {
1063         // Is this value a constant that cannot be derived from any pointer
1064         // value (we need to exclude constant expressions, for example, that
1065         // are formed from arithmetic on global symbols).
1066         bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
1067                              isa<ConstantPointerNull>(V) ||
1068                              isa<ConstantDataVector>(V) || isa<UndefValue>(V);
1069         if (IsNonPtrConst)
1070           continue;
1071 
1072         // If this is anything other than a noalias argument, then we cannot
1073         // completely describe the aliasing properties using alias.scope
1074         // metadata (and, thus, won't add any).
1075         if (const Argument *A = dyn_cast<Argument>(V)) {
1076           if (!CS.paramHasAttr(A->getArgNo(), Attribute::NoAlias))
1077             UsesAliasingPtr = true;
1078         } else {
1079           UsesAliasingPtr = true;
1080         }
1081 
1082         // If this is not some identified function-local object (which cannot
1083         // directly alias a noalias argument), or some other argument (which,
1084         // by definition, also cannot alias a noalias argument), then we could
1085         // alias a noalias argument that has been captured.
1086         if (!isa<Argument>(V) &&
1087             !isIdentifiedFunctionLocal(const_cast<Value*>(V)))
1088           CanDeriveViaCapture = true;
1089       }
1090 
1091       // A function call can always get captured noalias pointers (via other
1092       // parameters, globals, etc.).
1093       if (IsFuncCall && !IsArgMemOnlyCall)
1094         CanDeriveViaCapture = true;
1095 
1096       // First, we want to figure out all of the sets with which we definitely
1097       // don't alias. Iterate over all noalias sets, and add those for which:
1098       //   1. The noalias argument is not in the set of objects from which we
1099       //      definitely derive.
1100       //   2. The noalias argument has not yet been captured.
1101       // An arbitrary function that might load pointers could see captured
1102       // noalias arguments via other noalias arguments or globals, and so we
1103       // must always check for prior capture.
1104       for (const Argument *A : NoAliasArgs) {
1105         if (!ObjSet.count(A) && (!CanDeriveViaCapture ||
1106                                  // It might be tempting to skip the
1107                                  // PointerMayBeCapturedBefore check if
1108                                  // A->hasNoCaptureAttr() is true, but this is
1109                                  // incorrect because nocapture only guarantees
1110                                  // that no copies outlive the function, not
1111                                  // that the value cannot be locally captured.
1112                                  !PointerMayBeCapturedBefore(A,
1113                                    /* ReturnCaptures */ false,
1114                                    /* StoreCaptures */ false, I, &DT)))
1115           NoAliases.push_back(NewScopes[A]);
1116       }
1117 
1118       if (!NoAliases.empty())
1119         NI->setMetadata(LLVMContext::MD_noalias,
1120                         MDNode::concatenate(
1121                             NI->getMetadata(LLVMContext::MD_noalias),
1122                             MDNode::get(CalledFunc->getContext(), NoAliases)));
1123 
1124       // Next, we want to figure out all of the sets to which we might belong.
1125       // We might belong to a set if the noalias argument is in the set of
1126       // underlying objects. If there is some non-noalias argument in our list
1127       // of underlying objects, then we cannot add a scope because the fact
1128       // that some access does not alias with any set of our noalias arguments
1129       // cannot itself guarantee that it does not alias with this access
1130       // (because there is some pointer of unknown origin involved and the
1131       // other access might also depend on this pointer). We also cannot add
1132       // scopes to arbitrary functions unless we know they don't access any
1133       // non-parameter pointer-values.
1134       bool CanAddScopes = !UsesAliasingPtr;
1135       if (CanAddScopes && IsFuncCall)
1136         CanAddScopes = IsArgMemOnlyCall;
1137 
1138       if (CanAddScopes)
1139         for (const Argument *A : NoAliasArgs) {
1140           if (ObjSet.count(A))
1141             Scopes.push_back(NewScopes[A]);
1142         }
1143 
1144       if (!Scopes.empty())
1145         NI->setMetadata(
1146             LLVMContext::MD_alias_scope,
1147             MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
1148                                 MDNode::get(CalledFunc->getContext(), Scopes)));
1149     }
1150   }
1151 }
1152 
1153 static bool MayContainThrowingOrExitingCall(Instruction *Begin,
1154                                             Instruction *End) {
1155 
1156   assert(Begin->getParent() == End->getParent() &&
1157          "Expected to be in same basic block!");
1158   unsigned NumInstChecked = 0;
1159   // Check that all instructions in the range [Begin, End) are guaranteed to
1160   // transfer execution to successor.
1161   for (auto &I : make_range(Begin->getIterator(), End->getIterator()))
1162     if (NumInstChecked++ > InlinerAttributeWindow ||
1163         !isGuaranteedToTransferExecutionToSuccessor(&I))
1164       return true;
1165   return false;
1166 }
1167 
1168 static AttrBuilder IdentifyValidAttributes(CallSite CS) {
1169 
1170   AttrBuilder AB(CS.getAttributes(), AttributeList::ReturnIndex);
1171   if (AB.empty())
1172     return AB;
1173   AttrBuilder Valid;
1174   // Only allow these whitelisted attributes to be propagated back to the
1175   // callee. This is because other attributes may only be valid on the call
1176   // itself, i.e. attributes such as signext and zeroext.
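  // For example (hypothetical call site): given
  //   %r = call nonnull dereferenceable(16) i8* @callee()
  // both 'nonnull' and 'dereferenceable(16)' are copied into Valid, whereas
  // call-ABI attributes such as signext/zeroext are deliberately left out.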
1177   if (auto DerefBytes = AB.getDereferenceableBytes())
1178     Valid.addDereferenceableAttr(DerefBytes);
1179   if (auto DerefOrNullBytes = AB.getDereferenceableOrNullBytes())
1180     Valid.addDereferenceableOrNullAttr(DerefOrNullBytes);
1181   if (AB.contains(Attribute::NoAlias))
1182     Valid.addAttribute(Attribute::NoAlias);
1183   if (AB.contains(Attribute::NonNull))
1184     Valid.addAttribute(Attribute::NonNull);
1185   return Valid;
1186 }
1187 
1188 static void AddReturnAttributes(CallSite CS, ValueToValueMapTy &VMap) {
1189   if (!UpdateReturnAttributes && !UpdateLoadMetadataDuringInlining)
1190     return;
1191 
1192   AttrBuilder Valid = IdentifyValidAttributes(CS);
1193   if (Valid.empty())
1194     return;
1195   auto *CalledFunction = CS.getCalledFunction();
1196   auto &Context = CalledFunction->getContext();
1197 
1198   auto getExpectedRV = [&](Value *V) -> Instruction * {
1199     if (UpdateReturnAttributes && isa<CallBase>(V))
1200       return dyn_cast_or_null<CallBase>(VMap.lookup(V));
1201     if (UpdateLoadMetadataDuringInlining && isa<LoadInst>(V))
1202       return dyn_cast_or_null<LoadInst>(VMap.lookup(V));
1203     return nullptr;
1204   };
1205 
  MDBuilder MDB(Context);
1207   auto CreateMDNode = [&](uint64_t Num) -> MDNode * {
1208     auto *Int = ConstantInt::get(Type::getInt64Ty(Context), Num);
1209     return MDNode::get(Context, MDB.createConstant(Int));
1210   };
1211 
1212   for (auto &BB : *CalledFunction) {
1213     auto *RI = dyn_cast<ReturnInst>(BB.getTerminator());
1214     if (!RI)
1215       continue;
    // Sanity check that the cloned RetVal exists and is a call or a load;
    // otherwise we cannot add the attributes or metadata on the cloned
    // RetVal. Simplification during inlining could have transformed the
    // cloned instruction.
1220     auto *NewRetVal = getExpectedRV(RI->getOperand(0));
1221     if (!NewRetVal)
1222       continue;
1223     auto *RetVal = cast<Instruction>(RI->getOperand(0));
1224     // Backward propagation of attributes to the returned value may be incorrect
1225     // if it is control flow dependent.
1226     // Consider:
1227     // @callee {
1228     //  %rv = call @foo()
1229     //  %rv2 = call @bar()
1230     //  if (%rv2 != null)
1231     //    return %rv2
1232     //  if (%rv == null)
1233     //    exit()
1234     //  return %rv
1235     // }
1236     // caller() {
1237     //   %val = call nonnull @callee()
1238     // }
    // Here we cannot add the nonnull attribute on either foo or bar. So, we
    // only propagate when RetVal and RI are in the same basic block and there
    // are no throwing/exiting instructions between them.
1242     if (RI->getParent() != RetVal->getParent() ||
1243         MayContainThrowingOrExitingCall(RetVal, RI))
1244       continue;
1245     // Add to the existing attributes of NewRetVal, i.e. the cloned call
1246     // instruction.
1247     // NB! When we have the same attribute already existing on NewRetVal, but
1248     // with a differing value, the AttributeList's merge API honours the already
1249     // existing attribute value (i.e. attributes such as dereferenceable,
1250     // dereferenceable_or_null etc). See AttrBuilder::merge for more details.
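    // Illustrative example: for a call site
    //   %v = call nonnull dereferenceable(16) i8* @callee()
    // a cloned call feeding the return gets the nonnull and dereferenceable
    // return attributes added below, while a cloned load gets the equivalent
    // !nonnull and !dereferenceable metadata instead.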
1251     if (auto *CB = dyn_cast<CallBase>(NewRetVal)) {
1252       AttributeList AL = CB->getAttributes();
1253       AttributeList NewAL =
1254           AL.addAttributes(Context, AttributeList::ReturnIndex, Valid);
1255       CB->setAttributes(NewAL);
1256     } else {
1257       auto *NewLI = cast<LoadInst>(NewRetVal);
1258       if (CS.isReturnNonNull())
1259         NewLI->setMetadata(LLVMContext::MD_nonnull, CreateMDNode(1));
1260       // If the load already has a dereferenceable/dereferenceable_or_null
1261       // metadata, we should honour it.
1262       if (uint64_t DerefBytes = Valid.getDereferenceableBytes())
        if (!NewLI->getMetadata(LLVMContext::MD_dereferenceable))
          NewLI->setMetadata(LLVMContext::MD_dereferenceable,
                             CreateMDNode(DerefBytes));
      if (uint64_t DerefOrNullBytes = Valid.getDereferenceableOrNullBytes())
        if (!NewLI->getMetadata(LLVMContext::MD_dereferenceable_or_null))
          NewLI->setMetadata(LLVMContext::MD_dereferenceable_or_null,
                             CreateMDNode(DerefOrNullBytes));
1270     }
1271 
1272   }
1273 }
1274 
1275 /// If the inlined function has non-byval align arguments, then
1276 /// add @llvm.assume-based alignment assumptions to preserve this information.
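/// For example (illustrative; the exact IR depends on how alignment
/// assumptions are encoded), an incoming 'align 16' pointer argument may
/// produce something like:
///   %ptrint = ptrtoint i8* %arg to i64
///   %maskedptr = and i64 %ptrint, 15
///   %maskcond = icmp eq i64 %maskedptr, 0
///   call void @llvm.assume(i1 %maskcond)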
1277 static void AddAlignmentAssumptions(CallSite CS, InlineFunctionInfo &IFI) {
1278   if (!PreserveAlignmentAssumptions || !IFI.GetAssumptionCache)
1279     return;
1280 
1281   AssumptionCache *AC = &(*IFI.GetAssumptionCache)(*CS.getCaller());
1282   auto &DL = CS.getCaller()->getParent()->getDataLayout();
1283 
1284   // To avoid inserting redundant assumptions, we should check for assumptions
1285   // already in the caller. To do this, we might need a DT of the caller.
1286   DominatorTree DT;
1287   bool DTCalculated = false;
1288 
1289   Function *CalledFunc = CS.getCalledFunction();
1290   for (Argument &Arg : CalledFunc->args()) {
1291     unsigned Align = Arg.getType()->isPointerTy() ? Arg.getParamAlignment() : 0;
1292     if (Align && !Arg.hasByValOrInAllocaAttr() && !Arg.hasNUses(0)) {
1293       if (!DTCalculated) {
1294         DT.recalculate(*CS.getCaller());
1295         DTCalculated = true;
1296       }
1297 
1298       // If we can already prove the asserted alignment in the context of the
1299       // caller, then don't bother inserting the assumption.
1300       Value *ArgVal = CS.getArgument(Arg.getArgNo());
1301       if (getKnownAlignment(ArgVal, DL, CS.getInstruction(), AC, &DT) >= Align)
1302         continue;
1303 
1304       CallInst *NewAsmp = IRBuilder<>(CS.getInstruction())
1305                               .CreateAlignmentAssumption(DL, ArgVal, Align);
1306       AC->registerAssumption(NewAsmp);
1307     }
1308   }
1309 }
1310 
1311 /// Once we have cloned code over from a callee into the caller,
1312 /// update the specified callgraph to reflect the changes we made.
1313 /// Note that it's possible that not all code was copied over, so only
1314 /// some edges of the callgraph may remain.
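/// For example, if A inlines a call to B and B contained a call to C, the
/// cloned call to C (now in A) gets a new A -> C edge, and the A -> B edge
/// for the inlined call site is removed at the end.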
1315 static void UpdateCallGraphAfterInlining(CallSite CS,
1316                                          Function::iterator FirstNewBlock,
1317                                          ValueToValueMapTy &VMap,
1318                                          InlineFunctionInfo &IFI) {
1319   CallGraph &CG = *IFI.CG;
1320   const Function *Caller = CS.getCaller();
1321   const Function *Callee = CS.getCalledFunction();
1322   CallGraphNode *CalleeNode = CG[Callee];
1323   CallGraphNode *CallerNode = CG[Caller];
1324 
  // Since the callee's call sites have been cloned into the caller, add edges
  // from the caller to all of the callees of the callee.
1327   CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();
1328 
1329   // Consider the case where CalleeNode == CallerNode.
1330   CallGraphNode::CalledFunctionsVector CallCache;
1331   if (CalleeNode == CallerNode) {
1332     CallCache.assign(I, E);
1333     I = CallCache.begin();
1334     E = CallCache.end();
1335   }
1336 
1337   for (; I != E; ++I) {
1338     const Value *OrigCall = I->first;
1339 
1340     ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
1341     // Only copy the edge if the call was inlined!
1342     if (VMI == VMap.end() || VMI->second == nullptr)
1343       continue;
1344 
1345     // If the call was inlined, but then constant folded, there is no edge to
1346     // add.  Check for this case.
1347     auto *NewCall = dyn_cast<CallBase>(VMI->second);
1348     if (!NewCall)
1349       continue;
1350 
1351     // We do not treat intrinsic calls like real function calls because we
1352     // expect them to become inline code; do not add an edge for an intrinsic.
1353     if (NewCall->getCalledFunction() &&
1354         NewCall->getCalledFunction()->isIntrinsic())
1355       continue;
1356 
1357     // Remember that this call site got inlined for the client of
1358     // InlineFunction.
1359     IFI.InlinedCalls.push_back(NewCall);
1360 
1361     // It's possible that inlining the callsite will cause it to go from an
1362     // indirect to a direct call by resolving a function pointer.  If this
1363     // happens, set the callee of the new call site to a more precise
1364     // destination.  This can also happen if the call graph node of the caller
1365     // was just unnecessarily imprecise.
1366     if (!I->second->getFunction())
1367       if (Function *F = NewCall->getCalledFunction()) {
1368         // Indirect call site resolved to direct call.
1369         CallerNode->addCalledFunction(NewCall, CG[F]);
1370 
1371         continue;
1372       }
1373 
1374     CallerNode->addCalledFunction(NewCall, I->second);
1375   }
1376 
1377   // Update the call graph by deleting the edge from Callee to Caller.  We must
1378   // do this after the loop above in case Caller and Callee are the same.
1379   CallerNode->removeCallEdgeFor(*cast<CallBase>(CS.getInstruction()));
1380 }
1381 
1382 static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
1383                                     BasicBlock *InsertBlock,
1384                                     InlineFunctionInfo &IFI) {
1385   Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
1386   IRBuilder<> Builder(InsertBlock, InsertBlock->begin());
1387 
1388   Value *Size = Builder.getInt64(M->getDataLayout().getTypeStoreSize(AggTy));
1389 
1390   // Always generate a memcpy of alignment 1 here because we don't know
1391   // the alignment of the src pointer.  Other optimizations can infer
1392   // better alignment.
1393   Builder.CreateMemCpy(Dst, /*DstAlign*/ Align(1), Src,
1394                        /*SrcAlign*/ Align(1), Size);
1395 }
1396 
1397 /// When inlining a call site that has a byval argument,
1398 /// we have to make the implicit memcpy explicit by adding it.
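/// For example (illustrative), inlining a call such as
///   call void @f(%struct.S* byval align 4 %p)
/// normally creates a fresh alloca of %struct.S in the caller's entry block
/// and returns it here; HandleByValArgumentInit then emits a memcpy from %p
/// into that alloca, so the inlined body mutates the copy rather than the
/// caller's memory. If the callee only reads memory, the copy can be elided.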
1399 static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
1400                                   const Function *CalledFunc,
1401                                   InlineFunctionInfo &IFI,
1402                                   unsigned ByValAlignment) {
1403   PointerType *ArgTy = cast<PointerType>(Arg->getType());
1404   Type *AggTy = ArgTy->getElementType();
1405 
1406   Function *Caller = TheCall->getFunction();
1407   const DataLayout &DL = Caller->getParent()->getDataLayout();
1408 
1409   // If the called function is readonly, then it could not mutate the caller's
1410   // copy of the byval'd memory.  In this case, it is safe to elide the copy and
1411   // temporary.
1412   if (CalledFunc->onlyReadsMemory()) {
1413     // If the byval argument has a specified alignment that is greater than the
1414     // passed in pointer, then we either have to round up the input pointer or
1415     // give up on this transformation.
1416     if (ByValAlignment <= 1)  // 0 = unspecified, 1 = no particular alignment.
1417       return Arg;
1418 
1419     AssumptionCache *AC =
1420         IFI.GetAssumptionCache ? &(*IFI.GetAssumptionCache)(*Caller) : nullptr;
1421 
1422     // If the pointer is already known to be sufficiently aligned, or if we can
1423     // round it up to a larger alignment, then we don't need a temporary.
1424     if (getOrEnforceKnownAlignment(Arg, ByValAlignment, DL, TheCall, AC) >=
1425         ByValAlignment)
1426       return Arg;
1427 
1428     // Otherwise, we have to make a memcpy to get a safe alignment.  This is bad
1429     // for code quality, but rarely happens and is required for correctness.
1430   }
1431 
  // Create the alloca with the type's preferred alignment from DataLayout.
1433   Align Alignment(DL.getPrefTypeAlignment(AggTy));
1434 
1435   // If the byval had an alignment specified, we *must* use at least that
1436   // alignment, as it is required by the byval argument (and uses of the
1437   // pointer inside the callee).
1438   Alignment = max(Alignment, MaybeAlign(ByValAlignment));
1439 
1440   Value *NewAlloca =
1441       new AllocaInst(AggTy, DL.getAllocaAddrSpace(), nullptr, Alignment,
1442                      Arg->getName(), &*Caller->begin()->begin());
1443   IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));
1444 
1445   // Uses of the argument in the function should use our new alloca
1446   // instead.
1447   return NewAlloca;
1448 }
1449 
1450 // Check whether this Value is used by a lifetime intrinsic.
1451 static bool isUsedByLifetimeMarker(Value *V) {
1452   for (User *U : V->users())
1453     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U))
1454       if (II->isLifetimeStartOrEnd())
1455         return true;
1456   return false;
1457 }
1458 
1459 // Check whether the given alloca already has
1460 // lifetime.start or lifetime.end intrinsics.
1461 static bool hasLifetimeMarkers(AllocaInst *AI) {
1462   Type *Ty = AI->getType();
1463   Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
1464                                        Ty->getPointerAddressSpace());
1465   if (Ty == Int8PtrTy)
1466     return isUsedByLifetimeMarker(AI);
1467 
1468   // Do a scan to find all the casts to i8*.
1469   for (User *U : AI->users()) {
1470     if (U->getType() != Int8PtrTy) continue;
1471     if (U->stripPointerCasts() != AI) continue;
1472     if (isUsedByLifetimeMarker(U))
1473       return true;
1474   }
1475   return false;
1476 }
1477 
1478 /// Return the result of AI->isStaticAlloca() if AI were moved to the entry
1479 /// block. Allocas used in inalloca calls and allocas of dynamic array size
1480 /// cannot be static.
static bool allocaWouldBeStaticInEntry(const AllocaInst *AI) {
1482   return isa<Constant>(AI->getArraySize()) && !AI->isUsedWithInAlloca();
1483 }
1484 
1485 /// Returns a DebugLoc for a new DILocation which is a clone of \p OrigDL
1486 /// inlined at \p InlinedAt. \p IANodes is an inlined-at cache.
1487 static DebugLoc inlineDebugLoc(DebugLoc OrigDL, DILocation *InlinedAt,
1488                                LLVMContext &Ctx,
1489                                DenseMap<const MDNode *, MDNode *> &IANodes) {
1490   auto IA = DebugLoc::appendInlinedAt(OrigDL, InlinedAt, Ctx, IANodes);
1491   return DebugLoc::get(OrigDL.getLine(), OrigDL.getCol(), OrigDL.getScope(),
1492                        IA);
1493 }
1494 
/// Update inlined instructions' line numbers to
/// encode the location where these instructions are inlined.
1497 static void fixupLineNumbers(Function *Fn, Function::iterator FI,
1498                              Instruction *TheCall, bool CalleeHasDebugInfo) {
1499   const DebugLoc &TheCallDL = TheCall->getDebugLoc();
1500   if (!TheCallDL)
1501     return;
1502 
1503   auto &Ctx = Fn->getContext();
1504   DILocation *InlinedAtNode = TheCallDL;
1505 
1506   // Create a unique call site, not to be confused with any other call from the
1507   // same location.
1508   InlinedAtNode = DILocation::getDistinct(
1509       Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
1510       InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());
1511 
  // Cache the inlined-at nodes as they're built so they are reused; without
  // this, every instruction's inlined-at chain would become distinct from
  // every other's.
1515   DenseMap<const MDNode *, MDNode *> IANodes;
1516 
1517   // Check if we are not generating inline line tables and want to use
1518   // the call site location instead.
1519   bool NoInlineLineTables = Fn->hasFnAttribute("no-inline-line-tables");
1520 
1521   for (; FI != Fn->end(); ++FI) {
1522     for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
1523          BI != BE; ++BI) {
1524       // Loop metadata needs to be updated so that the start and end locs
1525       // reference inlined-at locations.
1526       auto updateLoopInfoLoc = [&Ctx, &InlinedAtNode, &IANodes](
1527                                    const DILocation &Loc) -> DILocation * {
1528         return inlineDebugLoc(&Loc, InlinedAtNode, Ctx, IANodes).get();
1529       };
1530       updateLoopMetadataDebugLocations(*BI, updateLoopInfoLoc);
1531 
1532       if (!NoInlineLineTables)
1533         if (DebugLoc DL = BI->getDebugLoc()) {
1534           DebugLoc IDL =
1535               inlineDebugLoc(DL, InlinedAtNode, BI->getContext(), IANodes);
1536           BI->setDebugLoc(IDL);
1537           continue;
1538         }
1539 
1540       if (CalleeHasDebugInfo && !NoInlineLineTables)
1541         continue;
1542 
1543       // If the inlined instruction has no line number, or if inline info
1544       // is not being generated, make it look as if it originates from the call
1545       // location. This is important for ((__always_inline, __nodebug__))
1546       // functions which must use caller location for all instructions in their
1547       // function body.
1548 
1549       // Don't update static allocas, as they may get moved later.
1550       if (auto *AI = dyn_cast<AllocaInst>(BI))
1551         if (allocaWouldBeStaticInEntry(AI))
1552           continue;
1553 
1554       BI->setDebugLoc(TheCallDL);
1555     }
1556 
1557     // Remove debug info intrinsics if we're not keeping inline info.
1558     if (NoInlineLineTables) {
1559       BasicBlock::iterator BI = FI->begin();
1560       while (BI != FI->end()) {
1561         if (isa<DbgInfoIntrinsic>(BI)) {
1562           BI = BI->eraseFromParent();
1563           continue;
1564         }
1565         ++BI;
1566       }
1567     }
1568 
1569   }
1570 }
1571 
1572 /// Update the block frequencies of the caller after a callee has been inlined.
1573 ///
1574 /// Each block cloned into the caller has its block frequency scaled by the
1575 /// ratio of CallSiteFreq/CalleeEntryFreq. This ensures that the cloned copy of
1576 /// callee's entry block gets the same frequency as the callsite block and the
1577 /// relative frequencies of all cloned blocks remain the same after cloning.
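/// For example, if the callsite block has frequency 200 and the callee's
/// entry block has frequency 50, every cloned block's frequency is scaled by
/// a factor of 4.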
1578 static void updateCallerBFI(BasicBlock *CallSiteBlock,
1579                             const ValueToValueMapTy &VMap,
1580                             BlockFrequencyInfo *CallerBFI,
1581                             BlockFrequencyInfo *CalleeBFI,
1582                             const BasicBlock &CalleeEntryBlock) {
1583   SmallPtrSet<BasicBlock *, 16> ClonedBBs;
1584   for (auto Entry : VMap) {
1585     if (!isa<BasicBlock>(Entry.first) || !Entry.second)
1586       continue;
1587     auto *OrigBB = cast<BasicBlock>(Entry.first);
1588     auto *ClonedBB = cast<BasicBlock>(Entry.second);
1589     uint64_t Freq = CalleeBFI->getBlockFreq(OrigBB).getFrequency();
1590     if (!ClonedBBs.insert(ClonedBB).second) {
1591       // Multiple blocks in the callee might get mapped to one cloned block in
1592       // the caller since we prune the callee as we clone it. When that happens,
1593       // we want to use the maximum among the original blocks' frequencies.
1594       uint64_t NewFreq = CallerBFI->getBlockFreq(ClonedBB).getFrequency();
1595       if (NewFreq > Freq)
1596         Freq = NewFreq;
1597     }
1598     CallerBFI->setBlockFreq(ClonedBB, Freq);
1599   }
1600   BasicBlock *EntryClone = cast<BasicBlock>(VMap.lookup(&CalleeEntryBlock));
1601   CallerBFI->setBlockFreqAndScale(
1602       EntryClone, CallerBFI->getBlockFreq(CallSiteBlock).getFrequency(),
1603       ClonedBBs);
1604 }
1605 
1606 /// Update the branch metadata for cloned call instructions.
1607 static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap,
1608                               const ProfileCount &CalleeEntryCount,
1609                               const Instruction *TheCall,
1610                               ProfileSummaryInfo *PSI,
1611                               BlockFrequencyInfo *CallerBFI) {
1612   if (!CalleeEntryCount.hasValue() || CalleeEntryCount.isSynthetic() ||
1613       CalleeEntryCount.getCount() < 1)
1614     return;
1615   auto CallSiteCount = PSI ? PSI->getProfileCount(TheCall, CallerBFI) : None;
1616   int64_t CallCount =
1617       std::min(CallSiteCount.hasValue() ? CallSiteCount.getValue() : 0,
1618                CalleeEntryCount.getCount());
1619   updateProfileCallee(Callee, -CallCount, &VMap);
1620 }
1621 
1622 void llvm::updateProfileCallee(
1623     Function *Callee, int64_t entryDelta,
1624     const ValueMap<const Value *, WeakTrackingVH> *VMap) {
1625   auto CalleeCount = Callee->getEntryCount();
1626   if (!CalleeCount.hasValue())
1627     return;
1628 
1629   uint64_t priorEntryCount = CalleeCount.getCount();
1630   uint64_t newEntryCount;
1631 
  // Since CallSiteCount is an estimate, it could exceed the original callee
  // count; clamp the new count to 0 to guard against underflow.
1634   if (entryDelta < 0 && static_cast<uint64_t>(-entryDelta) > priorEntryCount)
1635     newEntryCount = 0;
1636   else
1637     newEntryCount = priorEntryCount + entryDelta;
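  // Worked example (illustrative): priorEntryCount = 100 and entryDelta = -30
  // give newEntryCount = 70; the cloned calls below are then scaled by 30/100
  // and the calls remaining in the callee by 70/100.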
1638 
  // If we are updating counts during inlining (i.e. a VMap was provided),
  // scale the profile weights of the cloned call sites accordingly.
1640   if (VMap) {
1641     uint64_t cloneEntryCount = priorEntryCount - newEntryCount;
1642     for (auto Entry : *VMap)
1643       if (isa<CallInst>(Entry.first))
1644         if (auto *CI = dyn_cast_or_null<CallInst>(Entry.second))
1645           CI->updateProfWeight(cloneEntryCount, priorEntryCount);
1646   }
1647 
1648   if (entryDelta) {
1649     Callee->setEntryCount(newEntryCount);
1650 
1651     for (BasicBlock &BB : *Callee)
1652       // No need to update the callsite if it is pruned during inlining.
1653       if (!VMap || VMap->count(&BB))
1654         for (Instruction &I : BB)
1655           if (CallInst *CI = dyn_cast<CallInst>(&I))
1656             CI->updateProfWeight(newEntryCount, priorEntryCount);
1657   }
1658 }
1659 
1660 /// This function inlines the called function into the basic block of the
1661 /// caller. This returns false if it is not possible to inline this call.
/// The program is still in a well-defined state if this occurs though.
1663 ///
1664 /// Note that this only does one level of inlining.  For example, if the
1665 /// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
1666 /// exists in the instruction stream.  Similarly this will inline a recursive
1667 /// function by one level.
1668 llvm::InlineResult llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
1669                                         AAResults *CalleeAAR,
1670                                         bool InsertLifetime,
1671                                         Function *ForwardVarArgsTo) {
1672   Instruction *TheCall = CS.getInstruction();
1673   assert(TheCall->getParent() && TheCall->getFunction()
1674          && "Instruction not in function!");
1675 
1676   // FIXME: we don't inline callbr yet.
1677   if (isa<CallBrInst>(TheCall))
1678     return InlineResult::failure("We don't inline callbr yet.");
1679 
1680   // If IFI has any state in it, zap it before we fill it in.
1681   IFI.reset();
1682 
1683   Function *CalledFunc = CS.getCalledFunction();
1684   if (!CalledFunc ||               // Can't inline external function or indirect
1685       CalledFunc->isDeclaration()) // call!
1686     return InlineResult::failure("external or indirect");
1687 
1688   // The inliner does not know how to inline through calls with operand bundles
1689   // in general ...
1690   if (CS.hasOperandBundles()) {
1691     for (int i = 0, e = CS.getNumOperandBundles(); i != e; ++i) {
1692       uint32_t Tag = CS.getOperandBundleAt(i).getTagID();
1693       // ... but it knows how to inline through "deopt" operand bundles ...
1694       if (Tag == LLVMContext::OB_deopt)
1695         continue;
1696       // ... and "funclet" operand bundles.
1697       if (Tag == LLVMContext::OB_funclet)
1698         continue;
1699 
1700       return InlineResult::failure("unsupported operand bundle");
1701     }
1702   }
1703 
1704   // If the call to the callee cannot throw, set the 'nounwind' flag on any
1705   // calls that we inline.
1706   bool MarkNoUnwind = CS.doesNotThrow();
1707 
1708   BasicBlock *OrigBB = TheCall->getParent();
1709   Function *Caller = OrigBB->getParent();
1710 
1711   // GC poses two hazards to inlining, which only occur when the callee has GC:
1712   //  1. If the caller has no GC, then the callee's GC must be propagated to the
1713   //     caller.
1714   //  2. If the caller has a differing GC, it is invalid to inline.
1715   if (CalledFunc->hasGC()) {
1716     if (!Caller->hasGC())
1717       Caller->setGC(CalledFunc->getGC());
1718     else if (CalledFunc->getGC() != Caller->getGC())
1719       return InlineResult::failure("incompatible GC");
1720   }
1721 
1722   // Get the personality function from the callee if it contains a landing pad.
1723   Constant *CalledPersonality =
1724       CalledFunc->hasPersonalityFn()
1725           ? CalledFunc->getPersonalityFn()->stripPointerCasts()
1726           : nullptr;
1727 
1728   // Find the personality function used by the landing pads of the caller. If it
1729   // exists, then check to see that it matches the personality function used in
1730   // the callee.
1731   Constant *CallerPersonality =
1732       Caller->hasPersonalityFn()
1733           ? Caller->getPersonalityFn()->stripPointerCasts()
1734           : nullptr;
1735   if (CalledPersonality) {
1736     if (!CallerPersonality)
1737       Caller->setPersonalityFn(CalledPersonality);
1738     // If the personality functions match, then we can perform the
1739     // inlining. Otherwise, we can't inline.
1740     // TODO: This isn't 100% true. Some personality functions are proper
1741     //       supersets of others and can be used in place of the other.
1742     else if (CalledPersonality != CallerPersonality)
1743       return InlineResult::failure("incompatible personality");
1744   }
1745 
1746   // We need to figure out which funclet the callsite was in so that we may
1747   // properly nest the callee.
1748   Instruction *CallSiteEHPad = nullptr;
1749   if (CallerPersonality) {
1750     EHPersonality Personality = classifyEHPersonality(CallerPersonality);
1751     if (isScopedEHPersonality(Personality)) {
1752       Optional<OperandBundleUse> ParentFunclet =
1753           CS.getOperandBundle(LLVMContext::OB_funclet);
1754       if (ParentFunclet)
1755         CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());
1756 
1757       // OK, the inlining site is legal.  What about the target function?
1758 
1759       if (CallSiteEHPad) {
1760         if (Personality == EHPersonality::MSVC_CXX) {
1761           // The MSVC personality cannot tolerate catches getting inlined into
1762           // cleanup funclets.
1763           if (isa<CleanupPadInst>(CallSiteEHPad)) {
1764             // Ok, the call site is within a cleanuppad.  Let's check the callee
1765             // for catchpads.
1766             for (const BasicBlock &CalledBB : *CalledFunc) {
1767               if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))
1768                 return InlineResult::failure("catch in cleanup funclet");
1769             }
1770           }
1771         } else if (isAsynchronousEHPersonality(Personality)) {
          // SEH is even less tolerant; there may not be any sort of exceptional
1773           // funclet in the callee.
1774           for (const BasicBlock &CalledBB : *CalledFunc) {
1775             if (CalledBB.isEHPad())
1776               return InlineResult::failure("SEH in cleanup funclet");
1777           }
1778         }
1779       }
1780     }
1781   }
1782 
1783   // Determine if we are dealing with a call in an EHPad which does not unwind
1784   // to caller.
1785   bool EHPadForCallUnwindsLocally = false;
1786   if (CallSiteEHPad && CS.isCall()) {
1787     UnwindDestMemoTy FuncletUnwindMap;
1788     Value *CallSiteUnwindDestToken =
1789         getUnwindDestToken(CallSiteEHPad, FuncletUnwindMap);
1790 
1791     EHPadForCallUnwindsLocally =
1792         CallSiteUnwindDestToken &&
1793         !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
1794   }
1795 
1796   // Get an iterator to the last basic block in the function, which will have
1797   // the new function inlined after it.
1798   Function::iterator LastBlock = --Caller->end();
1799 
1800   // Make sure to capture all of the return instructions from the cloned
1801   // function.
1802   SmallVector<ReturnInst*, 8> Returns;
1803   ClonedCodeInfo InlinedFunctionInfo;
1804   Function::iterator FirstNewBlock;
1805 
1806   { // Scope to destroy VMap after cloning.
1807     ValueToValueMapTy VMap;
    // Keep a list of pairs (dst, src) to emit byval initializations.
1809     SmallVector<std::pair<Value*, Value*>, 4> ByValInit;
1810 
1811     auto &DL = Caller->getParent()->getDataLayout();
1812 
1813     // Calculate the vector of arguments to pass into the function cloner, which
1814     // matches up the formal to the actual argument values.
1815     CallSite::arg_iterator AI = CS.arg_begin();
1816     unsigned ArgNo = 0;
1817     for (Function::arg_iterator I = CalledFunc->arg_begin(),
1818          E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
1819       Value *ActualArg = *AI;
1820 
      // When byval arguments are actually inlined, we need to make the copy
      // implied by them explicit.  However, we don't do this if the callee is
      // readonly or readnone, because the copy would be unneeded: the callee
      // doesn't modify the struct.
1825       if (CS.isByValArgument(ArgNo)) {
1826         ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
1827                                         CalledFunc->getParamAlignment(ArgNo));
1828         if (ActualArg != *AI)
1829           ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI));
1830       }
1831 
1832       VMap[&*I] = ActualArg;
1833     }
1834 
1835     // TODO: Remove this when users have been updated to the assume bundles.
1836     // Add alignment assumptions if necessary. We do this before the inlined
1837     // instructions are actually cloned into the caller so that we can easily
1838     // check what will be known at the start of the inlined code.
1839     AddAlignmentAssumptions(CS, IFI);
1840 
    // Preserve all attributes on the call and its parameters.
1842     if (Instruction *Assume = buildAssumeFromInst(CS.getInstruction()))
1843       Assume->insertBefore(CS.getInstruction());
1844 
1845     // We want the inliner to prune the code as it copies.  We would LOVE to
1846     // have no dead or constant instructions leftover after inlining occurs
1847     // (which can happen, e.g., because an argument was constant), but we'll be
1848     // happy with whatever the cloner can do.
1849     CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
1850                               /*ModuleLevelChanges=*/false, Returns, ".i",
1851                               &InlinedFunctionInfo, TheCall);
1852     // Remember the first block that is newly cloned over.
1853     FirstNewBlock = LastBlock; ++FirstNewBlock;
1854 
1855     if (IFI.CallerBFI != nullptr && IFI.CalleeBFI != nullptr)
1856       // Update the BFI of blocks cloned into the caller.
1857       updateCallerBFI(OrigBB, VMap, IFI.CallerBFI, IFI.CalleeBFI,
1858                       CalledFunc->front());
1859 
1860     updateCallProfile(CalledFunc, VMap, CalledFunc->getEntryCount(), TheCall,
1861                       IFI.PSI, IFI.CallerBFI);
1862 
1863     // Inject byval arguments initialization.
1864     for (std::pair<Value*, Value*> &Init : ByValInit)
1865       HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(),
1866                               &*FirstNewBlock, IFI);
1867 
1868     Optional<OperandBundleUse> ParentDeopt =
1869         CS.getOperandBundle(LLVMContext::OB_deopt);
1870     if (ParentDeopt) {
1871       SmallVector<OperandBundleDef, 2> OpDefs;
1872 
1873       for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) {
1874         Instruction *I = dyn_cast_or_null<Instruction>(VH);
1875         if (!I) continue;  // instruction was DCE'd or RAUW'ed to undef
1876 
1877         OpDefs.clear();
1878 
1879         CallSite ICS(I);
1880         OpDefs.reserve(ICS.getNumOperandBundles());
1881 
1882         for (unsigned i = 0, e = ICS.getNumOperandBundles(); i < e; ++i) {
1883           auto ChildOB = ICS.getOperandBundleAt(i);
1884           if (ChildOB.getTagID() != LLVMContext::OB_deopt) {
1885             // If the inlined call has other operand bundles, let them be
1886             OpDefs.emplace_back(ChildOB);
1887             continue;
1888           }
1889 
1890           // It may be useful to separate this logic (of handling operand
1891           // bundles) out to a separate "policy" component if this gets crowded.
1892           // Prepend the parent's deoptimization continuation to the newly
1893           // inlined call's deoptimization continuation.
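          // For example (illustrative), if the parent call site carries
          //   "deopt"(i32 1, i32 2)
          // and the inlined call carries "deopt"(i32 3), the rewritten call
          // ends up with "deopt"(i32 1, i32 2, i32 3).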
1894           std::vector<Value *> MergedDeoptArgs;
1895           MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
1896                                   ChildOB.Inputs.size());
1897 
1898           MergedDeoptArgs.insert(MergedDeoptArgs.end(),
1899                                  ParentDeopt->Inputs.begin(),
1900                                  ParentDeopt->Inputs.end());
1901           MergedDeoptArgs.insert(MergedDeoptArgs.end(), ChildOB.Inputs.begin(),
1902                                  ChildOB.Inputs.end());
1903 
1904           OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs));
1905         }
1906 
1907         Instruction *NewI = nullptr;
1908         if (isa<CallInst>(I))
1909           NewI = CallInst::Create(cast<CallInst>(I), OpDefs, I);
1910         else if (isa<CallBrInst>(I))
1911           NewI = CallBrInst::Create(cast<CallBrInst>(I), OpDefs, I);
1912         else
1913           NewI = InvokeInst::Create(cast<InvokeInst>(I), OpDefs, I);
1914 
1915         // Note: the RAUW does the appropriate fixup in VMap, so we need to do
1916         // this even if the call returns void.
1917         I->replaceAllUsesWith(NewI);
1918 
1919         VH = nullptr;
1920         I->eraseFromParent();
1921       }
1922     }
1923 
1924     // Update the callgraph if requested.
1925     if (IFI.CG)
1926       UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);
1927 
1928     // For 'nodebug' functions, the associated DISubprogram is always null.
1929     // Conservatively avoid propagating the callsite debug location to
1930     // instructions inlined from a function whose DISubprogram is not null.
1931     fixupLineNumbers(Caller, FirstNewBlock, TheCall,
1932                      CalledFunc->getSubprogram() != nullptr);
1933 
1934     // Clone existing noalias metadata if necessary.
1935     CloneAliasScopeMetadata(CS, VMap);
1936 
1937     // Add noalias metadata if necessary.
1938     AddAliasScopeMetadata(CS, VMap, DL, CalleeAAR);
1939 
1940     // Clone return attributes on the callsite into the calls within the inlined
1941     // function which feed into its return value.
1942     AddReturnAttributes(CS, VMap);
1943 
1944     // Propagate llvm.mem.parallel_loop_access if necessary.
1945     PropagateParallelLoopAccessMetadata(CS, VMap);
1946 
1947     // Register any cloned assumptions.
1948     if (IFI.GetAssumptionCache)
1949       for (BasicBlock &NewBlock :
1950            make_range(FirstNewBlock->getIterator(), Caller->end()))
1951         for (Instruction &I : NewBlock) {
1952           if (auto *II = dyn_cast<IntrinsicInst>(&I))
1953             if (II->getIntrinsicID() == Intrinsic::assume)
1954               (*IFI.GetAssumptionCache)(*Caller).registerAssumption(II);
1955         }
1956   }
1957 
1958   // If there are any alloca instructions in the block that used to be the entry
1959   // block for the callee, move them to the entry block of the caller.  First
1960   // calculate which instruction they should be inserted before.  We insert the
1961   // instructions at the end of the current alloca list.
1962   {
1963     BasicBlock::iterator InsertPoint = Caller->begin()->begin();
1964     for (BasicBlock::iterator I = FirstNewBlock->begin(),
1965          E = FirstNewBlock->end(); I != E; ) {
1966       AllocaInst *AI = dyn_cast<AllocaInst>(I++);
1967       if (!AI) continue;
1968 
1969       // If the alloca is now dead, remove it.  This often occurs due to code
1970       // specialization.
1971       if (AI->use_empty()) {
1972         AI->eraseFromParent();
1973         continue;
1974       }
1975 
1976       if (!allocaWouldBeStaticInEntry(AI))
1977         continue;
1978 
1979       // Keep track of the static allocas that we inline into the caller.
1980       IFI.StaticAllocas.push_back(AI);
1981 
1982       // Scan for the block of allocas that we can move over, and move them
1983       // all at once.
1984       while (isa<AllocaInst>(I) &&
1985              !cast<AllocaInst>(I)->use_empty() &&
1986              allocaWouldBeStaticInEntry(cast<AllocaInst>(I))) {
1987         IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
1988         ++I;
1989       }
1990 
1991       // Transfer all of the allocas over in a block.  Using splice means
1992       // that the instructions aren't removed from the symbol table, then
1993       // reinserted.
1994       Caller->getEntryBlock().getInstList().splice(
1995           InsertPoint, FirstNewBlock->getInstList(), AI->getIterator(), I);
1996     }
1997   }
1998 
1999   SmallVector<Value*,4> VarArgsToForward;
2000   SmallVector<AttributeSet, 4> VarArgsAttrs;
2001   for (unsigned i = CalledFunc->getFunctionType()->getNumParams();
2002        i < CS.getNumArgOperands(); i++) {
2003     VarArgsToForward.push_back(CS.getArgOperand(i));
2004     VarArgsAttrs.push_back(CS.getAttributes().getParamAttributes(i));
2005   }
2006 
2007   bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false;
2008   if (InlinedFunctionInfo.ContainsCalls) {
2009     CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
2010     if (CallInst *CI = dyn_cast<CallInst>(TheCall))
2011       CallSiteTailKind = CI->getTailCallKind();
2012 
2013     // For inlining purposes, the "notail" marker is the same as no marker.
2014     if (CallSiteTailKind == CallInst::TCK_NoTail)
2015       CallSiteTailKind = CallInst::TCK_None;
2016 
2017     for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
2018          ++BB) {
2019       for (auto II = BB->begin(); II != BB->end();) {
2020         Instruction &I = *II++;
2021         CallInst *CI = dyn_cast<CallInst>(&I);
2022         if (!CI)
2023           continue;
2024 
2025         // Forward varargs from inlined call site to calls to the
2026         // ForwardVarArgsTo function, if requested, and to musttail calls.
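        // For example (illustrative), if the inlined call site was
        //   call void (i8*, ...) @vf(i8* %p, i32 1, i32 2)
        // the trailing i32 1, i32 2 (and their parameter attributes) are
        // appended to any musttail call, or call to ForwardVarArgsTo, found
        // in the inlined body.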
2027         if (!VarArgsToForward.empty() &&
2028             ((ForwardVarArgsTo &&
2029               CI->getCalledFunction() == ForwardVarArgsTo) ||
2030              CI->isMustTailCall())) {
2031           // Collect attributes for non-vararg parameters.
2032           AttributeList Attrs = CI->getAttributes();
2033           SmallVector<AttributeSet, 8> ArgAttrs;
2034           if (!Attrs.isEmpty() || !VarArgsAttrs.empty()) {
2035             for (unsigned ArgNo = 0;
2036                  ArgNo < CI->getFunctionType()->getNumParams(); ++ArgNo)
2037               ArgAttrs.push_back(Attrs.getParamAttributes(ArgNo));
2038           }
2039 
2040           // Add VarArg attributes.
2041           ArgAttrs.append(VarArgsAttrs.begin(), VarArgsAttrs.end());
2042           Attrs = AttributeList::get(CI->getContext(), Attrs.getFnAttributes(),
2043                                      Attrs.getRetAttributes(), ArgAttrs);
2044           // Add VarArgs to existing parameters.
2045           SmallVector<Value *, 6> Params(CI->arg_operands());
2046           Params.append(VarArgsToForward.begin(), VarArgsToForward.end());
2047           CallInst *NewCI = CallInst::Create(
2048               CI->getFunctionType(), CI->getCalledOperand(), Params, "", CI);
2049           NewCI->setDebugLoc(CI->getDebugLoc());
2050           NewCI->setAttributes(Attrs);
2051           NewCI->setCallingConv(CI->getCallingConv());
2052           CI->replaceAllUsesWith(NewCI);
2053           CI->eraseFromParent();
2054           CI = NewCI;
2055         }
2056 
2057         if (Function *F = CI->getCalledFunction())
2058           InlinedDeoptimizeCalls |=
2059               F->getIntrinsicID() == Intrinsic::experimental_deoptimize;
2060 
2061         // We need to reduce the strength of any inlined tail calls.  For
2062         // musttail, we have to avoid introducing potential unbounded stack
2063         // growth.  For example, if functions 'f' and 'g' are mutually recursive
2064         // with musttail, we can inline 'g' into 'f' so long as we preserve
2065         // musttail on the cloned call to 'f'.  If either the inlined call site
2066         // or the cloned call site is *not* musttail, the program already has
2067         // one frame of stack growth, so it's safe to remove musttail.  Here is
2068         // a table of example transformations:
2069         //
2070         //    f -> musttail g -> musttail f  ==>  f -> musttail f
2071         //    f -> musttail g ->     tail f  ==>  f ->     tail f
2072         //    f ->          g -> musttail f  ==>  f ->          f
2073         //    f ->          g ->     tail f  ==>  f ->          f
2074         //
2075         // Inlined notail calls should remain notail calls.
2076         CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
2077         if (ChildTCK != CallInst::TCK_NoTail)
2078           ChildTCK = std::min(CallSiteTailKind, ChildTCK);
2079         CI->setTailCallKind(ChildTCK);
2080         InlinedMustTailCalls |= CI->isMustTailCall();
2081 
2082         // Calls inlined through a 'nounwind' call site should be marked
2083         // 'nounwind'.
2084         if (MarkNoUnwind)
2085           CI->setDoesNotThrow();
2086       }
2087     }
2088   }
2089 
  // Leave lifetime markers for the static allocas, scoping them to the
2091   // function we just inlined.
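  // Roughly, each such alloca gets an @llvm.lifetime.start marker at the top
  // of the inlined body and a matching @llvm.lifetime.end before each return,
  // so later passes know its memory is dead outside the inlined region.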
2092   if (InsertLifetime && !IFI.StaticAllocas.empty()) {
2093     IRBuilder<> builder(&FirstNewBlock->front());
2094     for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
2095       AllocaInst *AI = IFI.StaticAllocas[ai];
2096       // Don't mark swifterror allocas. They can't have bitcast uses.
2097       if (AI->isSwiftError())
2098         continue;
2099 
2100       // If the alloca is already scoped to something smaller than the whole
2101       // function then there's no need to add redundant, less accurate markers.
2102       if (hasLifetimeMarkers(AI))
2103         continue;
2104 
2105       // Try to determine the size of the allocation.
2106       ConstantInt *AllocaSize = nullptr;
2107       if (ConstantInt *AIArraySize =
2108           dyn_cast<ConstantInt>(AI->getArraySize())) {
2109         auto &DL = Caller->getParent()->getDataLayout();
2110         Type *AllocaType = AI->getAllocatedType();
2111         uint64_t AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
2112         uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
2113 
2114         // Don't add markers for zero-sized allocas.
2115         if (AllocaArraySize == 0)
2116           continue;
2117 
2118         // Check that array size doesn't saturate uint64_t and doesn't
2119         // overflow when it's multiplied by type size.
2120         if (AllocaArraySize != std::numeric_limits<uint64_t>::max() &&
2121             std::numeric_limits<uint64_t>::max() / AllocaArraySize >=
2122                 AllocaTypeSize) {
2123           AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
2124                                         AllocaArraySize * AllocaTypeSize);
2125         }
2126       }
2127 
2128       builder.CreateLifetimeStart(AI, AllocaSize);
2129       for (ReturnInst *RI : Returns) {
2130         // Don't insert llvm.lifetime.end calls between a musttail or deoptimize
2131         // call and a return.  The return kills all local allocas.
2132         if (InlinedMustTailCalls &&
2133             RI->getParent()->getTerminatingMustTailCall())
2134           continue;
2135         if (InlinedDeoptimizeCalls &&
2136             RI->getParent()->getTerminatingDeoptimizeCall())
2137           continue;
2138         IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
2139       }
2140     }
2141   }
2142 
2143   // If the inlined code contained dynamic alloca instructions, wrap the inlined
2144   // code with llvm.stacksave/llvm.stackrestore intrinsics.
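  // Conceptually: %savedstack = call i8* @llvm.stacksave() is inserted at the
  // start of the inlined code, and call void @llvm.stackrestore(i8* %savedstack)
  // is inserted before each return, so the callee's dynamic allocas do not
  // keep consuming stack space in the rest of the caller.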
2145   if (InlinedFunctionInfo.ContainsDynamicAllocas) {
2146     Module *M = Caller->getParent();
2147     // Get the two intrinsics we care about.
2148     Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    Function *StackRestore =
        Intrinsic::getDeclaration(M, Intrinsic::stackrestore);
2150 
2151     // Insert the llvm.stacksave.
2152     CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
2153                              .CreateCall(StackSave, {}, "savedstack");
2154 
2155     // Insert a call to llvm.stackrestore before any return instructions in the
2156     // inlined function.
2157     for (ReturnInst *RI : Returns) {
2158       // Don't insert llvm.stackrestore calls between a musttail or deoptimize
2159       // call and a return.  The return will restore the stack pointer.
2160       if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
2161         continue;
2162       if (InlinedDeoptimizeCalls && RI->getParent()->getTerminatingDeoptimizeCall())
2163         continue;
2164       IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
2165     }
2166   }
2167 
2168   // If we are inlining for an invoke instruction, we must make sure to rewrite
2169   // any call instructions into invoke instructions.  This is sensitive to which
2170   // funclet pads were top-level in the inlinee, so must be done before
2171   // rewriting the "parent pad" links.
2172   if (auto *II = dyn_cast<InvokeInst>(TheCall)) {
2173     BasicBlock *UnwindDest = II->getUnwindDest();
2174     Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI();
2175     if (isa<LandingPadInst>(FirstNonPHI)) {
2176       HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo);
2177     } else {
2178       HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo);
2179     }
2180   }
2181 
2182   // Update the lexical scopes of the new funclets and callsites.
2183   // Anything that had 'none' as its parent is now nested inside the callsite's
2184   // EHPad.
2185 
2186   if (CallSiteEHPad) {
2187     for (Function::iterator BB = FirstNewBlock->getIterator(),
2188                             E = Caller->end();
2189          BB != E; ++BB) {
2190       // Add bundle operands to any top-level call sites.
2191       SmallVector<OperandBundleDef, 1> OpBundles;
2192       for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E;) {
2193         Instruction *I = &*BBI++;
2194         CallSite CS(I);
2195         if (!CS)
2196           continue;
2197 
2198         // Skip call sites which are nounwind intrinsics.
2199         auto *CalledFn =
2200             dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
2201         if (CalledFn && CalledFn->isIntrinsic() && CS.doesNotThrow())
2202           continue;
2203 
2204         // Skip call sites which already have a "funclet" bundle.
2205         if (CS.getOperandBundle(LLVMContext::OB_funclet))
2206           continue;
2207 
2208         CS.getOperandBundlesAsDefs(OpBundles);
2209         OpBundles.emplace_back("funclet", CallSiteEHPad);
2210 
2211         Instruction *NewInst;
2212         if (CS.isCall())
2213           NewInst = CallInst::Create(cast<CallInst>(I), OpBundles, I);
2214         else if (CS.isCallBr())
2215           NewInst = CallBrInst::Create(cast<CallBrInst>(I), OpBundles, I);
2216         else
2217           NewInst = InvokeInst::Create(cast<InvokeInst>(I), OpBundles, I);
2218         NewInst->takeName(I);
2219         I->replaceAllUsesWith(NewInst);
2220         I->eraseFromParent();
2221 
2222         OpBundles.clear();
2223       }
2224 
      // It is problematic if the inlinee has a cleanupret which unwinds to
      // the caller and we inline it into a call site which doesn't unwind
      // itself but sits inside an EH pad that does.  Such an edge must be
      // dynamically unreachable.  As such, we replace the cleanupret with
      // unreachable.
2229       if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
2230         if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
2231           changeToUnreachable(CleanupRet, /*UseLLVMTrap=*/false);
2232 
2233       Instruction *I = BB->getFirstNonPHI();
2234       if (!I->isEHPad())
2235         continue;
2236 
2237       if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
2238         if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
2239           CatchSwitch->setParentPad(CallSiteEHPad);
2240       } else {
2241         auto *FPI = cast<FuncletPadInst>(I);
2242         if (isa<ConstantTokenNone>(FPI->getParentPad()))
2243           FPI->setParentPad(CallSiteEHPad);
2244       }
2245     }
2246   }
2247 
2248   if (InlinedDeoptimizeCalls) {
2249     // We need to at least remove the deoptimizing returns from the Return set,
2250     // so that the control flow from those returns does not get merged into the
2251     // caller (but terminate it instead).  If the caller's return type does not
2252     // match the callee's return type, we also need to change the return type of
2253     // the intrinsic.
2254     if (Caller->getReturnType() == TheCall->getType()) {
2255       auto NewEnd = llvm::remove_if(Returns, [](ReturnInst *RI) {
2256         return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr;
2257       });
2258       Returns.erase(NewEnd, Returns.end());
2259     } else {
2260       SmallVector<ReturnInst *, 8> NormalReturns;
2261       Function *NewDeoptIntrinsic = Intrinsic::getDeclaration(
2262           Caller->getParent(), Intrinsic::experimental_deoptimize,
2263           {Caller->getReturnType()});
2264 
2265       for (ReturnInst *RI : Returns) {
2266         CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall();
2267         if (!DeoptCall) {
2268           NormalReturns.push_back(RI);
2269           continue;
2270         }
2271 
2272         // The calling convention on the deoptimize call itself may be bogus,
2273         // since the code we're inlining may have undefined behavior (and may
2274         // never actually execute at runtime); but all
2275         // @llvm.experimental.deoptimize declarations have to have the same
2276         // calling convention in a well-formed module.
2277         auto CallingConv = DeoptCall->getCalledFunction()->getCallingConv();
2278         NewDeoptIntrinsic->setCallingConv(CallingConv);
2279         auto *CurBB = RI->getParent();
2280         RI->eraseFromParent();
2281 
2282         SmallVector<Value *, 4> CallArgs(DeoptCall->arg_begin(),
2283                                          DeoptCall->arg_end());
2284 
2285         SmallVector<OperandBundleDef, 1> OpBundles;
2286         DeoptCall->getOperandBundlesAsDefs(OpBundles);
2287         DeoptCall->eraseFromParent();
2288         assert(!OpBundles.empty() &&
2289                "Expected at least the deopt operand bundle");
2290 
2291         IRBuilder<> Builder(CurBB);
2292         CallInst *NewDeoptCall =
2293             Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
2294         NewDeoptCall->setCallingConv(CallingConv);
2295         if (NewDeoptCall->getType()->isVoidTy())
2296           Builder.CreateRetVoid();
2297         else
2298           Builder.CreateRet(NewDeoptCall);
2299       }
2300 
2301       // Leave behind the normal returns so we can merge control flow.
2302       std::swap(Returns, NormalReturns);
2303     }
2304   }
2305 
2306   // Handle any inlined musttail call sites.  In order for a new call site to be
2307   // musttail, the source of the clone and the inlined call site must have been
2308   // musttail.  Therefore it's safe to return without merging control into the
2309   // phi below.
2310   if (InlinedMustTailCalls) {
2311     // Check if we need to bitcast the result of any musttail calls.
2312     Type *NewRetTy = Caller->getReturnType();
2313     bool NeedBitCast = !TheCall->use_empty() && TheCall->getType() != NewRetTy;
2314 
2315     // Handle the returns preceded by musttail calls separately.
2316     SmallVector<ReturnInst *, 8> NormalReturns;
2317     for (ReturnInst *RI : Returns) {
2318       CallInst *ReturnedMustTail =
2319           RI->getParent()->getTerminatingMustTailCall();
2320       if (!ReturnedMustTail) {
2321         NormalReturns.push_back(RI);
2322         continue;
2323       }
2324       if (!NeedBitCast)
2325         continue;
2326 
2327       // Delete the old return and any preceding bitcast.
2328       BasicBlock *CurBB = RI->getParent();
2329       auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
2330       RI->eraseFromParent();
2331       if (OldCast)
2332         OldCast->eraseFromParent();
2333 
2334       // Insert a new bitcast and return with the right type.
2335       IRBuilder<> Builder(CurBB);
2336       Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
2337     }
2338 
2339     // Leave behind the normal returns so we can merge control flow.
2340     std::swap(Returns, NormalReturns);
2341   }
2342 
2343   // Now that all of the transforms on the inlined code have taken place but
2344   // before we splice the inlined code into the CFG and lose track of which
2345   // blocks were actually inlined, collect the call sites. We only do this if
2346   // call graph updates weren't requested, as those provide value handle based
2347   // tracking of inlined call sites instead.
2348   if (InlinedFunctionInfo.ContainsCalls && !IFI.CG) {
2349     // Otherwise just collect the raw call sites that were inlined.
2350     for (BasicBlock &NewBB :
2351          make_range(FirstNewBlock->getIterator(), Caller->end()))
2352       for (Instruction &I : NewBB)
2353         if (auto CS = CallSite(&I))
2354           IFI.InlinedCallSites.push_back(CS);
2355   }
2356 
2357   // If we cloned in _exactly one_ basic block, and if that block ends in a
2358   // return instruction, we splice the body of the inlined callee directly into
2359   // the calling basic block.
2360   if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
2361     // Move all of the instructions right before the call.
2362     OrigBB->getInstList().splice(TheCall->getIterator(),
2363                                  FirstNewBlock->getInstList(),
2364                                  FirstNewBlock->begin(), FirstNewBlock->end());
2365     // Remove the cloned basic block.
2366     Caller->getBasicBlockList().pop_back();
2367 
2368     // If the call site was an invoke instruction, add a branch to the normal
2369     // destination.
2370     if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
2371       BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);
2372       NewBr->setDebugLoc(Returns[0]->getDebugLoc());
2373     }
2374 
2375     // If the return instruction returned a value, replace uses of the call with
2376     // uses of the returned value.
2377     if (!TheCall->use_empty()) {
2378       ReturnInst *R = Returns[0];
2379       if (TheCall == R->getReturnValue())
2380         TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
2381       else
2382         TheCall->replaceAllUsesWith(R->getReturnValue());
2383     }
2384     // Since we are now done with the Call/Invoke, we can delete it.
2385     TheCall->eraseFromParent();
2386 
2387     // Since we are now done with the return instruction, delete it also.
2388     Returns[0]->eraseFromParent();
2389 
2390     // We are now done with the inlining.
2391     return InlineResult::success();
2392   }
2393 
2394   // Otherwise, we have the normal case, of more than one block to inline or
2395   // multiple return sites.
2396 
2397   // We want to clone the entire callee function into the hole between the
2398   // "starter" and "ender" blocks.  How we accomplish this depends on whether
2399   // this is an invoke instruction or a call instruction.
2400   BasicBlock *AfterCallBB;
2401   BranchInst *CreatedBranchToNormalDest = nullptr;
2402   if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
2403 
2404     // Add an unconditional branch to make this look like the CallInst case...
2405     CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), TheCall);
2406 
2407     // Split the basic block.  This guarantees that no PHI nodes will have to be
    // updated due to new incoming edges, and makes the invoke case more
2409     // symmetric to the call case.
2410     AfterCallBB =
2411         OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(),
2412                                 CalledFunc->getName() + ".exit");
2413 
2414   } else {  // It's a call
2415     // If this is a call instruction, we need to split the basic block that
2416     // the call lives in.
2417     //
2418     AfterCallBB = OrigBB->splitBasicBlock(TheCall->getIterator(),
2419                                           CalledFunc->getName() + ".exit");
2420   }
2421 
2422   if (IFI.CallerBFI) {
2423     // Copy original BB's block frequency to AfterCallBB
2424     IFI.CallerBFI->setBlockFreq(
2425         AfterCallBB, IFI.CallerBFI->getBlockFreq(OrigBB).getFrequency());
2426   }
2427 
2428   // Change the branch that used to go to AfterCallBB to branch to the first
2429   // basic block of the inlined function.
2430   //
2431   Instruction *Br = OrigBB->getTerminator();
2432   assert(Br && Br->getOpcode() == Instruction::Br &&
2433          "splitBasicBlock broken!");
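  // The terminator is the unconditional branch that splitBasicBlock created,
  // so its only operand is the branch target; retarget it from AfterCallBB to
  // the entry block of the inlined body.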
  Br->setOperand(0, &*FirstNewBlock);

  // Now that the function is correct, make it a little bit nicer.  In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->getBasicBlockList().splice(AfterCallBB->getIterator(),
                                     Caller->getBasicBlockList(), FirstNewBlock,
                                     Caller->end());

  // Handle all of the return instructions that we just cloned in, and eliminate
  // any users of the original call/invoke instruction.
  Type *RTy = CalledFunc->getReturnType();

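  // PHI stays null unless the callee had multiple returns *and* the call's
  // result is actually used; in that case the returned values are merged with
  // a PHI at the top of AfterCallBB, e.g. (illustrative):
  //   %r = phi i32 [ %v1, %ret1 ], [ %v2, %ret2 ]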
  PHINode *PHI = nullptr;
  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    if (!TheCall->use_empty()) {
      PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(),
                            &AfterCallBB->front());
      // Anything that used the result of the function call should now use the
      // PHI node as its operand.
      TheCall->replaceAllUsesWith(PHI);
    }

    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
    if (PHI) {
      for (ReturnInst *RI : Returns) {
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }
    }

    // Add a branch to the merge points and remove return instructions.
    DebugLoc Loc;
    for (ReturnInst *RI : Returns) {
      BranchInst *BI = BranchInst::Create(AfterCallBB, RI);
      Loc = RI->getDebugLoc();
      BI->setDebugLoc(Loc);
      RI->eraseFromParent();
    }
    // We need to set the debug location to *somewhere* inside the
    // inlined function. The line number may be nonsensical, but the
    // instruction will at least be associated with the right
    // function.
    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Loc);
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!TheCall->use_empty()) {
      if (TheCall == Returns[0]->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
    }

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());

    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());

    // Delete the return instruction and the now-empty ReturnBB.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!TheCall->use_empty()) {
    // No returns, but something is using the return value of the call.  Just
    // nuke the result.
    TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  TheCall->eraseFromParent();

  // If we inlined any musttail calls and the original return is now
  // unreachable, delete it.  It can only contain a bitcast and ret.
  if (InlinedMustTailCalls && pred_begin(AfterCallBB) == pred_end(AfterCallBB))
    AfterCallBB->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the callee's entry block into the calling block, right before the
  // unconditional branch.
  CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes
  OrigBB->getInstList().splice(Br->getIterator(), CalleeEntry->getInstList());

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);

  // If we inserted a phi node, check to see if it has a single value (e.g. all
  // the entries are the same or undef).  If so, remove the PHI so it doesn't
  // block other optimizations.
  if (PHI) {
    AssumptionCache *AC =
        IFI.GetAssumptionCache ? &(*IFI.GetAssumptionCache)(*Caller) : nullptr;
    auto &DL = Caller->getParent()->getDataLayout();
    if (Value *V = SimplifyInstruction(PHI, {DL, nullptr, nullptr, AC})) {
      PHI->replaceAllUsesWith(V);
      PHI->eraseFromParent();
    }
  }

  return InlineResult::success();
}