1 //===- InlineCost.cpp - Cost analysis for inliner -------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements inline cost analysis.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/Analysis/InlineCost.h"
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/ADT/SetVector.h"
16 #include "llvm/ADT/SmallPtrSet.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/ADT/Statistic.h"
19 #include "llvm/Analysis/AssumptionCache.h"
20 #include "llvm/Analysis/BlockFrequencyInfo.h"
21 #include "llvm/Analysis/CFG.h"
22 #include "llvm/Analysis/CodeMetrics.h"
23 #include "llvm/Analysis/ConstantFolding.h"
24 #include "llvm/Analysis/InstructionSimplify.h"
25 #include "llvm/Analysis/LoopInfo.h"
26 #include "llvm/Analysis/ProfileSummaryInfo.h"
27 #include "llvm/Analysis/TargetLibraryInfo.h"
28 #include "llvm/Analysis/TargetTransformInfo.h"
29 #include "llvm/Analysis/ValueTracking.h"
30 #include "llvm/Config/llvm-config.h"
31 #include "llvm/IR/AssemblyAnnotationWriter.h"
32 #include "llvm/IR/CallingConv.h"
33 #include "llvm/IR/DataLayout.h"
34 #include "llvm/IR/Dominators.h"
35 #include "llvm/IR/GetElementPtrTypeIterator.h"
36 #include "llvm/IR/GlobalAlias.h"
37 #include "llvm/IR/InstVisitor.h"
38 #include "llvm/IR/IntrinsicInst.h"
39 #include "llvm/IR/Operator.h"
40 #include "llvm/IR/PatternMatch.h"
41 #include "llvm/Support/CommandLine.h"
42 #include "llvm/Support/Debug.h"
43 #include "llvm/Support/FormattedStream.h"
44 #include "llvm/Support/raw_ostream.h"
45 
46 using namespace llvm;
47 
48 #define DEBUG_TYPE "inline-cost"
49 
50 STATISTIC(NumCallsAnalyzed, "Number of call sites analyzed");
51 
52 static cl::opt<int>
53     DefaultThreshold("inlinedefault-threshold", cl::Hidden, cl::init(225),
54                      cl::ZeroOrMore,
55                      cl::desc("Default amount of inlining to perform"));
56 
57 static cl::opt<bool> PrintDebugInstructionDeltas(
58     "print-instruction-deltas", cl::Hidden, cl::init(false),
59     cl::desc("Prints deltas of cost and threshold per instruction"));
60 
61 static cl::opt<int> InlineThreshold(
62     "inline-threshold", cl::Hidden, cl::init(225), cl::ZeroOrMore,
63     cl::desc("Control the amount of inlining to perform (default = 225)"));
64 
65 static cl::opt<int> HintThreshold(
66     "inlinehint-threshold", cl::Hidden, cl::init(325), cl::ZeroOrMore,
67     cl::desc("Threshold for inlining functions with inline hint"));
68 
69 static cl::opt<int>
70     ColdCallSiteThreshold("inline-cold-callsite-threshold", cl::Hidden,
71                           cl::init(45), cl::ZeroOrMore,
72                           cl::desc("Threshold for inlining cold callsites"));
73 
74 // We introduce this threshold to help performance of instrumentation based
75 // PGO before we actually hook up inliner with analysis passes such as BPI and
76 // BFI.
77 static cl::opt<int> ColdThreshold(
78     "inlinecold-threshold", cl::Hidden, cl::init(45), cl::ZeroOrMore,
79     cl::desc("Threshold for inlining functions with cold attribute"));
80 
81 static cl::opt<int>
82     HotCallSiteThreshold("hot-callsite-threshold", cl::Hidden, cl::init(3000),
83                          cl::ZeroOrMore,
84                          cl::desc("Threshold for hot callsites "));
85 
86 static cl::opt<int> LocallyHotCallSiteThreshold(
87     "locally-hot-callsite-threshold", cl::Hidden, cl::init(525), cl::ZeroOrMore,
88     cl::desc("Threshold for locally hot callsites "));
89 
90 static cl::opt<int> ColdCallSiteRelFreq(
91     "cold-callsite-rel-freq", cl::Hidden, cl::init(2), cl::ZeroOrMore,
92     cl::desc("Maximum block frequency, expressed as a percentage of caller's "
93              "entry frequency, for a callsite to be cold in the absence of "
94              "profile information."));
95 
96 static cl::opt<int> HotCallSiteRelFreq(
97     "hot-callsite-rel-freq", cl::Hidden, cl::init(60), cl::ZeroOrMore,
98     cl::desc("Minimum block frequency, expressed as a multiple of caller's "
99              "entry frequency, for a callsite to be hot in the absence of "
100              "profile information."));
101 
102 static cl::opt<bool> OptComputeFullInlineCost(
103     "inline-cost-full", cl::Hidden, cl::init(false), cl::ZeroOrMore,
104     cl::desc("Compute the full inline cost of a call site even when the cost "
105              "exceeds the threshold."));
106 
107 static cl::opt<bool> InlineCallerSupersetNoBuiltin(
108     "inline-caller-superset-nobuiltin", cl::Hidden, cl::init(true),
109     cl::ZeroOrMore,
110     cl::desc("Allow inlining when caller has a superset of callee's nobuiltin "
111              "attributes."));
112 
113 namespace {
114 class InlineCostCallAnalyzer;
115 
116 // This struct is used to store information about inline cost of a
117 // particular instruction
118 struct InstructionCostDetail {
119   int CostBefore = 0;
120   int CostAfter = 0;
121   int ThresholdBefore = 0;
122   int ThresholdAfter = 0;
123 
124   int getThresholdDelta() const { return ThresholdAfter - ThresholdBefore; }
125 
126   int getCostDelta() const { return CostAfter - CostBefore; }
127 
128   bool hasThresholdChanged() const { return ThresholdAfter != ThresholdBefore; }
129 };
130 
131 class CostAnnotationWriter : public AssemblyAnnotationWriter {
132 public:
133   // This DenseMap stores the delta change in cost and threshold after
134   // accounting for the given instruction.
135   DenseMap<const Instruction *, InstructionCostDetail> CostThresholdMap;
136 
137   virtual void emitInstructionAnnot(const Instruction *I,
138                                     formatted_raw_ostream &OS);
139 };
140 
141 /// Carry out call site analysis, in order to evaluate inlinability.
142 /// NOTE: the type is currently used as implementation detail of functions such
143 /// as llvm::getInlineCost. Note the function_ref constructor parameters - the
144 /// expectation is that they come from the outer scope, from the wrapper
145 /// functions. If we want to support constructing CallAnalyzer objects where
146 /// lambdas are provided inline at construction, or where the object needs to
147 /// otherwise survive past the scope of the provided functions, we need to
148 /// revisit the argument types.
149 class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
150   typedef InstVisitor<CallAnalyzer, bool> Base;
151   friend class InstVisitor<CallAnalyzer, bool>;
152 
153 protected:
154   virtual ~CallAnalyzer() {}
155   /// The TargetTransformInfo available for this compilation.
156   const TargetTransformInfo &TTI;
157 
158   /// Getter for the cache of @llvm.assume intrinsics.
159   function_ref<AssumptionCache &(Function &)> GetAssumptionCache;
160 
161   /// Getter for BlockFrequencyInfo
162   function_ref<BlockFrequencyInfo &(Function &)> GetBFI;
163 
164   /// Profile summary information.
165   ProfileSummaryInfo *PSI;
166 
167   /// The called function.
168   Function &F;
169 
170   // Cache the DataLayout since we use it a lot.
171   const DataLayout &DL;
172 
173   /// The OptimizationRemarkEmitter available for this compilation.
174   OptimizationRemarkEmitter *ORE;
175 
176   /// The candidate callsite being analyzed. Please do not use this to do
177   /// analysis in the caller function; we want the inline cost query to be
178   /// easily cacheable. Instead, use the cover function paramHasAttr.
179   CallBase &CandidateCall;
180 
181   /// Extension points for handling callsite features.
182   /// Called after a basic block was analyzed.
183   virtual void onBlockAnalyzed(const BasicBlock *BB) {}
184 
185   /// Called before an instruction was analyzed
186   virtual void onInstructionAnalysisStart(const Instruction *I) {}
187 
188   /// Called after an instruction was analyzed
189   virtual void onInstructionAnalysisFinish(const Instruction *I) {}
190 
191   /// Called at the end of the analysis of the callsite. Return the outcome of
192   /// the analysis, i.e. 'InlineResult(true)' if the inlining may happen, or
193   /// the reason it can't.
194   virtual InlineResult finalizeAnalysis() { return InlineResult::success(); }
195   /// Called when we're about to start processing a basic block, and every time
196   /// we are done processing an instruction. Return true if there is no point in
197   /// continuing the analysis (e.g. we've determined already the call site is
198   /// too expensive to inline)
199   virtual bool shouldStop() { return false; }
200 
201   /// Called before the analysis of the callee body starts (with callsite
202   /// contexts propagated).  It checks callsite-specific information. Return a
203   /// reason analysis can't continue if that's the case, or 'true' if it may
204   /// continue.
205   virtual InlineResult onAnalysisStart() { return InlineResult::success(); }
206   /// Called if the analysis engine decides SROA cannot be done for the given
207   /// alloca.
208   virtual void onDisableSROA(AllocaInst *Arg) {}
209 
210   /// Called the analysis engine determines load elimination won't happen.
211   virtual void onDisableLoadElimination() {}
212 
213   /// Called to account for a call.
214   virtual void onCallPenalty() {}
215 
216   /// Called to account for the expectation the inlining would result in a load
217   /// elimination.
218   virtual void onLoadEliminationOpportunity() {}
219 
220   /// Called to account for the cost of argument setup for the Call in the
221   /// callee's body (not the callsite currently under analysis).
222   virtual void onCallArgumentSetup(const CallBase &Call) {}
223 
224   /// Called to account for a load relative intrinsic.
225   virtual void onLoadRelativeIntrinsic() {}
226 
227   /// Called to account for a lowered call.
228   virtual void onLoweredCall(Function *F, CallBase &Call, bool IsIndirectCall) {
229   }
230 
231   /// Account for a jump table of given size. Return false to stop further
232   /// processing the switch instruction
233   virtual bool onJumpTable(unsigned JumpTableSize) { return true; }
234 
235   /// Account for a case cluster of given size. Return false to stop further
236   /// processing of the instruction.
237   virtual bool onCaseCluster(unsigned NumCaseCluster) { return true; }
238 
239   /// Called at the end of processing a switch instruction, with the given
240   /// number of case clusters.
241   virtual void onFinalizeSwitch(unsigned JumpTableSize,
242                                 unsigned NumCaseCluster) {}
243 
244   /// Called to account for any other instruction not specifically accounted
245   /// for.
246   virtual void onMissedSimplification() {}
247 
248   /// Start accounting potential benefits due to SROA for the given alloca.
249   virtual void onInitializeSROAArg(AllocaInst *Arg) {}
250 
251   /// Account SROA savings for the AllocaInst value.
252   virtual void onAggregateSROAUse(AllocaInst *V) {}
253 
254   bool handleSROA(Value *V, bool DoNotDisable) {
255     // Check for SROA candidates in comparisons.
256     if (auto *SROAArg = getSROAArgForValueOrNull(V)) {
257       if (DoNotDisable) {
258         onAggregateSROAUse(SROAArg);
259         return true;
260       }
261       disableSROAForArg(SROAArg);
262     }
263     return false;
264   }
265 
266   bool IsCallerRecursive = false;
267   bool IsRecursiveCall = false;
268   bool ExposesReturnsTwice = false;
269   bool HasDynamicAlloca = false;
270   bool ContainsNoDuplicateCall = false;
271   bool HasReturn = false;
272   bool HasIndirectBr = false;
273   bool HasUninlineableIntrinsic = false;
274   bool InitsVargArgs = false;
275 
276   /// Number of bytes allocated statically by the callee.
277   uint64_t AllocatedSize = 0;
278   unsigned NumInstructions = 0;
279   unsigned NumVectorInstructions = 0;
280 
281   /// While we walk the potentially-inlined instructions, we build up and
282   /// maintain a mapping of simplified values specific to this callsite. The
283   /// idea is to propagate any special information we have about arguments to
284   /// this call through the inlinable section of the function, and account for
285   /// likely simplifications post-inlining. The most important aspect we track
286   /// is CFG altering simplifications -- when we prove a basic block dead, that
287   /// can cause dramatic shifts in the cost of inlining a function.
288   DenseMap<Value *, Constant *> SimplifiedValues;
289 
290   /// Keep track of the values which map back (through function arguments) to
291   /// allocas on the caller stack which could be simplified through SROA.
292   DenseMap<Value *, AllocaInst *> SROAArgValues;
293 
294   /// Keep track of Allocas for which we believe we may get SROA optimization.
295   DenseSet<AllocaInst *> EnabledSROAAllocas;
296 
297   /// Keep track of values which map to a pointer base and constant offset.
298   DenseMap<Value *, std::pair<Value *, APInt>> ConstantOffsetPtrs;
299 
300   /// Keep track of dead blocks due to the constant arguments.
301   SetVector<BasicBlock *> DeadBlocks;
302 
303   /// The mapping of the blocks to their known unique successors due to the
304   /// constant arguments.
305   DenseMap<BasicBlock *, BasicBlock *> KnownSuccessors;
306 
307   /// Model the elimination of repeated loads that is expected to happen
308   /// whenever we simplify away the stores that would otherwise cause them to be
309   /// loads.
310   bool EnableLoadElimination;
311   SmallPtrSet<Value *, 16> LoadAddrSet;
312 
313   AllocaInst *getSROAArgForValueOrNull(Value *V) const {
314     auto It = SROAArgValues.find(V);
315     if (It == SROAArgValues.end() || EnabledSROAAllocas.count(It->second) == 0)
316       return nullptr;
317     return It->second;
318   }
319 
320   // Custom simplification helper routines.
321   bool isAllocaDerivedArg(Value *V);
322   void disableSROAForArg(AllocaInst *SROAArg);
323   void disableSROA(Value *V);
324   void findDeadBlocks(BasicBlock *CurrBB, BasicBlock *NextBB);
325   void disableLoadElimination();
326   bool isGEPFree(GetElementPtrInst &GEP);
327   bool canFoldInboundsGEP(GetElementPtrInst &I);
328   bool accumulateGEPOffset(GEPOperator &GEP, APInt &Offset);
329   bool simplifyCallSite(Function *F, CallBase &Call);
330   template <typename Callable>
331   bool simplifyInstruction(Instruction &I, Callable Evaluate);
332   ConstantInt *stripAndComputeInBoundsConstantOffsets(Value *&V);
333 
334   /// Return true if the given argument to the function being considered for
335   /// inlining has the given attribute set either at the call site or the
336   /// function declaration.  Primarily used to inspect call site specific
337   /// attributes since these can be more precise than the ones on the callee
338   /// itself.
339   bool paramHasAttr(Argument *A, Attribute::AttrKind Attr);
340 
341   /// Return true if the given value is known non null within the callee if
342   /// inlined through this particular callsite.
343   bool isKnownNonNullInCallee(Value *V);
344 
345   /// Return true if size growth is allowed when inlining the callee at \p Call.
346   bool allowSizeGrowth(CallBase &Call);
347 
348   // Custom analysis routines.
349   InlineResult analyzeBlock(BasicBlock *BB,
350                             SmallPtrSetImpl<const Value *> &EphValues);
351 
352   // Disable several entry points to the visitor so we don't accidentally use
353   // them by declaring but not defining them here.
354   void visit(Module *);
355   void visit(Module &);
356   void visit(Function *);
357   void visit(Function &);
358   void visit(BasicBlock *);
359   void visit(BasicBlock &);
360 
361   // Provide base case for our instruction visit.
362   bool visitInstruction(Instruction &I);
363 
364   // Our visit overrides.
365   bool visitAlloca(AllocaInst &I);
366   bool visitPHI(PHINode &I);
367   bool visitGetElementPtr(GetElementPtrInst &I);
368   bool visitBitCast(BitCastInst &I);
369   bool visitPtrToInt(PtrToIntInst &I);
370   bool visitIntToPtr(IntToPtrInst &I);
371   bool visitCastInst(CastInst &I);
372   bool visitUnaryInstruction(UnaryInstruction &I);
373   bool visitCmpInst(CmpInst &I);
374   bool visitSub(BinaryOperator &I);
375   bool visitBinaryOperator(BinaryOperator &I);
376   bool visitFNeg(UnaryOperator &I);
377   bool visitLoad(LoadInst &I);
378   bool visitStore(StoreInst &I);
379   bool visitExtractValue(ExtractValueInst &I);
380   bool visitInsertValue(InsertValueInst &I);
381   bool visitCallBase(CallBase &Call);
382   bool visitReturnInst(ReturnInst &RI);
383   bool visitBranchInst(BranchInst &BI);
384   bool visitSelectInst(SelectInst &SI);
385   bool visitSwitchInst(SwitchInst &SI);
386   bool visitIndirectBrInst(IndirectBrInst &IBI);
387   bool visitResumeInst(ResumeInst &RI);
388   bool visitCleanupReturnInst(CleanupReturnInst &RI);
389   bool visitCatchReturnInst(CatchReturnInst &RI);
390   bool visitUnreachableInst(UnreachableInst &I);
391 
392 public:
393   CallAnalyzer(
394       Function &Callee, CallBase &Call, const TargetTransformInfo &TTI,
395       function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
396       function_ref<BlockFrequencyInfo &(Function &)> GetBFI = nullptr,
397       ProfileSummaryInfo *PSI = nullptr,
398       OptimizationRemarkEmitter *ORE = nullptr)
399       : TTI(TTI), GetAssumptionCache(GetAssumptionCache), GetBFI(GetBFI),
400         PSI(PSI), F(Callee), DL(F.getParent()->getDataLayout()), ORE(ORE),
401         CandidateCall(Call), EnableLoadElimination(true) {}
402 
403   InlineResult analyze();
404 
405   // Keep a bunch of stats about the cost savings found so we can print them
406   // out when debugging.
407   unsigned NumConstantArgs = 0;
408   unsigned NumConstantOffsetPtrArgs = 0;
409   unsigned NumAllocaArgs = 0;
410   unsigned NumConstantPtrCmps = 0;
411   unsigned NumConstantPtrDiffs = 0;
412   unsigned NumInstructionsSimplified = 0;
413 
414   void dump();
415 };
416 
417 /// FIXME: if it is necessary to derive from InlineCostCallAnalyzer, note
418 /// the FIXME in onLoweredCall, when instantiating an InlineCostCallAnalyzer
419 class InlineCostCallAnalyzer final : public CallAnalyzer {
420   const int CostUpperBound = INT_MAX - InlineConstants::InstrCost - 1;
421   const bool ComputeFullInlineCost;
422   int LoadEliminationCost = 0;
423   /// Bonus to be applied when percentage of vector instructions in callee is
424   /// high (see more details in updateThreshold).
425   int VectorBonus = 0;
426   /// Bonus to be applied when the callee has only one reachable basic block.
427   int SingleBBBonus = 0;
428 
429   /// Tunable parameters that control the analysis.
430   const InlineParams &Params;
431 
432   /// Upper bound for the inlining cost. Bonuses are being applied to account
433   /// for speculative "expected profit" of the inlining decision.
434   int Threshold = 0;
435 
436   /// Attempt to evaluate indirect calls to boost its inline cost.
437   const bool BoostIndirectCalls;
438 
439   /// Ignore the threshold when finalizing analysis.
440   const bool IgnoreThreshold;
441 
442   /// Inlining cost measured in abstract units, accounts for all the
443   /// instructions expected to be executed for a given function invocation.
444   /// Instructions that are statically proven to be dead based on call-site
445   /// arguments are not counted here.
446   int Cost = 0;
447 
448   bool SingleBB = true;
449 
450   unsigned SROACostSavings = 0;
451   unsigned SROACostSavingsLost = 0;
452 
453   /// The mapping of caller Alloca values to their accumulated cost savings. If
454   /// we have to disable SROA for one of the allocas, this tells us how much
455   /// cost must be added.
456   DenseMap<AllocaInst *, int> SROAArgCosts;
457 
458   /// Return true if \p Call is a cold callsite.
459   bool isColdCallSite(CallBase &Call, BlockFrequencyInfo *CallerBFI);
460 
461   /// Update Threshold based on callsite properties such as callee
462   /// attributes and callee hotness for PGO builds. The Callee is explicitly
463   /// passed to support analyzing indirect calls whose target is inferred by
464   /// analysis.
465   void updateThreshold(CallBase &Call, Function &Callee);
466   /// Return a higher threshold if \p Call is a hot callsite.
467   Optional<int> getHotCallSiteThreshold(CallBase &Call,
468                                         BlockFrequencyInfo *CallerBFI);
469 
470   /// Handle a capped 'int' increment for Cost.
471   void addCost(int64_t Inc, int64_t UpperBound = INT_MAX) {
472     assert(UpperBound > 0 && UpperBound <= INT_MAX && "invalid upper bound");
473     Cost = (int)std::min(UpperBound, Cost + Inc);
474   }
475 
476   void onDisableSROA(AllocaInst *Arg) override {
477     auto CostIt = SROAArgCosts.find(Arg);
478     if (CostIt == SROAArgCosts.end())
479       return;
480     addCost(CostIt->second);
481     SROACostSavings -= CostIt->second;
482     SROACostSavingsLost += CostIt->second;
483     SROAArgCosts.erase(CostIt);
484   }
485 
486   void onDisableLoadElimination() override {
487     addCost(LoadEliminationCost);
488     LoadEliminationCost = 0;
489   }
490   void onCallPenalty() override { addCost(InlineConstants::CallPenalty); }
491   void onCallArgumentSetup(const CallBase &Call) override {
492     // Pay the price of the argument setup. We account for the average 1
493     // instruction per call argument setup here.
494     addCost(Call.arg_size() * InlineConstants::InstrCost);
495   }
496   void onLoadRelativeIntrinsic() override {
497     // This is normally lowered to 4 LLVM instructions.
498     addCost(3 * InlineConstants::InstrCost);
499   }
500   void onLoweredCall(Function *F, CallBase &Call,
501                      bool IsIndirectCall) override {
502     // We account for the average 1 instruction per call argument setup here.
503     addCost(Call.arg_size() * InlineConstants::InstrCost);
504 
505     // If we have a constant that we are calling as a function, we can peer
506     // through it and see the function target. This happens not infrequently
507     // during devirtualization and so we want to give it a hefty bonus for
508     // inlining, but cap that bonus in the event that inlining wouldn't pan out.
509     // Pretend to inline the function, with a custom threshold.
510     if (IsIndirectCall && BoostIndirectCalls) {
511       auto IndirectCallParams = Params;
512       IndirectCallParams.DefaultThreshold =
513           InlineConstants::IndirectCallThreshold;
514       /// FIXME: if InlineCostCallAnalyzer is derived from, this may need
515       /// to instantiate the derived class.
516       InlineCostCallAnalyzer CA(*F, Call, IndirectCallParams, TTI,
517                                 GetAssumptionCache, GetBFI, PSI, ORE, false);
518       if (CA.analyze().isSuccess()) {
519         // We were able to inline the indirect call! Subtract the cost from the
520         // threshold to get the bonus we want to apply, but don't go below zero.
521         Cost -= std::max(0, CA.getThreshold() - CA.getCost());
522       }
523     } else
524       // Otherwise simply add the cost for merely making the call.
525       addCost(InlineConstants::CallPenalty);
526   }
527 
528   void onFinalizeSwitch(unsigned JumpTableSize,
529                         unsigned NumCaseCluster) override {
530     // If suitable for a jump table, consider the cost for the table size and
531     // branch to destination.
532     // Maximum valid cost increased in this function.
533     if (JumpTableSize) {
534       int64_t JTCost = (int64_t)JumpTableSize * InlineConstants::InstrCost +
535                        4 * InlineConstants::InstrCost;
536 
537       addCost(JTCost, (int64_t)CostUpperBound);
538       return;
539     }
540     // Considering forming a binary search, we should find the number of nodes
541     // which is same as the number of comparisons when lowered. For a given
542     // number of clusters, n, we can define a recursive function, f(n), to find
543     // the number of nodes in the tree. The recursion is :
544     // f(n) = 1 + f(n/2) + f (n - n/2), when n > 3,
545     // and f(n) = n, when n <= 3.
546     // This will lead a binary tree where the leaf should be either f(2) or f(3)
547     // when n > 3.  So, the number of comparisons from leaves should be n, while
548     // the number of non-leaf should be :
549     //   2^(log2(n) - 1) - 1
550     //   = 2^log2(n) * 2^-1 - 1
551     //   = n / 2 - 1.
552     // Considering comparisons from leaf and non-leaf nodes, we can estimate the
553     // number of comparisons in a simple closed form :
554     //   n + n / 2 - 1 = n * 3 / 2 - 1
555     if (NumCaseCluster <= 3) {
556       // Suppose a comparison includes one compare and one conditional branch.
557       addCost(NumCaseCluster * 2 * InlineConstants::InstrCost);
558       return;
559     }
560 
561     int64_t ExpectedNumberOfCompare = 3 * (int64_t)NumCaseCluster / 2 - 1;
562     int64_t SwitchCost =
563         ExpectedNumberOfCompare * 2 * InlineConstants::InstrCost;
564 
565     addCost(SwitchCost, (int64_t)CostUpperBound);
566   }
567   void onMissedSimplification() override {
568     addCost(InlineConstants::InstrCost);
569   }
570 
571   void onInitializeSROAArg(AllocaInst *Arg) override {
572     assert(Arg != nullptr &&
573            "Should not initialize SROA costs for null value.");
574     SROAArgCosts[Arg] = 0;
575   }
576 
577   void onAggregateSROAUse(AllocaInst *SROAArg) override {
578     auto CostIt = SROAArgCosts.find(SROAArg);
579     assert(CostIt != SROAArgCosts.end() &&
580            "expected this argument to have a cost");
581     CostIt->second += InlineConstants::InstrCost;
582     SROACostSavings += InlineConstants::InstrCost;
583   }
584 
585   void onBlockAnalyzed(const BasicBlock *BB) override {
586     auto *TI = BB->getTerminator();
587     // If we had any successors at this point, than post-inlining is likely to
588     // have them as well. Note that we assume any basic blocks which existed
589     // due to branches or switches which folded above will also fold after
590     // inlining.
591     if (SingleBB && TI->getNumSuccessors() > 1) {
592       // Take off the bonus we applied to the threshold.
593       Threshold -= SingleBBBonus;
594       SingleBB = false;
595     }
596   }
597 
598   void onInstructionAnalysisStart(const Instruction *I) override {
599     // This function is called to store the initial cost of inlining before
600     // the given instruction was assessed.
601     if (!PrintDebugInstructionDeltas)
602       return;
603     Writer.CostThresholdMap[I].CostBefore = Cost;
604     Writer.CostThresholdMap[I].ThresholdBefore = Threshold;
605   }
606 
607   void onInstructionAnalysisFinish(const Instruction *I) override {
608     // This function is called to find new values of cost and threshold after
609     // the instruction has been assessed.
610     if (!PrintDebugInstructionDeltas)
611       return;
612     Writer.CostThresholdMap[I].CostAfter = Cost;
613     Writer.CostThresholdMap[I].ThresholdAfter = Threshold;
614   }
615 
616   InlineResult finalizeAnalysis() override {
617     // Loops generally act a lot like calls in that they act like barriers to
618     // movement, require a certain amount of setup, etc. So when optimising for
619     // size, we penalise any call sites that perform loops. We do this after all
620     // other costs here, so will likely only be dealing with relatively small
621     // functions (and hence DT and LI will hopefully be cheap).
622     auto *Caller = CandidateCall.getFunction();
623     if (Caller->hasMinSize()) {
624       DominatorTree DT(F);
625       LoopInfo LI(DT);
626       int NumLoops = 0;
627       for (Loop *L : LI) {
628         // Ignore loops that will not be executed
629         if (DeadBlocks.count(L->getHeader()))
630           continue;
631         NumLoops++;
632       }
633       addCost(NumLoops * InlineConstants::CallPenalty);
634     }
635 
636     // We applied the maximum possible vector bonus at the beginning. Now,
637     // subtract the excess bonus, if any, from the Threshold before
638     // comparing against Cost.
639     if (NumVectorInstructions <= NumInstructions / 10)
640       Threshold -= VectorBonus;
641     else if (NumVectorInstructions <= NumInstructions / 2)
642       Threshold -= VectorBonus / 2;
643 
644     if (IgnoreThreshold || Cost < std::max(1, Threshold))
645       return InlineResult::success();
646     return InlineResult::failure("Cost over threshold.");
647   }
648   bool shouldStop() override {
649     // Bail out the moment we cross the threshold. This means we'll under-count
650     // the cost, but only when undercounting doesn't matter.
651     return !IgnoreThreshold && Cost >= Threshold && !ComputeFullInlineCost;
652   }
653 
654   void onLoadEliminationOpportunity() override {
655     LoadEliminationCost += InlineConstants::InstrCost;
656   }
657 
658   InlineResult onAnalysisStart() override {
659     // Perform some tweaks to the cost and threshold based on the direct
660     // callsite information.
661 
662     // We want to more aggressively inline vector-dense kernels, so up the
663     // threshold, and we'll lower it if the % of vector instructions gets too
664     // low. Note that these bonuses are some what arbitrary and evolved over
665     // time by accident as much as because they are principled bonuses.
666     //
667     // FIXME: It would be nice to remove all such bonuses. At least it would be
668     // nice to base the bonus values on something more scientific.
669     assert(NumInstructions == 0);
670     assert(NumVectorInstructions == 0);
671 
672     // Update the threshold based on callsite properties
673     updateThreshold(CandidateCall, F);
674 
675     // While Threshold depends on commandline options that can take negative
676     // values, we want to enforce the invariant that the computed threshold and
677     // bonuses are non-negative.
678     assert(Threshold >= 0);
679     assert(SingleBBBonus >= 0);
680     assert(VectorBonus >= 0);
681 
682     // Speculatively apply all possible bonuses to Threshold. If cost exceeds
683     // this Threshold any time, and cost cannot decrease, we can stop processing
684     // the rest of the function body.
685     Threshold += (SingleBBBonus + VectorBonus);
686 
687     // Give out bonuses for the callsite, as the instructions setting them up
688     // will be gone after inlining.
689     addCost(-getCallsiteCost(this->CandidateCall, DL));
690 
691     // If this function uses the coldcc calling convention, prefer not to inline
692     // it.
693     if (F.getCallingConv() == CallingConv::Cold)
694       Cost += InlineConstants::ColdccPenalty;
695 
696     // Check if we're done. This can happen due to bonuses and penalties.
697     if (Cost >= Threshold && !ComputeFullInlineCost)
698       return InlineResult::failure("high cost");
699 
700     return InlineResult::success();
701   }
702 
703 public:
704   InlineCostCallAnalyzer(
705       Function &Callee, CallBase &Call, const InlineParams &Params,
706       const TargetTransformInfo &TTI,
707       function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
708       function_ref<BlockFrequencyInfo &(Function &)> GetBFI = nullptr,
709       ProfileSummaryInfo *PSI = nullptr,
710       OptimizationRemarkEmitter *ORE = nullptr, bool BoostIndirect = true,
711       bool IgnoreThreshold = false)
712       : CallAnalyzer(Callee, Call, TTI, GetAssumptionCache, GetBFI, PSI, ORE),
713         ComputeFullInlineCost(OptComputeFullInlineCost ||
714                               Params.ComputeFullInlineCost || ORE),
715         Params(Params), Threshold(Params.DefaultThreshold),
716         BoostIndirectCalls(BoostIndirect), IgnoreThreshold(IgnoreThreshold) {}
717 
718   /// Annotation Writer for cost annotation
719   CostAnnotationWriter Writer;
720 
721   void dump();
722 
723   virtual ~InlineCostCallAnalyzer() {}
724   int getThreshold() { return Threshold; }
725   int getCost() { return Cost; }
726 };
727 } // namespace
728 
729 /// Test whether the given value is an Alloca-derived function argument.
730 bool CallAnalyzer::isAllocaDerivedArg(Value *V) {
731   return SROAArgValues.count(V);
732 }
733 
734 void CallAnalyzer::disableSROAForArg(AllocaInst *SROAArg) {
735   onDisableSROA(SROAArg);
736   EnabledSROAAllocas.erase(SROAArg);
737   disableLoadElimination();
738 }
739 
740 void CostAnnotationWriter::emitInstructionAnnot(const Instruction *I,
741                                                 formatted_raw_ostream &OS) {
742   // The cost of inlining of the given instruction is printed always.
743   // The threshold delta is printed only when it is non-zero. It happens
744   // when we decided to give a bonus at a particular instruction.
745   if (CostThresholdMap.count(I) == 0) {
746     OS << "; No analysis for the instruction\n";
747     return;
748   }
749   const auto &Record = CostThresholdMap[I];
750   OS << "; cost before = " << Record.CostBefore
751      << ", cost after = " << Record.CostAfter
752      << ", threshold before = " << Record.ThresholdBefore
753      << ", threshold after = " << Record.ThresholdAfter << ", ";
754   OS << "cost delta = " << Record.getCostDelta();
755   if (Record.hasThresholdChanged())
756     OS << ", threshold delta = " << Record.getThresholdDelta();
757   OS << "\n";
758 }
759 
760 /// If 'V' maps to a SROA candidate, disable SROA for it.
761 void CallAnalyzer::disableSROA(Value *V) {
762   if (auto *SROAArg = getSROAArgForValueOrNull(V)) {
763     disableSROAForArg(SROAArg);
764   }
765 }
766 
767 void CallAnalyzer::disableLoadElimination() {
768   if (EnableLoadElimination) {
769     onDisableLoadElimination();
770     EnableLoadElimination = false;
771   }
772 }
773 
774 /// Accumulate a constant GEP offset into an APInt if possible.
775 ///
776 /// Returns false if unable to compute the offset for any reason. Respects any
777 /// simplified values known during the analysis of this callsite.
778 bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
779   unsigned IntPtrWidth = DL.getIndexTypeSizeInBits(GEP.getType());
780   assert(IntPtrWidth == Offset.getBitWidth());
781 
782   for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
783        GTI != GTE; ++GTI) {
784     ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
785     if (!OpC)
786       if (Constant *SimpleOp = SimplifiedValues.lookup(GTI.getOperand()))
787         OpC = dyn_cast<ConstantInt>(SimpleOp);
788     if (!OpC)
789       return false;
790     if (OpC->isZero())
791       continue;
792 
793     // Handle a struct index, which adds its field offset to the pointer.
794     if (StructType *STy = GTI.getStructTypeOrNull()) {
795       unsigned ElementIdx = OpC->getZExtValue();
796       const StructLayout *SL = DL.getStructLayout(STy);
797       Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
798       continue;
799     }
800 
801     APInt TypeSize(IntPtrWidth, DL.getTypeAllocSize(GTI.getIndexedType()));
802     Offset += OpC->getValue().sextOrTrunc(IntPtrWidth) * TypeSize;
803   }
804   return true;
805 }
806 
807 /// Use TTI to check whether a GEP is free.
808 ///
809 /// Respects any simplified values known during the analysis of this callsite.
810 bool CallAnalyzer::isGEPFree(GetElementPtrInst &GEP) {
811   SmallVector<Value *, 4> Operands;
812   Operands.push_back(GEP.getOperand(0));
813   for (User::op_iterator I = GEP.idx_begin(), E = GEP.idx_end(); I != E; ++I)
814     if (Constant *SimpleOp = SimplifiedValues.lookup(*I))
815       Operands.push_back(SimpleOp);
816     else
817       Operands.push_back(*I);
818   return TargetTransformInfo::TCC_Free ==
819          TTI.getUserCost(&GEP, Operands,
820                          TargetTransformInfo::TCK_SizeAndLatency);
821 }
822 
823 bool CallAnalyzer::visitAlloca(AllocaInst &I) {
824   // Check whether inlining will turn a dynamic alloca into a static
825   // alloca and handle that case.
826   if (I.isArrayAllocation()) {
827     Constant *Size = SimplifiedValues.lookup(I.getArraySize());
828     if (auto *AllocSize = dyn_cast_or_null<ConstantInt>(Size)) {
829       Type *Ty = I.getAllocatedType();
830       AllocatedSize = SaturatingMultiplyAdd(
831           AllocSize->getLimitedValue(), DL.getTypeAllocSize(Ty).getFixedSize(),
832           AllocatedSize);
833       return Base::visitAlloca(I);
834     }
835   }
836 
837   // Accumulate the allocated size.
838   if (I.isStaticAlloca()) {
839     Type *Ty = I.getAllocatedType();
840     AllocatedSize =
841         SaturatingAdd(DL.getTypeAllocSize(Ty).getFixedSize(), AllocatedSize);
842   }
843 
844   // We will happily inline static alloca instructions.
845   if (I.isStaticAlloca())
846     return Base::visitAlloca(I);
847 
848   // FIXME: This is overly conservative. Dynamic allocas are inefficient for
849   // a variety of reasons, and so we would like to not inline them into
850   // functions which don't currently have a dynamic alloca. This simply
851   // disables inlining altogether in the presence of a dynamic alloca.
852   HasDynamicAlloca = true;
853   return false;
854 }
855 
856 bool CallAnalyzer::visitPHI(PHINode &I) {
857   // FIXME: We need to propagate SROA *disabling* through phi nodes, even
858   // though we don't want to propagate it's bonuses. The idea is to disable
859   // SROA if it *might* be used in an inappropriate manner.
860 
861   // Phi nodes are always zero-cost.
862   // FIXME: Pointer sizes may differ between different address spaces, so do we
863   // need to use correct address space in the call to getPointerSizeInBits here?
864   // Or could we skip the getPointerSizeInBits call completely? As far as I can
865   // see the ZeroOffset is used as a dummy value, so we can probably use any
866   // bit width for the ZeroOffset?
867   APInt ZeroOffset = APInt::getNullValue(DL.getPointerSizeInBits(0));
868   bool CheckSROA = I.getType()->isPointerTy();
869 
870   // Track the constant or pointer with constant offset we've seen so far.
871   Constant *FirstC = nullptr;
872   std::pair<Value *, APInt> FirstBaseAndOffset = {nullptr, ZeroOffset};
873   Value *FirstV = nullptr;
874 
875   for (unsigned i = 0, e = I.getNumIncomingValues(); i != e; ++i) {
876     BasicBlock *Pred = I.getIncomingBlock(i);
877     // If the incoming block is dead, skip the incoming block.
878     if (DeadBlocks.count(Pred))
879       continue;
880     // If the parent block of phi is not the known successor of the incoming
881     // block, skip the incoming block.
882     BasicBlock *KnownSuccessor = KnownSuccessors[Pred];
883     if (KnownSuccessor && KnownSuccessor != I.getParent())
884       continue;
885 
886     Value *V = I.getIncomingValue(i);
887     // If the incoming value is this phi itself, skip the incoming value.
888     if (&I == V)
889       continue;
890 
891     Constant *C = dyn_cast<Constant>(V);
892     if (!C)
893       C = SimplifiedValues.lookup(V);
894 
895     std::pair<Value *, APInt> BaseAndOffset = {nullptr, ZeroOffset};
896     if (!C && CheckSROA)
897       BaseAndOffset = ConstantOffsetPtrs.lookup(V);
898 
899     if (!C && !BaseAndOffset.first)
900       // The incoming value is neither a constant nor a pointer with constant
901       // offset, exit early.
902       return true;
903 
904     if (FirstC) {
905       if (FirstC == C)
906         // If we've seen a constant incoming value before and it is the same
907         // constant we see this time, continue checking the next incoming value.
908         continue;
909       // Otherwise early exit because we either see a different constant or saw
910       // a constant before but we have a pointer with constant offset this time.
911       return true;
912     }
913 
914     if (FirstV) {
915       // The same logic as above, but check pointer with constant offset here.
916       if (FirstBaseAndOffset == BaseAndOffset)
917         continue;
918       return true;
919     }
920 
921     if (C) {
922       // This is the 1st time we've seen a constant, record it.
923       FirstC = C;
924       continue;
925     }
926 
927     // The remaining case is that this is the 1st time we've seen a pointer with
928     // constant offset, record it.
929     FirstV = V;
930     FirstBaseAndOffset = BaseAndOffset;
931   }
932 
933   // Check if we can map phi to a constant.
934   if (FirstC) {
935     SimplifiedValues[&I] = FirstC;
936     return true;
937   }
938 
939   // Check if we can map phi to a pointer with constant offset.
940   if (FirstBaseAndOffset.first) {
941     ConstantOffsetPtrs[&I] = FirstBaseAndOffset;
942 
943     if (auto *SROAArg = getSROAArgForValueOrNull(FirstV))
944       SROAArgValues[&I] = SROAArg;
945   }
946 
947   return true;
948 }
949 
950 /// Check we can fold GEPs of constant-offset call site argument pointers.
951 /// This requires target data and inbounds GEPs.
952 ///
953 /// \return true if the specified GEP can be folded.
954 bool CallAnalyzer::canFoldInboundsGEP(GetElementPtrInst &I) {
955   // Check if we have a base + offset for the pointer.
956   std::pair<Value *, APInt> BaseAndOffset =
957       ConstantOffsetPtrs.lookup(I.getPointerOperand());
958   if (!BaseAndOffset.first)
959     return false;
960 
961   // Check if the offset of this GEP is constant, and if so accumulate it
962   // into Offset.
963   if (!accumulateGEPOffset(cast<GEPOperator>(I), BaseAndOffset.second))
964     return false;
965 
966   // Add the result as a new mapping to Base + Offset.
967   ConstantOffsetPtrs[&I] = BaseAndOffset;
968 
969   return true;
970 }
971 
972 bool CallAnalyzer::visitGetElementPtr(GetElementPtrInst &I) {
973   auto *SROAArg = getSROAArgForValueOrNull(I.getPointerOperand());
974 
975   // Lambda to check whether a GEP's indices are all constant.
976   auto IsGEPOffsetConstant = [&](GetElementPtrInst &GEP) {
977     for (User::op_iterator I = GEP.idx_begin(), E = GEP.idx_end(); I != E; ++I)
978       if (!isa<Constant>(*I) && !SimplifiedValues.lookup(*I))
979         return false;
980     return true;
981   };
982 
983   if ((I.isInBounds() && canFoldInboundsGEP(I)) || IsGEPOffsetConstant(I)) {
984     if (SROAArg)
985       SROAArgValues[&I] = SROAArg;
986 
987     // Constant GEPs are modeled as free.
988     return true;
989   }
990 
991   // Variable GEPs will require math and will disable SROA.
992   if (SROAArg)
993     disableSROAForArg(SROAArg);
994   return isGEPFree(I);
995 }
996 
997 /// Simplify \p I if its operands are constants and update SimplifiedValues.
998 /// \p Evaluate is a callable specific to instruction type that evaluates the
999 /// instruction when all the operands are constants.
1000 template <typename Callable>
1001 bool CallAnalyzer::simplifyInstruction(Instruction &I, Callable Evaluate) {
1002   SmallVector<Constant *, 2> COps;
1003   for (Value *Op : I.operands()) {
1004     Constant *COp = dyn_cast<Constant>(Op);
1005     if (!COp)
1006       COp = SimplifiedValues.lookup(Op);
1007     if (!COp)
1008       return false;
1009     COps.push_back(COp);
1010   }
1011   auto *C = Evaluate(COps);
1012   if (!C)
1013     return false;
1014   SimplifiedValues[&I] = C;
1015   return true;
1016 }
1017 
1018 bool CallAnalyzer::visitBitCast(BitCastInst &I) {
1019   // Propagate constants through bitcasts.
1020   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1021         return ConstantExpr::getBitCast(COps[0], I.getType());
1022       }))
1023     return true;
1024 
1025   // Track base/offsets through casts
1026   std::pair<Value *, APInt> BaseAndOffset =
1027       ConstantOffsetPtrs.lookup(I.getOperand(0));
1028   // Casts don't change the offset, just wrap it up.
1029   if (BaseAndOffset.first)
1030     ConstantOffsetPtrs[&I] = BaseAndOffset;
1031 
1032   // Also look for SROA candidates here.
1033   if (auto *SROAArg = getSROAArgForValueOrNull(I.getOperand(0)))
1034     SROAArgValues[&I] = SROAArg;
1035 
1036   // Bitcasts are always zero cost.
1037   return true;
1038 }
1039 
1040 bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
1041   // Propagate constants through ptrtoint.
1042   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1043         return ConstantExpr::getPtrToInt(COps[0], I.getType());
1044       }))
1045     return true;
1046 
1047   // Track base/offset pairs when converted to a plain integer provided the
1048   // integer is large enough to represent the pointer.
1049   unsigned IntegerSize = I.getType()->getScalarSizeInBits();
1050   unsigned AS = I.getOperand(0)->getType()->getPointerAddressSpace();
1051   if (IntegerSize >= DL.getPointerSizeInBits(AS)) {
1052     std::pair<Value *, APInt> BaseAndOffset =
1053         ConstantOffsetPtrs.lookup(I.getOperand(0));
1054     if (BaseAndOffset.first)
1055       ConstantOffsetPtrs[&I] = BaseAndOffset;
1056   }
1057 
1058   // This is really weird. Technically, ptrtoint will disable SROA. However,
1059   // unless that ptrtoint is *used* somewhere in the live basic blocks after
1060   // inlining, it will be nuked, and SROA should proceed. All of the uses which
1061   // would block SROA would also block SROA if applied directly to a pointer,
1062   // and so we can just add the integer in here. The only places where SROA is
1063   // preserved either cannot fire on an integer, or won't in-and-of themselves
1064   // disable SROA (ext) w/o some later use that we would see and disable.
1065   if (auto *SROAArg = getSROAArgForValueOrNull(I.getOperand(0)))
1066     SROAArgValues[&I] = SROAArg;
1067 
1068   return TargetTransformInfo::TCC_Free ==
1069          TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
1070 }
1071 
1072 bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
1073   // Propagate constants through ptrtoint.
1074   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1075         return ConstantExpr::getIntToPtr(COps[0], I.getType());
1076       }))
1077     return true;
1078 
1079   // Track base/offset pairs when round-tripped through a pointer without
1080   // modifications provided the integer is not too large.
1081   Value *Op = I.getOperand(0);
1082   unsigned IntegerSize = Op->getType()->getScalarSizeInBits();
1083   if (IntegerSize <= DL.getPointerTypeSizeInBits(I.getType())) {
1084     std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op);
1085     if (BaseAndOffset.first)
1086       ConstantOffsetPtrs[&I] = BaseAndOffset;
1087   }
1088 
1089   // "Propagate" SROA here in the same manner as we do for ptrtoint above.
1090   if (auto *SROAArg = getSROAArgForValueOrNull(Op))
1091     SROAArgValues[&I] = SROAArg;
1092 
1093   return TargetTransformInfo::TCC_Free ==
1094          TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
1095 }
1096 
1097 bool CallAnalyzer::visitCastInst(CastInst &I) {
1098   // Propagate constants through casts.
1099   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1100         return ConstantExpr::getCast(I.getOpcode(), COps[0], I.getType());
1101       }))
1102     return true;
1103 
1104   // Disable SROA in the face of arbitrary casts we don't whitelist elsewhere.
1105   disableSROA(I.getOperand(0));
1106 
1107   // If this is a floating-point cast, and the target says this operation
1108   // is expensive, this may eventually become a library call. Treat the cost
1109   // as such.
1110   switch (I.getOpcode()) {
1111   case Instruction::FPTrunc:
1112   case Instruction::FPExt:
1113   case Instruction::UIToFP:
1114   case Instruction::SIToFP:
1115   case Instruction::FPToUI:
1116   case Instruction::FPToSI:
1117     if (TTI.getFPOpCost(I.getType()) == TargetTransformInfo::TCC_Expensive)
1118       onCallPenalty();
1119     break;
1120   default:
1121     break;
1122   }
1123 
1124   return TargetTransformInfo::TCC_Free ==
1125          TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
1126 }
1127 
1128 bool CallAnalyzer::visitUnaryInstruction(UnaryInstruction &I) {
1129   Value *Operand = I.getOperand(0);
1130   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1131         return ConstantFoldInstOperands(&I, COps[0], DL);
1132       }))
1133     return true;
1134 
1135   // Disable any SROA on the argument to arbitrary unary instructions.
1136   disableSROA(Operand);
1137 
1138   return false;
1139 }
1140 
1141 bool CallAnalyzer::paramHasAttr(Argument *A, Attribute::AttrKind Attr) {
1142   return CandidateCall.paramHasAttr(A->getArgNo(), Attr);
1143 }
1144 
1145 bool CallAnalyzer::isKnownNonNullInCallee(Value *V) {
1146   // Does the *call site* have the NonNull attribute set on an argument?  We
1147   // use the attribute on the call site to memoize any analysis done in the
1148   // caller. This will also trip if the callee function has a non-null
1149   // parameter attribute, but that's a less interesting case because hopefully
1150   // the callee would already have been simplified based on that.
1151   if (Argument *A = dyn_cast<Argument>(V))
1152     if (paramHasAttr(A, Attribute::NonNull))
1153       return true;
1154 
1155   // Is this an alloca in the caller?  This is distinct from the attribute case
1156   // above because attributes aren't updated within the inliner itself and we
1157   // always want to catch the alloca derived case.
1158   if (isAllocaDerivedArg(V))
1159     // We can actually predict the result of comparisons between an
1160     // alloca-derived value and null. Note that this fires regardless of
1161     // SROA firing.
1162     return true;
1163 
1164   return false;
1165 }
1166 
1167 bool CallAnalyzer::allowSizeGrowth(CallBase &Call) {
1168   // If the normal destination of the invoke or the parent block of the call
1169   // site is unreachable-terminated, there is little point in inlining this
1170   // unless there is literally zero cost.
1171   // FIXME: Note that it is possible that an unreachable-terminated block has a
1172   // hot entry. For example, in below scenario inlining hot_call_X() may be
1173   // beneficial :
1174   // main() {
1175   //   hot_call_1();
1176   //   ...
1177   //   hot_call_N()
1178   //   exit(0);
1179   // }
1180   // For now, we are not handling this corner case here as it is rare in real
1181   // code. In future, we should elaborate this based on BPI and BFI in more
1182   // general threshold adjusting heuristics in updateThreshold().
1183   if (InvokeInst *II = dyn_cast<InvokeInst>(&Call)) {
1184     if (isa<UnreachableInst>(II->getNormalDest()->getTerminator()))
1185       return false;
1186   } else if (isa<UnreachableInst>(Call.getParent()->getTerminator()))
1187     return false;
1188 
1189   return true;
1190 }
1191 
1192 bool InlineCostCallAnalyzer::isColdCallSite(CallBase &Call,
1193                                             BlockFrequencyInfo *CallerBFI) {
1194   // If global profile summary is available, then callsite's coldness is
1195   // determined based on that.
1196   if (PSI && PSI->hasProfileSummary())
1197     return PSI->isColdCallSite(Call, CallerBFI);
1198 
1199   // Otherwise we need BFI to be available.
1200   if (!CallerBFI)
1201     return false;
1202 
1203   // Determine if the callsite is cold relative to caller's entry. We could
1204   // potentially cache the computation of scaled entry frequency, but the added
1205   // complexity is not worth it unless this scaling shows up high in the
1206   // profiles.
1207   const BranchProbability ColdProb(ColdCallSiteRelFreq, 100);
1208   auto CallSiteBB = Call.getParent();
1209   auto CallSiteFreq = CallerBFI->getBlockFreq(CallSiteBB);
1210   auto CallerEntryFreq =
1211       CallerBFI->getBlockFreq(&(Call.getCaller()->getEntryBlock()));
1212   return CallSiteFreq < CallerEntryFreq * ColdProb;
1213 }
1214 
1215 Optional<int>
1216 InlineCostCallAnalyzer::getHotCallSiteThreshold(CallBase &Call,
1217                                                 BlockFrequencyInfo *CallerBFI) {
1218 
1219   // If global profile summary is available, then callsite's hotness is
1220   // determined based on that.
1221   if (PSI && PSI->hasProfileSummary() && PSI->isHotCallSite(Call, CallerBFI))
1222     return Params.HotCallSiteThreshold;
1223 
1224   // Otherwise we need BFI to be available and to have a locally hot callsite
1225   // threshold.
1226   if (!CallerBFI || !Params.LocallyHotCallSiteThreshold)
1227     return None;
1228 
1229   // Determine if the callsite is hot relative to caller's entry. We could
1230   // potentially cache the computation of scaled entry frequency, but the added
1231   // complexity is not worth it unless this scaling shows up high in the
1232   // profiles.
1233   auto CallSiteBB = Call.getParent();
1234   auto CallSiteFreq = CallerBFI->getBlockFreq(CallSiteBB).getFrequency();
1235   auto CallerEntryFreq = CallerBFI->getEntryFreq();
1236   if (CallSiteFreq >= CallerEntryFreq * HotCallSiteRelFreq)
1237     return Params.LocallyHotCallSiteThreshold;
1238 
1239   // Otherwise treat it normally.
1240   return None;
1241 }
1242 
1243 void InlineCostCallAnalyzer::updateThreshold(CallBase &Call, Function &Callee) {
1244   // If no size growth is allowed for this inlining, set Threshold to 0.
1245   if (!allowSizeGrowth(Call)) {
1246     Threshold = 0;
1247     return;
1248   }
1249 
1250   Function *Caller = Call.getCaller();
1251 
1252   // return min(A, B) if B is valid.
1253   auto MinIfValid = [](int A, Optional<int> B) {
1254     return B ? std::min(A, B.getValue()) : A;
1255   };
1256 
1257   // return max(A, B) if B is valid.
1258   auto MaxIfValid = [](int A, Optional<int> B) {
1259     return B ? std::max(A, B.getValue()) : A;
1260   };
1261 
1262   // Various bonus percentages. These are multiplied by Threshold to get the
1263   // bonus values.
1264   // SingleBBBonus: This bonus is applied if the callee has a single reachable
1265   // basic block at the given callsite context. This is speculatively applied
1266   // and withdrawn if more than one basic block is seen.
1267   //
1268   // LstCallToStaticBonus: This large bonus is applied to ensure the inlining
1269   // of the last call to a static function as inlining such functions is
1270   // guaranteed to reduce code size.
1271   //
1272   // These bonus percentages may be set to 0 based on properties of the caller
1273   // and the callsite.
1274   int SingleBBBonusPercent = 50;
1275   int VectorBonusPercent = TTI.getInlinerVectorBonusPercent();
1276   int LastCallToStaticBonus = InlineConstants::LastCallToStaticBonus;
1277 
1278   // Lambda to set all the above bonus and bonus percentages to 0.
1279   auto DisallowAllBonuses = [&]() {
1280     SingleBBBonusPercent = 0;
1281     VectorBonusPercent = 0;
1282     LastCallToStaticBonus = 0;
1283   };
1284 
1285   // Use the OptMinSizeThreshold or OptSizeThreshold knob if they are available
1286   // and reduce the threshold if the caller has the necessary attribute.
1287   if (Caller->hasMinSize()) {
1288     Threshold = MinIfValid(Threshold, Params.OptMinSizeThreshold);
1289     // For minsize, we want to disable the single BB bonus and the vector
1290     // bonuses, but not the last-call-to-static bonus. Inlining the last call to
1291     // a static function will, at the minimum, eliminate the parameter setup and
1292     // call/return instructions.
1293     SingleBBBonusPercent = 0;
1294     VectorBonusPercent = 0;
1295   } else if (Caller->hasOptSize())
1296     Threshold = MinIfValid(Threshold, Params.OptSizeThreshold);
1297 
1298   // Adjust the threshold based on inlinehint attribute and profile based
1299   // hotness information if the caller does not have MinSize attribute.
1300   if (!Caller->hasMinSize()) {
1301     if (Callee.hasFnAttribute(Attribute::InlineHint))
1302       Threshold = MaxIfValid(Threshold, Params.HintThreshold);
1303 
1304     // FIXME: After switching to the new passmanager, simplify the logic below
1305     // by checking only the callsite hotness/coldness as we will reliably
1306     // have local profile information.
1307     //
1308     // Callsite hotness and coldness can be determined if sample profile is
1309     // used (which adds hotness metadata to calls) or if caller's
1310     // BlockFrequencyInfo is available.
1311     BlockFrequencyInfo *CallerBFI = GetBFI ? &(GetBFI(*Caller)) : nullptr;
1312     auto HotCallSiteThreshold = getHotCallSiteThreshold(Call, CallerBFI);
1313     if (!Caller->hasOptSize() && HotCallSiteThreshold) {
1314       LLVM_DEBUG(dbgs() << "Hot callsite.\n");
1315       // FIXME: This should update the threshold only if it exceeds the
1316       // current threshold, but AutoFDO + ThinLTO currently relies on this
1317       // behavior to prevent inlining of hot callsites during ThinLTO
1318       // compile phase.
1319       Threshold = HotCallSiteThreshold.getValue();
1320     } else if (isColdCallSite(Call, CallerBFI)) {
1321       LLVM_DEBUG(dbgs() << "Cold callsite.\n");
1322       // Do not apply bonuses for a cold callsite including the
1323       // LastCallToStatic bonus. While this bonus might result in code size
1324       // reduction, it can cause the size of a non-cold caller to increase
1325       // preventing it from being inlined.
1326       DisallowAllBonuses();
1327       Threshold = MinIfValid(Threshold, Params.ColdCallSiteThreshold);
1328     } else if (PSI) {
1329       // Use callee's global profile information only if we have no way of
1330       // determining this via callsite information.
1331       if (PSI->isFunctionEntryHot(&Callee)) {
1332         LLVM_DEBUG(dbgs() << "Hot callee.\n");
1333         // If callsite hotness can not be determined, we may still know
1334         // that the callee is hot and treat it as a weaker hint for threshold
1335         // increase.
1336         Threshold = MaxIfValid(Threshold, Params.HintThreshold);
1337       } else if (PSI->isFunctionEntryCold(&Callee)) {
1338         LLVM_DEBUG(dbgs() << "Cold callee.\n");
1339         // Do not apply bonuses for a cold callee including the
1340         // LastCallToStatic bonus. While this bonus might result in code size
1341         // reduction, it can cause the size of a non-cold caller to increase
1342         // preventing it from being inlined.
1343         DisallowAllBonuses();
1344         Threshold = MinIfValid(Threshold, Params.ColdThreshold);
1345       }
1346     }
1347   }
1348 
1349   // Finally, take the target-specific inlining threshold multiplier into
1350   // account.
1351   Threshold *= TTI.getInliningThresholdMultiplier();
1352 
1353   SingleBBBonus = Threshold * SingleBBBonusPercent / 100;
1354   VectorBonus = Threshold * VectorBonusPercent / 100;
1355 
1356   bool OnlyOneCallAndLocalLinkage =
1357       F.hasLocalLinkage() && F.hasOneUse() && &F == Call.getCalledFunction();
1358   // If there is only one call of the function, and it has internal linkage,
1359   // the cost of inlining it drops dramatically. It may seem odd to update
1360   // Cost in updateThreshold, but the bonus depends on the logic in this method.
1361   if (OnlyOneCallAndLocalLinkage)
1362     Cost -= LastCallToStaticBonus;
1363 }
1364 
1365 bool CallAnalyzer::visitCmpInst(CmpInst &I) {
1366   Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1367   // First try to handle simplified comparisons.
1368   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1369         return ConstantExpr::getCompare(I.getPredicate(), COps[0], COps[1]);
1370       }))
1371     return true;
1372 
1373   if (I.getOpcode() == Instruction::FCmp)
1374     return false;
1375 
1376   // Otherwise look for a comparison between constant offset pointers with
1377   // a common base.
1378   Value *LHSBase, *RHSBase;
1379   APInt LHSOffset, RHSOffset;
1380   std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
1381   if (LHSBase) {
1382     std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
1383     if (RHSBase && LHSBase == RHSBase) {
1384       // We have common bases, fold the icmp to a constant based on the
1385       // offsets.
1386       Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
1387       Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
1388       if (Constant *C = ConstantExpr::getICmp(I.getPredicate(), CLHS, CRHS)) {
1389         SimplifiedValues[&I] = C;
1390         ++NumConstantPtrCmps;
1391         return true;
1392       }
1393     }
1394   }
1395 
1396   // If the comparison is an equality comparison with null, we can simplify it
1397   // if we know the value (argument) can't be null
1398   if (I.isEquality() && isa<ConstantPointerNull>(I.getOperand(1)) &&
1399       isKnownNonNullInCallee(I.getOperand(0))) {
1400     bool IsNotEqual = I.getPredicate() == CmpInst::ICMP_NE;
1401     SimplifiedValues[&I] = IsNotEqual ? ConstantInt::getTrue(I.getType())
1402                                       : ConstantInt::getFalse(I.getType());
1403     return true;
1404   }
1405   return handleSROA(I.getOperand(0), isa<ConstantPointerNull>(I.getOperand(1)));
1406 }
1407 
1408 bool CallAnalyzer::visitSub(BinaryOperator &I) {
1409   // Try to handle a special case: we can fold computing the difference of two
1410   // constant-related pointers.
1411   Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1412   Value *LHSBase, *RHSBase;
1413   APInt LHSOffset, RHSOffset;
1414   std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
1415   if (LHSBase) {
1416     std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
1417     if (RHSBase && LHSBase == RHSBase) {
1418       // We have common bases, fold the subtract to a constant based on the
1419       // offsets.
1420       Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
1421       Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
1422       if (Constant *C = ConstantExpr::getSub(CLHS, CRHS)) {
1423         SimplifiedValues[&I] = C;
1424         ++NumConstantPtrDiffs;
1425         return true;
1426       }
1427     }
1428   }
1429 
1430   // Otherwise, fall back to the generic logic for simplifying and handling
1431   // instructions.
1432   return Base::visitSub(I);
1433 }
1434 
1435 bool CallAnalyzer::visitBinaryOperator(BinaryOperator &I) {
1436   Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1437   Constant *CLHS = dyn_cast<Constant>(LHS);
1438   if (!CLHS)
1439     CLHS = SimplifiedValues.lookup(LHS);
1440   Constant *CRHS = dyn_cast<Constant>(RHS);
1441   if (!CRHS)
1442     CRHS = SimplifiedValues.lookup(RHS);
1443 
1444   Value *SimpleV = nullptr;
1445   if (auto FI = dyn_cast<FPMathOperator>(&I))
1446     SimpleV = SimplifyBinOp(I.getOpcode(), CLHS ? CLHS : LHS, CRHS ? CRHS : RHS,
1447                             FI->getFastMathFlags(), DL);
1448   else
1449     SimpleV =
1450         SimplifyBinOp(I.getOpcode(), CLHS ? CLHS : LHS, CRHS ? CRHS : RHS, DL);
1451 
1452   if (Constant *C = dyn_cast_or_null<Constant>(SimpleV))
1453     SimplifiedValues[&I] = C;
1454 
1455   if (SimpleV)
1456     return true;
1457 
1458   // Disable any SROA on arguments to arbitrary, unsimplified binary operators.
1459   disableSROA(LHS);
1460   disableSROA(RHS);
1461 
1462   // If the instruction is floating point, and the target says this operation
1463   // is expensive, this may eventually become a library call. Treat the cost
1464   // as such. Unless it's fneg which can be implemented with an xor.
1465   using namespace llvm::PatternMatch;
1466   if (I.getType()->isFloatingPointTy() &&
1467       TTI.getFPOpCost(I.getType()) == TargetTransformInfo::TCC_Expensive &&
1468       !match(&I, m_FNeg(m_Value())))
1469     onCallPenalty();
1470 
1471   return false;
1472 }
1473 
1474 bool CallAnalyzer::visitFNeg(UnaryOperator &I) {
1475   Value *Op = I.getOperand(0);
1476   Constant *COp = dyn_cast<Constant>(Op);
1477   if (!COp)
1478     COp = SimplifiedValues.lookup(Op);
1479 
1480   Value *SimpleV = SimplifyFNegInst(
1481       COp ? COp : Op, cast<FPMathOperator>(I).getFastMathFlags(), DL);
1482 
1483   if (Constant *C = dyn_cast_or_null<Constant>(SimpleV))
1484     SimplifiedValues[&I] = C;
1485 
1486   if (SimpleV)
1487     return true;
1488 
1489   // Disable any SROA on arguments to arbitrary, unsimplified fneg.
1490   disableSROA(Op);
1491 
1492   return false;
1493 }
1494 
1495 bool CallAnalyzer::visitLoad(LoadInst &I) {
1496   if (handleSROA(I.getPointerOperand(), I.isSimple()))
1497     return true;
1498 
1499   // If the data is already loaded from this address and hasn't been clobbered
1500   // by any stores or calls, this load is likely to be redundant and can be
1501   // eliminated.
1502   if (EnableLoadElimination &&
1503       !LoadAddrSet.insert(I.getPointerOperand()).second && I.isUnordered()) {
1504     onLoadEliminationOpportunity();
1505     return true;
1506   }
1507 
1508   return false;
1509 }
1510 
1511 bool CallAnalyzer::visitStore(StoreInst &I) {
1512   if (handleSROA(I.getPointerOperand(), I.isSimple()))
1513     return true;
1514 
1515   // The store can potentially clobber loads and prevent repeated loads from
1516   // being eliminated.
1517   // FIXME:
1518   // 1. We can probably keep an initial set of eliminatable loads substracted
1519   // from the cost even when we finally see a store. We just need to disable
1520   // *further* accumulation of elimination savings.
1521   // 2. We should probably at some point thread MemorySSA for the callee into
1522   // this and then use that to actually compute *really* precise savings.
1523   disableLoadElimination();
1524   return false;
1525 }
1526 
1527 bool CallAnalyzer::visitExtractValue(ExtractValueInst &I) {
1528   // Constant folding for extract value is trivial.
1529   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1530         return ConstantExpr::getExtractValue(COps[0], I.getIndices());
1531       }))
1532     return true;
1533 
1534   // SROA can look through these but give them a cost.
1535   return false;
1536 }
1537 
1538 bool CallAnalyzer::visitInsertValue(InsertValueInst &I) {
1539   // Constant folding for insert value is trivial.
1540   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1541         return ConstantExpr::getInsertValue(/*AggregateOperand*/ COps[0],
1542                                             /*InsertedValueOperand*/ COps[1],
1543                                             I.getIndices());
1544       }))
1545     return true;
1546 
1547   // SROA can look through these but give them a cost.
1548   return false;
1549 }
1550 
1551 /// Try to simplify a call site.
1552 ///
1553 /// Takes a concrete function and callsite and tries to actually simplify it by
1554 /// analyzing the arguments and call itself with instsimplify. Returns true if
1555 /// it has simplified the callsite to some other entity (a constant), making it
1556 /// free.
1557 bool CallAnalyzer::simplifyCallSite(Function *F, CallBase &Call) {
1558   // FIXME: Using the instsimplify logic directly for this is inefficient
1559   // because we have to continually rebuild the argument list even when no
1560   // simplifications can be performed. Until that is fixed with remapping
1561   // inside of instsimplify, directly constant fold calls here.
1562   if (!canConstantFoldCallTo(&Call, F))
1563     return false;
1564 
1565   // Try to re-map the arguments to constants.
1566   SmallVector<Constant *, 4> ConstantArgs;
1567   ConstantArgs.reserve(Call.arg_size());
1568   for (Value *I : Call.args()) {
1569     Constant *C = dyn_cast<Constant>(I);
1570     if (!C)
1571       C = dyn_cast_or_null<Constant>(SimplifiedValues.lookup(I));
1572     if (!C)
1573       return false; // This argument doesn't map to a constant.
1574 
1575     ConstantArgs.push_back(C);
1576   }
1577   if (Constant *C = ConstantFoldCall(&Call, F, ConstantArgs)) {
1578     SimplifiedValues[&Call] = C;
1579     return true;
1580   }
1581 
1582   return false;
1583 }
1584 
1585 bool CallAnalyzer::visitCallBase(CallBase &Call) {
1586   if (Call.hasFnAttr(Attribute::ReturnsTwice) &&
1587       !F.hasFnAttribute(Attribute::ReturnsTwice)) {
1588     // This aborts the entire analysis.
1589     ExposesReturnsTwice = true;
1590     return false;
1591   }
1592   if (isa<CallInst>(Call) && cast<CallInst>(Call).cannotDuplicate())
1593     ContainsNoDuplicateCall = true;
1594 
1595   Value *Callee = Call.getCalledOperand();
1596   Function *F = dyn_cast_or_null<Function>(Callee);
1597   bool IsIndirectCall = !F;
1598   if (IsIndirectCall) {
1599     // Check if this happens to be an indirect function call to a known function
1600     // in this inline context. If not, we've done all we can.
1601     F = dyn_cast_or_null<Function>(SimplifiedValues.lookup(Callee));
1602     if (!F) {
1603       onCallArgumentSetup(Call);
1604 
1605       if (!Call.onlyReadsMemory())
1606         disableLoadElimination();
1607       return Base::visitCallBase(Call);
1608     }
1609   }
1610 
1611   assert(F && "Expected a call to a known function");
1612 
1613   // When we have a concrete function, first try to simplify it directly.
1614   if (simplifyCallSite(F, Call))
1615     return true;
1616 
1617   // Next check if it is an intrinsic we know about.
1618   // FIXME: Lift this into part of the InstVisitor.
1619   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&Call)) {
1620     switch (II->getIntrinsicID()) {
1621     default:
1622       if (!Call.onlyReadsMemory() && !isAssumeLikeIntrinsic(II))
1623         disableLoadElimination();
1624       return Base::visitCallBase(Call);
1625 
1626     case Intrinsic::load_relative:
1627       onLoadRelativeIntrinsic();
1628       return false;
1629 
1630     case Intrinsic::memset:
1631     case Intrinsic::memcpy:
1632     case Intrinsic::memmove:
1633       disableLoadElimination();
1634       // SROA can usually chew through these intrinsics, but they aren't free.
1635       return false;
1636     case Intrinsic::icall_branch_funnel:
1637     case Intrinsic::localescape:
1638       HasUninlineableIntrinsic = true;
1639       return false;
1640     case Intrinsic::vastart:
1641       InitsVargArgs = true;
1642       return false;
1643     }
1644   }
1645 
1646   if (F == Call.getFunction()) {
1647     // This flag will fully abort the analysis, so don't bother with anything
1648     // else.
1649     IsRecursiveCall = true;
1650     return false;
1651   }
1652 
1653   if (TTI.isLoweredToCall(F)) {
1654     onLoweredCall(F, Call, IsIndirectCall);
1655   }
1656 
1657   if (!(Call.onlyReadsMemory() || (IsIndirectCall && F->onlyReadsMemory())))
1658     disableLoadElimination();
1659   return Base::visitCallBase(Call);
1660 }
1661 
1662 bool CallAnalyzer::visitReturnInst(ReturnInst &RI) {
1663   // At least one return instruction will be free after inlining.
1664   bool Free = !HasReturn;
1665   HasReturn = true;
1666   return Free;
1667 }
1668 
1669 bool CallAnalyzer::visitBranchInst(BranchInst &BI) {
1670   // We model unconditional branches as essentially free -- they really
1671   // shouldn't exist at all, but handling them makes the behavior of the
1672   // inliner more regular and predictable. Interestingly, conditional branches
1673   // which will fold away are also free.
1674   return BI.isUnconditional() || isa<ConstantInt>(BI.getCondition()) ||
1675          dyn_cast_or_null<ConstantInt>(
1676              SimplifiedValues.lookup(BI.getCondition()));
1677 }
1678 
1679 bool CallAnalyzer::visitSelectInst(SelectInst &SI) {
1680   bool CheckSROA = SI.getType()->isPointerTy();
1681   Value *TrueVal = SI.getTrueValue();
1682   Value *FalseVal = SI.getFalseValue();
1683 
1684   Constant *TrueC = dyn_cast<Constant>(TrueVal);
1685   if (!TrueC)
1686     TrueC = SimplifiedValues.lookup(TrueVal);
1687   Constant *FalseC = dyn_cast<Constant>(FalseVal);
1688   if (!FalseC)
1689     FalseC = SimplifiedValues.lookup(FalseVal);
1690   Constant *CondC =
1691       dyn_cast_or_null<Constant>(SimplifiedValues.lookup(SI.getCondition()));
1692 
1693   if (!CondC) {
1694     // Select C, X, X => X
1695     if (TrueC == FalseC && TrueC) {
1696       SimplifiedValues[&SI] = TrueC;
1697       return true;
1698     }
1699 
1700     if (!CheckSROA)
1701       return Base::visitSelectInst(SI);
1702 
1703     std::pair<Value *, APInt> TrueBaseAndOffset =
1704         ConstantOffsetPtrs.lookup(TrueVal);
1705     std::pair<Value *, APInt> FalseBaseAndOffset =
1706         ConstantOffsetPtrs.lookup(FalseVal);
1707     if (TrueBaseAndOffset == FalseBaseAndOffset && TrueBaseAndOffset.first) {
1708       ConstantOffsetPtrs[&SI] = TrueBaseAndOffset;
1709 
1710       if (auto *SROAArg = getSROAArgForValueOrNull(TrueVal))
1711         SROAArgValues[&SI] = SROAArg;
1712       return true;
1713     }
1714 
1715     return Base::visitSelectInst(SI);
1716   }
1717 
1718   // Select condition is a constant.
1719   Value *SelectedV = CondC->isAllOnesValue()
1720                          ? TrueVal
1721                          : (CondC->isNullValue()) ? FalseVal : nullptr;
1722   if (!SelectedV) {
1723     // Condition is a vector constant that is not all 1s or all 0s.  If all
1724     // operands are constants, ConstantExpr::getSelect() can handle the cases
1725     // such as select vectors.
1726     if (TrueC && FalseC) {
1727       if (auto *C = ConstantExpr::getSelect(CondC, TrueC, FalseC)) {
1728         SimplifiedValues[&SI] = C;
1729         return true;
1730       }
1731     }
1732     return Base::visitSelectInst(SI);
1733   }
1734 
1735   // Condition is either all 1s or all 0s. SI can be simplified.
1736   if (Constant *SelectedC = dyn_cast<Constant>(SelectedV)) {
1737     SimplifiedValues[&SI] = SelectedC;
1738     return true;
1739   }
1740 
1741   if (!CheckSROA)
1742     return true;
1743 
1744   std::pair<Value *, APInt> BaseAndOffset =
1745       ConstantOffsetPtrs.lookup(SelectedV);
1746   if (BaseAndOffset.first) {
1747     ConstantOffsetPtrs[&SI] = BaseAndOffset;
1748 
1749     if (auto *SROAArg = getSROAArgForValueOrNull(SelectedV))
1750       SROAArgValues[&SI] = SROAArg;
1751   }
1752 
1753   return true;
1754 }
1755 
1756 bool CallAnalyzer::visitSwitchInst(SwitchInst &SI) {
1757   // We model unconditional switches as free, see the comments on handling
1758   // branches.
1759   if (isa<ConstantInt>(SI.getCondition()))
1760     return true;
1761   if (Value *V = SimplifiedValues.lookup(SI.getCondition()))
1762     if (isa<ConstantInt>(V))
1763       return true;
1764 
1765   // Assume the most general case where the switch is lowered into
1766   // either a jump table, bit test, or a balanced binary tree consisting of
1767   // case clusters without merging adjacent clusters with the same
1768   // destination. We do not consider the switches that are lowered with a mix
1769   // of jump table/bit test/binary search tree. The cost of the switch is
1770   // proportional to the size of the tree or the size of jump table range.
1771   //
1772   // NB: We convert large switches which are just used to initialize large phi
1773   // nodes to lookup tables instead in simplify-cfg, so this shouldn't prevent
1774   // inlining those. It will prevent inlining in cases where the optimization
1775   // does not (yet) fire.
1776 
1777   unsigned JumpTableSize = 0;
1778   BlockFrequencyInfo *BFI = GetBFI ? &(GetBFI(F)) : nullptr;
1779   unsigned NumCaseCluster =
1780       TTI.getEstimatedNumberOfCaseClusters(SI, JumpTableSize, PSI, BFI);
1781 
1782   onFinalizeSwitch(JumpTableSize, NumCaseCluster);
1783   return false;
1784 }
1785 
1786 bool CallAnalyzer::visitIndirectBrInst(IndirectBrInst &IBI) {
1787   // We never want to inline functions that contain an indirectbr.  This is
1788   // incorrect because all the blockaddress's (in static global initializers
1789   // for example) would be referring to the original function, and this
1790   // indirect jump would jump from the inlined copy of the function into the
1791   // original function which is extremely undefined behavior.
1792   // FIXME: This logic isn't really right; we can safely inline functions with
1793   // indirectbr's as long as no other function or global references the
1794   // blockaddress of a block within the current function.
1795   HasIndirectBr = true;
1796   return false;
1797 }
1798 
1799 bool CallAnalyzer::visitResumeInst(ResumeInst &RI) {
1800   // FIXME: It's not clear that a single instruction is an accurate model for
1801   // the inline cost of a resume instruction.
1802   return false;
1803 }
1804 
1805 bool CallAnalyzer::visitCleanupReturnInst(CleanupReturnInst &CRI) {
1806   // FIXME: It's not clear that a single instruction is an accurate model for
1807   // the inline cost of a cleanupret instruction.
1808   return false;
1809 }
1810 
1811 bool CallAnalyzer::visitCatchReturnInst(CatchReturnInst &CRI) {
1812   // FIXME: It's not clear that a single instruction is an accurate model for
1813   // the inline cost of a catchret instruction.
1814   return false;
1815 }
1816 
1817 bool CallAnalyzer::visitUnreachableInst(UnreachableInst &I) {
1818   // FIXME: It might be reasonably to discount the cost of instructions leading
1819   // to unreachable as they have the lowest possible impact on both runtime and
1820   // code size.
1821   return true; // No actual code is needed for unreachable.
1822 }
1823 
1824 bool CallAnalyzer::visitInstruction(Instruction &I) {
1825   // Some instructions are free. All of the free intrinsics can also be
1826   // handled by SROA, etc.
1827   if (TargetTransformInfo::TCC_Free ==
1828       TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency))
1829     return true;
1830 
1831   // We found something we don't understand or can't handle. Mark any SROA-able
1832   // values in the operand list as no longer viable.
1833   for (User::op_iterator OI = I.op_begin(), OE = I.op_end(); OI != OE; ++OI)
1834     disableSROA(*OI);
1835 
1836   return false;
1837 }
1838 
1839 /// Analyze a basic block for its contribution to the inline cost.
1840 ///
1841 /// This method walks the analyzer over every instruction in the given basic
1842 /// block and accounts for their cost during inlining at this callsite. It
1843 /// aborts early if the threshold has been exceeded or an impossible to inline
1844 /// construct has been detected. It returns false if inlining is no longer
1845 /// viable, and true if inlining remains viable.
1846 InlineResult
1847 CallAnalyzer::analyzeBlock(BasicBlock *BB,
1848                            SmallPtrSetImpl<const Value *> &EphValues) {
1849   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
1850     // FIXME: Currently, the number of instructions in a function regardless of
1851     // our ability to simplify them during inline to constants or dead code,
1852     // are actually used by the vector bonus heuristic. As long as that's true,
1853     // we have to special case debug intrinsics here to prevent differences in
1854     // inlining due to debug symbols. Eventually, the number of unsimplified
1855     // instructions shouldn't factor into the cost computation, but until then,
1856     // hack around it here.
1857     if (isa<DbgInfoIntrinsic>(I))
1858       continue;
1859 
1860     // Skip ephemeral values.
1861     if (EphValues.count(&*I))
1862       continue;
1863 
1864     ++NumInstructions;
1865     if (isa<ExtractElementInst>(I) || I->getType()->isVectorTy())
1866       ++NumVectorInstructions;
1867 
1868     // If the instruction simplified to a constant, there is no cost to this
1869     // instruction. Visit the instructions using our InstVisitor to account for
1870     // all of the per-instruction logic. The visit tree returns true if we
1871     // consumed the instruction in any way, and false if the instruction's base
1872     // cost should count against inlining.
1873     onInstructionAnalysisStart(&*I);
1874 
1875     if (Base::visit(&*I))
1876       ++NumInstructionsSimplified;
1877     else
1878       onMissedSimplification();
1879 
1880     onInstructionAnalysisFinish(&*I);
1881     using namespace ore;
1882     // If the visit this instruction detected an uninlinable pattern, abort.
1883     InlineResult IR = InlineResult::success();
1884     if (IsRecursiveCall)
1885       IR = InlineResult::failure("recursive");
1886     else if (ExposesReturnsTwice)
1887       IR = InlineResult::failure("exposes returns twice");
1888     else if (HasDynamicAlloca)
1889       IR = InlineResult::failure("dynamic alloca");
1890     else if (HasIndirectBr)
1891       IR = InlineResult::failure("indirect branch");
1892     else if (HasUninlineableIntrinsic)
1893       IR = InlineResult::failure("uninlinable intrinsic");
1894     else if (InitsVargArgs)
1895       IR = InlineResult::failure("varargs");
1896     if (!IR.isSuccess()) {
1897       if (ORE)
1898         ORE->emit([&]() {
1899           return OptimizationRemarkMissed(DEBUG_TYPE, "NeverInline",
1900                                           &CandidateCall)
1901                  << NV("Callee", &F) << " has uninlinable pattern ("
1902                  << NV("InlineResult", IR.getFailureReason())
1903                  << ") and cost is not fully computed";
1904         });
1905       return IR;
1906     }
1907 
1908     // If the caller is a recursive function then we don't want to inline
1909     // functions which allocate a lot of stack space because it would increase
1910     // the caller stack usage dramatically.
1911     if (IsCallerRecursive &&
1912         AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller) {
1913       auto IR =
1914           InlineResult::failure("recursive and allocates too much stack space");
1915       if (ORE)
1916         ORE->emit([&]() {
1917           return OptimizationRemarkMissed(DEBUG_TYPE, "NeverInline",
1918                                           &CandidateCall)
1919                  << NV("Callee", &F) << " is "
1920                  << NV("InlineResult", IR.getFailureReason())
1921                  << ". Cost is not fully computed";
1922         });
1923       return IR;
1924     }
1925 
1926     if (shouldStop())
1927       return InlineResult::failure(
1928           "Call site analysis is not favorable to inlining.");
1929   }
1930 
1931   return InlineResult::success();
1932 }
1933 
1934 /// Compute the base pointer and cumulative constant offsets for V.
1935 ///
1936 /// This strips all constant offsets off of V, leaving it the base pointer, and
1937 /// accumulates the total constant offset applied in the returned constant. It
1938 /// returns 0 if V is not a pointer, and returns the constant '0' if there are
1939 /// no constant offsets applied.
1940 ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
1941   if (!V->getType()->isPointerTy())
1942     return nullptr;
1943 
1944   unsigned AS = V->getType()->getPointerAddressSpace();
1945   unsigned IntPtrWidth = DL.getIndexSizeInBits(AS);
1946   APInt Offset = APInt::getNullValue(IntPtrWidth);
1947 
1948   // Even though we don't look through PHI nodes, we could be called on an
1949   // instruction in an unreachable block, which may be on a cycle.
1950   SmallPtrSet<Value *, 4> Visited;
1951   Visited.insert(V);
1952   do {
1953     if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
1954       if (!GEP->isInBounds() || !accumulateGEPOffset(*GEP, Offset))
1955         return nullptr;
1956       V = GEP->getPointerOperand();
1957     } else if (Operator::getOpcode(V) == Instruction::BitCast) {
1958       V = cast<Operator>(V)->getOperand(0);
1959     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1960       if (GA->isInterposable())
1961         break;
1962       V = GA->getAliasee();
1963     } else {
1964       break;
1965     }
1966     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
1967   } while (Visited.insert(V).second);
1968 
1969   Type *IdxPtrTy = DL.getIndexType(V->getType());
1970   return cast<ConstantInt>(ConstantInt::get(IdxPtrTy, Offset));
1971 }
1972 
1973 /// Find dead blocks due to deleted CFG edges during inlining.
1974 ///
1975 /// If we know the successor of the current block, \p CurrBB, has to be \p
1976 /// NextBB, the other successors of \p CurrBB are dead if these successors have
1977 /// no live incoming CFG edges.  If one block is found to be dead, we can
1978 /// continue growing the dead block list by checking the successors of the dead
1979 /// blocks to see if all their incoming edges are dead or not.
1980 void CallAnalyzer::findDeadBlocks(BasicBlock *CurrBB, BasicBlock *NextBB) {
1981   auto IsEdgeDead = [&](BasicBlock *Pred, BasicBlock *Succ) {
1982     // A CFG edge is dead if the predecessor is dead or the predecessor has a
1983     // known successor which is not the one under exam.
1984     return (DeadBlocks.count(Pred) ||
1985             (KnownSuccessors[Pred] && KnownSuccessors[Pred] != Succ));
1986   };
1987 
1988   auto IsNewlyDead = [&](BasicBlock *BB) {
1989     // If all the edges to a block are dead, the block is also dead.
1990     return (!DeadBlocks.count(BB) &&
1991             llvm::all_of(predecessors(BB),
1992                          [&](BasicBlock *P) { return IsEdgeDead(P, BB); }));
1993   };
1994 
1995   for (BasicBlock *Succ : successors(CurrBB)) {
1996     if (Succ == NextBB || !IsNewlyDead(Succ))
1997       continue;
1998     SmallVector<BasicBlock *, 4> NewDead;
1999     NewDead.push_back(Succ);
2000     while (!NewDead.empty()) {
2001       BasicBlock *Dead = NewDead.pop_back_val();
2002       if (DeadBlocks.insert(Dead))
2003         // Continue growing the dead block lists.
2004         for (BasicBlock *S : successors(Dead))
2005           if (IsNewlyDead(S))
2006             NewDead.push_back(S);
2007     }
2008   }
2009 }
2010 
2011 /// Analyze a call site for potential inlining.
2012 ///
2013 /// Returns true if inlining this call is viable, and false if it is not
2014 /// viable. It computes the cost and adjusts the threshold based on numerous
2015 /// factors and heuristics. If this method returns false but the computed cost
2016 /// is below the computed threshold, then inlining was forcibly disabled by
2017 /// some artifact of the routine.
2018 InlineResult CallAnalyzer::analyze() {
2019   ++NumCallsAnalyzed;
2020 
2021   auto Result = onAnalysisStart();
2022   if (!Result.isSuccess())
2023     return Result;
2024 
2025   if (F.empty())
2026     return InlineResult::success();
2027 
2028   Function *Caller = CandidateCall.getFunction();
2029   // Check if the caller function is recursive itself.
2030   for (User *U : Caller->users()) {
2031     CallBase *Call = dyn_cast<CallBase>(U);
2032     if (Call && Call->getFunction() == Caller) {
2033       IsCallerRecursive = true;
2034       break;
2035     }
2036   }
2037 
2038   // Populate our simplified values by mapping from function arguments to call
2039   // arguments with known important simplifications.
2040   auto CAI = CandidateCall.arg_begin();
2041   for (Function::arg_iterator FAI = F.arg_begin(), FAE = F.arg_end();
2042        FAI != FAE; ++FAI, ++CAI) {
2043     assert(CAI != CandidateCall.arg_end());
2044     if (Constant *C = dyn_cast<Constant>(CAI))
2045       SimplifiedValues[&*FAI] = C;
2046 
2047     Value *PtrArg = *CAI;
2048     if (ConstantInt *C = stripAndComputeInBoundsConstantOffsets(PtrArg)) {
2049       ConstantOffsetPtrs[&*FAI] = std::make_pair(PtrArg, C->getValue());
2050 
2051       // We can SROA any pointer arguments derived from alloca instructions.
2052       if (auto *SROAArg = dyn_cast<AllocaInst>(PtrArg)) {
2053         SROAArgValues[&*FAI] = SROAArg;
2054         onInitializeSROAArg(SROAArg);
2055         EnabledSROAAllocas.insert(SROAArg);
2056       }
2057     }
2058   }
2059   NumConstantArgs = SimplifiedValues.size();
2060   NumConstantOffsetPtrArgs = ConstantOffsetPtrs.size();
2061   NumAllocaArgs = SROAArgValues.size();
2062 
2063   // FIXME: If a caller has multiple calls to a callee, we end up recomputing
2064   // the ephemeral values multiple times (and they're completely determined by
2065   // the callee, so this is purely duplicate work).
2066   SmallPtrSet<const Value *, 32> EphValues;
2067   CodeMetrics::collectEphemeralValues(&F, &GetAssumptionCache(F), EphValues);
2068 
2069   // The worklist of live basic blocks in the callee *after* inlining. We avoid
2070   // adding basic blocks of the callee which can be proven to be dead for this
2071   // particular call site in order to get more accurate cost estimates. This
2072   // requires a somewhat heavyweight iteration pattern: we need to walk the
2073   // basic blocks in a breadth-first order as we insert live successors. To
2074   // accomplish this, prioritizing for small iterations because we exit after
2075   // crossing our threshold, we use a small-size optimized SetVector.
2076   typedef SetVector<BasicBlock *, SmallVector<BasicBlock *, 16>,
2077                     SmallPtrSet<BasicBlock *, 16>>
2078       BBSetVector;
2079   BBSetVector BBWorklist;
2080   BBWorklist.insert(&F.getEntryBlock());
2081 
2082   // Note that we *must not* cache the size, this loop grows the worklist.
2083   for (unsigned Idx = 0; Idx != BBWorklist.size(); ++Idx) {
2084     if (shouldStop())
2085       break;
2086 
2087     BasicBlock *BB = BBWorklist[Idx];
2088     if (BB->empty())
2089       continue;
2090 
2091     // Disallow inlining a blockaddress with uses other than strictly callbr.
2092     // A blockaddress only has defined behavior for an indirect branch in the
2093     // same function, and we do not currently support inlining indirect
2094     // branches.  But, the inliner may not see an indirect branch that ends up
2095     // being dead code at a particular call site. If the blockaddress escapes
2096     // the function, e.g., via a global variable, inlining may lead to an
2097     // invalid cross-function reference.
2098     // FIXME: pr/39560: continue relaxing this overt restriction.
2099     if (BB->hasAddressTaken())
2100       for (User *U : BlockAddress::get(&*BB)->users())
2101         if (!isa<CallBrInst>(*U))
2102           return InlineResult::failure("blockaddress used outside of callbr");
2103 
2104     // Analyze the cost of this block. If we blow through the threshold, this
2105     // returns false, and we can bail on out.
2106     InlineResult IR = analyzeBlock(BB, EphValues);
2107     if (!IR.isSuccess())
2108       return IR;
2109 
2110     Instruction *TI = BB->getTerminator();
2111 
2112     // Add in the live successors by first checking whether we have terminator
2113     // that may be simplified based on the values simplified by this call.
2114     if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
2115       if (BI->isConditional()) {
2116         Value *Cond = BI->getCondition();
2117         if (ConstantInt *SimpleCond =
2118                 dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
2119           BasicBlock *NextBB = BI->getSuccessor(SimpleCond->isZero() ? 1 : 0);
2120           BBWorklist.insert(NextBB);
2121           KnownSuccessors[BB] = NextBB;
2122           findDeadBlocks(BB, NextBB);
2123           continue;
2124         }
2125       }
2126     } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
2127       Value *Cond = SI->getCondition();
2128       if (ConstantInt *SimpleCond =
2129               dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
2130         BasicBlock *NextBB = SI->findCaseValue(SimpleCond)->getCaseSuccessor();
2131         BBWorklist.insert(NextBB);
2132         KnownSuccessors[BB] = NextBB;
2133         findDeadBlocks(BB, NextBB);
2134         continue;
2135       }
2136     }
2137 
2138     // If we're unable to select a particular successor, just count all of
2139     // them.
2140     for (unsigned TIdx = 0, TSize = TI->getNumSuccessors(); TIdx != TSize;
2141          ++TIdx)
2142       BBWorklist.insert(TI->getSuccessor(TIdx));
2143 
2144     onBlockAnalyzed(BB);
2145   }
2146 
2147   bool OnlyOneCallAndLocalLinkage = F.hasLocalLinkage() && F.hasOneUse() &&
2148                                     &F == CandidateCall.getCalledFunction();
2149   // If this is a noduplicate call, we can still inline as long as
2150   // inlining this would cause the removal of the caller (so the instruction
2151   // is not actually duplicated, just moved).
2152   if (!OnlyOneCallAndLocalLinkage && ContainsNoDuplicateCall)
2153     return InlineResult::failure("noduplicate");
2154 
2155   return finalizeAnalysis();
2156 }
2157 
2158 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2159 /// Dump stats about this call's analysis.
2160 LLVM_DUMP_METHOD void InlineCostCallAnalyzer::dump() {
2161 #define DEBUG_PRINT_STAT(x) dbgs() << "      " #x ": " << x << "\n"
2162   if (PrintDebugInstructionDeltas)
2163     F.print(dbgs(), &Writer);
2164   DEBUG_PRINT_STAT(NumConstantArgs);
2165   DEBUG_PRINT_STAT(NumConstantOffsetPtrArgs);
2166   DEBUG_PRINT_STAT(NumAllocaArgs);
2167   DEBUG_PRINT_STAT(NumConstantPtrCmps);
2168   DEBUG_PRINT_STAT(NumConstantPtrDiffs);
2169   DEBUG_PRINT_STAT(NumInstructionsSimplified);
2170   DEBUG_PRINT_STAT(NumInstructions);
2171   DEBUG_PRINT_STAT(SROACostSavings);
2172   DEBUG_PRINT_STAT(SROACostSavingsLost);
2173   DEBUG_PRINT_STAT(LoadEliminationCost);
2174   DEBUG_PRINT_STAT(ContainsNoDuplicateCall);
2175   DEBUG_PRINT_STAT(Cost);
2176   DEBUG_PRINT_STAT(Threshold);
2177 #undef DEBUG_PRINT_STAT
2178 }
2179 #endif
2180 
2181 /// Test that there are no attribute conflicts between Caller and Callee
2182 ///        that prevent inlining.
2183 static bool functionsHaveCompatibleAttributes(
2184     Function *Caller, Function *Callee, TargetTransformInfo &TTI,
2185     function_ref<const TargetLibraryInfo &(Function &)> &GetTLI) {
2186   // Note that CalleeTLI must be a copy not a reference. The legacy pass manager
2187   // caches the most recently created TLI in the TargetLibraryInfoWrapperPass
2188   // object, and always returns the same object (which is overwritten on each
2189   // GetTLI call). Therefore we copy the first result.
2190   auto CalleeTLI = GetTLI(*Callee);
2191   return TTI.areInlineCompatible(Caller, Callee) &&
2192          GetTLI(*Caller).areInlineCompatible(CalleeTLI,
2193                                              InlineCallerSupersetNoBuiltin) &&
2194          AttributeFuncs::areInlineCompatible(*Caller, *Callee);
2195 }
2196 
2197 int llvm::getCallsiteCost(CallBase &Call, const DataLayout &DL) {
2198   int Cost = 0;
2199   for (unsigned I = 0, E = Call.arg_size(); I != E; ++I) {
2200     if (Call.isByValArgument(I)) {
2201       // We approximate the number of loads and stores needed by dividing the
2202       // size of the byval type by the target's pointer size.
2203       PointerType *PTy = cast<PointerType>(Call.getArgOperand(I)->getType());
2204       unsigned TypeSize = DL.getTypeSizeInBits(PTy->getElementType());
2205       unsigned AS = PTy->getAddressSpace();
2206       unsigned PointerSize = DL.getPointerSizeInBits(AS);
2207       // Ceiling division.
2208       unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;
2209 
2210       // If it generates more than 8 stores it is likely to be expanded as an
2211       // inline memcpy so we take that as an upper bound. Otherwise we assume
2212       // one load and one store per word copied.
2213       // FIXME: The maxStoresPerMemcpy setting from the target should be used
2214       // here instead of a magic number of 8, but it's not available via
2215       // DataLayout.
2216       NumStores = std::min(NumStores, 8U);
2217 
2218       Cost += 2 * NumStores * InlineConstants::InstrCost;
2219     } else {
2220       // For non-byval arguments subtract off one instruction per call
2221       // argument.
2222       Cost += InlineConstants::InstrCost;
2223     }
2224   }
2225   // The call instruction also disappears after inlining.
2226   Cost += InlineConstants::InstrCost + InlineConstants::CallPenalty;
2227   return Cost;
2228 }
2229 
2230 InlineCost llvm::getInlineCost(
2231     CallBase &Call, const InlineParams &Params, TargetTransformInfo &CalleeTTI,
2232     function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
2233     function_ref<const TargetLibraryInfo &(Function &)> GetTLI,
2234     function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
2235     ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE) {
2236   return getInlineCost(Call, Call.getCalledFunction(), Params, CalleeTTI,
2237                        GetAssumptionCache, GetTLI, GetBFI, PSI, ORE);
2238 }
2239 
2240 Optional<int> llvm::getInliningCostEstimate(
2241     CallBase &Call, TargetTransformInfo &CalleeTTI,
2242     function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
2243     function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
2244     ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE) {
2245   const InlineParams Params = {/* DefaultThreshold*/ 0,
2246                                /*HintThreshold*/ {},
2247                                /*ColdThreshold*/ {},
2248                                /*OptSizeThreshold*/ {},
2249                                /*OptMinSizeThreshold*/ {},
2250                                /*HotCallSiteThreshold*/ {},
2251                                /*LocallyHotCallSiteThreshold*/ {},
2252                                /*ColdCallSiteThreshold*/ {},
2253                                /* ComputeFullInlineCost*/ true};
2254 
2255   InlineCostCallAnalyzer CA(*Call.getCalledFunction(), Call, Params, CalleeTTI,
2256                             GetAssumptionCache, GetBFI, PSI, ORE, true,
2257                             /*IgnoreThreshold*/ true);
2258   auto R = CA.analyze();
2259   if (!R.isSuccess())
2260     return None;
2261   return CA.getCost();
2262 }
2263 
2264 Optional<InlineResult> llvm::getAttributeBasedInliningDecision(
2265     CallBase &Call, Function *Callee, TargetTransformInfo &CalleeTTI,
2266     function_ref<const TargetLibraryInfo &(Function &)> GetTLI) {
2267 
2268   // Cannot inline indirect calls.
2269   if (!Callee)
2270     return InlineResult::failure("indirect call");
2271 
2272   // Never inline calls with byval arguments that does not have the alloca
2273   // address space. Since byval arguments can be replaced with a copy to an
2274   // alloca, the inlined code would need to be adjusted to handle that the
2275   // argument is in the alloca address space (so it is a little bit complicated
2276   // to solve).
2277   unsigned AllocaAS = Callee->getParent()->getDataLayout().getAllocaAddrSpace();
2278   for (unsigned I = 0, E = Call.arg_size(); I != E; ++I)
2279     if (Call.isByValArgument(I)) {
2280       PointerType *PTy = cast<PointerType>(Call.getArgOperand(I)->getType());
2281       if (PTy->getAddressSpace() != AllocaAS)
2282         return InlineResult::failure("byval arguments without alloca"
2283                                      " address space");
2284     }
2285 
2286   // Calls to functions with always-inline attributes should be inlined
2287   // whenever possible.
2288   if (Call.hasFnAttr(Attribute::AlwaysInline)) {
2289     auto IsViable = isInlineViable(*Callee);
2290     if (IsViable.isSuccess())
2291       return InlineResult::success();
2292     return InlineResult::failure(IsViable.getFailureReason());
2293   }
2294 
2295   // Never inline functions with conflicting attributes (unless callee has
2296   // always-inline attribute).
2297   Function *Caller = Call.getCaller();
2298   if (!functionsHaveCompatibleAttributes(Caller, Callee, CalleeTTI, GetTLI))
2299     return InlineResult::failure("conflicting attributes");
2300 
2301   // Don't inline this call if the caller has the optnone attribute.
2302   if (Caller->hasOptNone())
2303     return InlineResult::failure("optnone attribute");
2304 
2305   // Don't inline a function that treats null pointer as valid into a caller
2306   // that does not have this attribute.
2307   if (!Caller->nullPointerIsDefined() && Callee->nullPointerIsDefined())
2308     return InlineResult::failure("nullptr definitions incompatible");
2309 
2310   // Don't inline functions which can be interposed at link-time.
2311   if (Callee->isInterposable())
2312     return InlineResult::failure("interposable");
2313 
2314   // Don't inline functions marked noinline.
2315   if (Callee->hasFnAttribute(Attribute::NoInline))
2316     return InlineResult::failure("noinline function attribute");
2317 
2318   // Don't inline call sites marked noinline.
2319   if (Call.isNoInline())
2320     return InlineResult::failure("noinline call site attribute");
2321 
2322   return None;
2323 }
2324 
2325 InlineCost llvm::getInlineCost(
2326     CallBase &Call, Function *Callee, const InlineParams &Params,
2327     TargetTransformInfo &CalleeTTI,
2328     function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
2329     function_ref<const TargetLibraryInfo &(Function &)> GetTLI,
2330     function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
2331     ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE) {
2332 
2333   auto UserDecision =
2334       llvm::getAttributeBasedInliningDecision(Call, Callee, CalleeTTI, GetTLI);
2335 
2336   if (UserDecision.hasValue()) {
2337     if (UserDecision->isSuccess())
2338       return llvm::InlineCost::getAlways("always inline attribute");
2339     return llvm::InlineCost::getNever(UserDecision->getFailureReason());
2340   }
2341 
2342   LLVM_DEBUG(llvm::dbgs() << "      Analyzing call of " << Callee->getName()
2343                           << "... (caller:" << Call.getCaller()->getName()
2344                           << ")\n");
2345 
2346   InlineCostCallAnalyzer CA(*Callee, Call, Params, CalleeTTI,
2347                             GetAssumptionCache, GetBFI, PSI, ORE);
2348   InlineResult ShouldInline = CA.analyze();
2349 
2350   LLVM_DEBUG(CA.dump());
2351 
2352   // Check if there was a reason to force inlining or no inlining.
2353   if (!ShouldInline.isSuccess() && CA.getCost() < CA.getThreshold())
2354     return InlineCost::getNever(ShouldInline.getFailureReason());
2355   if (ShouldInline.isSuccess() && CA.getCost() >= CA.getThreshold())
2356     return InlineCost::getAlways("empty function");
2357 
2358   return llvm::InlineCost::get(CA.getCost(), CA.getThreshold());
2359 }
2360 
2361 InlineResult llvm::isInlineViable(Function &F) {
2362   bool ReturnsTwice = F.hasFnAttribute(Attribute::ReturnsTwice);
2363   for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
2364     // Disallow inlining of functions which contain indirect branches.
2365     if (isa<IndirectBrInst>(BI->getTerminator()))
2366       return InlineResult::failure("contains indirect branches");
2367 
2368     // Disallow inlining of blockaddresses which are used by non-callbr
2369     // instructions.
2370     if (BI->hasAddressTaken())
2371       for (User *U : BlockAddress::get(&*BI)->users())
2372         if (!isa<CallBrInst>(*U))
2373           return InlineResult::failure("blockaddress used outside of callbr");
2374 
2375     for (auto &II : *BI) {
2376       CallBase *Call = dyn_cast<CallBase>(&II);
2377       if (!Call)
2378         continue;
2379 
2380       // Disallow recursive calls.
2381       if (&F == Call->getCalledFunction())
2382         return InlineResult::failure("recursive call");
2383 
2384       // Disallow calls which expose returns-twice to a function not previously
2385       // attributed as such.
2386       if (!ReturnsTwice && isa<CallInst>(Call) &&
2387           cast<CallInst>(Call)->canReturnTwice())
2388         return InlineResult::failure("exposes returns-twice attribute");
2389 
2390       if (Call->getCalledFunction())
2391         switch (Call->getCalledFunction()->getIntrinsicID()) {
2392         default:
2393           break;
2394         case llvm::Intrinsic::icall_branch_funnel:
2395           // Disallow inlining of @llvm.icall.branch.funnel because current
2396           // backend can't separate call targets from call arguments.
2397           return InlineResult::failure(
2398               "disallowed inlining of @llvm.icall.branch.funnel");
2399         case llvm::Intrinsic::localescape:
2400           // Disallow inlining functions that call @llvm.localescape. Doing this
2401           // correctly would require major changes to the inliner.
2402           return InlineResult::failure(
2403               "disallowed inlining of @llvm.localescape");
2404         case llvm::Intrinsic::vastart:
2405           // Disallow inlining of functions that initialize VarArgs with
2406           // va_start.
2407           return InlineResult::failure(
2408               "contains VarArgs initialized with va_start");
2409         }
2410     }
2411   }
2412 
2413   return InlineResult::success();
2414 }
2415 
2416 // APIs to create InlineParams based on command line flags and/or other
2417 // parameters.
2418 
2419 InlineParams llvm::getInlineParams(int Threshold) {
2420   InlineParams Params;
2421 
2422   // This field is the threshold to use for a callee by default. This is
2423   // derived from one or more of:
2424   //  * optimization or size-optimization levels,
2425   //  * a value passed to createFunctionInliningPass function, or
2426   //  * the -inline-threshold flag.
2427   //  If the -inline-threshold flag is explicitly specified, that is used
2428   //  irrespective of anything else.
2429   if (InlineThreshold.getNumOccurrences() > 0)
2430     Params.DefaultThreshold = InlineThreshold;
2431   else
2432     Params.DefaultThreshold = Threshold;
2433 
2434   // Set the HintThreshold knob from the -inlinehint-threshold.
2435   Params.HintThreshold = HintThreshold;
2436 
2437   // Set the HotCallSiteThreshold knob from the -hot-callsite-threshold.
2438   Params.HotCallSiteThreshold = HotCallSiteThreshold;
2439 
2440   // If the -locally-hot-callsite-threshold is explicitly specified, use it to
2441   // populate LocallyHotCallSiteThreshold. Later, we populate
2442   // Params.LocallyHotCallSiteThreshold from -locally-hot-callsite-threshold if
2443   // we know that optimization level is O3 (in the getInlineParams variant that
2444   // takes the opt and size levels).
2445   // FIXME: Remove this check (and make the assignment unconditional) after
2446   // addressing size regression issues at O2.
2447   if (LocallyHotCallSiteThreshold.getNumOccurrences() > 0)
2448     Params.LocallyHotCallSiteThreshold = LocallyHotCallSiteThreshold;
2449 
2450   // Set the ColdCallSiteThreshold knob from the
2451   // -inline-cold-callsite-threshold.
2452   Params.ColdCallSiteThreshold = ColdCallSiteThreshold;
2453 
2454   // Set the OptMinSizeThreshold and OptSizeThreshold params only if the
2455   // -inlinehint-threshold commandline option is not explicitly given. If that
2456   // option is present, then its value applies even for callees with size and
2457   // minsize attributes.
2458   // If the -inline-threshold is not specified, set the ColdThreshold from the
2459   // -inlinecold-threshold even if it is not explicitly passed. If
2460   // -inline-threshold is specified, then -inlinecold-threshold needs to be
2461   // explicitly specified to set the ColdThreshold knob
2462   if (InlineThreshold.getNumOccurrences() == 0) {
2463     Params.OptMinSizeThreshold = InlineConstants::OptMinSizeThreshold;
2464     Params.OptSizeThreshold = InlineConstants::OptSizeThreshold;
2465     Params.ColdThreshold = ColdThreshold;
2466   } else if (ColdThreshold.getNumOccurrences() > 0) {
2467     Params.ColdThreshold = ColdThreshold;
2468   }
2469   return Params;
2470 }
2471 
2472 InlineParams llvm::getInlineParams() {
2473   return getInlineParams(DefaultThreshold);
2474 }
2475 
2476 // Compute the default threshold for inlining based on the opt level and the
2477 // size opt level.
2478 static int computeThresholdFromOptLevels(unsigned OptLevel,
2479                                          unsigned SizeOptLevel) {
2480   if (OptLevel > 2)
2481     return InlineConstants::OptAggressiveThreshold;
2482   if (SizeOptLevel == 1) // -Os
2483     return InlineConstants::OptSizeThreshold;
2484   if (SizeOptLevel == 2) // -Oz
2485     return InlineConstants::OptMinSizeThreshold;
2486   return DefaultThreshold;
2487 }
2488 
2489 InlineParams llvm::getInlineParams(unsigned OptLevel, unsigned SizeOptLevel) {
2490   auto Params =
2491       getInlineParams(computeThresholdFromOptLevels(OptLevel, SizeOptLevel));
2492   // At O3, use the value of -locally-hot-callsite-threshold option to populate
2493   // Params.LocallyHotCallSiteThreshold. Below O3, this flag has effect only
2494   // when it is specified explicitly.
2495   if (OptLevel > 2)
2496     Params.LocallyHotCallSiteThreshold = LocallyHotCallSiteThreshold;
2497   return Params;
2498 }
2499