1 //===- InlineCost.cpp - Cost analysis for inliner -------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements inline cost analysis.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/Analysis/InlineCost.h"
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/ADT/SetVector.h"
16 #include "llvm/ADT/SmallPtrSet.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/ADT/Statistic.h"
19 #include "llvm/Analysis/AssumptionCache.h"
20 #include "llvm/Analysis/BlockFrequencyInfo.h"
21 #include "llvm/Analysis/CFG.h"
22 #include "llvm/Analysis/CodeMetrics.h"
23 #include "llvm/Analysis/ConstantFolding.h"
24 #include "llvm/Analysis/InstructionSimplify.h"
25 #include "llvm/Analysis/LoopInfo.h"
26 #include "llvm/Analysis/ProfileSummaryInfo.h"
27 #include "llvm/Analysis/TargetLibraryInfo.h"
28 #include "llvm/Analysis/TargetTransformInfo.h"
29 #include "llvm/Analysis/ValueTracking.h"
30 #include "llvm/Config/llvm-config.h"
31 #include "llvm/IR/AssemblyAnnotationWriter.h"
32 #include "llvm/IR/CallingConv.h"
33 #include "llvm/IR/DataLayout.h"
34 #include "llvm/IR/Dominators.h"
35 #include "llvm/IR/GetElementPtrTypeIterator.h"
36 #include "llvm/IR/GlobalAlias.h"
37 #include "llvm/IR/InstVisitor.h"
38 #include "llvm/IR/IntrinsicInst.h"
39 #include "llvm/IR/Operator.h"
40 #include "llvm/IR/PatternMatch.h"
41 #include "llvm/Support/CommandLine.h"
42 #include "llvm/Support/Debug.h"
43 #include "llvm/Support/FormattedStream.h"
44 #include "llvm/Support/raw_ostream.h"
45 
46 using namespace llvm;
47 
48 #define DEBUG_TYPE "inline-cost"
49 
50 STATISTIC(NumCallsAnalyzed, "Number of call sites analyzed");
51 
52 static cl::opt<int>
53     DefaultThreshold("inlinedefault-threshold", cl::Hidden, cl::init(225),
54                      cl::ZeroOrMore,
55                      cl::desc("Default amount of inlining to perform"));
56 
57 static cl::opt<bool> PrintInstructionComments(
58     "print-instruction-comments", cl::Hidden, cl::init(false),
59     cl::desc("Prints comments for instruction based on inline cost analysis"));
60 
61 static cl::opt<int> InlineThreshold(
62     "inline-threshold", cl::Hidden, cl::init(225), cl::ZeroOrMore,
63     cl::desc("Control the amount of inlining to perform (default = 225)"));
64 
65 static cl::opt<int> HintThreshold(
66     "inlinehint-threshold", cl::Hidden, cl::init(325), cl::ZeroOrMore,
67     cl::desc("Threshold for inlining functions with inline hint"));
68 
69 static cl::opt<int>
70     ColdCallSiteThreshold("inline-cold-callsite-threshold", cl::Hidden,
71                           cl::init(45), cl::ZeroOrMore,
72                           cl::desc("Threshold for inlining cold callsites"));
73 
74 // We introduce this threshold to help performance of instrumentation based
75 // PGO before we actually hook up inliner with analysis passes such as BPI and
76 // BFI.
77 static cl::opt<int> ColdThreshold(
78     "inlinecold-threshold", cl::Hidden, cl::init(45), cl::ZeroOrMore,
79     cl::desc("Threshold for inlining functions with cold attribute"));
80 
81 static cl::opt<int>
82     HotCallSiteThreshold("hot-callsite-threshold", cl::Hidden, cl::init(3000),
83                          cl::ZeroOrMore,
84                          cl::desc("Threshold for hot callsites "));
85 
86 static cl::opt<int> LocallyHotCallSiteThreshold(
87     "locally-hot-callsite-threshold", cl::Hidden, cl::init(525), cl::ZeroOrMore,
88     cl::desc("Threshold for locally hot callsites "));
89 
90 static cl::opt<int> ColdCallSiteRelFreq(
91     "cold-callsite-rel-freq", cl::Hidden, cl::init(2), cl::ZeroOrMore,
92     cl::desc("Maximum block frequency, expressed as a percentage of caller's "
93              "entry frequency, for a callsite to be cold in the absence of "
94              "profile information."));
95 
96 static cl::opt<int> HotCallSiteRelFreq(
97     "hot-callsite-rel-freq", cl::Hidden, cl::init(60), cl::ZeroOrMore,
98     cl::desc("Minimum block frequency, expressed as a multiple of caller's "
99              "entry frequency, for a callsite to be hot in the absence of "
100              "profile information."));
101 
102 static cl::opt<bool> OptComputeFullInlineCost(
103     "inline-cost-full", cl::Hidden, cl::init(false), cl::ZeroOrMore,
104     cl::desc("Compute the full inline cost of a call site even when the cost "
105              "exceeds the threshold."));
106 
107 static cl::opt<bool> InlineCallerSupersetNoBuiltin(
108     "inline-caller-superset-nobuiltin", cl::Hidden, cl::init(true),
109     cl::ZeroOrMore,
110     cl::desc("Allow inlining when caller has a superset of callee's nobuiltin "
111              "attributes."));
112 
113 namespace {
114 class InlineCostCallAnalyzer;
115 
116 // This struct is used to store information about inline cost of a
117 // particular instruction
118 struct InstructionCostDetail {
119   int CostBefore = 0;
120   int CostAfter = 0;
121   int ThresholdBefore = 0;
122   int ThresholdAfter = 0;
123 
124   int getThresholdDelta() const { return ThresholdAfter - ThresholdBefore; }
125 
126   int getCostDelta() const { return CostAfter - CostBefore; }
127 
128   bool hasThresholdChanged() const { return ThresholdAfter != ThresholdBefore; }
129 };
130 
131 class InlineCostAnnotationWriter : public AssemblyAnnotationWriter {
132 private:
133   InlineCostCallAnalyzer *const ICCA;
134 
135 public:
136   InlineCostAnnotationWriter(InlineCostCallAnalyzer *ICCA) : ICCA(ICCA) {}
137   virtual void emitInstructionAnnot(const Instruction *I,
138                                     formatted_raw_ostream &OS) override;
139 };
140 
141 /// Carry out call site analysis, in order to evaluate inlinability.
142 /// NOTE: the type is currently used as implementation detail of functions such
143 /// as llvm::getInlineCost. Note the function_ref constructor parameters - the
144 /// expectation is that they come from the outer scope, from the wrapper
145 /// functions. If we want to support constructing CallAnalyzer objects where
146 /// lambdas are provided inline at construction, or where the object needs to
147 /// otherwise survive past the scope of the provided functions, we need to
148 /// revisit the argument types.
149 class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
150   typedef InstVisitor<CallAnalyzer, bool> Base;
151   friend class InstVisitor<CallAnalyzer, bool>;
152 
153 protected:
154   virtual ~CallAnalyzer() {}
155   /// The TargetTransformInfo available for this compilation.
156   const TargetTransformInfo &TTI;
157 
158   /// Getter for the cache of @llvm.assume intrinsics.
159   function_ref<AssumptionCache &(Function &)> GetAssumptionCache;
160 
161   /// Getter for BlockFrequencyInfo
162   function_ref<BlockFrequencyInfo &(Function &)> GetBFI;
163 
164   /// Profile summary information.
165   ProfileSummaryInfo *PSI;
166 
167   /// The called function.
168   Function &F;
169 
170   // Cache the DataLayout since we use it a lot.
171   const DataLayout &DL;
172 
173   /// The OptimizationRemarkEmitter available for this compilation.
174   OptimizationRemarkEmitter *ORE;
175 
176   /// The candidate callsite being analyzed. Please do not use this to do
177   /// analysis in the caller function; we want the inline cost query to be
178   /// easily cacheable. Instead, use the cover function paramHasAttr.
179   CallBase &CandidateCall;
180 
181   /// Extension points for handling callsite features.
182   /// Called after a basic block was analyzed.
183   virtual void onBlockAnalyzed(const BasicBlock *BB) {}
184 
185   /// Called before an instruction was analyzed
186   virtual void onInstructionAnalysisStart(const Instruction *I) {}
187 
188   /// Called after an instruction was analyzed
189   virtual void onInstructionAnalysisFinish(const Instruction *I) {}
190 
191   /// Called at the end of the analysis of the callsite. Return the outcome of
192   /// the analysis, i.e. 'InlineResult(true)' if the inlining may happen, or
193   /// the reason it can't.
194   virtual InlineResult finalizeAnalysis() { return InlineResult::success(); }
195   /// Called when we're about to start processing a basic block, and every time
196   /// we are done processing an instruction. Return true if there is no point in
197   /// continuing the analysis (e.g. we've determined already the call site is
198   /// too expensive to inline)
199   virtual bool shouldStop() { return false; }
200 
201   /// Called before the analysis of the callee body starts (with callsite
202   /// contexts propagated).  It checks callsite-specific information. Return a
203   /// reason analysis can't continue if that's the case, or 'true' if it may
204   /// continue.
205   virtual InlineResult onAnalysisStart() { return InlineResult::success(); }
206   /// Called if the analysis engine decides SROA cannot be done for the given
207   /// alloca.
208   virtual void onDisableSROA(AllocaInst *Arg) {}
209 
210   /// Called the analysis engine determines load elimination won't happen.
211   virtual void onDisableLoadElimination() {}
212 
213   /// Called to account for a call.
214   virtual void onCallPenalty() {}
215 
216   /// Called to account for the expectation the inlining would result in a load
217   /// elimination.
218   virtual void onLoadEliminationOpportunity() {}
219 
220   /// Called to account for the cost of argument setup for the Call in the
221   /// callee's body (not the callsite currently under analysis).
222   virtual void onCallArgumentSetup(const CallBase &Call) {}
223 
224   /// Called to account for a load relative intrinsic.
225   virtual void onLoadRelativeIntrinsic() {}
226 
227   /// Called to account for a lowered call.
228   virtual void onLoweredCall(Function *F, CallBase &Call, bool IsIndirectCall) {
229   }
230 
231   /// Account for a jump table of given size. Return false to stop further
232   /// processing the switch instruction
233   virtual bool onJumpTable(unsigned JumpTableSize) { return true; }
234 
235   /// Account for a case cluster of given size. Return false to stop further
236   /// processing of the instruction.
237   virtual bool onCaseCluster(unsigned NumCaseCluster) { return true; }
238 
239   /// Called at the end of processing a switch instruction, with the given
240   /// number of case clusters.
241   virtual void onFinalizeSwitch(unsigned JumpTableSize,
242                                 unsigned NumCaseCluster) {}
243 
244   /// Called to account for any other instruction not specifically accounted
245   /// for.
246   virtual void onMissedSimplification() {}
247 
248   /// Start accounting potential benefits due to SROA for the given alloca.
249   virtual void onInitializeSROAArg(AllocaInst *Arg) {}
250 
251   /// Account SROA savings for the AllocaInst value.
252   virtual void onAggregateSROAUse(AllocaInst *V) {}
253 
254   bool handleSROA(Value *V, bool DoNotDisable) {
255     // Check for SROA candidates in comparisons.
256     if (auto *SROAArg = getSROAArgForValueOrNull(V)) {
257       if (DoNotDisable) {
258         onAggregateSROAUse(SROAArg);
259         return true;
260       }
261       disableSROAForArg(SROAArg);
262     }
263     return false;
264   }
265 
266   bool IsCallerRecursive = false;
267   bool IsRecursiveCall = false;
268   bool ExposesReturnsTwice = false;
269   bool HasDynamicAlloca = false;
270   bool ContainsNoDuplicateCall = false;
271   bool HasReturn = false;
272   bool HasIndirectBr = false;
273   bool HasUninlineableIntrinsic = false;
274   bool InitsVargArgs = false;
275 
276   /// Number of bytes allocated statically by the callee.
277   uint64_t AllocatedSize = 0;
278   unsigned NumInstructions = 0;
279   unsigned NumVectorInstructions = 0;
280 
281   /// While we walk the potentially-inlined instructions, we build up and
282   /// maintain a mapping of simplified values specific to this callsite. The
283   /// idea is to propagate any special information we have about arguments to
284   /// this call through the inlinable section of the function, and account for
285   /// likely simplifications post-inlining. The most important aspect we track
286   /// is CFG altering simplifications -- when we prove a basic block dead, that
287   /// can cause dramatic shifts in the cost of inlining a function.
288   DenseMap<Value *, Constant *> SimplifiedValues;
289 
290   /// Keep track of the values which map back (through function arguments) to
291   /// allocas on the caller stack which could be simplified through SROA.
292   DenseMap<Value *, AllocaInst *> SROAArgValues;
293 
294   /// Keep track of Allocas for which we believe we may get SROA optimization.
295   DenseSet<AllocaInst *> EnabledSROAAllocas;
296 
297   /// Keep track of values which map to a pointer base and constant offset.
298   DenseMap<Value *, std::pair<Value *, APInt>> ConstantOffsetPtrs;
299 
300   /// Keep track of dead blocks due to the constant arguments.
301   SetVector<BasicBlock *> DeadBlocks;
302 
303   /// The mapping of the blocks to their known unique successors due to the
304   /// constant arguments.
305   DenseMap<BasicBlock *, BasicBlock *> KnownSuccessors;
306 
307   /// Model the elimination of repeated loads that is expected to happen
308   /// whenever we simplify away the stores that would otherwise cause them to be
309   /// loads.
310   bool EnableLoadElimination;
311   SmallPtrSet<Value *, 16> LoadAddrSet;
312 
313   AllocaInst *getSROAArgForValueOrNull(Value *V) const {
314     auto It = SROAArgValues.find(V);
315     if (It == SROAArgValues.end() || EnabledSROAAllocas.count(It->second) == 0)
316       return nullptr;
317     return It->second;
318   }
319 
320   // Custom simplification helper routines.
321   bool isAllocaDerivedArg(Value *V);
322   void disableSROAForArg(AllocaInst *SROAArg);
323   void disableSROA(Value *V);
324   void findDeadBlocks(BasicBlock *CurrBB, BasicBlock *NextBB);
325   void disableLoadElimination();
326   bool isGEPFree(GetElementPtrInst &GEP);
327   bool canFoldInboundsGEP(GetElementPtrInst &I);
328   bool accumulateGEPOffset(GEPOperator &GEP, APInt &Offset);
329   bool simplifyCallSite(Function *F, CallBase &Call);
330   template <typename Callable>
331   bool simplifyInstruction(Instruction &I, Callable Evaluate);
332   ConstantInt *stripAndComputeInBoundsConstantOffsets(Value *&V);
333 
334   /// Return true if the given argument to the function being considered for
335   /// inlining has the given attribute set either at the call site or the
336   /// function declaration.  Primarily used to inspect call site specific
337   /// attributes since these can be more precise than the ones on the callee
338   /// itself.
339   bool paramHasAttr(Argument *A, Attribute::AttrKind Attr);
340 
341   /// Return true if the given value is known non null within the callee if
342   /// inlined through this particular callsite.
343   bool isKnownNonNullInCallee(Value *V);
344 
345   /// Return true if size growth is allowed when inlining the callee at \p Call.
346   bool allowSizeGrowth(CallBase &Call);
347 
348   // Custom analysis routines.
349   InlineResult analyzeBlock(BasicBlock *BB,
350                             SmallPtrSetImpl<const Value *> &EphValues);
351 
352   // Disable several entry points to the visitor so we don't accidentally use
353   // them by declaring but not defining them here.
354   void visit(Module *);
355   void visit(Module &);
356   void visit(Function *);
357   void visit(Function &);
358   void visit(BasicBlock *);
359   void visit(BasicBlock &);
360 
361   // Provide base case for our instruction visit.
362   bool visitInstruction(Instruction &I);
363 
364   // Our visit overrides.
365   bool visitAlloca(AllocaInst &I);
366   bool visitPHI(PHINode &I);
367   bool visitGetElementPtr(GetElementPtrInst &I);
368   bool visitBitCast(BitCastInst &I);
369   bool visitPtrToInt(PtrToIntInst &I);
370   bool visitIntToPtr(IntToPtrInst &I);
371   bool visitCastInst(CastInst &I);
372   bool visitUnaryInstruction(UnaryInstruction &I);
373   bool visitCmpInst(CmpInst &I);
374   bool visitSub(BinaryOperator &I);
375   bool visitBinaryOperator(BinaryOperator &I);
376   bool visitFNeg(UnaryOperator &I);
377   bool visitLoad(LoadInst &I);
378   bool visitStore(StoreInst &I);
379   bool visitExtractValue(ExtractValueInst &I);
380   bool visitInsertValue(InsertValueInst &I);
381   bool visitCallBase(CallBase &Call);
382   bool visitReturnInst(ReturnInst &RI);
383   bool visitBranchInst(BranchInst &BI);
384   bool visitSelectInst(SelectInst &SI);
385   bool visitSwitchInst(SwitchInst &SI);
386   bool visitIndirectBrInst(IndirectBrInst &IBI);
387   bool visitResumeInst(ResumeInst &RI);
388   bool visitCleanupReturnInst(CleanupReturnInst &RI);
389   bool visitCatchReturnInst(CatchReturnInst &RI);
390   bool visitUnreachableInst(UnreachableInst &I);
391 
392 public:
393   CallAnalyzer(
394       Function &Callee, CallBase &Call, const TargetTransformInfo &TTI,
395       function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
396       function_ref<BlockFrequencyInfo &(Function &)> GetBFI = nullptr,
397       ProfileSummaryInfo *PSI = nullptr,
398       OptimizationRemarkEmitter *ORE = nullptr)
399       : TTI(TTI), GetAssumptionCache(GetAssumptionCache), GetBFI(GetBFI),
400         PSI(PSI), F(Callee), DL(F.getParent()->getDataLayout()), ORE(ORE),
401         CandidateCall(Call), EnableLoadElimination(true) {}
402 
403   InlineResult analyze();
404 
405   // Keep a bunch of stats about the cost savings found so we can print them
406   // out when debugging.
407   unsigned NumConstantArgs = 0;
408   unsigned NumConstantOffsetPtrArgs = 0;
409   unsigned NumAllocaArgs = 0;
410   unsigned NumConstantPtrCmps = 0;
411   unsigned NumConstantPtrDiffs = 0;
412   unsigned NumInstructionsSimplified = 0;
413 
414   void dump();
415 };
416 
417 /// FIXME: if it is necessary to derive from InlineCostCallAnalyzer, note
418 /// the FIXME in onLoweredCall, when instantiating an InlineCostCallAnalyzer
419 class InlineCostCallAnalyzer final : public CallAnalyzer {
420   const int CostUpperBound = INT_MAX - InlineConstants::InstrCost - 1;
421   const bool ComputeFullInlineCost;
422   int LoadEliminationCost = 0;
423   /// Bonus to be applied when percentage of vector instructions in callee is
424   /// high (see more details in updateThreshold).
425   int VectorBonus = 0;
426   /// Bonus to be applied when the callee has only one reachable basic block.
427   int SingleBBBonus = 0;
428 
429   /// Tunable parameters that control the analysis.
430   const InlineParams &Params;
431 
432   // This DenseMap stores the delta change in cost and threshold after
433   // accounting for the given instruction. The map is filled only with the
434   // flag PrintInstructionComments on.
435   DenseMap<const Instruction *, InstructionCostDetail> InstructionCostDetailMap;
436 
437   /// Upper bound for the inlining cost. Bonuses are being applied to account
438   /// for speculative "expected profit" of the inlining decision.
439   int Threshold = 0;
440 
441   /// Attempt to evaluate indirect calls to boost its inline cost.
442   const bool BoostIndirectCalls;
443 
444   /// Ignore the threshold when finalizing analysis.
445   const bool IgnoreThreshold;
446 
447   /// Inlining cost measured in abstract units, accounts for all the
448   /// instructions expected to be executed for a given function invocation.
449   /// Instructions that are statically proven to be dead based on call-site
450   /// arguments are not counted here.
451   int Cost = 0;
452 
453   bool SingleBB = true;
454 
455   unsigned SROACostSavings = 0;
456   unsigned SROACostSavingsLost = 0;
457 
458   /// The mapping of caller Alloca values to their accumulated cost savings. If
459   /// we have to disable SROA for one of the allocas, this tells us how much
460   /// cost must be added.
461   DenseMap<AllocaInst *, int> SROAArgCosts;
462 
463   /// Return true if \p Call is a cold callsite.
464   bool isColdCallSite(CallBase &Call, BlockFrequencyInfo *CallerBFI);
465 
466   /// Update Threshold based on callsite properties such as callee
467   /// attributes and callee hotness for PGO builds. The Callee is explicitly
468   /// passed to support analyzing indirect calls whose target is inferred by
469   /// analysis.
470   void updateThreshold(CallBase &Call, Function &Callee);
471   /// Return a higher threshold if \p Call is a hot callsite.
472   Optional<int> getHotCallSiteThreshold(CallBase &Call,
473                                         BlockFrequencyInfo *CallerBFI);
474 
475   /// Handle a capped 'int' increment for Cost.
476   void addCost(int64_t Inc, int64_t UpperBound = INT_MAX) {
477     assert(UpperBound > 0 && UpperBound <= INT_MAX && "invalid upper bound");
478     Cost = (int)std::min(UpperBound, Cost + Inc);
479   }
480 
481   void onDisableSROA(AllocaInst *Arg) override {
482     auto CostIt = SROAArgCosts.find(Arg);
483     if (CostIt == SROAArgCosts.end())
484       return;
485     addCost(CostIt->second);
486     SROACostSavings -= CostIt->second;
487     SROACostSavingsLost += CostIt->second;
488     SROAArgCosts.erase(CostIt);
489   }
490 
491   void onDisableLoadElimination() override {
492     addCost(LoadEliminationCost);
493     LoadEliminationCost = 0;
494   }
495   void onCallPenalty() override { addCost(InlineConstants::CallPenalty); }
496   void onCallArgumentSetup(const CallBase &Call) override {
497     // Pay the price of the argument setup. We account for the average 1
498     // instruction per call argument setup here.
499     addCost(Call.arg_size() * InlineConstants::InstrCost);
500   }
501   void onLoadRelativeIntrinsic() override {
502     // This is normally lowered to 4 LLVM instructions.
503     addCost(3 * InlineConstants::InstrCost);
504   }
505   void onLoweredCall(Function *F, CallBase &Call,
506                      bool IsIndirectCall) override {
507     // We account for the average 1 instruction per call argument setup here.
508     addCost(Call.arg_size() * InlineConstants::InstrCost);
509 
510     // If we have a constant that we are calling as a function, we can peer
511     // through it and see the function target. This happens not infrequently
512     // during devirtualization and so we want to give it a hefty bonus for
513     // inlining, but cap that bonus in the event that inlining wouldn't pan out.
514     // Pretend to inline the function, with a custom threshold.
515     if (IsIndirectCall && BoostIndirectCalls) {
516       auto IndirectCallParams = Params;
517       IndirectCallParams.DefaultThreshold =
518           InlineConstants::IndirectCallThreshold;
519       /// FIXME: if InlineCostCallAnalyzer is derived from, this may need
520       /// to instantiate the derived class.
521       InlineCostCallAnalyzer CA(*F, Call, IndirectCallParams, TTI,
522                                 GetAssumptionCache, GetBFI, PSI, ORE, false);
523       if (CA.analyze().isSuccess()) {
524         // We were able to inline the indirect call! Subtract the cost from the
525         // threshold to get the bonus we want to apply, but don't go below zero.
526         Cost -= std::max(0, CA.getThreshold() - CA.getCost());
527       }
528     } else
529       // Otherwise simply add the cost for merely making the call.
530       addCost(InlineConstants::CallPenalty);
531   }
532 
533   void onFinalizeSwitch(unsigned JumpTableSize,
534                         unsigned NumCaseCluster) override {
535     // If suitable for a jump table, consider the cost for the table size and
536     // branch to destination.
537     // Maximum valid cost increased in this function.
538     if (JumpTableSize) {
539       int64_t JTCost = (int64_t)JumpTableSize * InlineConstants::InstrCost +
540                        4 * InlineConstants::InstrCost;
541 
542       addCost(JTCost, (int64_t)CostUpperBound);
543       return;
544     }
545     // Considering forming a binary search, we should find the number of nodes
546     // which is same as the number of comparisons when lowered. For a given
547     // number of clusters, n, we can define a recursive function, f(n), to find
548     // the number of nodes in the tree. The recursion is :
549     // f(n) = 1 + f(n/2) + f (n - n/2), when n > 3,
550     // and f(n) = n, when n <= 3.
551     // This will lead a binary tree where the leaf should be either f(2) or f(3)
552     // when n > 3.  So, the number of comparisons from leaves should be n, while
553     // the number of non-leaf should be :
554     //   2^(log2(n) - 1) - 1
555     //   = 2^log2(n) * 2^-1 - 1
556     //   = n / 2 - 1.
557     // Considering comparisons from leaf and non-leaf nodes, we can estimate the
558     // number of comparisons in a simple closed form :
559     //   n + n / 2 - 1 = n * 3 / 2 - 1
560     if (NumCaseCluster <= 3) {
561       // Suppose a comparison includes one compare and one conditional branch.
562       addCost(NumCaseCluster * 2 * InlineConstants::InstrCost);
563       return;
564     }
565 
566     int64_t ExpectedNumberOfCompare = 3 * (int64_t)NumCaseCluster / 2 - 1;
567     int64_t SwitchCost =
568         ExpectedNumberOfCompare * 2 * InlineConstants::InstrCost;
569 
570     addCost(SwitchCost, (int64_t)CostUpperBound);
571   }
572   void onMissedSimplification() override {
573     addCost(InlineConstants::InstrCost);
574   }
575 
576   void onInitializeSROAArg(AllocaInst *Arg) override {
577     assert(Arg != nullptr &&
578            "Should not initialize SROA costs for null value.");
579     SROAArgCosts[Arg] = 0;
580   }
581 
582   void onAggregateSROAUse(AllocaInst *SROAArg) override {
583     auto CostIt = SROAArgCosts.find(SROAArg);
584     assert(CostIt != SROAArgCosts.end() &&
585            "expected this argument to have a cost");
586     CostIt->second += InlineConstants::InstrCost;
587     SROACostSavings += InlineConstants::InstrCost;
588   }
589 
590   void onBlockAnalyzed(const BasicBlock *BB) override {
591     auto *TI = BB->getTerminator();
592     // If we had any successors at this point, than post-inlining is likely to
593     // have them as well. Note that we assume any basic blocks which existed
594     // due to branches or switches which folded above will also fold after
595     // inlining.
596     if (SingleBB && TI->getNumSuccessors() > 1) {
597       // Take off the bonus we applied to the threshold.
598       Threshold -= SingleBBBonus;
599       SingleBB = false;
600     }
601   }
602 
603   void onInstructionAnalysisStart(const Instruction *I) override {
604     // This function is called to store the initial cost of inlining before
605     // the given instruction was assessed.
606     if (!PrintInstructionComments)
607       return;
608     InstructionCostDetailMap[I].CostBefore = Cost;
609     InstructionCostDetailMap[I].ThresholdBefore = Threshold;
610   }
611 
612   void onInstructionAnalysisFinish(const Instruction *I) override {
613     // This function is called to find new values of cost and threshold after
614     // the instruction has been assessed.
615     if (!PrintInstructionComments)
616       return;
617     InstructionCostDetailMap[I].CostAfter = Cost;
618     InstructionCostDetailMap[I].ThresholdAfter = Threshold;
619   }
620 
621   InlineResult finalizeAnalysis() override {
622     // Loops generally act a lot like calls in that they act like barriers to
623     // movement, require a certain amount of setup, etc. So when optimising for
624     // size, we penalise any call sites that perform loops. We do this after all
625     // other costs here, so will likely only be dealing with relatively small
626     // functions (and hence DT and LI will hopefully be cheap).
627     auto *Caller = CandidateCall.getFunction();
628     if (Caller->hasMinSize()) {
629       DominatorTree DT(F);
630       LoopInfo LI(DT);
631       int NumLoops = 0;
632       for (Loop *L : LI) {
633         // Ignore loops that will not be executed
634         if (DeadBlocks.count(L->getHeader()))
635           continue;
636         NumLoops++;
637       }
638       addCost(NumLoops * InlineConstants::CallPenalty);
639     }
640 
641     // We applied the maximum possible vector bonus at the beginning. Now,
642     // subtract the excess bonus, if any, from the Threshold before
643     // comparing against Cost.
644     if (NumVectorInstructions <= NumInstructions / 10)
645       Threshold -= VectorBonus;
646     else if (NumVectorInstructions <= NumInstructions / 2)
647       Threshold -= VectorBonus / 2;
648 
649     if (IgnoreThreshold || Cost < std::max(1, Threshold))
650       return InlineResult::success();
651     return InlineResult::failure("Cost over threshold.");
652   }
653   bool shouldStop() override {
654     // Bail out the moment we cross the threshold. This means we'll under-count
655     // the cost, but only when undercounting doesn't matter.
656     return !IgnoreThreshold && Cost >= Threshold && !ComputeFullInlineCost;
657   }
658 
659   void onLoadEliminationOpportunity() override {
660     LoadEliminationCost += InlineConstants::InstrCost;
661   }
662 
663   InlineResult onAnalysisStart() override {
664     // Perform some tweaks to the cost and threshold based on the direct
665     // callsite information.
666 
667     // We want to more aggressively inline vector-dense kernels, so up the
668     // threshold, and we'll lower it if the % of vector instructions gets too
669     // low. Note that these bonuses are some what arbitrary and evolved over
670     // time by accident as much as because they are principled bonuses.
671     //
672     // FIXME: It would be nice to remove all such bonuses. At least it would be
673     // nice to base the bonus values on something more scientific.
674     assert(NumInstructions == 0);
675     assert(NumVectorInstructions == 0);
676 
677     // Update the threshold based on callsite properties
678     updateThreshold(CandidateCall, F);
679 
680     // While Threshold depends on commandline options that can take negative
681     // values, we want to enforce the invariant that the computed threshold and
682     // bonuses are non-negative.
683     assert(Threshold >= 0);
684     assert(SingleBBBonus >= 0);
685     assert(VectorBonus >= 0);
686 
687     // Speculatively apply all possible bonuses to Threshold. If cost exceeds
688     // this Threshold any time, and cost cannot decrease, we can stop processing
689     // the rest of the function body.
690     Threshold += (SingleBBBonus + VectorBonus);
691 
692     // Give out bonuses for the callsite, as the instructions setting them up
693     // will be gone after inlining.
694     addCost(-getCallsiteCost(this->CandidateCall, DL));
695 
696     // If this function uses the coldcc calling convention, prefer not to inline
697     // it.
698     if (F.getCallingConv() == CallingConv::Cold)
699       Cost += InlineConstants::ColdccPenalty;
700 
701     // Check if we're done. This can happen due to bonuses and penalties.
702     if (Cost >= Threshold && !ComputeFullInlineCost)
703       return InlineResult::failure("high cost");
704 
705     return InlineResult::success();
706   }
707 
708 public:
709   InlineCostCallAnalyzer(
710       Function &Callee, CallBase &Call, const InlineParams &Params,
711       const TargetTransformInfo &TTI,
712       function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
713       function_ref<BlockFrequencyInfo &(Function &)> GetBFI = nullptr,
714       ProfileSummaryInfo *PSI = nullptr,
715       OptimizationRemarkEmitter *ORE = nullptr, bool BoostIndirect = true,
716       bool IgnoreThreshold = false)
717       : CallAnalyzer(Callee, Call, TTI, GetAssumptionCache, GetBFI, PSI, ORE),
718         ComputeFullInlineCost(OptComputeFullInlineCost ||
719                               Params.ComputeFullInlineCost || ORE),
720         Params(Params), Threshold(Params.DefaultThreshold),
721         BoostIndirectCalls(BoostIndirect), IgnoreThreshold(IgnoreThreshold),
722         Writer(this) {}
723 
724   /// Annotation Writer for instruction details
725   InlineCostAnnotationWriter Writer;
726 
727   void dump();
728 
729   Optional<InstructionCostDetail> getCostDetails(const Instruction *I) {
730     if (InstructionCostDetailMap.find(I) != InstructionCostDetailMap.end())
731       return InstructionCostDetailMap[I];
732     return None;
733   }
734 
735   virtual ~InlineCostCallAnalyzer() {}
736   int getThreshold() { return Threshold; }
737   int getCost() { return Cost; }
738 };
739 } // namespace
740 
741 /// Test whether the given value is an Alloca-derived function argument.
742 bool CallAnalyzer::isAllocaDerivedArg(Value *V) {
743   return SROAArgValues.count(V);
744 }
745 
746 void CallAnalyzer::disableSROAForArg(AllocaInst *SROAArg) {
747   onDisableSROA(SROAArg);
748   EnabledSROAAllocas.erase(SROAArg);
749   disableLoadElimination();
750 }
751 
752 void InlineCostAnnotationWriter::emitInstructionAnnot(const Instruction *I,
753                                                 formatted_raw_ostream &OS) {
754   // The cost of inlining of the given instruction is printed always.
755   // The threshold delta is printed only when it is non-zero. It happens
756   // when we decided to give a bonus at a particular instruction.
757   Optional<InstructionCostDetail> Record = ICCA->getCostDetails(I);
758   if (!Record)
759     OS << "; No analysis for the instruction";
760   else {
761     OS << "; cost before = " << Record->CostBefore
762        << ", cost after = " << Record->CostAfter
763        << ", threshold before = " << Record->ThresholdBefore
764        << ", threshold after = " << Record->ThresholdAfter << ", ";
765     OS << "cost delta = " << Record->getCostDelta();
766     if (Record->hasThresholdChanged())
767       OS << ", threshold delta = " << Record->getThresholdDelta();
768   }
769   OS << "\n";
770 }
771 
772 /// If 'V' maps to a SROA candidate, disable SROA for it.
773 void CallAnalyzer::disableSROA(Value *V) {
774   if (auto *SROAArg = getSROAArgForValueOrNull(V)) {
775     disableSROAForArg(SROAArg);
776   }
777 }
778 
779 void CallAnalyzer::disableLoadElimination() {
780   if (EnableLoadElimination) {
781     onDisableLoadElimination();
782     EnableLoadElimination = false;
783   }
784 }
785 
786 /// Accumulate a constant GEP offset into an APInt if possible.
787 ///
788 /// Returns false if unable to compute the offset for any reason. Respects any
789 /// simplified values known during the analysis of this callsite.
790 bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
791   unsigned IntPtrWidth = DL.getIndexTypeSizeInBits(GEP.getType());
792   assert(IntPtrWidth == Offset.getBitWidth());
793 
794   for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
795        GTI != GTE; ++GTI) {
796     ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
797     if (!OpC)
798       if (Constant *SimpleOp = SimplifiedValues.lookup(GTI.getOperand()))
799         OpC = dyn_cast<ConstantInt>(SimpleOp);
800     if (!OpC)
801       return false;
802     if (OpC->isZero())
803       continue;
804 
805     // Handle a struct index, which adds its field offset to the pointer.
806     if (StructType *STy = GTI.getStructTypeOrNull()) {
807       unsigned ElementIdx = OpC->getZExtValue();
808       const StructLayout *SL = DL.getStructLayout(STy);
809       Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
810       continue;
811     }
812 
813     APInt TypeSize(IntPtrWidth, DL.getTypeAllocSize(GTI.getIndexedType()));
814     Offset += OpC->getValue().sextOrTrunc(IntPtrWidth) * TypeSize;
815   }
816   return true;
817 }
818 
819 /// Use TTI to check whether a GEP is free.
820 ///
821 /// Respects any simplified values known during the analysis of this callsite.
822 bool CallAnalyzer::isGEPFree(GetElementPtrInst &GEP) {
823   SmallVector<Value *, 4> Operands;
824   Operands.push_back(GEP.getOperand(0));
825   for (User::op_iterator I = GEP.idx_begin(), E = GEP.idx_end(); I != E; ++I)
826     if (Constant *SimpleOp = SimplifiedValues.lookup(*I))
827       Operands.push_back(SimpleOp);
828     else
829       Operands.push_back(*I);
830   return TargetTransformInfo::TCC_Free ==
831          TTI.getUserCost(&GEP, Operands,
832                          TargetTransformInfo::TCK_SizeAndLatency);
833 }
834 
835 bool CallAnalyzer::visitAlloca(AllocaInst &I) {
836   // Check whether inlining will turn a dynamic alloca into a static
837   // alloca and handle that case.
838   if (I.isArrayAllocation()) {
839     Constant *Size = SimplifiedValues.lookup(I.getArraySize());
840     if (auto *AllocSize = dyn_cast_or_null<ConstantInt>(Size)) {
841       Type *Ty = I.getAllocatedType();
842       AllocatedSize = SaturatingMultiplyAdd(
843           AllocSize->getLimitedValue(), DL.getTypeAllocSize(Ty).getFixedSize(),
844           AllocatedSize);
845       return Base::visitAlloca(I);
846     }
847   }
848 
849   // Accumulate the allocated size.
850   if (I.isStaticAlloca()) {
851     Type *Ty = I.getAllocatedType();
852     AllocatedSize =
853         SaturatingAdd(DL.getTypeAllocSize(Ty).getFixedSize(), AllocatedSize);
854   }
855 
856   // We will happily inline static alloca instructions.
857   if (I.isStaticAlloca())
858     return Base::visitAlloca(I);
859 
860   // FIXME: This is overly conservative. Dynamic allocas are inefficient for
861   // a variety of reasons, and so we would like to not inline them into
862   // functions which don't currently have a dynamic alloca. This simply
863   // disables inlining altogether in the presence of a dynamic alloca.
864   HasDynamicAlloca = true;
865   return false;
866 }
867 
868 bool CallAnalyzer::visitPHI(PHINode &I) {
869   // FIXME: We need to propagate SROA *disabling* through phi nodes, even
870   // though we don't want to propagate it's bonuses. The idea is to disable
871   // SROA if it *might* be used in an inappropriate manner.
872 
873   // Phi nodes are always zero-cost.
874   // FIXME: Pointer sizes may differ between different address spaces, so do we
875   // need to use correct address space in the call to getPointerSizeInBits here?
876   // Or could we skip the getPointerSizeInBits call completely? As far as I can
877   // see the ZeroOffset is used as a dummy value, so we can probably use any
878   // bit width for the ZeroOffset?
879   APInt ZeroOffset = APInt::getNullValue(DL.getPointerSizeInBits(0));
880   bool CheckSROA = I.getType()->isPointerTy();
881 
882   // Track the constant or pointer with constant offset we've seen so far.
883   Constant *FirstC = nullptr;
884   std::pair<Value *, APInt> FirstBaseAndOffset = {nullptr, ZeroOffset};
885   Value *FirstV = nullptr;
886 
887   for (unsigned i = 0, e = I.getNumIncomingValues(); i != e; ++i) {
888     BasicBlock *Pred = I.getIncomingBlock(i);
889     // If the incoming block is dead, skip the incoming block.
890     if (DeadBlocks.count(Pred))
891       continue;
892     // If the parent block of phi is not the known successor of the incoming
893     // block, skip the incoming block.
894     BasicBlock *KnownSuccessor = KnownSuccessors[Pred];
895     if (KnownSuccessor && KnownSuccessor != I.getParent())
896       continue;
897 
898     Value *V = I.getIncomingValue(i);
899     // If the incoming value is this phi itself, skip the incoming value.
900     if (&I == V)
901       continue;
902 
903     Constant *C = dyn_cast<Constant>(V);
904     if (!C)
905       C = SimplifiedValues.lookup(V);
906 
907     std::pair<Value *, APInt> BaseAndOffset = {nullptr, ZeroOffset};
908     if (!C && CheckSROA)
909       BaseAndOffset = ConstantOffsetPtrs.lookup(V);
910 
911     if (!C && !BaseAndOffset.first)
912       // The incoming value is neither a constant nor a pointer with constant
913       // offset, exit early.
914       return true;
915 
916     if (FirstC) {
917       if (FirstC == C)
918         // If we've seen a constant incoming value before and it is the same
919         // constant we see this time, continue checking the next incoming value.
920         continue;
921       // Otherwise early exit because we either see a different constant or saw
922       // a constant before but we have a pointer with constant offset this time.
923       return true;
924     }
925 
926     if (FirstV) {
927       // The same logic as above, but check pointer with constant offset here.
928       if (FirstBaseAndOffset == BaseAndOffset)
929         continue;
930       return true;
931     }
932 
933     if (C) {
934       // This is the 1st time we've seen a constant, record it.
935       FirstC = C;
936       continue;
937     }
938 
939     // The remaining case is that this is the 1st time we've seen a pointer with
940     // constant offset, record it.
941     FirstV = V;
942     FirstBaseAndOffset = BaseAndOffset;
943   }
944 
945   // Check if we can map phi to a constant.
946   if (FirstC) {
947     SimplifiedValues[&I] = FirstC;
948     return true;
949   }
950 
951   // Check if we can map phi to a pointer with constant offset.
952   if (FirstBaseAndOffset.first) {
953     ConstantOffsetPtrs[&I] = FirstBaseAndOffset;
954 
955     if (auto *SROAArg = getSROAArgForValueOrNull(FirstV))
956       SROAArgValues[&I] = SROAArg;
957   }
958 
959   return true;
960 }
961 
962 /// Check we can fold GEPs of constant-offset call site argument pointers.
963 /// This requires target data and inbounds GEPs.
964 ///
965 /// \return true if the specified GEP can be folded.
966 bool CallAnalyzer::canFoldInboundsGEP(GetElementPtrInst &I) {
967   // Check if we have a base + offset for the pointer.
968   std::pair<Value *, APInt> BaseAndOffset =
969       ConstantOffsetPtrs.lookup(I.getPointerOperand());
970   if (!BaseAndOffset.first)
971     return false;
972 
973   // Check if the offset of this GEP is constant, and if so accumulate it
974   // into Offset.
975   if (!accumulateGEPOffset(cast<GEPOperator>(I), BaseAndOffset.second))
976     return false;
977 
978   // Add the result as a new mapping to Base + Offset.
979   ConstantOffsetPtrs[&I] = BaseAndOffset;
980 
981   return true;
982 }
983 
984 bool CallAnalyzer::visitGetElementPtr(GetElementPtrInst &I) {
985   auto *SROAArg = getSROAArgForValueOrNull(I.getPointerOperand());
986 
987   // Lambda to check whether a GEP's indices are all constant.
988   auto IsGEPOffsetConstant = [&](GetElementPtrInst &GEP) {
989     for (User::op_iterator I = GEP.idx_begin(), E = GEP.idx_end(); I != E; ++I)
990       if (!isa<Constant>(*I) && !SimplifiedValues.lookup(*I))
991         return false;
992     return true;
993   };
994 
995   if ((I.isInBounds() && canFoldInboundsGEP(I)) || IsGEPOffsetConstant(I)) {
996     if (SROAArg)
997       SROAArgValues[&I] = SROAArg;
998 
999     // Constant GEPs are modeled as free.
1000     return true;
1001   }
1002 
1003   // Variable GEPs will require math and will disable SROA.
1004   if (SROAArg)
1005     disableSROAForArg(SROAArg);
1006   return isGEPFree(I);
1007 }
1008 
1009 /// Simplify \p I if its operands are constants and update SimplifiedValues.
1010 /// \p Evaluate is a callable specific to instruction type that evaluates the
1011 /// instruction when all the operands are constants.
1012 template <typename Callable>
1013 bool CallAnalyzer::simplifyInstruction(Instruction &I, Callable Evaluate) {
1014   SmallVector<Constant *, 2> COps;
1015   for (Value *Op : I.operands()) {
1016     Constant *COp = dyn_cast<Constant>(Op);
1017     if (!COp)
1018       COp = SimplifiedValues.lookup(Op);
1019     if (!COp)
1020       return false;
1021     COps.push_back(COp);
1022   }
1023   auto *C = Evaluate(COps);
1024   if (!C)
1025     return false;
1026   SimplifiedValues[&I] = C;
1027   return true;
1028 }
1029 
1030 bool CallAnalyzer::visitBitCast(BitCastInst &I) {
1031   // Propagate constants through bitcasts.
1032   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1033         return ConstantExpr::getBitCast(COps[0], I.getType());
1034       }))
1035     return true;
1036 
1037   // Track base/offsets through casts
1038   std::pair<Value *, APInt> BaseAndOffset =
1039       ConstantOffsetPtrs.lookup(I.getOperand(0));
1040   // Casts don't change the offset, just wrap it up.
1041   if (BaseAndOffset.first)
1042     ConstantOffsetPtrs[&I] = BaseAndOffset;
1043 
1044   // Also look for SROA candidates here.
1045   if (auto *SROAArg = getSROAArgForValueOrNull(I.getOperand(0)))
1046     SROAArgValues[&I] = SROAArg;
1047 
1048   // Bitcasts are always zero cost.
1049   return true;
1050 }
1051 
1052 bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
1053   // Propagate constants through ptrtoint.
1054   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1055         return ConstantExpr::getPtrToInt(COps[0], I.getType());
1056       }))
1057     return true;
1058 
1059   // Track base/offset pairs when converted to a plain integer provided the
1060   // integer is large enough to represent the pointer.
1061   unsigned IntegerSize = I.getType()->getScalarSizeInBits();
1062   unsigned AS = I.getOperand(0)->getType()->getPointerAddressSpace();
1063   if (IntegerSize >= DL.getPointerSizeInBits(AS)) {
1064     std::pair<Value *, APInt> BaseAndOffset =
1065         ConstantOffsetPtrs.lookup(I.getOperand(0));
1066     if (BaseAndOffset.first)
1067       ConstantOffsetPtrs[&I] = BaseAndOffset;
1068   }
1069 
1070   // This is really weird. Technically, ptrtoint will disable SROA. However,
1071   // unless that ptrtoint is *used* somewhere in the live basic blocks after
1072   // inlining, it will be nuked, and SROA should proceed. All of the uses which
1073   // would block SROA would also block SROA if applied directly to a pointer,
1074   // and so we can just add the integer in here. The only places where SROA is
1075   // preserved either cannot fire on an integer, or won't in-and-of themselves
1076   // disable SROA (ext) w/o some later use that we would see and disable.
1077   if (auto *SROAArg = getSROAArgForValueOrNull(I.getOperand(0)))
1078     SROAArgValues[&I] = SROAArg;
1079 
1080   return TargetTransformInfo::TCC_Free ==
1081          TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
1082 }
1083 
1084 bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
1085   // Propagate constants through ptrtoint.
1086   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1087         return ConstantExpr::getIntToPtr(COps[0], I.getType());
1088       }))
1089     return true;
1090 
1091   // Track base/offset pairs when round-tripped through a pointer without
1092   // modifications provided the integer is not too large.
1093   Value *Op = I.getOperand(0);
1094   unsigned IntegerSize = Op->getType()->getScalarSizeInBits();
1095   if (IntegerSize <= DL.getPointerTypeSizeInBits(I.getType())) {
1096     std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op);
1097     if (BaseAndOffset.first)
1098       ConstantOffsetPtrs[&I] = BaseAndOffset;
1099   }
1100 
1101   // "Propagate" SROA here in the same manner as we do for ptrtoint above.
1102   if (auto *SROAArg = getSROAArgForValueOrNull(Op))
1103     SROAArgValues[&I] = SROAArg;
1104 
1105   return TargetTransformInfo::TCC_Free ==
1106          TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
1107 }
1108 
1109 bool CallAnalyzer::visitCastInst(CastInst &I) {
1110   // Propagate constants through casts.
1111   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1112         return ConstantExpr::getCast(I.getOpcode(), COps[0], I.getType());
1113       }))
1114     return true;
1115 
1116   // Disable SROA in the face of arbitrary casts we don't whitelist elsewhere.
1117   disableSROA(I.getOperand(0));
1118 
1119   // If this is a floating-point cast, and the target says this operation
1120   // is expensive, this may eventually become a library call. Treat the cost
1121   // as such.
1122   switch (I.getOpcode()) {
1123   case Instruction::FPTrunc:
1124   case Instruction::FPExt:
1125   case Instruction::UIToFP:
1126   case Instruction::SIToFP:
1127   case Instruction::FPToUI:
1128   case Instruction::FPToSI:
1129     if (TTI.getFPOpCost(I.getType()) == TargetTransformInfo::TCC_Expensive)
1130       onCallPenalty();
1131     break;
1132   default:
1133     break;
1134   }
1135 
1136   return TargetTransformInfo::TCC_Free ==
1137          TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
1138 }
1139 
1140 bool CallAnalyzer::visitUnaryInstruction(UnaryInstruction &I) {
1141   Value *Operand = I.getOperand(0);
1142   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1143         return ConstantFoldInstOperands(&I, COps[0], DL);
1144       }))
1145     return true;
1146 
1147   // Disable any SROA on the argument to arbitrary unary instructions.
1148   disableSROA(Operand);
1149 
1150   return false;
1151 }
1152 
1153 bool CallAnalyzer::paramHasAttr(Argument *A, Attribute::AttrKind Attr) {
1154   return CandidateCall.paramHasAttr(A->getArgNo(), Attr);
1155 }
1156 
1157 bool CallAnalyzer::isKnownNonNullInCallee(Value *V) {
1158   // Does the *call site* have the NonNull attribute set on an argument?  We
1159   // use the attribute on the call site to memoize any analysis done in the
1160   // caller. This will also trip if the callee function has a non-null
1161   // parameter attribute, but that's a less interesting case because hopefully
1162   // the callee would already have been simplified based on that.
1163   if (Argument *A = dyn_cast<Argument>(V))
1164     if (paramHasAttr(A, Attribute::NonNull))
1165       return true;
1166 
1167   // Is this an alloca in the caller?  This is distinct from the attribute case
1168   // above because attributes aren't updated within the inliner itself and we
1169   // always want to catch the alloca derived case.
1170   if (isAllocaDerivedArg(V))
1171     // We can actually predict the result of comparisons between an
1172     // alloca-derived value and null. Note that this fires regardless of
1173     // SROA firing.
1174     return true;
1175 
1176   return false;
1177 }
1178 
1179 bool CallAnalyzer::allowSizeGrowth(CallBase &Call) {
1180   // If the normal destination of the invoke or the parent block of the call
1181   // site is unreachable-terminated, there is little point in inlining this
1182   // unless there is literally zero cost.
1183   // FIXME: Note that it is possible that an unreachable-terminated block has a
1184   // hot entry. For example, in below scenario inlining hot_call_X() may be
1185   // beneficial :
1186   // main() {
1187   //   hot_call_1();
1188   //   ...
1189   //   hot_call_N()
1190   //   exit(0);
1191   // }
1192   // For now, we are not handling this corner case here as it is rare in real
1193   // code. In future, we should elaborate this based on BPI and BFI in more
1194   // general threshold adjusting heuristics in updateThreshold().
1195   if (InvokeInst *II = dyn_cast<InvokeInst>(&Call)) {
1196     if (isa<UnreachableInst>(II->getNormalDest()->getTerminator()))
1197       return false;
1198   } else if (isa<UnreachableInst>(Call.getParent()->getTerminator()))
1199     return false;
1200 
1201   return true;
1202 }
1203 
1204 bool InlineCostCallAnalyzer::isColdCallSite(CallBase &Call,
1205                                             BlockFrequencyInfo *CallerBFI) {
1206   // If global profile summary is available, then callsite's coldness is
1207   // determined based on that.
1208   if (PSI && PSI->hasProfileSummary())
1209     return PSI->isColdCallSite(Call, CallerBFI);
1210 
1211   // Otherwise we need BFI to be available.
1212   if (!CallerBFI)
1213     return false;
1214 
1215   // Determine if the callsite is cold relative to caller's entry. We could
1216   // potentially cache the computation of scaled entry frequency, but the added
1217   // complexity is not worth it unless this scaling shows up high in the
1218   // profiles.
1219   const BranchProbability ColdProb(ColdCallSiteRelFreq, 100);
1220   auto CallSiteBB = Call.getParent();
1221   auto CallSiteFreq = CallerBFI->getBlockFreq(CallSiteBB);
1222   auto CallerEntryFreq =
1223       CallerBFI->getBlockFreq(&(Call.getCaller()->getEntryBlock()));
1224   return CallSiteFreq < CallerEntryFreq * ColdProb;
1225 }
1226 
1227 Optional<int>
1228 InlineCostCallAnalyzer::getHotCallSiteThreshold(CallBase &Call,
1229                                                 BlockFrequencyInfo *CallerBFI) {
1230 
1231   // If global profile summary is available, then callsite's hotness is
1232   // determined based on that.
1233   if (PSI && PSI->hasProfileSummary() && PSI->isHotCallSite(Call, CallerBFI))
1234     return Params.HotCallSiteThreshold;
1235 
1236   // Otherwise we need BFI to be available and to have a locally hot callsite
1237   // threshold.
1238   if (!CallerBFI || !Params.LocallyHotCallSiteThreshold)
1239     return None;
1240 
1241   // Determine if the callsite is hot relative to caller's entry. We could
1242   // potentially cache the computation of scaled entry frequency, but the added
1243   // complexity is not worth it unless this scaling shows up high in the
1244   // profiles.
1245   auto CallSiteBB = Call.getParent();
1246   auto CallSiteFreq = CallerBFI->getBlockFreq(CallSiteBB).getFrequency();
1247   auto CallerEntryFreq = CallerBFI->getEntryFreq();
1248   if (CallSiteFreq >= CallerEntryFreq * HotCallSiteRelFreq)
1249     return Params.LocallyHotCallSiteThreshold;
1250 
1251   // Otherwise treat it normally.
1252   return None;
1253 }
1254 
1255 void InlineCostCallAnalyzer::updateThreshold(CallBase &Call, Function &Callee) {
1256   // If no size growth is allowed for this inlining, set Threshold to 0.
1257   if (!allowSizeGrowth(Call)) {
1258     Threshold = 0;
1259     return;
1260   }
1261 
1262   Function *Caller = Call.getCaller();
1263 
1264   // return min(A, B) if B is valid.
1265   auto MinIfValid = [](int A, Optional<int> B) {
1266     return B ? std::min(A, B.getValue()) : A;
1267   };
1268 
1269   // return max(A, B) if B is valid.
1270   auto MaxIfValid = [](int A, Optional<int> B) {
1271     return B ? std::max(A, B.getValue()) : A;
1272   };
1273 
1274   // Various bonus percentages. These are multiplied by Threshold to get the
1275   // bonus values.
1276   // SingleBBBonus: This bonus is applied if the callee has a single reachable
1277   // basic block at the given callsite context. This is speculatively applied
1278   // and withdrawn if more than one basic block is seen.
1279   //
1280   // LstCallToStaticBonus: This large bonus is applied to ensure the inlining
1281   // of the last call to a static function as inlining such functions is
1282   // guaranteed to reduce code size.
1283   //
1284   // These bonus percentages may be set to 0 based on properties of the caller
1285   // and the callsite.
1286   int SingleBBBonusPercent = 50;
1287   int VectorBonusPercent = TTI.getInlinerVectorBonusPercent();
1288   int LastCallToStaticBonus = InlineConstants::LastCallToStaticBonus;
1289 
1290   // Lambda to set all the above bonus and bonus percentages to 0.
1291   auto DisallowAllBonuses = [&]() {
1292     SingleBBBonusPercent = 0;
1293     VectorBonusPercent = 0;
1294     LastCallToStaticBonus = 0;
1295   };
1296 
1297   // Use the OptMinSizeThreshold or OptSizeThreshold knob if they are available
1298   // and reduce the threshold if the caller has the necessary attribute.
1299   if (Caller->hasMinSize()) {
1300     Threshold = MinIfValid(Threshold, Params.OptMinSizeThreshold);
1301     // For minsize, we want to disable the single BB bonus and the vector
1302     // bonuses, but not the last-call-to-static bonus. Inlining the last call to
1303     // a static function will, at the minimum, eliminate the parameter setup and
1304     // call/return instructions.
1305     SingleBBBonusPercent = 0;
1306     VectorBonusPercent = 0;
1307   } else if (Caller->hasOptSize())
1308     Threshold = MinIfValid(Threshold, Params.OptSizeThreshold);
1309 
1310   // Adjust the threshold based on inlinehint attribute and profile based
1311   // hotness information if the caller does not have MinSize attribute.
1312   if (!Caller->hasMinSize()) {
1313     if (Callee.hasFnAttribute(Attribute::InlineHint))
1314       Threshold = MaxIfValid(Threshold, Params.HintThreshold);
1315 
1316     // FIXME: After switching to the new passmanager, simplify the logic below
1317     // by checking only the callsite hotness/coldness as we will reliably
1318     // have local profile information.
1319     //
1320     // Callsite hotness and coldness can be determined if sample profile is
1321     // used (which adds hotness metadata to calls) or if caller's
1322     // BlockFrequencyInfo is available.
1323     BlockFrequencyInfo *CallerBFI = GetBFI ? &(GetBFI(*Caller)) : nullptr;
1324     auto HotCallSiteThreshold = getHotCallSiteThreshold(Call, CallerBFI);
1325     if (!Caller->hasOptSize() && HotCallSiteThreshold) {
1326       LLVM_DEBUG(dbgs() << "Hot callsite.\n");
1327       // FIXME: This should update the threshold only if it exceeds the
1328       // current threshold, but AutoFDO + ThinLTO currently relies on this
1329       // behavior to prevent inlining of hot callsites during ThinLTO
1330       // compile phase.
1331       Threshold = HotCallSiteThreshold.getValue();
1332     } else if (isColdCallSite(Call, CallerBFI)) {
1333       LLVM_DEBUG(dbgs() << "Cold callsite.\n");
1334       // Do not apply bonuses for a cold callsite including the
1335       // LastCallToStatic bonus. While this bonus might result in code size
1336       // reduction, it can cause the size of a non-cold caller to increase
1337       // preventing it from being inlined.
1338       DisallowAllBonuses();
1339       Threshold = MinIfValid(Threshold, Params.ColdCallSiteThreshold);
1340     } else if (PSI) {
1341       // Use callee's global profile information only if we have no way of
1342       // determining this via callsite information.
1343       if (PSI->isFunctionEntryHot(&Callee)) {
1344         LLVM_DEBUG(dbgs() << "Hot callee.\n");
1345         // If callsite hotness can not be determined, we may still know
1346         // that the callee is hot and treat it as a weaker hint for threshold
1347         // increase.
1348         Threshold = MaxIfValid(Threshold, Params.HintThreshold);
1349       } else if (PSI->isFunctionEntryCold(&Callee)) {
1350         LLVM_DEBUG(dbgs() << "Cold callee.\n");
1351         // Do not apply bonuses for a cold callee including the
1352         // LastCallToStatic bonus. While this bonus might result in code size
1353         // reduction, it can cause the size of a non-cold caller to increase
1354         // preventing it from being inlined.
1355         DisallowAllBonuses();
1356         Threshold = MinIfValid(Threshold, Params.ColdThreshold);
1357       }
1358     }
1359   }
1360 
1361   // Finally, take the target-specific inlining threshold multiplier into
1362   // account.
1363   Threshold *= TTI.getInliningThresholdMultiplier();
1364 
1365   SingleBBBonus = Threshold * SingleBBBonusPercent / 100;
1366   VectorBonus = Threshold * VectorBonusPercent / 100;
1367 
1368   bool OnlyOneCallAndLocalLinkage =
1369       F.hasLocalLinkage() && F.hasOneUse() && &F == Call.getCalledFunction();
1370   // If there is only one call of the function, and it has internal linkage,
1371   // the cost of inlining it drops dramatically. It may seem odd to update
1372   // Cost in updateThreshold, but the bonus depends on the logic in this method.
1373   if (OnlyOneCallAndLocalLinkage)
1374     Cost -= LastCallToStaticBonus;
1375 }
1376 
1377 bool CallAnalyzer::visitCmpInst(CmpInst &I) {
1378   Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1379   // First try to handle simplified comparisons.
1380   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1381         return ConstantExpr::getCompare(I.getPredicate(), COps[0], COps[1]);
1382       }))
1383     return true;
1384 
1385   if (I.getOpcode() == Instruction::FCmp)
1386     return false;
1387 
1388   // Otherwise look for a comparison between constant offset pointers with
1389   // a common base.
1390   Value *LHSBase, *RHSBase;
1391   APInt LHSOffset, RHSOffset;
1392   std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
1393   if (LHSBase) {
1394     std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
1395     if (RHSBase && LHSBase == RHSBase) {
1396       // We have common bases, fold the icmp to a constant based on the
1397       // offsets.
1398       Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
1399       Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
1400       if (Constant *C = ConstantExpr::getICmp(I.getPredicate(), CLHS, CRHS)) {
1401         SimplifiedValues[&I] = C;
1402         ++NumConstantPtrCmps;
1403         return true;
1404       }
1405     }
1406   }
1407 
1408   // If the comparison is an equality comparison with null, we can simplify it
1409   // if we know the value (argument) can't be null
1410   if (I.isEquality() && isa<ConstantPointerNull>(I.getOperand(1)) &&
1411       isKnownNonNullInCallee(I.getOperand(0))) {
1412     bool IsNotEqual = I.getPredicate() == CmpInst::ICMP_NE;
1413     SimplifiedValues[&I] = IsNotEqual ? ConstantInt::getTrue(I.getType())
1414                                       : ConstantInt::getFalse(I.getType());
1415     return true;
1416   }
1417   return handleSROA(I.getOperand(0), isa<ConstantPointerNull>(I.getOperand(1)));
1418 }
1419 
1420 bool CallAnalyzer::visitSub(BinaryOperator &I) {
1421   // Try to handle a special case: we can fold computing the difference of two
1422   // constant-related pointers.
1423   Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1424   Value *LHSBase, *RHSBase;
1425   APInt LHSOffset, RHSOffset;
1426   std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
1427   if (LHSBase) {
1428     std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
1429     if (RHSBase && LHSBase == RHSBase) {
1430       // We have common bases, fold the subtract to a constant based on the
1431       // offsets.
1432       Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
1433       Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
1434       if (Constant *C = ConstantExpr::getSub(CLHS, CRHS)) {
1435         SimplifiedValues[&I] = C;
1436         ++NumConstantPtrDiffs;
1437         return true;
1438       }
1439     }
1440   }
1441 
1442   // Otherwise, fall back to the generic logic for simplifying and handling
1443   // instructions.
1444   return Base::visitSub(I);
1445 }
1446 
1447 bool CallAnalyzer::visitBinaryOperator(BinaryOperator &I) {
1448   Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1449   Constant *CLHS = dyn_cast<Constant>(LHS);
1450   if (!CLHS)
1451     CLHS = SimplifiedValues.lookup(LHS);
1452   Constant *CRHS = dyn_cast<Constant>(RHS);
1453   if (!CRHS)
1454     CRHS = SimplifiedValues.lookup(RHS);
1455 
1456   Value *SimpleV = nullptr;
1457   if (auto FI = dyn_cast<FPMathOperator>(&I))
1458     SimpleV = SimplifyBinOp(I.getOpcode(), CLHS ? CLHS : LHS, CRHS ? CRHS : RHS,
1459                             FI->getFastMathFlags(), DL);
1460   else
1461     SimpleV =
1462         SimplifyBinOp(I.getOpcode(), CLHS ? CLHS : LHS, CRHS ? CRHS : RHS, DL);
1463 
1464   if (Constant *C = dyn_cast_or_null<Constant>(SimpleV))
1465     SimplifiedValues[&I] = C;
1466 
1467   if (SimpleV)
1468     return true;
1469 
1470   // Disable any SROA on arguments to arbitrary, unsimplified binary operators.
1471   disableSROA(LHS);
1472   disableSROA(RHS);
1473 
1474   // If the instruction is floating point, and the target says this operation
1475   // is expensive, this may eventually become a library call. Treat the cost
1476   // as such. Unless it's fneg which can be implemented with an xor.
1477   using namespace llvm::PatternMatch;
1478   if (I.getType()->isFloatingPointTy() &&
1479       TTI.getFPOpCost(I.getType()) == TargetTransformInfo::TCC_Expensive &&
1480       !match(&I, m_FNeg(m_Value())))
1481     onCallPenalty();
1482 
1483   return false;
1484 }
1485 
1486 bool CallAnalyzer::visitFNeg(UnaryOperator &I) {
1487   Value *Op = I.getOperand(0);
1488   Constant *COp = dyn_cast<Constant>(Op);
1489   if (!COp)
1490     COp = SimplifiedValues.lookup(Op);
1491 
1492   Value *SimpleV = SimplifyFNegInst(
1493       COp ? COp : Op, cast<FPMathOperator>(I).getFastMathFlags(), DL);
1494 
1495   if (Constant *C = dyn_cast_or_null<Constant>(SimpleV))
1496     SimplifiedValues[&I] = C;
1497 
1498   if (SimpleV)
1499     return true;
1500 
1501   // Disable any SROA on arguments to arbitrary, unsimplified fneg.
1502   disableSROA(Op);
1503 
1504   return false;
1505 }
1506 
1507 bool CallAnalyzer::visitLoad(LoadInst &I) {
1508   if (handleSROA(I.getPointerOperand(), I.isSimple()))
1509     return true;
1510 
1511   // If the data is already loaded from this address and hasn't been clobbered
1512   // by any stores or calls, this load is likely to be redundant and can be
1513   // eliminated.
1514   if (EnableLoadElimination &&
1515       !LoadAddrSet.insert(I.getPointerOperand()).second && I.isUnordered()) {
1516     onLoadEliminationOpportunity();
1517     return true;
1518   }
1519 
1520   return false;
1521 }
1522 
1523 bool CallAnalyzer::visitStore(StoreInst &I) {
1524   if (handleSROA(I.getPointerOperand(), I.isSimple()))
1525     return true;
1526 
1527   // The store can potentially clobber loads and prevent repeated loads from
1528   // being eliminated.
1529   // FIXME:
1530   // 1. We can probably keep an initial set of eliminatable loads substracted
1531   // from the cost even when we finally see a store. We just need to disable
1532   // *further* accumulation of elimination savings.
1533   // 2. We should probably at some point thread MemorySSA for the callee into
1534   // this and then use that to actually compute *really* precise savings.
1535   disableLoadElimination();
1536   return false;
1537 }
1538 
1539 bool CallAnalyzer::visitExtractValue(ExtractValueInst &I) {
1540   // Constant folding for extract value is trivial.
1541   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1542         return ConstantExpr::getExtractValue(COps[0], I.getIndices());
1543       }))
1544     return true;
1545 
1546   // SROA can look through these but give them a cost.
1547   return false;
1548 }
1549 
1550 bool CallAnalyzer::visitInsertValue(InsertValueInst &I) {
1551   // Constant folding for insert value is trivial.
1552   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1553         return ConstantExpr::getInsertValue(/*AggregateOperand*/ COps[0],
1554                                             /*InsertedValueOperand*/ COps[1],
1555                                             I.getIndices());
1556       }))
1557     return true;
1558 
1559   // SROA can look through these but give them a cost.
1560   return false;
1561 }
1562 
1563 /// Try to simplify a call site.
1564 ///
1565 /// Takes a concrete function and callsite and tries to actually simplify it by
1566 /// analyzing the arguments and call itself with instsimplify. Returns true if
1567 /// it has simplified the callsite to some other entity (a constant), making it
1568 /// free.
1569 bool CallAnalyzer::simplifyCallSite(Function *F, CallBase &Call) {
1570   // FIXME: Using the instsimplify logic directly for this is inefficient
1571   // because we have to continually rebuild the argument list even when no
1572   // simplifications can be performed. Until that is fixed with remapping
1573   // inside of instsimplify, directly constant fold calls here.
1574   if (!canConstantFoldCallTo(&Call, F))
1575     return false;
1576 
1577   // Try to re-map the arguments to constants.
1578   SmallVector<Constant *, 4> ConstantArgs;
1579   ConstantArgs.reserve(Call.arg_size());
1580   for (Value *I : Call.args()) {
1581     Constant *C = dyn_cast<Constant>(I);
1582     if (!C)
1583       C = dyn_cast_or_null<Constant>(SimplifiedValues.lookup(I));
1584     if (!C)
1585       return false; // This argument doesn't map to a constant.
1586 
1587     ConstantArgs.push_back(C);
1588   }
1589   if (Constant *C = ConstantFoldCall(&Call, F, ConstantArgs)) {
1590     SimplifiedValues[&Call] = C;
1591     return true;
1592   }
1593 
1594   return false;
1595 }
1596 
1597 bool CallAnalyzer::visitCallBase(CallBase &Call) {
1598   if (Call.hasFnAttr(Attribute::ReturnsTwice) &&
1599       !F.hasFnAttribute(Attribute::ReturnsTwice)) {
1600     // This aborts the entire analysis.
1601     ExposesReturnsTwice = true;
1602     return false;
1603   }
1604   if (isa<CallInst>(Call) && cast<CallInst>(Call).cannotDuplicate())
1605     ContainsNoDuplicateCall = true;
1606 
1607   Value *Callee = Call.getCalledOperand();
1608   Function *F = dyn_cast_or_null<Function>(Callee);
1609   bool IsIndirectCall = !F;
1610   if (IsIndirectCall) {
1611     // Check if this happens to be an indirect function call to a known function
1612     // in this inline context. If not, we've done all we can.
1613     F = dyn_cast_or_null<Function>(SimplifiedValues.lookup(Callee));
1614     if (!F) {
1615       onCallArgumentSetup(Call);
1616 
1617       if (!Call.onlyReadsMemory())
1618         disableLoadElimination();
1619       return Base::visitCallBase(Call);
1620     }
1621   }
1622 
1623   assert(F && "Expected a call to a known function");
1624 
1625   // When we have a concrete function, first try to simplify it directly.
1626   if (simplifyCallSite(F, Call))
1627     return true;
1628 
1629   // Next check if it is an intrinsic we know about.
1630   // FIXME: Lift this into part of the InstVisitor.
1631   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&Call)) {
1632     switch (II->getIntrinsicID()) {
1633     default:
1634       if (!Call.onlyReadsMemory() && !isAssumeLikeIntrinsic(II))
1635         disableLoadElimination();
1636       return Base::visitCallBase(Call);
1637 
1638     case Intrinsic::load_relative:
1639       onLoadRelativeIntrinsic();
1640       return false;
1641 
1642     case Intrinsic::memset:
1643     case Intrinsic::memcpy:
1644     case Intrinsic::memmove:
1645       disableLoadElimination();
1646       // SROA can usually chew through these intrinsics, but they aren't free.
1647       return false;
1648     case Intrinsic::icall_branch_funnel:
1649     case Intrinsic::localescape:
1650       HasUninlineableIntrinsic = true;
1651       return false;
1652     case Intrinsic::vastart:
1653       InitsVargArgs = true;
1654       return false;
1655     }
1656   }
1657 
1658   if (F == Call.getFunction()) {
1659     // This flag will fully abort the analysis, so don't bother with anything
1660     // else.
1661     IsRecursiveCall = true;
1662     return false;
1663   }
1664 
1665   if (TTI.isLoweredToCall(F)) {
1666     onLoweredCall(F, Call, IsIndirectCall);
1667   }
1668 
1669   if (!(Call.onlyReadsMemory() || (IsIndirectCall && F->onlyReadsMemory())))
1670     disableLoadElimination();
1671   return Base::visitCallBase(Call);
1672 }
1673 
1674 bool CallAnalyzer::visitReturnInst(ReturnInst &RI) {
1675   // At least one return instruction will be free after inlining.
1676   bool Free = !HasReturn;
1677   HasReturn = true;
1678   return Free;
1679 }
1680 
1681 bool CallAnalyzer::visitBranchInst(BranchInst &BI) {
1682   // We model unconditional branches as essentially free -- they really
1683   // shouldn't exist at all, but handling them makes the behavior of the
1684   // inliner more regular and predictable. Interestingly, conditional branches
1685   // which will fold away are also free.
1686   return BI.isUnconditional() || isa<ConstantInt>(BI.getCondition()) ||
1687          dyn_cast_or_null<ConstantInt>(
1688              SimplifiedValues.lookup(BI.getCondition()));
1689 }
1690 
1691 bool CallAnalyzer::visitSelectInst(SelectInst &SI) {
1692   bool CheckSROA = SI.getType()->isPointerTy();
1693   Value *TrueVal = SI.getTrueValue();
1694   Value *FalseVal = SI.getFalseValue();
1695 
1696   Constant *TrueC = dyn_cast<Constant>(TrueVal);
1697   if (!TrueC)
1698     TrueC = SimplifiedValues.lookup(TrueVal);
1699   Constant *FalseC = dyn_cast<Constant>(FalseVal);
1700   if (!FalseC)
1701     FalseC = SimplifiedValues.lookup(FalseVal);
1702   Constant *CondC =
1703       dyn_cast_or_null<Constant>(SimplifiedValues.lookup(SI.getCondition()));
1704 
1705   if (!CondC) {
1706     // Select C, X, X => X
1707     if (TrueC == FalseC && TrueC) {
1708       SimplifiedValues[&SI] = TrueC;
1709       return true;
1710     }
1711 
1712     if (!CheckSROA)
1713       return Base::visitSelectInst(SI);
1714 
1715     std::pair<Value *, APInt> TrueBaseAndOffset =
1716         ConstantOffsetPtrs.lookup(TrueVal);
1717     std::pair<Value *, APInt> FalseBaseAndOffset =
1718         ConstantOffsetPtrs.lookup(FalseVal);
1719     if (TrueBaseAndOffset == FalseBaseAndOffset && TrueBaseAndOffset.first) {
1720       ConstantOffsetPtrs[&SI] = TrueBaseAndOffset;
1721 
1722       if (auto *SROAArg = getSROAArgForValueOrNull(TrueVal))
1723         SROAArgValues[&SI] = SROAArg;
1724       return true;
1725     }
1726 
1727     return Base::visitSelectInst(SI);
1728   }
1729 
1730   // Select condition is a constant.
1731   Value *SelectedV = CondC->isAllOnesValue()
1732                          ? TrueVal
1733                          : (CondC->isNullValue()) ? FalseVal : nullptr;
1734   if (!SelectedV) {
1735     // Condition is a vector constant that is not all 1s or all 0s.  If all
1736     // operands are constants, ConstantExpr::getSelect() can handle the cases
1737     // such as select vectors.
1738     if (TrueC && FalseC) {
1739       if (auto *C = ConstantExpr::getSelect(CondC, TrueC, FalseC)) {
1740         SimplifiedValues[&SI] = C;
1741         return true;
1742       }
1743     }
1744     return Base::visitSelectInst(SI);
1745   }
1746 
1747   // Condition is either all 1s or all 0s. SI can be simplified.
1748   if (Constant *SelectedC = dyn_cast<Constant>(SelectedV)) {
1749     SimplifiedValues[&SI] = SelectedC;
1750     return true;
1751   }
1752 
1753   if (!CheckSROA)
1754     return true;
1755 
1756   std::pair<Value *, APInt> BaseAndOffset =
1757       ConstantOffsetPtrs.lookup(SelectedV);
1758   if (BaseAndOffset.first) {
1759     ConstantOffsetPtrs[&SI] = BaseAndOffset;
1760 
1761     if (auto *SROAArg = getSROAArgForValueOrNull(SelectedV))
1762       SROAArgValues[&SI] = SROAArg;
1763   }
1764 
1765   return true;
1766 }
1767 
1768 bool CallAnalyzer::visitSwitchInst(SwitchInst &SI) {
1769   // We model unconditional switches as free, see the comments on handling
1770   // branches.
1771   if (isa<ConstantInt>(SI.getCondition()))
1772     return true;
1773   if (Value *V = SimplifiedValues.lookup(SI.getCondition()))
1774     if (isa<ConstantInt>(V))
1775       return true;
1776 
1777   // Assume the most general case where the switch is lowered into
1778   // either a jump table, bit test, or a balanced binary tree consisting of
1779   // case clusters without merging adjacent clusters with the same
1780   // destination. We do not consider the switches that are lowered with a mix
1781   // of jump table/bit test/binary search tree. The cost of the switch is
1782   // proportional to the size of the tree or the size of jump table range.
1783   //
1784   // NB: We convert large switches which are just used to initialize large phi
1785   // nodes to lookup tables instead in simplify-cfg, so this shouldn't prevent
1786   // inlining those. It will prevent inlining in cases where the optimization
1787   // does not (yet) fire.
1788 
1789   unsigned JumpTableSize = 0;
1790   BlockFrequencyInfo *BFI = GetBFI ? &(GetBFI(F)) : nullptr;
1791   unsigned NumCaseCluster =
1792       TTI.getEstimatedNumberOfCaseClusters(SI, JumpTableSize, PSI, BFI);
1793 
1794   onFinalizeSwitch(JumpTableSize, NumCaseCluster);
1795   return false;
1796 }
1797 
1798 bool CallAnalyzer::visitIndirectBrInst(IndirectBrInst &IBI) {
1799   // We never want to inline functions that contain an indirectbr.  This is
1800   // incorrect because all the blockaddress's (in static global initializers
1801   // for example) would be referring to the original function, and this
1802   // indirect jump would jump from the inlined copy of the function into the
1803   // original function which is extremely undefined behavior.
1804   // FIXME: This logic isn't really right; we can safely inline functions with
1805   // indirectbr's as long as no other function or global references the
1806   // blockaddress of a block within the current function.
1807   HasIndirectBr = true;
1808   return false;
1809 }
1810 
1811 bool CallAnalyzer::visitResumeInst(ResumeInst &RI) {
1812   // FIXME: It's not clear that a single instruction is an accurate model for
1813   // the inline cost of a resume instruction.
1814   return false;
1815 }
1816 
1817 bool CallAnalyzer::visitCleanupReturnInst(CleanupReturnInst &CRI) {
1818   // FIXME: It's not clear that a single instruction is an accurate model for
1819   // the inline cost of a cleanupret instruction.
1820   return false;
1821 }
1822 
1823 bool CallAnalyzer::visitCatchReturnInst(CatchReturnInst &CRI) {
1824   // FIXME: It's not clear that a single instruction is an accurate model for
1825   // the inline cost of a catchret instruction.
1826   return false;
1827 }
1828 
1829 bool CallAnalyzer::visitUnreachableInst(UnreachableInst &I) {
1830   // FIXME: It might be reasonably to discount the cost of instructions leading
1831   // to unreachable as they have the lowest possible impact on both runtime and
1832   // code size.
1833   return true; // No actual code is needed for unreachable.
1834 }
1835 
1836 bool CallAnalyzer::visitInstruction(Instruction &I) {
1837   // Some instructions are free. All of the free intrinsics can also be
1838   // handled by SROA, etc.
1839   if (TargetTransformInfo::TCC_Free ==
1840       TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency))
1841     return true;
1842 
1843   // We found something we don't understand or can't handle. Mark any SROA-able
1844   // values in the operand list as no longer viable.
1845   for (User::op_iterator OI = I.op_begin(), OE = I.op_end(); OI != OE; ++OI)
1846     disableSROA(*OI);
1847 
1848   return false;
1849 }
1850 
1851 /// Analyze a basic block for its contribution to the inline cost.
1852 ///
1853 /// This method walks the analyzer over every instruction in the given basic
1854 /// block and accounts for their cost during inlining at this callsite. It
1855 /// aborts early if the threshold has been exceeded or an impossible to inline
1856 /// construct has been detected. It returns false if inlining is no longer
1857 /// viable, and true if inlining remains viable.
1858 InlineResult
1859 CallAnalyzer::analyzeBlock(BasicBlock *BB,
1860                            SmallPtrSetImpl<const Value *> &EphValues) {
1861   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
1862     // FIXME: Currently, the number of instructions in a function regardless of
1863     // our ability to simplify them during inline to constants or dead code,
1864     // are actually used by the vector bonus heuristic. As long as that's true,
1865     // we have to special case debug intrinsics here to prevent differences in
1866     // inlining due to debug symbols. Eventually, the number of unsimplified
1867     // instructions shouldn't factor into the cost computation, but until then,
1868     // hack around it here.
1869     if (isa<DbgInfoIntrinsic>(I))
1870       continue;
1871 
1872     // Skip ephemeral values.
1873     if (EphValues.count(&*I))
1874       continue;
1875 
1876     ++NumInstructions;
1877     if (isa<ExtractElementInst>(I) || I->getType()->isVectorTy())
1878       ++NumVectorInstructions;
1879 
1880     // If the instruction simplified to a constant, there is no cost to this
1881     // instruction. Visit the instructions using our InstVisitor to account for
1882     // all of the per-instruction logic. The visit tree returns true if we
1883     // consumed the instruction in any way, and false if the instruction's base
1884     // cost should count against inlining.
1885     onInstructionAnalysisStart(&*I);
1886 
1887     if (Base::visit(&*I))
1888       ++NumInstructionsSimplified;
1889     else
1890       onMissedSimplification();
1891 
1892     onInstructionAnalysisFinish(&*I);
1893     using namespace ore;
1894     // If the visit this instruction detected an uninlinable pattern, abort.
1895     InlineResult IR = InlineResult::success();
1896     if (IsRecursiveCall)
1897       IR = InlineResult::failure("recursive");
1898     else if (ExposesReturnsTwice)
1899       IR = InlineResult::failure("exposes returns twice");
1900     else if (HasDynamicAlloca)
1901       IR = InlineResult::failure("dynamic alloca");
1902     else if (HasIndirectBr)
1903       IR = InlineResult::failure("indirect branch");
1904     else if (HasUninlineableIntrinsic)
1905       IR = InlineResult::failure("uninlinable intrinsic");
1906     else if (InitsVargArgs)
1907       IR = InlineResult::failure("varargs");
1908     if (!IR.isSuccess()) {
1909       if (ORE)
1910         ORE->emit([&]() {
1911           return OptimizationRemarkMissed(DEBUG_TYPE, "NeverInline",
1912                                           &CandidateCall)
1913                  << NV("Callee", &F) << " has uninlinable pattern ("
1914                  << NV("InlineResult", IR.getFailureReason())
1915                  << ") and cost is not fully computed";
1916         });
1917       return IR;
1918     }
1919 
1920     // If the caller is a recursive function then we don't want to inline
1921     // functions which allocate a lot of stack space because it would increase
1922     // the caller stack usage dramatically.
1923     if (IsCallerRecursive &&
1924         AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller) {
1925       auto IR =
1926           InlineResult::failure("recursive and allocates too much stack space");
1927       if (ORE)
1928         ORE->emit([&]() {
1929           return OptimizationRemarkMissed(DEBUG_TYPE, "NeverInline",
1930                                           &CandidateCall)
1931                  << NV("Callee", &F) << " is "
1932                  << NV("InlineResult", IR.getFailureReason())
1933                  << ". Cost is not fully computed";
1934         });
1935       return IR;
1936     }
1937 
1938     if (shouldStop())
1939       return InlineResult::failure(
1940           "Call site analysis is not favorable to inlining.");
1941   }
1942 
1943   return InlineResult::success();
1944 }
1945 
1946 /// Compute the base pointer and cumulative constant offsets for V.
1947 ///
1948 /// This strips all constant offsets off of V, leaving it the base pointer, and
1949 /// accumulates the total constant offset applied in the returned constant. It
1950 /// returns 0 if V is not a pointer, and returns the constant '0' if there are
1951 /// no constant offsets applied.
1952 ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
1953   if (!V->getType()->isPointerTy())
1954     return nullptr;
1955 
1956   unsigned AS = V->getType()->getPointerAddressSpace();
1957   unsigned IntPtrWidth = DL.getIndexSizeInBits(AS);
1958   APInt Offset = APInt::getNullValue(IntPtrWidth);
1959 
1960   // Even though we don't look through PHI nodes, we could be called on an
1961   // instruction in an unreachable block, which may be on a cycle.
1962   SmallPtrSet<Value *, 4> Visited;
1963   Visited.insert(V);
1964   do {
1965     if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
1966       if (!GEP->isInBounds() || !accumulateGEPOffset(*GEP, Offset))
1967         return nullptr;
1968       V = GEP->getPointerOperand();
1969     } else if (Operator::getOpcode(V) == Instruction::BitCast) {
1970       V = cast<Operator>(V)->getOperand(0);
1971     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1972       if (GA->isInterposable())
1973         break;
1974       V = GA->getAliasee();
1975     } else {
1976       break;
1977     }
1978     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
1979   } while (Visited.insert(V).second);
1980 
1981   Type *IdxPtrTy = DL.getIndexType(V->getType());
1982   return cast<ConstantInt>(ConstantInt::get(IdxPtrTy, Offset));
1983 }
1984 
1985 /// Find dead blocks due to deleted CFG edges during inlining.
1986 ///
1987 /// If we know the successor of the current block, \p CurrBB, has to be \p
1988 /// NextBB, the other successors of \p CurrBB are dead if these successors have
1989 /// no live incoming CFG edges.  If one block is found to be dead, we can
1990 /// continue growing the dead block list by checking the successors of the dead
1991 /// blocks to see if all their incoming edges are dead or not.
1992 void CallAnalyzer::findDeadBlocks(BasicBlock *CurrBB, BasicBlock *NextBB) {
1993   auto IsEdgeDead = [&](BasicBlock *Pred, BasicBlock *Succ) {
1994     // A CFG edge is dead if the predecessor is dead or the predecessor has a
1995     // known successor which is not the one under exam.
1996     return (DeadBlocks.count(Pred) ||
1997             (KnownSuccessors[Pred] && KnownSuccessors[Pred] != Succ));
1998   };
1999 
2000   auto IsNewlyDead = [&](BasicBlock *BB) {
2001     // If all the edges to a block are dead, the block is also dead.
2002     return (!DeadBlocks.count(BB) &&
2003             llvm::all_of(predecessors(BB),
2004                          [&](BasicBlock *P) { return IsEdgeDead(P, BB); }));
2005   };
2006 
2007   for (BasicBlock *Succ : successors(CurrBB)) {
2008     if (Succ == NextBB || !IsNewlyDead(Succ))
2009       continue;
2010     SmallVector<BasicBlock *, 4> NewDead;
2011     NewDead.push_back(Succ);
2012     while (!NewDead.empty()) {
2013       BasicBlock *Dead = NewDead.pop_back_val();
2014       if (DeadBlocks.insert(Dead))
2015         // Continue growing the dead block lists.
2016         for (BasicBlock *S : successors(Dead))
2017           if (IsNewlyDead(S))
2018             NewDead.push_back(S);
2019     }
2020   }
2021 }
2022 
2023 /// Analyze a call site for potential inlining.
2024 ///
2025 /// Returns true if inlining this call is viable, and false if it is not
2026 /// viable. It computes the cost and adjusts the threshold based on numerous
2027 /// factors and heuristics. If this method returns false but the computed cost
2028 /// is below the computed threshold, then inlining was forcibly disabled by
2029 /// some artifact of the routine.
2030 InlineResult CallAnalyzer::analyze() {
2031   ++NumCallsAnalyzed;
2032 
2033   auto Result = onAnalysisStart();
2034   if (!Result.isSuccess())
2035     return Result;
2036 
2037   if (F.empty())
2038     return InlineResult::success();
2039 
2040   Function *Caller = CandidateCall.getFunction();
2041   // Check if the caller function is recursive itself.
2042   for (User *U : Caller->users()) {
2043     CallBase *Call = dyn_cast<CallBase>(U);
2044     if (Call && Call->getFunction() == Caller) {
2045       IsCallerRecursive = true;
2046       break;
2047     }
2048   }
2049 
2050   // Populate our simplified values by mapping from function arguments to call
2051   // arguments with known important simplifications.
2052   auto CAI = CandidateCall.arg_begin();
2053   for (Function::arg_iterator FAI = F.arg_begin(), FAE = F.arg_end();
2054        FAI != FAE; ++FAI, ++CAI) {
2055     assert(CAI != CandidateCall.arg_end());
2056     if (Constant *C = dyn_cast<Constant>(CAI))
2057       SimplifiedValues[&*FAI] = C;
2058 
2059     Value *PtrArg = *CAI;
2060     if (ConstantInt *C = stripAndComputeInBoundsConstantOffsets(PtrArg)) {
2061       ConstantOffsetPtrs[&*FAI] = std::make_pair(PtrArg, C->getValue());
2062 
2063       // We can SROA any pointer arguments derived from alloca instructions.
2064       if (auto *SROAArg = dyn_cast<AllocaInst>(PtrArg)) {
2065         SROAArgValues[&*FAI] = SROAArg;
2066         onInitializeSROAArg(SROAArg);
2067         EnabledSROAAllocas.insert(SROAArg);
2068       }
2069     }
2070   }
2071   NumConstantArgs = SimplifiedValues.size();
2072   NumConstantOffsetPtrArgs = ConstantOffsetPtrs.size();
2073   NumAllocaArgs = SROAArgValues.size();
2074 
2075   // FIXME: If a caller has multiple calls to a callee, we end up recomputing
2076   // the ephemeral values multiple times (and they're completely determined by
2077   // the callee, so this is purely duplicate work).
2078   SmallPtrSet<const Value *, 32> EphValues;
2079   CodeMetrics::collectEphemeralValues(&F, &GetAssumptionCache(F), EphValues);
2080 
2081   // The worklist of live basic blocks in the callee *after* inlining. We avoid
2082   // adding basic blocks of the callee which can be proven to be dead for this
2083   // particular call site in order to get more accurate cost estimates. This
2084   // requires a somewhat heavyweight iteration pattern: we need to walk the
2085   // basic blocks in a breadth-first order as we insert live successors. To
2086   // accomplish this, prioritizing for small iterations because we exit after
2087   // crossing our threshold, we use a small-size optimized SetVector.
2088   typedef SetVector<BasicBlock *, SmallVector<BasicBlock *, 16>,
2089                     SmallPtrSet<BasicBlock *, 16>>
2090       BBSetVector;
2091   BBSetVector BBWorklist;
2092   BBWorklist.insert(&F.getEntryBlock());
2093 
2094   // Note that we *must not* cache the size, this loop grows the worklist.
2095   for (unsigned Idx = 0; Idx != BBWorklist.size(); ++Idx) {
2096     if (shouldStop())
2097       break;
2098 
2099     BasicBlock *BB = BBWorklist[Idx];
2100     if (BB->empty())
2101       continue;
2102 
2103     // Disallow inlining a blockaddress with uses other than strictly callbr.
2104     // A blockaddress only has defined behavior for an indirect branch in the
2105     // same function, and we do not currently support inlining indirect
2106     // branches.  But, the inliner may not see an indirect branch that ends up
2107     // being dead code at a particular call site. If the blockaddress escapes
2108     // the function, e.g., via a global variable, inlining may lead to an
2109     // invalid cross-function reference.
2110     // FIXME: pr/39560: continue relaxing this overt restriction.
2111     if (BB->hasAddressTaken())
2112       for (User *U : BlockAddress::get(&*BB)->users())
2113         if (!isa<CallBrInst>(*U))
2114           return InlineResult::failure("blockaddress used outside of callbr");
2115 
2116     // Analyze the cost of this block. If we blow through the threshold, this
2117     // returns false, and we can bail on out.
2118     InlineResult IR = analyzeBlock(BB, EphValues);
2119     if (!IR.isSuccess())
2120       return IR;
2121 
2122     Instruction *TI = BB->getTerminator();
2123 
2124     // Add in the live successors by first checking whether we have terminator
2125     // that may be simplified based on the values simplified by this call.
2126     if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
2127       if (BI->isConditional()) {
2128         Value *Cond = BI->getCondition();
2129         if (ConstantInt *SimpleCond =
2130                 dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
2131           BasicBlock *NextBB = BI->getSuccessor(SimpleCond->isZero() ? 1 : 0);
2132           BBWorklist.insert(NextBB);
2133           KnownSuccessors[BB] = NextBB;
2134           findDeadBlocks(BB, NextBB);
2135           continue;
2136         }
2137       }
2138     } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
2139       Value *Cond = SI->getCondition();
2140       if (ConstantInt *SimpleCond =
2141               dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
2142         BasicBlock *NextBB = SI->findCaseValue(SimpleCond)->getCaseSuccessor();
2143         BBWorklist.insert(NextBB);
2144         KnownSuccessors[BB] = NextBB;
2145         findDeadBlocks(BB, NextBB);
2146         continue;
2147       }
2148     }
2149 
2150     // If we're unable to select a particular successor, just count all of
2151     // them.
2152     for (unsigned TIdx = 0, TSize = TI->getNumSuccessors(); TIdx != TSize;
2153          ++TIdx)
2154       BBWorklist.insert(TI->getSuccessor(TIdx));
2155 
2156     onBlockAnalyzed(BB);
2157   }
2158 
2159   bool OnlyOneCallAndLocalLinkage = F.hasLocalLinkage() && F.hasOneUse() &&
2160                                     &F == CandidateCall.getCalledFunction();
2161   // If this is a noduplicate call, we can still inline as long as
2162   // inlining this would cause the removal of the caller (so the instruction
2163   // is not actually duplicated, just moved).
2164   if (!OnlyOneCallAndLocalLinkage && ContainsNoDuplicateCall)
2165     return InlineResult::failure("noduplicate");
2166 
2167   return finalizeAnalysis();
2168 }
2169 
2170 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2171 /// Dump stats about this call's analysis.
2172 LLVM_DUMP_METHOD void InlineCostCallAnalyzer::dump() {
2173 #define DEBUG_PRINT_STAT(x) dbgs() << "      " #x ": " << x << "\n"
2174   if (PrintInstructionComments)
2175     F.print(dbgs(), &Writer);
2176   DEBUG_PRINT_STAT(NumConstantArgs);
2177   DEBUG_PRINT_STAT(NumConstantOffsetPtrArgs);
2178   DEBUG_PRINT_STAT(NumAllocaArgs);
2179   DEBUG_PRINT_STAT(NumConstantPtrCmps);
2180   DEBUG_PRINT_STAT(NumConstantPtrDiffs);
2181   DEBUG_PRINT_STAT(NumInstructionsSimplified);
2182   DEBUG_PRINT_STAT(NumInstructions);
2183   DEBUG_PRINT_STAT(SROACostSavings);
2184   DEBUG_PRINT_STAT(SROACostSavingsLost);
2185   DEBUG_PRINT_STAT(LoadEliminationCost);
2186   DEBUG_PRINT_STAT(ContainsNoDuplicateCall);
2187   DEBUG_PRINT_STAT(Cost);
2188   DEBUG_PRINT_STAT(Threshold);
2189 #undef DEBUG_PRINT_STAT
2190 }
2191 #endif
2192 
2193 /// Test that there are no attribute conflicts between Caller and Callee
2194 ///        that prevent inlining.
2195 static bool functionsHaveCompatibleAttributes(
2196     Function *Caller, Function *Callee, TargetTransformInfo &TTI,
2197     function_ref<const TargetLibraryInfo &(Function &)> &GetTLI) {
2198   // Note that CalleeTLI must be a copy not a reference. The legacy pass manager
2199   // caches the most recently created TLI in the TargetLibraryInfoWrapperPass
2200   // object, and always returns the same object (which is overwritten on each
2201   // GetTLI call). Therefore we copy the first result.
2202   auto CalleeTLI = GetTLI(*Callee);
2203   return TTI.areInlineCompatible(Caller, Callee) &&
2204          GetTLI(*Caller).areInlineCompatible(CalleeTLI,
2205                                              InlineCallerSupersetNoBuiltin) &&
2206          AttributeFuncs::areInlineCompatible(*Caller, *Callee);
2207 }
2208 
2209 int llvm::getCallsiteCost(CallBase &Call, const DataLayout &DL) {
2210   int Cost = 0;
2211   for (unsigned I = 0, E = Call.arg_size(); I != E; ++I) {
2212     if (Call.isByValArgument(I)) {
2213       // We approximate the number of loads and stores needed by dividing the
2214       // size of the byval type by the target's pointer size.
2215       PointerType *PTy = cast<PointerType>(Call.getArgOperand(I)->getType());
2216       unsigned TypeSize = DL.getTypeSizeInBits(PTy->getElementType());
2217       unsigned AS = PTy->getAddressSpace();
2218       unsigned PointerSize = DL.getPointerSizeInBits(AS);
2219       // Ceiling division.
2220       unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;
2221 
2222       // If it generates more than 8 stores it is likely to be expanded as an
2223       // inline memcpy so we take that as an upper bound. Otherwise we assume
2224       // one load and one store per word copied.
2225       // FIXME: The maxStoresPerMemcpy setting from the target should be used
2226       // here instead of a magic number of 8, but it's not available via
2227       // DataLayout.
2228       NumStores = std::min(NumStores, 8U);
2229 
2230       Cost += 2 * NumStores * InlineConstants::InstrCost;
2231     } else {
2232       // For non-byval arguments subtract off one instruction per call
2233       // argument.
2234       Cost += InlineConstants::InstrCost;
2235     }
2236   }
2237   // The call instruction also disappears after inlining.
2238   Cost += InlineConstants::InstrCost + InlineConstants::CallPenalty;
2239   return Cost;
2240 }
2241 
2242 InlineCost llvm::getInlineCost(
2243     CallBase &Call, const InlineParams &Params, TargetTransformInfo &CalleeTTI,
2244     function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
2245     function_ref<const TargetLibraryInfo &(Function &)> GetTLI,
2246     function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
2247     ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE) {
2248   return getInlineCost(Call, Call.getCalledFunction(), Params, CalleeTTI,
2249                        GetAssumptionCache, GetTLI, GetBFI, PSI, ORE);
2250 }
2251 
2252 Optional<int> llvm::getInliningCostEstimate(
2253     CallBase &Call, TargetTransformInfo &CalleeTTI,
2254     function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
2255     function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
2256     ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE) {
2257   const InlineParams Params = {/* DefaultThreshold*/ 0,
2258                                /*HintThreshold*/ {},
2259                                /*ColdThreshold*/ {},
2260                                /*OptSizeThreshold*/ {},
2261                                /*OptMinSizeThreshold*/ {},
2262                                /*HotCallSiteThreshold*/ {},
2263                                /*LocallyHotCallSiteThreshold*/ {},
2264                                /*ColdCallSiteThreshold*/ {},
2265                                /*ComputeFullInlineCost*/ true,
2266                                /*EnableDeferral*/ true};
2267 
2268   InlineCostCallAnalyzer CA(*Call.getCalledFunction(), Call, Params, CalleeTTI,
2269                             GetAssumptionCache, GetBFI, PSI, ORE, true,
2270                             /*IgnoreThreshold*/ true);
2271   auto R = CA.analyze();
2272   if (!R.isSuccess())
2273     return None;
2274   return CA.getCost();
2275 }
2276 
2277 Optional<InlineResult> llvm::getAttributeBasedInliningDecision(
2278     CallBase &Call, Function *Callee, TargetTransformInfo &CalleeTTI,
2279     function_ref<const TargetLibraryInfo &(Function &)> GetTLI) {
2280 
2281   // Cannot inline indirect calls.
2282   if (!Callee)
2283     return InlineResult::failure("indirect call");
2284 
2285   // Never inline calls with byval arguments that does not have the alloca
2286   // address space. Since byval arguments can be replaced with a copy to an
2287   // alloca, the inlined code would need to be adjusted to handle that the
2288   // argument is in the alloca address space (so it is a little bit complicated
2289   // to solve).
2290   unsigned AllocaAS = Callee->getParent()->getDataLayout().getAllocaAddrSpace();
2291   for (unsigned I = 0, E = Call.arg_size(); I != E; ++I)
2292     if (Call.isByValArgument(I)) {
2293       PointerType *PTy = cast<PointerType>(Call.getArgOperand(I)->getType());
2294       if (PTy->getAddressSpace() != AllocaAS)
2295         return InlineResult::failure("byval arguments without alloca"
2296                                      " address space");
2297     }
2298 
2299   // Calls to functions with always-inline attributes should be inlined
2300   // whenever possible.
2301   if (Call.hasFnAttr(Attribute::AlwaysInline)) {
2302     auto IsViable = isInlineViable(*Callee);
2303     if (IsViable.isSuccess())
2304       return InlineResult::success();
2305     return InlineResult::failure(IsViable.getFailureReason());
2306   }
2307 
2308   // Never inline functions with conflicting attributes (unless callee has
2309   // always-inline attribute).
2310   Function *Caller = Call.getCaller();
2311   if (!functionsHaveCompatibleAttributes(Caller, Callee, CalleeTTI, GetTLI))
2312     return InlineResult::failure("conflicting attributes");
2313 
2314   // Don't inline this call if the caller has the optnone attribute.
2315   if (Caller->hasOptNone())
2316     return InlineResult::failure("optnone attribute");
2317 
2318   // Don't inline a function that treats null pointer as valid into a caller
2319   // that does not have this attribute.
2320   if (!Caller->nullPointerIsDefined() && Callee->nullPointerIsDefined())
2321     return InlineResult::failure("nullptr definitions incompatible");
2322 
2323   // Don't inline functions which can be interposed at link-time.
2324   if (Callee->isInterposable())
2325     return InlineResult::failure("interposable");
2326 
2327   // Don't inline functions marked noinline.
2328   if (Callee->hasFnAttribute(Attribute::NoInline))
2329     return InlineResult::failure("noinline function attribute");
2330 
2331   // Don't inline call sites marked noinline.
2332   if (Call.isNoInline())
2333     return InlineResult::failure("noinline call site attribute");
2334 
2335   return None;
2336 }
2337 
2338 InlineCost llvm::getInlineCost(
2339     CallBase &Call, Function *Callee, const InlineParams &Params,
2340     TargetTransformInfo &CalleeTTI,
2341     function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
2342     function_ref<const TargetLibraryInfo &(Function &)> GetTLI,
2343     function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
2344     ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE) {
2345 
2346   auto UserDecision =
2347       llvm::getAttributeBasedInliningDecision(Call, Callee, CalleeTTI, GetTLI);
2348 
2349   if (UserDecision.hasValue()) {
2350     if (UserDecision->isSuccess())
2351       return llvm::InlineCost::getAlways("always inline attribute");
2352     return llvm::InlineCost::getNever(UserDecision->getFailureReason());
2353   }
2354 
2355   LLVM_DEBUG(llvm::dbgs() << "      Analyzing call of " << Callee->getName()
2356                           << "... (caller:" << Call.getCaller()->getName()
2357                           << ")\n");
2358 
2359   InlineCostCallAnalyzer CA(*Callee, Call, Params, CalleeTTI,
2360                             GetAssumptionCache, GetBFI, PSI, ORE);
2361   InlineResult ShouldInline = CA.analyze();
2362 
2363   LLVM_DEBUG(CA.dump());
2364 
2365   // Check if there was a reason to force inlining or no inlining.
2366   if (!ShouldInline.isSuccess() && CA.getCost() < CA.getThreshold())
2367     return InlineCost::getNever(ShouldInline.getFailureReason());
2368   if (ShouldInline.isSuccess() && CA.getCost() >= CA.getThreshold())
2369     return InlineCost::getAlways("empty function");
2370 
2371   return llvm::InlineCost::get(CA.getCost(), CA.getThreshold());
2372 }
2373 
2374 InlineResult llvm::isInlineViable(Function &F) {
2375   bool ReturnsTwice = F.hasFnAttribute(Attribute::ReturnsTwice);
2376   for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
2377     // Disallow inlining of functions which contain indirect branches.
2378     if (isa<IndirectBrInst>(BI->getTerminator()))
2379       return InlineResult::failure("contains indirect branches");
2380 
2381     // Disallow inlining of blockaddresses which are used by non-callbr
2382     // instructions.
2383     if (BI->hasAddressTaken())
2384       for (User *U : BlockAddress::get(&*BI)->users())
2385         if (!isa<CallBrInst>(*U))
2386           return InlineResult::failure("blockaddress used outside of callbr");
2387 
2388     for (auto &II : *BI) {
2389       CallBase *Call = dyn_cast<CallBase>(&II);
2390       if (!Call)
2391         continue;
2392 
2393       // Disallow recursive calls.
2394       if (&F == Call->getCalledFunction())
2395         return InlineResult::failure("recursive call");
2396 
2397       // Disallow calls which expose returns-twice to a function not previously
2398       // attributed as such.
2399       if (!ReturnsTwice && isa<CallInst>(Call) &&
2400           cast<CallInst>(Call)->canReturnTwice())
2401         return InlineResult::failure("exposes returns-twice attribute");
2402 
2403       if (Call->getCalledFunction())
2404         switch (Call->getCalledFunction()->getIntrinsicID()) {
2405         default:
2406           break;
2407         case llvm::Intrinsic::icall_branch_funnel:
2408           // Disallow inlining of @llvm.icall.branch.funnel because current
2409           // backend can't separate call targets from call arguments.
2410           return InlineResult::failure(
2411               "disallowed inlining of @llvm.icall.branch.funnel");
2412         case llvm::Intrinsic::localescape:
2413           // Disallow inlining functions that call @llvm.localescape. Doing this
2414           // correctly would require major changes to the inliner.
2415           return InlineResult::failure(
2416               "disallowed inlining of @llvm.localescape");
2417         case llvm::Intrinsic::vastart:
2418           // Disallow inlining of functions that initialize VarArgs with
2419           // va_start.
2420           return InlineResult::failure(
2421               "contains VarArgs initialized with va_start");
2422         }
2423     }
2424   }
2425 
2426   return InlineResult::success();
2427 }
2428 
2429 // APIs to create InlineParams based on command line flags and/or other
2430 // parameters.
2431 
2432 InlineParams llvm::getInlineParams(int Threshold) {
2433   InlineParams Params;
2434 
2435   // This field is the threshold to use for a callee by default. This is
2436   // derived from one or more of:
2437   //  * optimization or size-optimization levels,
2438   //  * a value passed to createFunctionInliningPass function, or
2439   //  * the -inline-threshold flag.
2440   //  If the -inline-threshold flag is explicitly specified, that is used
2441   //  irrespective of anything else.
2442   if (InlineThreshold.getNumOccurrences() > 0)
2443     Params.DefaultThreshold = InlineThreshold;
2444   else
2445     Params.DefaultThreshold = Threshold;
2446 
2447   // Set the HintThreshold knob from the -inlinehint-threshold.
2448   Params.HintThreshold = HintThreshold;
2449 
2450   // Set the HotCallSiteThreshold knob from the -hot-callsite-threshold.
2451   Params.HotCallSiteThreshold = HotCallSiteThreshold;
2452 
2453   // If the -locally-hot-callsite-threshold is explicitly specified, use it to
2454   // populate LocallyHotCallSiteThreshold. Later, we populate
2455   // Params.LocallyHotCallSiteThreshold from -locally-hot-callsite-threshold if
2456   // we know that optimization level is O3 (in the getInlineParams variant that
2457   // takes the opt and size levels).
2458   // FIXME: Remove this check (and make the assignment unconditional) after
2459   // addressing size regression issues at O2.
2460   if (LocallyHotCallSiteThreshold.getNumOccurrences() > 0)
2461     Params.LocallyHotCallSiteThreshold = LocallyHotCallSiteThreshold;
2462 
2463   // Set the ColdCallSiteThreshold knob from the
2464   // -inline-cold-callsite-threshold.
2465   Params.ColdCallSiteThreshold = ColdCallSiteThreshold;
2466 
2467   // Set the OptMinSizeThreshold and OptSizeThreshold params only if the
2468   // -inlinehint-threshold commandline option is not explicitly given. If that
2469   // option is present, then its value applies even for callees with size and
2470   // minsize attributes.
2471   // If the -inline-threshold is not specified, set the ColdThreshold from the
2472   // -inlinecold-threshold even if it is not explicitly passed. If
2473   // -inline-threshold is specified, then -inlinecold-threshold needs to be
2474   // explicitly specified to set the ColdThreshold knob
2475   if (InlineThreshold.getNumOccurrences() == 0) {
2476     Params.OptMinSizeThreshold = InlineConstants::OptMinSizeThreshold;
2477     Params.OptSizeThreshold = InlineConstants::OptSizeThreshold;
2478     Params.ColdThreshold = ColdThreshold;
2479   } else if (ColdThreshold.getNumOccurrences() > 0) {
2480     Params.ColdThreshold = ColdThreshold;
2481   }
2482   return Params;
2483 }
2484 
2485 InlineParams llvm::getInlineParams() {
2486   return getInlineParams(DefaultThreshold);
2487 }
2488 
2489 // Compute the default threshold for inlining based on the opt level and the
2490 // size opt level.
2491 static int computeThresholdFromOptLevels(unsigned OptLevel,
2492                                          unsigned SizeOptLevel) {
2493   if (OptLevel > 2)
2494     return InlineConstants::OptAggressiveThreshold;
2495   if (SizeOptLevel == 1) // -Os
2496     return InlineConstants::OptSizeThreshold;
2497   if (SizeOptLevel == 2) // -Oz
2498     return InlineConstants::OptMinSizeThreshold;
2499   return DefaultThreshold;
2500 }
2501 
2502 InlineParams llvm::getInlineParams(unsigned OptLevel, unsigned SizeOptLevel) {
2503   auto Params =
2504       getInlineParams(computeThresholdFromOptLevels(OptLevel, SizeOptLevel));
2505   // At O3, use the value of -locally-hot-callsite-threshold option to populate
2506   // Params.LocallyHotCallSiteThreshold. Below O3, this flag has effect only
2507   // when it is specified explicitly.
2508   if (OptLevel > 2)
2509     Params.LocallyHotCallSiteThreshold = LocallyHotCallSiteThreshold;
2510   return Params;
2511 }
2512