//===- InlineCost.cpp - Cost analysis for inliner -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements inline cost analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/InlineCost.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/AssemblyAnnotationWriter.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "inline-cost"

STATISTIC(NumCallsAnalyzed, "Number of call sites analyzed");

static cl::opt<int>
    DefaultThreshold("inlinedefault-threshold", cl::Hidden, cl::init(225),
                     cl::ZeroOrMore,
                     cl::desc("Default amount of inlining to perform"));

static cl::opt<bool> PrintInstructionComments(
    "print-instruction-comments", cl::Hidden, cl::init(false),
    cl::desc("Prints comments for instructions based on inline cost analysis"));

static cl::opt<int> InlineThreshold(
    "inline-threshold", cl::Hidden, cl::init(225), cl::ZeroOrMore,
    cl::desc("Control the amount of inlining to perform (default = 225)"));

static cl::opt<int> HintThreshold(
    "inlinehint-threshold", cl::Hidden, cl::init(325), cl::ZeroOrMore,
    cl::desc("Threshold for inlining functions with inline hint"));

static cl::opt<int>
    ColdCallSiteThreshold("inline-cold-callsite-threshold", cl::Hidden,
                          cl::init(45), cl::ZeroOrMore,
                          cl::desc("Threshold for inlining cold callsites"));

// We introduce this threshold to help performance of instrumentation-based
// PGO before we actually hook up the inliner with analysis passes such as BPI
// and BFI.
static cl::opt<int> ColdThreshold(
    "inlinecold-threshold", cl::Hidden, cl::init(45), cl::ZeroOrMore,
    cl::desc("Threshold for inlining functions with cold attribute"));

static cl::opt<int>
    HotCallSiteThreshold("hot-callsite-threshold", cl::Hidden, cl::init(3000),
                         cl::ZeroOrMore,
                         cl::desc("Threshold for hot callsites"));

static cl::opt<int> LocallyHotCallSiteThreshold(
    "locally-hot-callsite-threshold", cl::Hidden, cl::init(525), cl::ZeroOrMore,
    cl::desc("Threshold for locally hot callsites"));

static cl::opt<int> ColdCallSiteRelFreq(
    "cold-callsite-rel-freq", cl::Hidden, cl::init(2), cl::ZeroOrMore,
    cl::desc("Maximum block frequency, expressed as a percentage of caller's "
             "entry frequency, for a callsite to be cold in the absence of "
             "profile information."));

static cl::opt<int> HotCallSiteRelFreq(
    "hot-callsite-rel-freq", cl::Hidden, cl::init(60), cl::ZeroOrMore,
    cl::desc("Minimum block frequency, expressed as a multiple of caller's "
             "entry frequency, for a callsite to be hot in the absence of "
             "profile information."));

static cl::opt<bool> OptComputeFullInlineCost(
    "inline-cost-full", cl::Hidden, cl::init(false), cl::ZeroOrMore,
    cl::desc("Compute the full inline cost of a call site even when the cost "
             "exceeds the threshold."));

static cl::opt<bool> InlineCallerSupersetNoBuiltin(
    "inline-caller-superset-nobuiltin", cl::Hidden, cl::init(true),
    cl::ZeroOrMore,
    cl::desc("Allow inlining when caller has a superset of callee's nobuiltin "
             "attributes."));
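
// Illustrative note: the thresholds above are ordinary cl::opt flags, so they
// can be tuned from the command line when experimenting. For example
// (hypothetical invocation; the exact pass-pipeline spelling varies by LLVM
// version):
//   opt -passes='cgscc(inline)' -inline-threshold=500 -S input.ll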

namespace {
class InlineCostCallAnalyzer;

// This struct is used to store information about the inline cost of a
// particular instruction.
struct InstructionCostDetail {
  int CostBefore = 0;
  int CostAfter = 0;
  int ThresholdBefore = 0;
  int ThresholdAfter = 0;

  int getThresholdDelta() const { return ThresholdAfter - ThresholdBefore; }

  int getCostDelta() const { return CostAfter - CostBefore; }

  bool hasThresholdChanged() const { return ThresholdAfter != ThresholdBefore; }
};

class InlineCostAnnotationWriter : public AssemblyAnnotationWriter {
private:
  InlineCostCallAnalyzer *const ICCA;

public:
  InlineCostAnnotationWriter(InlineCostCallAnalyzer *ICCA) : ICCA(ICCA) {}
  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override;
};

/// Carry out call site analysis, in order to evaluate inlinability.
/// NOTE: the type is currently used as implementation detail of functions such
/// as llvm::getInlineCost. Note the function_ref constructor parameters - the
/// expectation is that they come from the outer scope, from the wrapper
/// functions. If we want to support constructing CallAnalyzer objects where
/// lambdas are provided inline at construction, or where the object needs to
/// otherwise survive past the scope of the provided functions, we need to
/// revisit the argument types.
class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
  typedef InstVisitor<CallAnalyzer, bool> Base;
  friend class InstVisitor<CallAnalyzer, bool>;

protected:
  virtual ~CallAnalyzer() {}
  /// The TargetTransformInfo available for this compilation.
  const TargetTransformInfo &TTI;

  /// Getter for the cache of @llvm.assume intrinsics.
  function_ref<AssumptionCache &(Function &)> GetAssumptionCache;

  /// Getter for BlockFrequencyInfo.
  function_ref<BlockFrequencyInfo &(Function &)> GetBFI;

  /// Profile summary information.
  ProfileSummaryInfo *PSI;

  /// The called function.
  Function &F;

  // Cache the DataLayout since we use it a lot.
  const DataLayout &DL;

  /// The OptimizationRemarkEmitter available for this compilation.
  OptimizationRemarkEmitter *ORE;

  /// The candidate callsite being analyzed. Please do not use this to do
  /// analysis in the caller function; we want the inline cost query to be
  /// easily cacheable. Instead, use the cover function paramHasAttr.
  CallBase &CandidateCall;

  /// Extension points for handling callsite features.
  /// Called after a basic block was analyzed.
  virtual void onBlockAnalyzed(const BasicBlock *BB) {}

  /// Called before an instruction was analyzed.
  virtual void onInstructionAnalysisStart(const Instruction *I) {}

  /// Called after an instruction was analyzed.
  virtual void onInstructionAnalysisFinish(const Instruction *I) {}

  /// Called at the end of the analysis of the callsite. Return the outcome of
  /// the analysis, i.e. 'InlineResult::success()' if the inlining may happen,
  /// or the reason it can't.
  virtual InlineResult finalizeAnalysis() { return InlineResult::success(); }
  /// Called when we're about to start processing a basic block, and every time
  /// we are done processing an instruction. Return true if there is no point in
  /// continuing the analysis (e.g. we've already determined the call site is
  /// too expensive to inline).
  virtual bool shouldStop() { return false; }

  /// Called before the analysis of the callee body starts (with callsite
  /// contexts propagated). It checks callsite-specific information. Return a
  /// reason analysis can't continue if that's the case, or 'true' if it may
  /// continue.
  virtual InlineResult onAnalysisStart() { return InlineResult::success(); }
  /// Called if the analysis engine decides SROA cannot be done for the given
  /// alloca.
  virtual void onDisableSROA(AllocaInst *Arg) {}

  /// Called when the analysis engine determines load elimination won't
  /// happen.
  virtual void onDisableLoadElimination() {}

  /// Called to account for a call.
  virtual void onCallPenalty() {}

  /// Called to account for the expectation the inlining would result in a load
  /// elimination.
  virtual void onLoadEliminationOpportunity() {}

  /// Called to account for the cost of argument setup for the Call in the
  /// callee's body (not the callsite currently under analysis).
  virtual void onCallArgumentSetup(const CallBase &Call) {}

  /// Called to account for a load relative intrinsic.
  virtual void onLoadRelativeIntrinsic() {}

  /// Called to account for a lowered call.
  virtual void onLoweredCall(Function *F, CallBase &Call, bool IsIndirectCall) {
  }

  /// Account for a jump table of given size. Return false to stop further
  /// processing of the switch instruction.
  virtual bool onJumpTable(unsigned JumpTableSize) { return true; }

  /// Account for a case cluster of given size. Return false to stop further
  /// processing of the instruction.
  virtual bool onCaseCluster(unsigned NumCaseCluster) { return true; }

  /// Called at the end of processing a switch instruction, with the given
  /// number of case clusters.
  virtual void onFinalizeSwitch(unsigned JumpTableSize,
                                unsigned NumCaseCluster) {}

  /// Called to account for any other instruction not specifically accounted
  /// for.
  virtual void onMissedSimplification() {}

  /// Start accounting potential benefits due to SROA for the given alloca.
  virtual void onInitializeSROAArg(AllocaInst *Arg) {}

  /// Account SROA savings for the AllocaInst value.
  virtual void onAggregateSROAUse(AllocaInst *V) {}

  bool handleSROA(Value *V, bool DoNotDisable) {
    // Check for SROA candidates in comparisons.
    if (auto *SROAArg = getSROAArgForValueOrNull(V)) {
      if (DoNotDisable) {
        onAggregateSROAUse(SROAArg);
        return true;
      }
      disableSROAForArg(SROAArg);
    }
    return false;
  }

  bool IsCallerRecursive = false;
  bool IsRecursiveCall = false;
  bool ExposesReturnsTwice = false;
  bool HasDynamicAlloca = false;
  bool ContainsNoDuplicateCall = false;
  bool HasReturn = false;
  bool HasIndirectBr = false;
  bool HasUninlineableIntrinsic = false;
  bool InitsVargArgs = false;

  /// Number of bytes allocated statically by the callee.
  uint64_t AllocatedSize = 0;
  unsigned NumInstructions = 0;
  unsigned NumVectorInstructions = 0;

  /// While we walk the potentially-inlined instructions, we build up and
  /// maintain a mapping of simplified values specific to this callsite. The
  /// idea is to propagate any special information we have about arguments to
  /// this call through the inlinable section of the function, and account for
  /// likely simplifications post-inlining. The most important aspect we track
  /// is CFG altering simplifications -- when we prove a basic block dead, that
  /// can cause dramatic shifts in the cost of inlining a function.
  DenseMap<Value *, Constant *> SimplifiedValues;

  /// Keep track of the values which map back (through function arguments) to
  /// allocas on the caller stack which could be simplified through SROA.
  DenseMap<Value *, AllocaInst *> SROAArgValues;

  /// Keep track of Allocas for which we believe we may get SROA optimization.
  DenseSet<AllocaInst *> EnabledSROAAllocas;

  /// Keep track of values which map to a pointer base and constant offset.
  DenseMap<Value *, std::pair<Value *, APInt>> ConstantOffsetPtrs;

  /// Keep track of dead blocks due to the constant arguments.
  SetVector<BasicBlock *> DeadBlocks;

  /// The mapping of the blocks to their known unique successors due to the
  /// constant arguments.
  DenseMap<BasicBlock *, BasicBlock *> KnownSuccessors;

  /// Model the elimination of repeated loads that is expected to happen
  /// whenever we simplify away the stores that would otherwise cause them to be
  /// loads.
  bool EnableLoadElimination;
  SmallPtrSet<Value *, 16> LoadAddrSet;

  AllocaInst *getSROAArgForValueOrNull(Value *V) const {
    auto It = SROAArgValues.find(V);
    if (It == SROAArgValues.end() || EnabledSROAAllocas.count(It->second) == 0)
      return nullptr;
    return It->second;
  }

  // Custom simplification helper routines.
  bool isAllocaDerivedArg(Value *V);
  void disableSROAForArg(AllocaInst *SROAArg);
  void disableSROA(Value *V);
  void findDeadBlocks(BasicBlock *CurrBB, BasicBlock *NextBB);
  void disableLoadElimination();
  bool isGEPFree(GetElementPtrInst &GEP);
  bool canFoldInboundsGEP(GetElementPtrInst &I);
  bool accumulateGEPOffset(GEPOperator &GEP, APInt &Offset);
  bool simplifyCallSite(Function *F, CallBase &Call);
  template <typename Callable>
  bool simplifyInstruction(Instruction &I, Callable Evaluate);
  ConstantInt *stripAndComputeInBoundsConstantOffsets(Value *&V);

  /// Return true if the given argument to the function being considered for
  /// inlining has the given attribute set either at the call site or the
  /// function declaration.  Primarily used to inspect call site specific
  /// attributes since these can be more precise than the ones on the callee
  /// itself.
  bool paramHasAttr(Argument *A, Attribute::AttrKind Attr);

  /// Return true if the given value is known non null within the callee if
  /// inlined through this particular callsite.
  bool isKnownNonNullInCallee(Value *V);

  /// Return true if size growth is allowed when inlining the callee at \p Call.
  bool allowSizeGrowth(CallBase &Call);

  // Custom analysis routines.
  InlineResult analyzeBlock(BasicBlock *BB,
                            SmallPtrSetImpl<const Value *> &EphValues);

  // Disable several entry points to the visitor so we don't accidentally use
  // them by declaring but not defining them here.
  void visit(Module *);
  void visit(Module &);
  void visit(Function *);
  void visit(Function &);
  void visit(BasicBlock *);
  void visit(BasicBlock &);

  // Provide base case for our instruction visit.
  bool visitInstruction(Instruction &I);

  // Our visit overrides.
  bool visitAlloca(AllocaInst &I);
  bool visitPHI(PHINode &I);
  bool visitGetElementPtr(GetElementPtrInst &I);
  bool visitBitCast(BitCastInst &I);
  bool visitPtrToInt(PtrToIntInst &I);
  bool visitIntToPtr(IntToPtrInst &I);
  bool visitCastInst(CastInst &I);
  bool visitUnaryInstruction(UnaryInstruction &I);
  bool visitCmpInst(CmpInst &I);
  bool visitSub(BinaryOperator &I);
  bool visitBinaryOperator(BinaryOperator &I);
  bool visitFNeg(UnaryOperator &I);
  bool visitLoad(LoadInst &I);
  bool visitStore(StoreInst &I);
  bool visitExtractValue(ExtractValueInst &I);
  bool visitInsertValue(InsertValueInst &I);
  bool visitCallBase(CallBase &Call);
  bool visitReturnInst(ReturnInst &RI);
  bool visitBranchInst(BranchInst &BI);
  bool visitSelectInst(SelectInst &SI);
  bool visitSwitchInst(SwitchInst &SI);
  bool visitIndirectBrInst(IndirectBrInst &IBI);
  bool visitResumeInst(ResumeInst &RI);
  bool visitCleanupReturnInst(CleanupReturnInst &RI);
  bool visitCatchReturnInst(CatchReturnInst &RI);
  bool visitUnreachableInst(UnreachableInst &I);

public:
  CallAnalyzer(
      Function &Callee, CallBase &Call, const TargetTransformInfo &TTI,
      function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
      function_ref<BlockFrequencyInfo &(Function &)> GetBFI = nullptr,
      ProfileSummaryInfo *PSI = nullptr,
      OptimizationRemarkEmitter *ORE = nullptr)
      : TTI(TTI), GetAssumptionCache(GetAssumptionCache), GetBFI(GetBFI),
        PSI(PSI), F(Callee), DL(F.getParent()->getDataLayout()), ORE(ORE),
        CandidateCall(Call), EnableLoadElimination(true) {}

  InlineResult analyze();

  // Keep a bunch of stats about the cost savings found so we can print them
  // out when debugging.
  unsigned NumConstantArgs = 0;
  unsigned NumConstantOffsetPtrArgs = 0;
  unsigned NumAllocaArgs = 0;
  unsigned NumConstantPtrCmps = 0;
  unsigned NumConstantPtrDiffs = 0;
  unsigned NumInstructionsSimplified = 0;

  void dump();
};

/// FIXME: if it is necessary to derive from InlineCostCallAnalyzer, note
/// the FIXME in onLoweredCall when instantiating an InlineCostCallAnalyzer.
class InlineCostCallAnalyzer final : public CallAnalyzer {
  const int CostUpperBound = INT_MAX - InlineConstants::InstrCost - 1;
  const bool ComputeFullInlineCost;
  int LoadEliminationCost = 0;
  /// Bonus to be applied when percentage of vector instructions in callee is
  /// high (see more details in updateThreshold).
  int VectorBonus = 0;
  /// Bonus to be applied when the callee has only one reachable basic block.
  int SingleBBBonus = 0;

  /// Tunable parameters that control the analysis.
  const InlineParams &Params;

  // This DenseMap stores the delta change in cost and threshold after
  // accounting for the given instruction. The map is filled only when the
  // flag PrintInstructionComments is on.
  DenseMap<const Instruction *, InstructionCostDetail> InstructionCostDetailMap;

  /// Upper bound for the inlining cost. Bonuses are being applied to account
  /// for speculative "expected profit" of the inlining decision.
  int Threshold = 0;

  /// Attempt to evaluate indirect calls to boost their inline cost.
  const bool BoostIndirectCalls;

  /// Ignore the threshold when finalizing analysis.
  const bool IgnoreThreshold;

  /// Inlining cost measured in abstract units, accounts for all the
  /// instructions expected to be executed for a given function invocation.
  /// Instructions that are statically proven to be dead based on call-site
  /// arguments are not counted here.
  int Cost = 0;

  bool SingleBB = true;

  unsigned SROACostSavings = 0;
  unsigned SROACostSavingsLost = 0;

  /// The mapping of caller Alloca values to their accumulated cost savings. If
  /// we have to disable SROA for one of the allocas, this tells us how much
  /// cost must be added.
  DenseMap<AllocaInst *, int> SROAArgCosts;

  /// Return true if \p Call is a cold callsite.
  bool isColdCallSite(CallBase &Call, BlockFrequencyInfo *CallerBFI);

  /// Update Threshold based on callsite properties such as callee
  /// attributes and callee hotness for PGO builds. The Callee is explicitly
  /// passed to support analyzing indirect calls whose target is inferred by
  /// analysis.
  void updateThreshold(CallBase &Call, Function &Callee);
  /// Return a higher threshold if \p Call is a hot callsite.
  Optional<int> getHotCallSiteThreshold(CallBase &Call,
                                        BlockFrequencyInfo *CallerBFI);

  /// Handle a capped 'int' increment for Cost.
  void addCost(int64_t Inc, int64_t UpperBound = INT_MAX) {
    assert(UpperBound > 0 && UpperBound <= INT_MAX && "invalid upper bound");
    Cost = (int)std::min(UpperBound, Cost + Inc);
  }

  void onDisableSROA(AllocaInst *Arg) override {
    auto CostIt = SROAArgCosts.find(Arg);
    if (CostIt == SROAArgCosts.end())
      return;
    addCost(CostIt->second);
    SROACostSavings -= CostIt->second;
    SROACostSavingsLost += CostIt->second;
    SROAArgCosts.erase(CostIt);
  }

  void onDisableLoadElimination() override {
    addCost(LoadEliminationCost);
    LoadEliminationCost = 0;
  }
  void onCallPenalty() override { addCost(InlineConstants::CallPenalty); }
  void onCallArgumentSetup(const CallBase &Call) override {
    // Pay the price of the argument setup. We account for the average 1
    // instruction per call argument setup here.
    addCost(Call.arg_size() * InlineConstants::InstrCost);
  }
  void onLoadRelativeIntrinsic() override {
    // This is normally lowered to 4 LLVM instructions.
    addCost(3 * InlineConstants::InstrCost);
  }
  void onLoweredCall(Function *F, CallBase &Call,
                     bool IsIndirectCall) override {
    // We account for the average 1 instruction per call argument setup here.
    addCost(Call.arg_size() * InlineConstants::InstrCost);

    // If we have a constant that we are calling as a function, we can peer
    // through it and see the function target. This happens not infrequently
    // during devirtualization and so we want to give it a hefty bonus for
    // inlining, but cap that bonus in the event that inlining wouldn't pan out.
    // Pretend to inline the function, with a custom threshold.
    if (IsIndirectCall && BoostIndirectCalls) {
      auto IndirectCallParams = Params;
      IndirectCallParams.DefaultThreshold =
          InlineConstants::IndirectCallThreshold;
      /// FIXME: if InlineCostCallAnalyzer is derived from, this may need
      /// to instantiate the derived class.
      InlineCostCallAnalyzer CA(*F, Call, IndirectCallParams, TTI,
                                GetAssumptionCache, GetBFI, PSI, ORE, false);
      if (CA.analyze().isSuccess()) {
        // We were able to inline the indirect call! Subtract the cost from the
        // threshold to get the bonus we want to apply, but don't go below zero.
        Cost -= std::max(0, CA.getThreshold() - CA.getCost());
      }
    } else
      // Otherwise simply add the cost for merely making the call.
      addCost(InlineConstants::CallPenalty);
  }

  void onFinalizeSwitch(unsigned JumpTableSize,
                        unsigned NumCaseCluster) override {
    // If suitable for a jump table, consider the cost for the table size and
    // branch to destination.
    // The cost increase here may be large, so cap it at CostUpperBound.
    if (JumpTableSize) {
      int64_t JTCost = (int64_t)JumpTableSize * InlineConstants::InstrCost +
                       4 * InlineConstants::InstrCost;

      addCost(JTCost, (int64_t)CostUpperBound);
      return;
    }
    // Considering forming a binary search, we should find the number of nodes
    // which is same as the number of comparisons when lowered. For a given
    // number of clusters, n, we can define a recursive function, f(n), to find
    // the number of nodes in the tree. The recursion is:
    // f(n) = 1 + f(n/2) + f(n - n/2), when n > 3,
    // and f(n) = n, when n <= 3.
    // This will lead to a binary tree where the leaf should be either f(2) or
    // f(3) when n > 3. So, the number of comparisons from leaves should be n,
    // while the number of comparisons from non-leaf nodes should be:
    //   2^(log2(n) - 1) - 1
    //   = 2^log2(n) * 2^-1 - 1
    //   = n / 2 - 1.
    // Considering comparisons from leaf and non-leaf nodes, we can estimate the
    // number of comparisons in a simple closed form:
    //   n + n / 2 - 1 = n * 3 / 2 - 1
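    //
    // Worked example (illustrative): for n = 20 case clusters, the closed form
    // gives 20 * 3 / 2 - 1 = 29 expected comparisons, and each comparison is
    // modeled below as one compare plus one conditional branch, i.e.
    // 29 * 2 * InstrCost in total.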
    if (NumCaseCluster <= 3) {
      // Suppose a comparison includes one compare and one conditional branch.
      addCost(NumCaseCluster * 2 * InlineConstants::InstrCost);
      return;
    }

    int64_t ExpectedNumberOfCompare = 3 * (int64_t)NumCaseCluster / 2 - 1;
    int64_t SwitchCost =
        ExpectedNumberOfCompare * 2 * InlineConstants::InstrCost;

    addCost(SwitchCost, (int64_t)CostUpperBound);
  }
  void onMissedSimplification() override {
    addCost(InlineConstants::InstrCost);
  }

  void onInitializeSROAArg(AllocaInst *Arg) override {
    assert(Arg != nullptr &&
           "Should not initialize SROA costs for null value.");
    SROAArgCosts[Arg] = 0;
  }

  void onAggregateSROAUse(AllocaInst *SROAArg) override {
    auto CostIt = SROAArgCosts.find(SROAArg);
    assert(CostIt != SROAArgCosts.end() &&
           "expected this argument to have a cost");
    CostIt->second += InlineConstants::InstrCost;
    SROACostSavings += InlineConstants::InstrCost;
  }

  void onBlockAnalyzed(const BasicBlock *BB) override {
    auto *TI = BB->getTerminator();
    // If we had any successors at this point, then post-inlining is likely to
    // have them as well. Note that we assume any basic blocks which existed
    // due to branches or switches which folded above will also fold after
    // inlining.
    if (SingleBB && TI->getNumSuccessors() > 1) {
      // Take off the bonus we applied to the threshold.
      Threshold -= SingleBBBonus;
      SingleBB = false;
    }
  }

  void onInstructionAnalysisStart(const Instruction *I) override {
    // This function is called to store the initial cost of inlining before
    // the given instruction was assessed.
    if (!PrintInstructionComments)
      return;
    InstructionCostDetailMap[I].CostBefore = Cost;
    InstructionCostDetailMap[I].ThresholdBefore = Threshold;
  }

  void onInstructionAnalysisFinish(const Instruction *I) override {
    // This function is called to find new values of cost and threshold after
    // the instruction has been assessed.
    if (!PrintInstructionComments)
      return;
    InstructionCostDetailMap[I].CostAfter = Cost;
    InstructionCostDetailMap[I].ThresholdAfter = Threshold;
  }

  InlineResult finalizeAnalysis() override {
    // Loops generally act a lot like calls in that they act like barriers to
    // movement, require a certain amount of setup, etc. So when optimising for
    // size, we penalise any call sites that perform loops. We do this after all
    // other costs here, so will likely only be dealing with relatively small
    // functions (and hence DT and LI will hopefully be cheap).
    auto *Caller = CandidateCall.getFunction();
    if (Caller->hasMinSize()) {
      DominatorTree DT(F);
      LoopInfo LI(DT);
      int NumLoops = 0;
      for (Loop *L : LI) {
        // Ignore loops that will not be executed
        if (DeadBlocks.count(L->getHeader()))
          continue;
        NumLoops++;
      }
      addCost(NumLoops * InlineConstants::CallPenalty);
    }

    // We applied the maximum possible vector bonus at the beginning. Now,
    // subtract the excess bonus, if any, from the Threshold before
    // comparing against Cost.
    if (NumVectorInstructions <= NumInstructions / 10)
      Threshold -= VectorBonus;
    else if (NumVectorInstructions <= NumInstructions / 2)
      Threshold -= VectorBonus / 2;

    if (IgnoreThreshold || Cost < std::max(1, Threshold))
      return InlineResult::success();
    return InlineResult::failure("Cost over threshold.");
  }
  bool shouldStop() override {
    // Bail out the moment we cross the threshold. This means we'll under-count
    // the cost, but only when undercounting doesn't matter.
    return !IgnoreThreshold && Cost >= Threshold && !ComputeFullInlineCost;
  }

  void onLoadEliminationOpportunity() override {
    LoadEliminationCost += InlineConstants::InstrCost;
  }

  InlineResult onAnalysisStart() override {
    // Perform some tweaks to the cost and threshold based on the direct
    // callsite information.

    // We want to more aggressively inline vector-dense kernels, so up the
    // threshold, and we'll lower it if the % of vector instructions gets too
    // low. Note that these bonuses are somewhat arbitrary and evolved over
    // time by accident as much as because they are principled bonuses.
    //
    // FIXME: It would be nice to remove all such bonuses. At least it would be
    // nice to base the bonus values on something more scientific.
    assert(NumInstructions == 0);
    assert(NumVectorInstructions == 0);

    // Update the threshold based on callsite properties
    updateThreshold(CandidateCall, F);

    // While Threshold depends on commandline options that can take negative
    // values, we want to enforce the invariant that the computed threshold and
    // bonuses are non-negative.
    assert(Threshold >= 0);
    assert(SingleBBBonus >= 0);
    assert(VectorBonus >= 0);

    // Speculatively apply all possible bonuses to Threshold. If cost exceeds
    // this Threshold any time, and cost cannot decrease, we can stop processing
    // the rest of the function body.
    Threshold += (SingleBBBonus + VectorBonus);

    // Give out bonuses for the callsite, as the instructions setting them up
    // will be gone after inlining.
    addCost(-getCallsiteCost(this->CandidateCall, DL));

    // If this function uses the coldcc calling convention, prefer not to inline
    // it.
    if (F.getCallingConv() == CallingConv::Cold)
      Cost += InlineConstants::ColdccPenalty;

    // Check if we're done. This can happen due to bonuses and penalties.
    if (Cost >= Threshold && !ComputeFullInlineCost)
      return InlineResult::failure("high cost");

    return InlineResult::success();
  }

public:
  InlineCostCallAnalyzer(
      Function &Callee, CallBase &Call, const InlineParams &Params,
      const TargetTransformInfo &TTI,
      function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
      function_ref<BlockFrequencyInfo &(Function &)> GetBFI = nullptr,
      ProfileSummaryInfo *PSI = nullptr,
      OptimizationRemarkEmitter *ORE = nullptr, bool BoostIndirect = true,
      bool IgnoreThreshold = false)
      : CallAnalyzer(Callee, Call, TTI, GetAssumptionCache, GetBFI, PSI, ORE),
        ComputeFullInlineCost(OptComputeFullInlineCost ||
                              Params.ComputeFullInlineCost || ORE),
        Params(Params), Threshold(Params.DefaultThreshold),
        BoostIndirectCalls(BoostIndirect), IgnoreThreshold(IgnoreThreshold),
        Writer(this) {}

  /// Annotation Writer for instruction details
  InlineCostAnnotationWriter Writer;

  void dump();

  Optional<InstructionCostDetail> getCostDetails(const Instruction *I) {
    if (InstructionCostDetailMap.find(I) != InstructionCostDetailMap.end())
      return InstructionCostDetailMap[I];
    return None;
  }

  virtual ~InlineCostCallAnalyzer() {}
  int getThreshold() { return Threshold; }
  int getCost() { return Cost; }
};
} // namespace

/// Test whether the given value is an Alloca-derived function argument.
bool CallAnalyzer::isAllocaDerivedArg(Value *V) {
  return SROAArgValues.count(V);
}

void CallAnalyzer::disableSROAForArg(AllocaInst *SROAArg) {
  onDisableSROA(SROAArg);
  EnabledSROAAllocas.erase(SROAArg);
  disableLoadElimination();
}

void InlineCostAnnotationWriter::emitInstructionAnnot(const Instruction *I,
                                                formatted_raw_ostream &OS) {
  // The inlining cost of the given instruction is always printed.
  // The threshold delta is printed only when it is non-zero, which happens
  // when we decided to give a bonus at a particular instruction.
  Optional<InstructionCostDetail> Record = ICCA->getCostDetails(I);
  if (!Record)
    OS << "; No analysis for the instruction";
  else {
    OS << "; cost before = " << Record->CostBefore
       << ", cost after = " << Record->CostAfter
       << ", threshold before = " << Record->ThresholdBefore
       << ", threshold after = " << Record->ThresholdAfter << ", ";
    OS << "cost delta = " << Record->getCostDelta();
    if (Record->hasThresholdChanged())
      OS << ", threshold delta = " << Record->getThresholdDelta();
  }
  OS << "\n";
}

/// If 'V' maps to a SROA candidate, disable SROA for it.
void CallAnalyzer::disableSROA(Value *V) {
  if (auto *SROAArg = getSROAArgForValueOrNull(V)) {
    disableSROAForArg(SROAArg);
  }
}

void CallAnalyzer::disableLoadElimination() {
  if (EnableLoadElimination) {
    onDisableLoadElimination();
    EnableLoadElimination = false;
  }
}

/// Accumulate a constant GEP offset into an APInt if possible.
///
/// Returns false if unable to compute the offset for any reason. Respects any
/// simplified values known during the analysis of this callsite.
bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
  unsigned IntPtrWidth = DL.getIndexTypeSizeInBits(GEP.getType());
  assert(IntPtrWidth == Offset.getBitWidth());

  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
    if (!OpC)
      if (Constant *SimpleOp = SimplifiedValues.lookup(GTI.getOperand()))
        OpC = dyn_cast<ConstantInt>(SimpleOp);
    if (!OpC)
      return false;
    if (OpC->isZero())
      continue;

    // Handle a struct index, which adds its field offset to the pointer.
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      unsigned ElementIdx = OpC->getZExtValue();
      const StructLayout *SL = DL.getStructLayout(STy);
      Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
      continue;
    }

    APInt TypeSize(IntPtrWidth, DL.getTypeAllocSize(GTI.getIndexedType()));
    Offset += OpC->getValue().sextOrTrunc(IntPtrWidth) * TypeSize;
  }
  return true;
}
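
// Illustrative example of the accumulation above (assumes a typical data
// layout where i64 has 8-byte alignment): for
//   %f = getelementptr inbounds { i32, i64 }, { i32, i64 }* %p, i32 0, i32 1
// the struct index selects element 1, whose field offset is 8 bytes, so
// Offset accumulates 8.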

/// Use TTI to check whether a GEP is free.
///
/// Respects any simplified values known during the analysis of this callsite.
bool CallAnalyzer::isGEPFree(GetElementPtrInst &GEP) {
  SmallVector<Value *, 4> Operands;
  Operands.push_back(GEP.getOperand(0));
  for (User::op_iterator I = GEP.idx_begin(), E = GEP.idx_end(); I != E; ++I)
    if (Constant *SimpleOp = SimplifiedValues.lookup(*I))
      Operands.push_back(SimpleOp);
    else
      Operands.push_back(*I);
  return TargetTransformInfo::TCC_Free ==
         TTI.getUserCost(&GEP, Operands,
                         TargetTransformInfo::TCK_SizeAndLatency);
}

bool CallAnalyzer::visitAlloca(AllocaInst &I) {
  // Check whether inlining will turn a dynamic alloca into a static
  // alloca and handle that case.
  if (I.isArrayAllocation()) {
    Constant *Size = SimplifiedValues.lookup(I.getArraySize());
    if (auto *AllocSize = dyn_cast_or_null<ConstantInt>(Size)) {
      Type *Ty = I.getAllocatedType();
      AllocatedSize = SaturatingMultiplyAdd(
          AllocSize->getLimitedValue(), DL.getTypeAllocSize(Ty).getFixedSize(),
          AllocatedSize);
      return Base::visitAlloca(I);
    }
  }

  // Accumulate the allocated size.
  if (I.isStaticAlloca()) {
    Type *Ty = I.getAllocatedType();
    AllocatedSize =
        SaturatingAdd(DL.getTypeAllocSize(Ty).getFixedSize(), AllocatedSize);
  }

  // We will happily inline static alloca instructions.
  if (I.isStaticAlloca())
    return Base::visitAlloca(I);

  // FIXME: This is overly conservative. Dynamic allocas are inefficient for
  // a variety of reasons, and so we would like to not inline them into
  // functions which don't currently have a dynamic alloca. This simply
  // disables inlining altogether in the presence of a dynamic alloca.
  HasDynamicAlloca = true;
  return false;
}
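
// Illustrative example for the array-allocation case above: given
//   %buf = alloca i32, i32 %n
// where %n has been simplified to a constant 16 at this callsite, inlining
// would make the alloca static, and AllocatedSize grows by 16 * 4 = 64 bytes
// (assuming a 4-byte i32 in the data layout).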

bool CallAnalyzer::visitPHI(PHINode &I) {
  // FIXME: We need to propagate SROA *disabling* through phi nodes, even
  // though we don't want to propagate its bonuses. The idea is to disable
  // SROA if it *might* be used in an inappropriate manner.

  // Phi nodes are always zero-cost.
  // FIXME: Pointer sizes may differ between different address spaces, so do we
  // need to use correct address space in the call to getPointerSizeInBits here?
  // Or could we skip the getPointerSizeInBits call completely? As far as I can
  // see the ZeroOffset is used as a dummy value, so we can probably use any
  // bit width for the ZeroOffset?
  APInt ZeroOffset = APInt::getNullValue(DL.getPointerSizeInBits(0));
  bool CheckSROA = I.getType()->isPointerTy();

  // Track the constant or pointer with constant offset we've seen so far.
  Constant *FirstC = nullptr;
  std::pair<Value *, APInt> FirstBaseAndOffset = {nullptr, ZeroOffset};
  Value *FirstV = nullptr;

  for (unsigned i = 0, e = I.getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = I.getIncomingBlock(i);
    // If the incoming block is dead, skip the incoming block.
    if (DeadBlocks.count(Pred))
      continue;
    // If the parent block of phi is not the known successor of the incoming
    // block, skip the incoming block.
    BasicBlock *KnownSuccessor = KnownSuccessors[Pred];
    if (KnownSuccessor && KnownSuccessor != I.getParent())
      continue;

    Value *V = I.getIncomingValue(i);
    // If the incoming value is this phi itself, skip the incoming value.
    if (&I == V)
      continue;

    Constant *C = dyn_cast<Constant>(V);
    if (!C)
      C = SimplifiedValues.lookup(V);

    std::pair<Value *, APInt> BaseAndOffset = {nullptr, ZeroOffset};
    if (!C && CheckSROA)
      BaseAndOffset = ConstantOffsetPtrs.lookup(V);

    if (!C && !BaseAndOffset.first)
      // The incoming value is neither a constant nor a pointer with constant
      // offset, exit early.
      return true;

    if (FirstC) {
      if (FirstC == C)
        // If we've seen a constant incoming value before and it is the same
        // constant we see this time, continue checking the next incoming value.
        continue;
      // Otherwise early exit because we either see a different constant or saw
      // a constant before but we have a pointer with constant offset this time.
      return true;
    }

    if (FirstV) {
      // The same logic as above, but check pointer with constant offset here.
      if (FirstBaseAndOffset == BaseAndOffset)
        continue;
      return true;
    }

    if (C) {
      // This is the 1st time we've seen a constant, record it.
      FirstC = C;
      continue;
    }

    // The remaining case is that this is the 1st time we've seen a pointer with
    // constant offset, record it.
    FirstV = V;
    FirstBaseAndOffset = BaseAndOffset;
  }

  // Check if we can map phi to a constant.
  if (FirstC) {
    SimplifiedValues[&I] = FirstC;
    return true;
  }

  // Check if we can map phi to a pointer with constant offset.
  if (FirstBaseAndOffset.first) {
    ConstantOffsetPtrs[&I] = FirstBaseAndOffset;

    if (auto *SROAArg = getSROAArgForValueOrNull(FirstV))
      SROAArgValues[&I] = SROAArg;
  }

  return true;
}
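
// Illustrative example for visitPHI above: in
//   %p = phi i32 [ 4, %bb0 ], [ 4, %bb1 ]
// every live incoming value is the same constant, so the phi is recorded in
// SimplifiedValues as the constant 4. A phi whose incoming pointers all share
// one base and constant offset is likewise recorded in ConstantOffsetPtrs.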

/// Check we can fold GEPs of constant-offset call site argument pointers.
/// This requires target data and inbounds GEPs.
///
/// \return true if the specified GEP can be folded.
bool CallAnalyzer::canFoldInboundsGEP(GetElementPtrInst &I) {
  // Check if we have a base + offset for the pointer.
  std::pair<Value *, APInt> BaseAndOffset =
      ConstantOffsetPtrs.lookup(I.getPointerOperand());
  if (!BaseAndOffset.first)
    return false;

  // Check if the offset of this GEP is constant, and if so accumulate it
  // into Offset.
  if (!accumulateGEPOffset(cast<GEPOperator>(I), BaseAndOffset.second))
    return false;

  // Add the result as a new mapping to Base + Offset.
  ConstantOffsetPtrs[&I] = BaseAndOffset;

  return true;
}

bool CallAnalyzer::visitGetElementPtr(GetElementPtrInst &I) {
  auto *SROAArg = getSROAArgForValueOrNull(I.getPointerOperand());

  // Lambda to check whether a GEP's indices are all constant.
  auto IsGEPOffsetConstant = [&](GetElementPtrInst &GEP) {
    for (User::op_iterator I = GEP.idx_begin(), E = GEP.idx_end(); I != E; ++I)
      if (!isa<Constant>(*I) && !SimplifiedValues.lookup(*I))
        return false;
    return true;
  };

  if ((I.isInBounds() && canFoldInboundsGEP(I)) || IsGEPOffsetConstant(I)) {
    if (SROAArg)
      SROAArgValues[&I] = SROAArg;

    // Constant GEPs are modeled as free.
    return true;
  }

  // Variable GEPs will require math and will disable SROA.
  if (SROAArg)
    disableSROAForArg(SROAArg);
  return isGEPFree(I);
}

/// Simplify \p I if its operands are constants and update SimplifiedValues.
/// \p Evaluate is a callable specific to instruction type that evaluates the
/// instruction when all the operands are constants.
template <typename Callable>
bool CallAnalyzer::simplifyInstruction(Instruction &I, Callable Evaluate) {
  SmallVector<Constant *, 2> COps;
  for (Value *Op : I.operands()) {
    Constant *COp = dyn_cast<Constant>(Op);
    if (!COp)
      COp = SimplifiedValues.lookup(Op);
    if (!COp)
      return false;
    COps.push_back(COp);
  }
  auto *C = Evaluate(COps);
  if (!C)
    return false;
  SimplifiedValues[&I] = C;
  return true;
}
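
// For example, visitBitCast below uses this helper with an Evaluate lambda
// that constant-folds the cast via ConstantExpr::getBitCast once every
// operand is known to be a Constant.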

bool CallAnalyzer::visitBitCast(BitCastInst &I) {
  // Propagate constants through bitcasts.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getBitCast(COps[0], I.getType());
      }))
    return true;

  // Track base/offsets through casts
  std::pair<Value *, APInt> BaseAndOffset =
      ConstantOffsetPtrs.lookup(I.getOperand(0));
  // Casts don't change the offset, just wrap it up.
  if (BaseAndOffset.first)
    ConstantOffsetPtrs[&I] = BaseAndOffset;

  // Also look for SROA candidates here.
  if (auto *SROAArg = getSROAArgForValueOrNull(I.getOperand(0)))
    SROAArgValues[&I] = SROAArg;

  // Bitcasts are always zero cost.
  return true;
}

bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
  // Propagate constants through ptrtoint.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getPtrToInt(COps[0], I.getType());
      }))
    return true;

  // Track base/offset pairs when converted to a plain integer provided the
  // integer is large enough to represent the pointer.
  unsigned IntegerSize = I.getType()->getScalarSizeInBits();
  unsigned AS = I.getOperand(0)->getType()->getPointerAddressSpace();
  if (IntegerSize >= DL.getPointerSizeInBits(AS)) {
    std::pair<Value *, APInt> BaseAndOffset =
        ConstantOffsetPtrs.lookup(I.getOperand(0));
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // This is really weird. Technically, ptrtoint will disable SROA. However,
  // unless that ptrtoint is *used* somewhere in the live basic blocks after
  // inlining, it will be nuked, and SROA should proceed. All of the uses which
  // would block SROA would also block SROA if applied directly to a pointer,
  // and so we can just add the integer in here. The only places where SROA is
  // preserved either cannot fire on an integer, or won't in-and-of themselves
  // disable SROA (ext) w/o some later use that we would see and disable.
  if (auto *SROAArg = getSROAArgForValueOrNull(I.getOperand(0)))
    SROAArgValues[&I] = SROAArg;

  return TargetTransformInfo::TCC_Free ==
         TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
}

bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
  // Propagate constants through inttoptr.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getIntToPtr(COps[0], I.getType());
      }))
    return true;

  // Track base/offset pairs when round-tripped through a pointer without
  // modifications provided the integer is not too large.
  Value *Op = I.getOperand(0);
  unsigned IntegerSize = Op->getType()->getScalarSizeInBits();
  if (IntegerSize <= DL.getPointerTypeSizeInBits(I.getType())) {
    std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op);
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // "Propagate" SROA here in the same manner as we do for ptrtoint above.
  if (auto *SROAArg = getSROAArgForValueOrNull(Op))
    SROAArgValues[&I] = SROAArg;

  return TargetTransformInfo::TCC_Free ==
         TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
}

bool CallAnalyzer::visitCastInst(CastInst &I) {
  // Propagate constants through casts.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getCast(I.getOpcode(), COps[0], I.getType());
      }))
    return true;

  // Disable SROA in the face of arbitrary casts we don't explicitly list
  // elsewhere.
  disableSROA(I.getOperand(0));

  // If this is a floating-point cast, and the target says this operation
  // is expensive, this may eventually become a library call. Treat the cost
  // as such.
  switch (I.getOpcode()) {
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
    if (TTI.getFPOpCost(I.getType()) == TargetTransformInfo::TCC_Expensive)
      onCallPenalty();
    break;
  default:
    break;
  }

  return TargetTransformInfo::TCC_Free ==
         TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
}

bool CallAnalyzer::visitUnaryInstruction(UnaryInstruction &I) {
  Value *Operand = I.getOperand(0);
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantFoldInstOperands(&I, COps[0], DL);
      }))
    return true;

  // Disable any SROA on the argument to arbitrary unary instructions.
  disableSROA(Operand);

  return false;
}

bool CallAnalyzer::paramHasAttr(Argument *A, Attribute::AttrKind Attr) {
  return CandidateCall.paramHasAttr(A->getArgNo(), Attr);
}

bool CallAnalyzer::isKnownNonNullInCallee(Value *V) {
  // Does the *call site* have the NonNull attribute set on an argument?  We
  // use the attribute on the call site to memoize any analysis done in the
  // caller. This will also trip if the callee function has a non-null
  // parameter attribute, but that's a less interesting case because hopefully
  // the callee would already have been simplified based on that.
  if (Argument *A = dyn_cast<Argument>(V))
    if (paramHasAttr(A, Attribute::NonNull))
      return true;

  // Is this an alloca in the caller?  This is distinct from the attribute case
  // above because attributes aren't updated within the inliner itself and we
  // always want to catch the alloca derived case.
  if (isAllocaDerivedArg(V))
    // We can actually predict the result of comparisons between an
    // alloca-derived value and null. Note that this fires regardless of
    // SROA firing.
    return true;

  return false;
}

bool CallAnalyzer::allowSizeGrowth(CallBase &Call) {
  // If the normal destination of the invoke or the parent block of the call
  // site is unreachable-terminated, there is little point in inlining this
  // unless there is literally zero cost.
  // FIXME: Note that it is possible that an unreachable-terminated block has a
  // hot entry. For example, in the scenario below, inlining hot_call_X() may
  // be beneficial:
  // main() {
  //   hot_call_1();
  //   ...
  //   hot_call_N()
  //   exit(0);
  // }
  // For now, we are not handling this corner case here as it is rare in real
  // code. In future, we should elaborate this based on BPI and BFI in more
  // general threshold adjusting heuristics in updateThreshold().
  if (InvokeInst *II = dyn_cast<InvokeInst>(&Call)) {
    if (isa<UnreachableInst>(II->getNormalDest()->getTerminator()))
      return false;
  } else if (isa<UnreachableInst>(Call.getParent()->getTerminator()))
    return false;

  return true;
}

bool InlineCostCallAnalyzer::isColdCallSite(CallBase &Call,
                                            BlockFrequencyInfo *CallerBFI) {
  // If global profile summary is available, then callsite's coldness is
  // determined based on that.
  if (PSI && PSI->hasProfileSummary())
    return PSI->isColdCallSite(Call, CallerBFI);

  // Otherwise we need BFI to be available.
  if (!CallerBFI)
    return false;

  // Determine if the callsite is cold relative to caller's entry. We could
  // potentially cache the computation of scaled entry frequency, but the added
  // complexity is not worth it unless this scaling shows up high in the
  // profiles.
  const BranchProbability ColdProb(ColdCallSiteRelFreq, 100);
  auto CallSiteBB = Call.getParent();
  auto CallSiteFreq = CallerBFI->getBlockFreq(CallSiteBB);
  auto CallerEntryFreq =
      CallerBFI->getBlockFreq(&(Call.getCaller()->getEntryBlock()));
  return CallSiteFreq < CallerEntryFreq * ColdProb;
}
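
// Worked example (illustrative): with the default ColdCallSiteRelFreq of 2,
// ColdProb is 2%, so when no profile summary is available a callsite whose
// block frequency is below 2% of the caller's entry frequency is treated as
// cold.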

Optional<int>
InlineCostCallAnalyzer::getHotCallSiteThreshold(CallBase &Call,
                                                BlockFrequencyInfo *CallerBFI) {

  // If global profile summary is available, then callsite's hotness is
  // determined based on that.
  if (PSI && PSI->hasProfileSummary() && PSI->isHotCallSite(Call, CallerBFI))
    return Params.HotCallSiteThreshold;

  // Otherwise we need BFI to be available and to have a locally hot callsite
  // threshold.
  if (!CallerBFI || !Params.LocallyHotCallSiteThreshold)
    return None;

  // Determine if the callsite is hot relative to caller's entry. We could
  // potentially cache the computation of scaled entry frequency, but the added
  // complexity is not worth it unless this scaling shows up high in the
  // profiles.
  auto CallSiteBB = Call.getParent();
  auto CallSiteFreq = CallerBFI->getBlockFreq(CallSiteBB).getFrequency();
  auto CallerEntryFreq = CallerBFI->getEntryFreq();
  if (CallSiteFreq >= CallerEntryFreq * HotCallSiteRelFreq)
    return Params.LocallyHotCallSiteThreshold;

  // Otherwise treat it normally.
  return None;
}
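
// Worked example (illustrative): with the default HotCallSiteRelFreq of 60, a
// callsite whose block frequency is at least 60x the caller's entry frequency
// (e.g. inside a hot loop) receives Params.LocallyHotCallSiteThreshold instead
// of the default threshold.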

void InlineCostCallAnalyzer::updateThreshold(CallBase &Call, Function &Callee) {
  // If no size growth is allowed for this inlining, set Threshold to 0.
  if (!allowSizeGrowth(Call)) {
    Threshold = 0;
    return;
  }

  Function *Caller = Call.getCaller();

  // return min(A, B) if B is valid.
  auto MinIfValid = [](int A, Optional<int> B) {
    return B ? std::min(A, B.getValue()) : A;
  };

  // return max(A, B) if B is valid.
  auto MaxIfValid = [](int A, Optional<int> B) {
    return B ? std::max(A, B.getValue()) : A;
  };

  // Various bonus percentages. These are multiplied by Threshold to get the
  // bonus values.
  // SingleBBBonus: This bonus is applied if the callee has a single reachable
  // basic block at the given callsite context. This is speculatively applied
  // and withdrawn if more than one basic block is seen.
  //
  // LastCallToStaticBonus: This large bonus is applied to ensure the inlining
  // of the last call to a static function as inlining such functions is
  // guaranteed to reduce code size.
1284   //
1285   // These bonus percentages may be set to 0 based on properties of the caller
1286   // and the callsite.
1287   int SingleBBBonusPercent = 50;
1288   int VectorBonusPercent = TTI.getInlinerVectorBonusPercent();
1289   int LastCallToStaticBonus = InlineConstants::LastCallToStaticBonus;
1290 
1291   // Lambda to set all the above bonus and bonus percentages to 0.
1292   auto DisallowAllBonuses = [&]() {
1293     SingleBBBonusPercent = 0;
1294     VectorBonusPercent = 0;
1295     LastCallToStaticBonus = 0;
1296   };
1297 
1298   // Use the OptMinSizeThreshold or OptSizeThreshold knob if they are available
1299   // and reduce the threshold if the caller has the necessary attribute.
1300   if (Caller->hasMinSize()) {
1301     Threshold = MinIfValid(Threshold, Params.OptMinSizeThreshold);
1302     // For minsize, we want to disable the single BB bonus and the vector
1303     // bonuses, but not the last-call-to-static bonus. Inlining the last call to
1304     // a static function will, at the minimum, eliminate the parameter setup and
1305     // call/return instructions.
1306     SingleBBBonusPercent = 0;
1307     VectorBonusPercent = 0;
1308   } else if (Caller->hasOptSize())
1309     Threshold = MinIfValid(Threshold, Params.OptSizeThreshold);
1310 
1311   // Adjust the threshold based on inlinehint attribute and profile based
1312   // hotness information if the caller does not have MinSize attribute.
1313   if (!Caller->hasMinSize()) {
1314     if (Callee.hasFnAttribute(Attribute::InlineHint))
1315       Threshold = MaxIfValid(Threshold, Params.HintThreshold);
1316 
1317     // FIXME: After switching to the new passmanager, simplify the logic below
1318     // by checking only the callsite hotness/coldness as we will reliably
1319     // have local profile information.
1320     //
1321     // Callsite hotness and coldness can be determined if sample profile is
1322     // used (which adds hotness metadata to calls) or if caller's
1323     // BlockFrequencyInfo is available.
1324     BlockFrequencyInfo *CallerBFI = GetBFI ? &(GetBFI(*Caller)) : nullptr;
1325     auto HotCallSiteThreshold = getHotCallSiteThreshold(Call, CallerBFI);
1326     if (!Caller->hasOptSize() && HotCallSiteThreshold) {
1327       LLVM_DEBUG(dbgs() << "Hot callsite.\n");
1328       // FIXME: This should update the threshold only if it exceeds the
1329       // current threshold, but AutoFDO + ThinLTO currently relies on this
1330       // behavior to prevent inlining of hot callsites during ThinLTO
1331       // compile phase.
1332       Threshold = HotCallSiteThreshold.getValue();
1333     } else if (isColdCallSite(Call, CallerBFI)) {
1334       LLVM_DEBUG(dbgs() << "Cold callsite.\n");
      // Do not apply bonuses for a cold callsite, including the
      // LastCallToStatic bonus. While this bonus might result in code size
      // reduction, it can cause the size of a non-cold caller to increase,
      // preventing it from being inlined.
1339       DisallowAllBonuses();
1340       Threshold = MinIfValid(Threshold, Params.ColdCallSiteThreshold);
1341     } else if (PSI) {
1342       // Use callee's global profile information only if we have no way of
1343       // determining this via callsite information.
1344       if (PSI->isFunctionEntryHot(&Callee)) {
1345         LLVM_DEBUG(dbgs() << "Hot callee.\n");
        // If callsite hotness cannot be determined, we may still know
1347         // that the callee is hot and treat it as a weaker hint for threshold
1348         // increase.
1349         Threshold = MaxIfValid(Threshold, Params.HintThreshold);
1350       } else if (PSI->isFunctionEntryCold(&Callee)) {
1351         LLVM_DEBUG(dbgs() << "Cold callee.\n");
        // Do not apply bonuses for a cold callee, including the
        // LastCallToStatic bonus. While this bonus might result in code size
        // reduction, it can cause the size of a non-cold caller to increase,
        // preventing it from being inlined.
1356         DisallowAllBonuses();
1357         Threshold = MinIfValid(Threshold, Params.ColdThreshold);
1358       }
1359     }
1360   }
1361 
1362   // Finally, take the target-specific inlining threshold multiplier into
1363   // account.
1364   Threshold *= TTI.getInliningThresholdMultiplier();
1365 
1366   SingleBBBonus = Threshold * SingleBBBonusPercent / 100;
1367   VectorBonus = Threshold * VectorBonusPercent / 100;
1368 
1369   bool OnlyOneCallAndLocalLinkage =
1370       F.hasLocalLinkage() && F.hasOneUse() && &F == Call.getCalledFunction();
1371   // If there is only one call of the function, and it has internal linkage,
1372   // the cost of inlining it drops dramatically. It may seem odd to update
1373   // Cost in updateThreshold, but the bonus depends on the logic in this method.
1374   if (OnlyOneCallAndLocalLinkage)
1375     Cost -= LastCallToStaticBonus;
1376 }
1377 
1378 bool CallAnalyzer::visitCmpInst(CmpInst &I) {
1379   Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1380   // First try to handle simplified comparisons.
1381   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1382         return ConstantExpr::getCompare(I.getPredicate(), COps[0], COps[1]);
1383       }))
1384     return true;
1385 
1386   if (I.getOpcode() == Instruction::FCmp)
1387     return false;
1388 
1389   // Otherwise look for a comparison between constant offset pointers with
1390   // a common base.
1391   Value *LHSBase, *RHSBase;
1392   APInt LHSOffset, RHSOffset;
1393   std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
1394   if (LHSBase) {
1395     std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
1396     if (RHSBase && LHSBase == RHSBase) {
1397       // We have common bases, fold the icmp to a constant based on the
1398       // offsets.
1399       Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
1400       Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
1401       if (Constant *C = ConstantExpr::getICmp(I.getPredicate(), CLHS, CRHS)) {
1402         SimplifiedValues[&I] = C;
1403         ++NumConstantPtrCmps;
1404         return true;
1405       }
1406     }
1407   }
1408 
1409   // If the comparison is an equality comparison with null, we can simplify it
  // if we know the value (argument) can't be null.
1411   if (I.isEquality() && isa<ConstantPointerNull>(I.getOperand(1)) &&
1412       isKnownNonNullInCallee(I.getOperand(0))) {
1413     bool IsNotEqual = I.getPredicate() == CmpInst::ICMP_NE;
1414     SimplifiedValues[&I] = IsNotEqual ? ConstantInt::getTrue(I.getType())
1415                                       : ConstantInt::getFalse(I.getType());
1416     return true;
1417   }
1418   return handleSROA(I.getOperand(0), isa<ConstantPointerNull>(I.getOperand(1)));
1419 }
1420 
1421 bool CallAnalyzer::visitSub(BinaryOperator &I) {
1422   // Try to handle a special case: we can fold computing the difference of two
1423   // constant-related pointers.
1424   Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1425   Value *LHSBase, *RHSBase;
1426   APInt LHSOffset, RHSOffset;
1427   std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
1428   if (LHSBase) {
1429     std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
1430     if (RHSBase && LHSBase == RHSBase) {
1431       // We have common bases, fold the subtract to a constant based on the
1432       // offsets.
1433       Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
1434       Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
1435       if (Constant *C = ConstantExpr::getSub(CLHS, CRHS)) {
1436         SimplifiedValues[&I] = C;
1437         ++NumConstantPtrDiffs;
1438         return true;
1439       }
1440     }
1441   }
1442 
1443   // Otherwise, fall back to the generic logic for simplifying and handling
1444   // instructions.
1445   return Base::visitSub(I);
1446 }
1447 
1448 bool CallAnalyzer::visitBinaryOperator(BinaryOperator &I) {
1449   Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1450   Constant *CLHS = dyn_cast<Constant>(LHS);
1451   if (!CLHS)
1452     CLHS = SimplifiedValues.lookup(LHS);
1453   Constant *CRHS = dyn_cast<Constant>(RHS);
1454   if (!CRHS)
1455     CRHS = SimplifiedValues.lookup(RHS);
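
  // For example, if %a was simplified to the constant 2 from the call site
  // arguments, 'mul i32 %a, 21' simplifies to 42 below and adds no cost.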
1456 
1457   Value *SimpleV = nullptr;
1458   if (auto FI = dyn_cast<FPMathOperator>(&I))
1459     SimpleV = SimplifyBinOp(I.getOpcode(), CLHS ? CLHS : LHS, CRHS ? CRHS : RHS,
1460                             FI->getFastMathFlags(), DL);
1461   else
1462     SimpleV =
1463         SimplifyBinOp(I.getOpcode(), CLHS ? CLHS : LHS, CRHS ? CRHS : RHS, DL);
1464 
1465   if (Constant *C = dyn_cast_or_null<Constant>(SimpleV))
1466     SimplifiedValues[&I] = C;
1467 
1468   if (SimpleV)
1469     return true;
1470 
1471   // Disable any SROA on arguments to arbitrary, unsimplified binary operators.
1472   disableSROA(LHS);
1473   disableSROA(RHS);
1474 
1475   // If the instruction is floating point, and the target says this operation
1476   // is expensive, this may eventually become a library call. Treat the cost
1477   // as such. Unless it's fneg which can be implemented with an xor.
1478   using namespace llvm::PatternMatch;
1479   if (I.getType()->isFloatingPointTy() &&
1480       TTI.getFPOpCost(I.getType()) == TargetTransformInfo::TCC_Expensive &&
1481       !match(&I, m_FNeg(m_Value())))
1482     onCallPenalty();
1483 
1484   return false;
1485 }
1486 
1487 bool CallAnalyzer::visitFNeg(UnaryOperator &I) {
1488   Value *Op = I.getOperand(0);
1489   Constant *COp = dyn_cast<Constant>(Op);
1490   if (!COp)
1491     COp = SimplifiedValues.lookup(Op);
1492 
1493   Value *SimpleV = SimplifyFNegInst(
1494       COp ? COp : Op, cast<FPMathOperator>(I).getFastMathFlags(), DL);
1495 
1496   if (Constant *C = dyn_cast_or_null<Constant>(SimpleV))
1497     SimplifiedValues[&I] = C;
1498 
1499   if (SimpleV)
1500     return true;
1501 
1502   // Disable any SROA on arguments to arbitrary, unsimplified fneg.
1503   disableSROA(Op);
1504 
1505   return false;
1506 }
1507 
1508 bool CallAnalyzer::visitLoad(LoadInst &I) {
1509   if (handleSROA(I.getPointerOperand(), I.isSimple()))
1510     return true;
1511 
1512   // If the data is already loaded from this address and hasn't been clobbered
1513   // by any stores or calls, this load is likely to be redundant and can be
1514   // eliminated.
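  //
  // For example, a second 'load i32, i32* %p' from the same pointer with no
  // intervening store or call is treated as an elimination opportunity and
  // not charged here.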
1515   if (EnableLoadElimination &&
1516       !LoadAddrSet.insert(I.getPointerOperand()).second && I.isUnordered()) {
1517     onLoadEliminationOpportunity();
1518     return true;
1519   }
1520 
1521   return false;
1522 }
1523 
1524 bool CallAnalyzer::visitStore(StoreInst &I) {
1525   if (handleSROA(I.getPointerOperand(), I.isSimple()))
1526     return true;
1527 
1528   // The store can potentially clobber loads and prevent repeated loads from
1529   // being eliminated.
1530   // FIXME:
  // 1. We can probably keep an initial set of eliminable loads subtracted
  // from the cost even when we finally see a store. We just need to disable
  // *further* accumulation of elimination savings.
1534   // 2. We should probably at some point thread MemorySSA for the callee into
1535   // this and then use that to actually compute *really* precise savings.
1536   disableLoadElimination();
1537   return false;
1538 }
1539 
1540 bool CallAnalyzer::visitExtractValue(ExtractValueInst &I) {
1541   // Constant folding for extract value is trivial.
1542   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1543         return ConstantExpr::getExtractValue(COps[0], I.getIndices());
1544       }))
1545     return true;
1546 
1547   // SROA can look through these but give them a cost.
1548   return false;
1549 }
1550 
1551 bool CallAnalyzer::visitInsertValue(InsertValueInst &I) {
1552   // Constant folding for insert value is trivial.
1553   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1554         return ConstantExpr::getInsertValue(/*AggregateOperand*/ COps[0],
1555                                             /*InsertedValueOperand*/ COps[1],
1556                                             I.getIndices());
1557       }))
1558     return true;
1559 
1560   // SROA can look through these but give them a cost.
1561   return false;
1562 }
1563 
1564 /// Try to simplify a call site.
1565 ///
1566 /// Takes a concrete function and callsite and tries to actually simplify it by
1567 /// analyzing the arguments and call itself with instsimplify. Returns true if
1568 /// it has simplified the callsite to some other entity (a constant), making it
1569 /// free.
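///
/// For example, 'call double @llvm.sqrt.f64(double %x)' simplifies to the
/// constant 2.0 here when %x maps to 4.0 at this call site.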
1570 bool CallAnalyzer::simplifyCallSite(Function *F, CallBase &Call) {
1571   // FIXME: Using the instsimplify logic directly for this is inefficient
1572   // because we have to continually rebuild the argument list even when no
1573   // simplifications can be performed. Until that is fixed with remapping
1574   // inside of instsimplify, directly constant fold calls here.
1575   if (!canConstantFoldCallTo(&Call, F))
1576     return false;
1577 
1578   // Try to re-map the arguments to constants.
1579   SmallVector<Constant *, 4> ConstantArgs;
1580   ConstantArgs.reserve(Call.arg_size());
1581   for (Value *I : Call.args()) {
1582     Constant *C = dyn_cast<Constant>(I);
1583     if (!C)
1584       C = dyn_cast_or_null<Constant>(SimplifiedValues.lookup(I));
1585     if (!C)
1586       return false; // This argument doesn't map to a constant.
1587 
1588     ConstantArgs.push_back(C);
1589   }
1590   if (Constant *C = ConstantFoldCall(&Call, F, ConstantArgs)) {
1591     SimplifiedValues[&Call] = C;
1592     return true;
1593   }
1594 
1595   return false;
1596 }
1597 
1598 bool CallAnalyzer::visitCallBase(CallBase &Call) {
1599   if (Call.hasFnAttr(Attribute::ReturnsTwice) &&
1600       !F.hasFnAttribute(Attribute::ReturnsTwice)) {
1601     // This aborts the entire analysis.
1602     ExposesReturnsTwice = true;
1603     return false;
1604   }
1605   if (isa<CallInst>(Call) && cast<CallInst>(Call).cannotDuplicate())
1606     ContainsNoDuplicateCall = true;
1607 
1608   Value *Callee = Call.getCalledOperand();
1609   Function *F = dyn_cast_or_null<Function>(Callee);
1610   bool IsIndirectCall = !F;
1611   if (IsIndirectCall) {
1612     // Check if this happens to be an indirect function call to a known function
1613     // in this inline context. If not, we've done all we can.
1614     F = dyn_cast_or_null<Function>(SimplifiedValues.lookup(Callee));
1615     if (!F) {
1616       onCallArgumentSetup(Call);
1617 
1618       if (!Call.onlyReadsMemory())
1619         disableLoadElimination();
1620       return Base::visitCallBase(Call);
1621     }
1622   }
1623 
1624   assert(F && "Expected a call to a known function");
1625 
1626   // When we have a concrete function, first try to simplify it directly.
1627   if (simplifyCallSite(F, Call))
1628     return true;
1629 
1630   // Next check if it is an intrinsic we know about.
1631   // FIXME: Lift this into part of the InstVisitor.
1632   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&Call)) {
1633     switch (II->getIntrinsicID()) {
1634     default:
1635       if (!Call.onlyReadsMemory() && !isAssumeLikeIntrinsic(II))
1636         disableLoadElimination();
1637       return Base::visitCallBase(Call);
1638 
1639     case Intrinsic::load_relative:
1640       onLoadRelativeIntrinsic();
1641       return false;
1642 
1643     case Intrinsic::memset:
1644     case Intrinsic::memcpy:
1645     case Intrinsic::memmove:
1646       disableLoadElimination();
1647       // SROA can usually chew through these intrinsics, but they aren't free.
1648       return false;
1649     case Intrinsic::icall_branch_funnel:
1650     case Intrinsic::localescape:
1651       HasUninlineableIntrinsic = true;
1652       return false;
1653     case Intrinsic::vastart:
1654       InitsVargArgs = true;
1655       return false;
1656     }
1657   }
1658 
1659   if (F == Call.getFunction()) {
1660     // This flag will fully abort the analysis, so don't bother with anything
1661     // else.
1662     IsRecursiveCall = true;
1663     return false;
1664   }
1665 
1666   if (TTI.isLoweredToCall(F)) {
1667     onLoweredCall(F, Call, IsIndirectCall);
1668   }
1669 
1670   if (!(Call.onlyReadsMemory() || (IsIndirectCall && F->onlyReadsMemory())))
1671     disableLoadElimination();
1672   return Base::visitCallBase(Call);
1673 }
1674 
1675 bool CallAnalyzer::visitReturnInst(ReturnInst &RI) {
1676   // At least one return instruction will be free after inlining.
1677   bool Free = !HasReturn;
1678   HasReturn = true;
1679   return Free;
1680 }
1681 
1682 bool CallAnalyzer::visitBranchInst(BranchInst &BI) {
1683   // We model unconditional branches as essentially free -- they really
1684   // shouldn't exist at all, but handling them makes the behavior of the
1685   // inliner more regular and predictable. Interestingly, conditional branches
1686   // which will fold away are also free.
1687   return BI.isUnconditional() || isa<ConstantInt>(BI.getCondition()) ||
1688          dyn_cast_or_null<ConstantInt>(
1689              SimplifiedValues.lookup(BI.getCondition()));
1690 }
1691 
1692 bool CallAnalyzer::visitSelectInst(SelectInst &SI) {
1693   bool CheckSROA = SI.getType()->isPointerTy();
1694   Value *TrueVal = SI.getTrueValue();
1695   Value *FalseVal = SI.getFalseValue();
1696 
1697   Constant *TrueC = dyn_cast<Constant>(TrueVal);
1698   if (!TrueC)
1699     TrueC = SimplifiedValues.lookup(TrueVal);
1700   Constant *FalseC = dyn_cast<Constant>(FalseVal);
1701   if (!FalseC)
1702     FalseC = SimplifiedValues.lookup(FalseVal);
1703   Constant *CondC =
1704       dyn_cast_or_null<Constant>(SimplifiedValues.lookup(SI.getCondition()));
1705 
1706   if (!CondC) {
1707     // Select C, X, X => X
1708     if (TrueC == FalseC && TrueC) {
1709       SimplifiedValues[&SI] = TrueC;
1710       return true;
1711     }
1712 
1713     if (!CheckSROA)
1714       return Base::visitSelectInst(SI);
1715 
1716     std::pair<Value *, APInt> TrueBaseAndOffset =
1717         ConstantOffsetPtrs.lookup(TrueVal);
1718     std::pair<Value *, APInt> FalseBaseAndOffset =
1719         ConstantOffsetPtrs.lookup(FalseVal);
1720     if (TrueBaseAndOffset == FalseBaseAndOffset && TrueBaseAndOffset.first) {
1721       ConstantOffsetPtrs[&SI] = TrueBaseAndOffset;
1722 
1723       if (auto *SROAArg = getSROAArgForValueOrNull(TrueVal))
1724         SROAArgValues[&SI] = SROAArg;
1725       return true;
1726     }
1727 
1728     return Base::visitSelectInst(SI);
1729   }
1730 
1731   // Select condition is a constant.
1732   Value *SelectedV = CondC->isAllOnesValue()
1733                          ? TrueVal
1734                          : (CondC->isNullValue()) ? FalseVal : nullptr;
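  // For example, for 'select i1 true, i32 %a, i32 %b' the i1 true condition
  // is all ones, so SelectedV is %a.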
1735   if (!SelectedV) {
    // Condition is a vector constant that is not all 1s or all 0s. If all
    // operands are constants, ConstantExpr::getSelect() can handle cases such
    // as vector selects.
1739     if (TrueC && FalseC) {
1740       if (auto *C = ConstantExpr::getSelect(CondC, TrueC, FalseC)) {
1741         SimplifiedValues[&SI] = C;
1742         return true;
1743       }
1744     }
1745     return Base::visitSelectInst(SI);
1746   }
1747 
1748   // Condition is either all 1s or all 0s. SI can be simplified.
1749   if (Constant *SelectedC = dyn_cast<Constant>(SelectedV)) {
1750     SimplifiedValues[&SI] = SelectedC;
1751     return true;
1752   }
1753 
1754   if (!CheckSROA)
1755     return true;
1756 
1757   std::pair<Value *, APInt> BaseAndOffset =
1758       ConstantOffsetPtrs.lookup(SelectedV);
1759   if (BaseAndOffset.first) {
1760     ConstantOffsetPtrs[&SI] = BaseAndOffset;
1761 
1762     if (auto *SROAArg = getSROAArgForValueOrNull(SelectedV))
1763       SROAArgValues[&SI] = SROAArg;
1764   }
1765 
1766   return true;
1767 }
1768 
1769 bool CallAnalyzer::visitSwitchInst(SwitchInst &SI) {
1770   // We model unconditional switches as free, see the comments on handling
1771   // branches.
1772   if (isa<ConstantInt>(SI.getCondition()))
1773     return true;
1774   if (Value *V = SimplifiedValues.lookup(SI.getCondition()))
1775     if (isa<ConstantInt>(V))
1776       return true;
1777 
1778   // Assume the most general case where the switch is lowered into
1779   // either a jump table, bit test, or a balanced binary tree consisting of
1780   // case clusters without merging adjacent clusters with the same
  // destination. We do not model switches that are lowered with a mix of
  // jump table, bit test, and binary search tree. The cost of the switch is
  // proportional to the size of the tree or the size of the jump table range.
1784   //
  // NB: In simplify-cfg we convert large switches that are just used to
  // initialize large phi nodes into lookup tables, so this shouldn't prevent
  // inlining those. It will prevent inlining in cases where the optimization
  // does not (yet) fire.
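  //
  // For illustration: four contiguous cases typically form a single jump
  // table cluster, while four widely scattered cases form four clusters
  // searched by a balanced tree of depth ~log2(4) = 2.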
1789 
1790   unsigned JumpTableSize = 0;
1791   BlockFrequencyInfo *BFI = GetBFI ? &(GetBFI(F)) : nullptr;
1792   unsigned NumCaseCluster =
1793       TTI.getEstimatedNumberOfCaseClusters(SI, JumpTableSize, PSI, BFI);
1794 
1795   onFinalizeSwitch(JumpTableSize, NumCaseCluster);
1796   return false;
1797 }
1798 
1799 bool CallAnalyzer::visitIndirectBrInst(IndirectBrInst &IBI) {
  // We never want to inline functions that contain an indirectbr. Inlining
  // one would be incorrect because all the blockaddresses (in static global
  // initializers, for example) would still refer to the original function,
  // and the indirect jump would jump from the inlined copy of the function
  // into the original function, which is undefined behavior.
1805   // FIXME: This logic isn't really right; we can safely inline functions with
1806   // indirectbr's as long as no other function or global references the
1807   // blockaddress of a block within the current function.
1808   HasIndirectBr = true;
1809   return false;
1810 }
1811 
1812 bool CallAnalyzer::visitResumeInst(ResumeInst &RI) {
1813   // FIXME: It's not clear that a single instruction is an accurate model for
1814   // the inline cost of a resume instruction.
1815   return false;
1816 }
1817 
1818 bool CallAnalyzer::visitCleanupReturnInst(CleanupReturnInst &CRI) {
1819   // FIXME: It's not clear that a single instruction is an accurate model for
1820   // the inline cost of a cleanupret instruction.
1821   return false;
1822 }
1823 
1824 bool CallAnalyzer::visitCatchReturnInst(CatchReturnInst &CRI) {
1825   // FIXME: It's not clear that a single instruction is an accurate model for
1826   // the inline cost of a catchret instruction.
1827   return false;
1828 }
1829 
1830 bool CallAnalyzer::visitUnreachableInst(UnreachableInst &I) {
  // FIXME: It might be reasonable to discount the cost of instructions leading
1832   // to unreachable as they have the lowest possible impact on both runtime and
1833   // code size.
1834   return true; // No actual code is needed for unreachable.
1835 }
1836 
1837 bool CallAnalyzer::visitInstruction(Instruction &I) {
1838   // Some instructions are free. All of the free intrinsics can also be
1839   // handled by SROA, etc.
1840   if (TargetTransformInfo::TCC_Free ==
1841       TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency))
1842     return true;
1843 
1844   // We found something we don't understand or can't handle. Mark any SROA-able
1845   // values in the operand list as no longer viable.
1846   for (User::op_iterator OI = I.op_begin(), OE = I.op_end(); OI != OE; ++OI)
1847     disableSROA(*OI);
1848 
1849   return false;
1850 }
1851 
1852 /// Analyze a basic block for its contribution to the inline cost.
1853 ///
1854 /// This method walks the analyzer over every instruction in the given basic
1855 /// block and accounts for their cost during inlining at this callsite. It
/// aborts early if the threshold has been exceeded or an impossible-to-inline
/// construct has been detected. It returns a failed InlineResult if inlining
/// is no longer viable, and a successful one otherwise.
1859 InlineResult
1860 CallAnalyzer::analyzeBlock(BasicBlock *BB,
1861                            SmallPtrSetImpl<const Value *> &EphValues) {
1862   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
    // FIXME: Currently, the number of instructions in a function, regardless
    // of our ability to simplify them during inlining to constants or dead
    // code, is actually used by the vector bonus heuristic. As long as that's
    // true, we have to special case debug intrinsics here to prevent
    // differences in inlining due to debug symbols. Eventually, the number of
    // unsimplified instructions shouldn't factor into the cost computation,
    // but until then, hack around it here.
1870     if (isa<DbgInfoIntrinsic>(I))
1871       continue;
1872 
1873     // Skip ephemeral values.
1874     if (EphValues.count(&*I))
1875       continue;
1876 
1877     ++NumInstructions;
1878     if (isa<ExtractElementInst>(I) || I->getType()->isVectorTy())
1879       ++NumVectorInstructions;
1880 
1881     // If the instruction simplified to a constant, there is no cost to this
1882     // instruction. Visit the instructions using our InstVisitor to account for
1883     // all of the per-instruction logic. The visit tree returns true if we
1884     // consumed the instruction in any way, and false if the instruction's base
1885     // cost should count against inlining.
1886     onInstructionAnalysisStart(&*I);
1887 
1888     if (Base::visit(&*I))
1889       ++NumInstructionsSimplified;
1890     else
1891       onMissedSimplification();
1892 
1893     onInstructionAnalysisFinish(&*I);
1894     using namespace ore;
    // If visiting this instruction detected an uninlinable pattern, abort.
1896     InlineResult IR = InlineResult::success();
1897     if (IsRecursiveCall)
1898       IR = InlineResult::failure("recursive");
1899     else if (ExposesReturnsTwice)
1900       IR = InlineResult::failure("exposes returns twice");
1901     else if (HasDynamicAlloca)
1902       IR = InlineResult::failure("dynamic alloca");
1903     else if (HasIndirectBr)
1904       IR = InlineResult::failure("indirect branch");
1905     else if (HasUninlineableIntrinsic)
1906       IR = InlineResult::failure("uninlinable intrinsic");
1907     else if (InitsVargArgs)
1908       IR = InlineResult::failure("varargs");
1909     if (!IR.isSuccess()) {
1910       if (ORE)
1911         ORE->emit([&]() {
1912           return OptimizationRemarkMissed(DEBUG_TYPE, "NeverInline",
1913                                           &CandidateCall)
1914                  << NV("Callee", &F) << " has uninlinable pattern ("
1915                  << NV("InlineResult", IR.getFailureReason())
1916                  << ") and cost is not fully computed";
1917         });
1918       return IR;
1919     }
1920 
1921     // If the caller is a recursive function then we don't want to inline
1922     // functions which allocate a lot of stack space because it would increase
1923     // the caller stack usage dramatically.
1924     if (IsCallerRecursive &&
1925         AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller) {
1926       auto IR =
1927           InlineResult::failure("recursive and allocates too much stack space");
1928       if (ORE)
1929         ORE->emit([&]() {
1930           return OptimizationRemarkMissed(DEBUG_TYPE, "NeverInline",
1931                                           &CandidateCall)
1932                  << NV("Callee", &F) << " is "
1933                  << NV("InlineResult", IR.getFailureReason())
1934                  << ". Cost is not fully computed";
1935         });
1936       return IR;
1937     }
1938 
1939     if (shouldStop())
1940       return InlineResult::failure(
1941           "Call site analysis is not favorable to inlining.");
1942   }
1943 
1944   return InlineResult::success();
1945 }
1946 
1947 /// Compute the base pointer and cumulative constant offsets for V.
1948 ///
1949 /// This strips all constant offsets off of V, leaving it the base pointer, and
1950 /// accumulates the total constant offset applied in the returned constant. It
/// returns null if V is not a pointer, and returns the constant '0' if there are
1952 /// no constant offsets applied.
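///
/// For example, for V = 'getelementptr inbounds i8, i8* %base, i64 12', this
/// strips V down to %base and returns an index-typed constant 12.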
1953 ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
1954   if (!V->getType()->isPointerTy())
1955     return nullptr;
1956 
1957   unsigned AS = V->getType()->getPointerAddressSpace();
1958   unsigned IntPtrWidth = DL.getIndexSizeInBits(AS);
1959   APInt Offset = APInt::getNullValue(IntPtrWidth);
1960 
1961   // Even though we don't look through PHI nodes, we could be called on an
1962   // instruction in an unreachable block, which may be on a cycle.
1963   SmallPtrSet<Value *, 4> Visited;
1964   Visited.insert(V);
1965   do {
1966     if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
1967       if (!GEP->isInBounds() || !accumulateGEPOffset(*GEP, Offset))
1968         return nullptr;
1969       V = GEP->getPointerOperand();
1970     } else if (Operator::getOpcode(V) == Instruction::BitCast) {
1971       V = cast<Operator>(V)->getOperand(0);
1972     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1973       if (GA->isInterposable())
1974         break;
1975       V = GA->getAliasee();
1976     } else {
1977       break;
1978     }
1979     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
1980   } while (Visited.insert(V).second);
1981 
1982   Type *IdxPtrTy = DL.getIndexType(V->getType());
1983   return cast<ConstantInt>(ConstantInt::get(IdxPtrTy, Offset));
1984 }
1985 
1986 /// Find dead blocks due to deleted CFG edges during inlining.
1987 ///
1988 /// If we know the successor of the current block, \p CurrBB, has to be \p
1989 /// NextBB, the other successors of \p CurrBB are dead if these successors have
1990 /// no live incoming CFG edges.  If one block is found to be dead, we can
1991 /// continue growing the dead block list by checking the successors of the dead
1992 /// blocks to see if all their incoming edges are dead or not.
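///
/// For example, if the branch terminating %bb is known to go to %then, then
/// %else is newly dead unless it has another live predecessor, and any block
/// reachable only through %else dies with it.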
1993 void CallAnalyzer::findDeadBlocks(BasicBlock *CurrBB, BasicBlock *NextBB) {
1994   auto IsEdgeDead = [&](BasicBlock *Pred, BasicBlock *Succ) {
1995     // A CFG edge is dead if the predecessor is dead or the predecessor has a
1996     // known successor which is not the one under exam.
1997     return (DeadBlocks.count(Pred) ||
1998             (KnownSuccessors[Pred] && KnownSuccessors[Pred] != Succ));
1999   };
2000 
2001   auto IsNewlyDead = [&](BasicBlock *BB) {
2002     // If all the edges to a block are dead, the block is also dead.
2003     return (!DeadBlocks.count(BB) &&
2004             llvm::all_of(predecessors(BB),
2005                          [&](BasicBlock *P) { return IsEdgeDead(P, BB); }));
2006   };
2007 
2008   for (BasicBlock *Succ : successors(CurrBB)) {
2009     if (Succ == NextBB || !IsNewlyDead(Succ))
2010       continue;
2011     SmallVector<BasicBlock *, 4> NewDead;
2012     NewDead.push_back(Succ);
2013     while (!NewDead.empty()) {
2014       BasicBlock *Dead = NewDead.pop_back_val();
2015       if (DeadBlocks.insert(Dead))
2016         // Continue growing the dead block lists.
2017         for (BasicBlock *S : successors(Dead))
2018           if (IsNewlyDead(S))
2019             NewDead.push_back(S);
2020     }
2021   }
2022 }
2023 
2024 /// Analyze a call site for potential inlining.
2025 ///
/// Returns a successful InlineResult if inlining this call is viable, and a
/// failed one if it is not. It computes the cost and adjusts the threshold
/// based on numerous factors and heuristics. If this method fails but the
/// computed cost is below the computed threshold, then inlining was forcibly
/// disabled by some artifact of the routine.
2031 InlineResult CallAnalyzer::analyze() {
2032   ++NumCallsAnalyzed;
2033 
2034   auto Result = onAnalysisStart();
2035   if (!Result.isSuccess())
2036     return Result;
2037 
2038   if (F.empty())
2039     return InlineResult::success();
2040 
2041   Function *Caller = CandidateCall.getFunction();
2042   // Check if the caller function is recursive itself.
2043   for (User *U : Caller->users()) {
2044     CallBase *Call = dyn_cast<CallBase>(U);
2045     if (Call && Call->getFunction() == Caller) {
2046       IsCallerRecursive = true;
2047       break;
2048     }
2049   }
2050 
2051   // Populate our simplified values by mapping from function arguments to call
2052   // arguments with known important simplifications.
2053   auto CAI = CandidateCall.arg_begin();
2054   for (Function::arg_iterator FAI = F.arg_begin(), FAE = F.arg_end();
2055        FAI != FAE; ++FAI, ++CAI) {
2056     assert(CAI != CandidateCall.arg_end());
2057     if (Constant *C = dyn_cast<Constant>(CAI))
2058       SimplifiedValues[&*FAI] = C;
2059 
2060     Value *PtrArg = *CAI;
2061     if (ConstantInt *C = stripAndComputeInBoundsConstantOffsets(PtrArg)) {
2062       ConstantOffsetPtrs[&*FAI] = std::make_pair(PtrArg, C->getValue());
2063 
2064       // We can SROA any pointer arguments derived from alloca instructions.
2065       if (auto *SROAArg = dyn_cast<AllocaInst>(PtrArg)) {
2066         SROAArgValues[&*FAI] = SROAArg;
2067         onInitializeSROAArg(SROAArg);
2068         EnabledSROAAllocas.insert(SROAArg);
2069       }
2070     }
2071   }
2072   NumConstantArgs = SimplifiedValues.size();
2073   NumConstantOffsetPtrArgs = ConstantOffsetPtrs.size();
2074   NumAllocaArgs = SROAArgValues.size();
2075 
2076   // FIXME: If a caller has multiple calls to a callee, we end up recomputing
2077   // the ephemeral values multiple times (and they're completely determined by
2078   // the callee, so this is purely duplicate work).
2079   SmallPtrSet<const Value *, 32> EphValues;
2080   CodeMetrics::collectEphemeralValues(&F, &GetAssumptionCache(F), EphValues);
2081 
2082   // The worklist of live basic blocks in the callee *after* inlining. We avoid
2083   // adding basic blocks of the callee which can be proven to be dead for this
2084   // particular call site in order to get more accurate cost estimates. This
2085   // requires a somewhat heavyweight iteration pattern: we need to walk the
  // basic blocks in a breadth-first order as we insert live successors. To
  // accomplish this we use a small-size optimized SetVector, since small
  // iteration counts are the common case: we exit once we cross our
  // threshold.
2089   typedef SetVector<BasicBlock *, SmallVector<BasicBlock *, 16>,
2090                     SmallPtrSet<BasicBlock *, 16>>
2091       BBSetVector;
2092   BBSetVector BBWorklist;
2093   BBWorklist.insert(&F.getEntryBlock());
2094 
2095   // Note that we *must not* cache the size, this loop grows the worklist.
2096   for (unsigned Idx = 0; Idx != BBWorklist.size(); ++Idx) {
2097     if (shouldStop())
2098       break;
2099 
2100     BasicBlock *BB = BBWorklist[Idx];
2101     if (BB->empty())
2102       continue;
2103 
2104     // Disallow inlining a blockaddress with uses other than strictly callbr.
2105     // A blockaddress only has defined behavior for an indirect branch in the
2106     // same function, and we do not currently support inlining indirect
2107     // branches.  But, the inliner may not see an indirect branch that ends up
2108     // being dead code at a particular call site. If the blockaddress escapes
2109     // the function, e.g., via a global variable, inlining may lead to an
2110     // invalid cross-function reference.
2111     // FIXME: pr/39560: continue relaxing this overt restriction.
2112     if (BB->hasAddressTaken())
2113       for (User *U : BlockAddress::get(&*BB)->users())
2114         if (!isa<CallBrInst>(*U))
2115           return InlineResult::failure("blockaddress used outside of callbr");
2116 
    // Analyze the cost of this block. If we blow through the threshold, this
    // returns a failure, and we can bail out.
2119     InlineResult IR = analyzeBlock(BB, EphValues);
2120     if (!IR.isSuccess())
2121       return IR;
2122 
2123     Instruction *TI = BB->getTerminator();
2124 
2125     // Add in the live successors by first checking whether we have terminator
2126     // that may be simplified based on the values simplified by this call.
2127     if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
2128       if (BI->isConditional()) {
2129         Value *Cond = BI->getCondition();
2130         if (ConstantInt *SimpleCond =
2131                 dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
2132           BasicBlock *NextBB = BI->getSuccessor(SimpleCond->isZero() ? 1 : 0);
2133           BBWorklist.insert(NextBB);
2134           KnownSuccessors[BB] = NextBB;
2135           findDeadBlocks(BB, NextBB);
2136           continue;
2137         }
2138       }
2139     } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
2140       Value *Cond = SI->getCondition();
2141       if (ConstantInt *SimpleCond =
2142               dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
2143         BasicBlock *NextBB = SI->findCaseValue(SimpleCond)->getCaseSuccessor();
2144         BBWorklist.insert(NextBB);
2145         KnownSuccessors[BB] = NextBB;
2146         findDeadBlocks(BB, NextBB);
2147         continue;
2148       }
2149     }
2150 
2151     // If we're unable to select a particular successor, just count all of
2152     // them.
2153     for (unsigned TIdx = 0, TSize = TI->getNumSuccessors(); TIdx != TSize;
2154          ++TIdx)
2155       BBWorklist.insert(TI->getSuccessor(TIdx));
2156 
2157     onBlockAnalyzed(BB);
2158   }
2159 
2160   bool OnlyOneCallAndLocalLinkage = F.hasLocalLinkage() && F.hasOneUse() &&
2161                                     &F == CandidateCall.getCalledFunction();
  // If this is a noduplicate call, we can still inline as long as
  // inlining this would cause the removal of the callee (so the instruction
  // is not actually duplicated, just moved).
2165   if (!OnlyOneCallAndLocalLinkage && ContainsNoDuplicateCall)
2166     return InlineResult::failure("noduplicate");
2167 
2168   return finalizeAnalysis();
2169 }
2170 
2171 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2172 /// Dump stats about this call's analysis.
2173 LLVM_DUMP_METHOD void InlineCostCallAnalyzer::dump() {
2174 #define DEBUG_PRINT_STAT(x) dbgs() << "      " #x ": " << x << "\n"
2175   if (PrintInstructionComments)
2176     F.print(dbgs(), &Writer);
2177   DEBUG_PRINT_STAT(NumConstantArgs);
2178   DEBUG_PRINT_STAT(NumConstantOffsetPtrArgs);
2179   DEBUG_PRINT_STAT(NumAllocaArgs);
2180   DEBUG_PRINT_STAT(NumConstantPtrCmps);
2181   DEBUG_PRINT_STAT(NumConstantPtrDiffs);
2182   DEBUG_PRINT_STAT(NumInstructionsSimplified);
2183   DEBUG_PRINT_STAT(NumInstructions);
2184   DEBUG_PRINT_STAT(SROACostSavings);
2185   DEBUG_PRINT_STAT(SROACostSavingsLost);
2186   DEBUG_PRINT_STAT(LoadEliminationCost);
2187   DEBUG_PRINT_STAT(ContainsNoDuplicateCall);
2188   DEBUG_PRINT_STAT(Cost);
2189   DEBUG_PRINT_STAT(Threshold);
2190 #undef DEBUG_PRINT_STAT
2191 }
2192 #endif
2193 
/// Test that there are no attribute conflicts between Caller and Callee that
/// prevent inlining.
2196 static bool functionsHaveCompatibleAttributes(
2197     Function *Caller, Function *Callee, TargetTransformInfo &TTI,
2198     function_ref<const TargetLibraryInfo &(Function &)> &GetTLI) {
2199   // Note that CalleeTLI must be a copy not a reference. The legacy pass manager
2200   // caches the most recently created TLI in the TargetLibraryInfoWrapperPass
2201   // object, and always returns the same object (which is overwritten on each
2202   // GetTLI call). Therefore we copy the first result.
2203   auto CalleeTLI = GetTLI(*Callee);
2204   return TTI.areInlineCompatible(Caller, Callee) &&
2205          GetTLI(*Caller).areInlineCompatible(CalleeTLI,
2206                                              InlineCallerSupersetNoBuiltin) &&
2207          AttributeFuncs::areInlineCompatible(*Caller, *Callee);
2208 }
2209 
2210 int llvm::getCallsiteCost(CallBase &Call, const DataLayout &DL) {
2211   int Cost = 0;
2212   for (unsigned I = 0, E = Call.arg_size(); I != E; ++I) {
2213     if (Call.isByValArgument(I)) {
2214       // We approximate the number of loads and stores needed by dividing the
2215       // size of the byval type by the target's pointer size.
2216       PointerType *PTy = cast<PointerType>(Call.getArgOperand(I)->getType());
2217       unsigned TypeSize = DL.getTypeSizeInBits(PTy->getElementType());
2218       unsigned AS = PTy->getAddressSpace();
2219       unsigned PointerSize = DL.getPointerSizeInBits(AS);
2220       // Ceiling division.
2221       unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;
2222 
2223       // If it generates more than 8 stores it is likely to be expanded as an
2224       // inline memcpy so we take that as an upper bound. Otherwise we assume
2225       // one load and one store per word copied.
2226       // FIXME: The maxStoresPerMemcpy setting from the target should be used
2227       // here instead of a magic number of 8, but it's not available via
2228       // DataLayout.
2229       NumStores = std::min(NumStores, 8U);
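
      // For example, a 64-byte byval struct with 64-bit pointers copies
      // ceil(512 / 64) = 8 words, so the cost added below is
      // 2 * 8 * InlineConstants::InstrCost.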
2230 
2231       Cost += 2 * NumStores * InlineConstants::InstrCost;
2232     } else {
2233       // For non-byval arguments subtract off one instruction per call
2234       // argument.
2235       Cost += InlineConstants::InstrCost;
2236     }
2237   }
2238   // The call instruction also disappears after inlining.
2239   Cost += InlineConstants::InstrCost + InlineConstants::CallPenalty;
2240   return Cost;
2241 }
2242 
2243 InlineCost llvm::getInlineCost(
2244     CallBase &Call, const InlineParams &Params, TargetTransformInfo &CalleeTTI,
2245     function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
2246     function_ref<const TargetLibraryInfo &(Function &)> GetTLI,
2247     function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
2248     ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE) {
2249   return getInlineCost(Call, Call.getCalledFunction(), Params, CalleeTTI,
2250                        GetAssumptionCache, GetTLI, GetBFI, PSI, ORE);
2251 }
2252 
2253 Optional<int> llvm::getInliningCostEstimate(
2254     CallBase &Call, TargetTransformInfo &CalleeTTI,
2255     function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
2256     function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
2257     ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE) {
2258   const InlineParams Params = {/* DefaultThreshold*/ 0,
2259                                /*HintThreshold*/ {},
2260                                /*ColdThreshold*/ {},
2261                                /*OptSizeThreshold*/ {},
2262                                /*OptMinSizeThreshold*/ {},
2263                                /*HotCallSiteThreshold*/ {},
2264                                /*LocallyHotCallSiteThreshold*/ {},
2265                                /*ColdCallSiteThreshold*/ {},
2266                                /*ComputeFullInlineCost*/ true,
2267                                /*EnableDeferral*/ true};
2268 
2269   InlineCostCallAnalyzer CA(*Call.getCalledFunction(), Call, Params, CalleeTTI,
2270                             GetAssumptionCache, GetBFI, PSI, ORE, true,
2271                             /*IgnoreThreshold*/ true);
2272   auto R = CA.analyze();
2273   if (!R.isSuccess())
2274     return None;
2275   return CA.getCost();
2276 }
2277 
2278 Optional<InlineResult> llvm::getAttributeBasedInliningDecision(
2279     CallBase &Call, Function *Callee, TargetTransformInfo &CalleeTTI,
2280     function_ref<const TargetLibraryInfo &(Function &)> GetTLI) {
2281 
2282   // Cannot inline indirect calls.
2283   if (!Callee)
2284     return InlineResult::failure("indirect call");
2285 
  // Never inline calls with byval arguments that do not have the alloca
2287   // address space. Since byval arguments can be replaced with a copy to an
2288   // alloca, the inlined code would need to be adjusted to handle that the
2289   // argument is in the alloca address space (so it is a little bit complicated
2290   // to solve).
2291   unsigned AllocaAS = Callee->getParent()->getDataLayout().getAllocaAddrSpace();
2292   for (unsigned I = 0, E = Call.arg_size(); I != E; ++I)
2293     if (Call.isByValArgument(I)) {
2294       PointerType *PTy = cast<PointerType>(Call.getArgOperand(I)->getType());
2295       if (PTy->getAddressSpace() != AllocaAS)
2296         return InlineResult::failure("byval arguments without alloca"
2297                                      " address space");
2298     }
2299 
2300   // Calls to functions with always-inline attributes should be inlined
2301   // whenever possible.
2302   if (Call.hasFnAttr(Attribute::AlwaysInline)) {
2303     auto IsViable = isInlineViable(*Callee);
2304     if (IsViable.isSuccess())
2305       return InlineResult::success();
2306     return InlineResult::failure(IsViable.getFailureReason());
2307   }
2308 
2309   // Never inline functions with conflicting attributes (unless callee has
2310   // always-inline attribute).
2311   Function *Caller = Call.getCaller();
2312   if (!functionsHaveCompatibleAttributes(Caller, Callee, CalleeTTI, GetTLI))
2313     return InlineResult::failure("conflicting attributes");
2314 
2315   // Don't inline this call if the caller has the optnone attribute.
2316   if (Caller->hasOptNone())
2317     return InlineResult::failure("optnone attribute");
2318 
2319   // Don't inline a function that treats null pointer as valid into a caller
2320   // that does not have this attribute.
2321   if (!Caller->nullPointerIsDefined() && Callee->nullPointerIsDefined())
2322     return InlineResult::failure("nullptr definitions incompatible");
2323 
2324   // Don't inline functions which can be interposed at link-time.
2325   if (Callee->isInterposable())
2326     return InlineResult::failure("interposable");
2327 
2328   // Don't inline functions marked noinline.
2329   if (Callee->hasFnAttribute(Attribute::NoInline))
2330     return InlineResult::failure("noinline function attribute");
2331 
2332   // Don't inline call sites marked noinline.
2333   if (Call.isNoInline())
2334     return InlineResult::failure("noinline call site attribute");
2335 
2336   return None;
2337 }
2338 
2339 InlineCost llvm::getInlineCost(
2340     CallBase &Call, Function *Callee, const InlineParams &Params,
2341     TargetTransformInfo &CalleeTTI,
2342     function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
2343     function_ref<const TargetLibraryInfo &(Function &)> GetTLI,
2344     function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
2345     ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE) {
2346 
2347   auto UserDecision =
2348       llvm::getAttributeBasedInliningDecision(Call, Callee, CalleeTTI, GetTLI);
2349 
2350   if (UserDecision.hasValue()) {
2351     if (UserDecision->isSuccess())
2352       return llvm::InlineCost::getAlways("always inline attribute");
2353     return llvm::InlineCost::getNever(UserDecision->getFailureReason());
2354   }
2355 
2356   LLVM_DEBUG(llvm::dbgs() << "      Analyzing call of " << Callee->getName()
2357                           << "... (caller:" << Call.getCaller()->getName()
2358                           << ")\n");
2359 
2360   InlineCostCallAnalyzer CA(*Callee, Call, Params, CalleeTTI,
2361                             GetAssumptionCache, GetBFI, PSI, ORE);
2362   InlineResult ShouldInline = CA.analyze();
2363 
2364   LLVM_DEBUG(CA.dump());
2365 
2366   // Check if there was a reason to force inlining or no inlining.
2367   if (!ShouldInline.isSuccess() && CA.getCost() < CA.getThreshold())
2368     return InlineCost::getNever(ShouldInline.getFailureReason());
2369   if (ShouldInline.isSuccess() && CA.getCost() >= CA.getThreshold())
2370     return InlineCost::getAlways("empty function");
2371 
2372   return llvm::InlineCost::get(CA.getCost(), CA.getThreshold());
2373 }
2374 
2375 InlineResult llvm::isInlineViable(Function &F) {
2376   bool ReturnsTwice = F.hasFnAttribute(Attribute::ReturnsTwice);
2377   for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
2378     // Disallow inlining of functions which contain indirect branches.
2379     if (isa<IndirectBrInst>(BI->getTerminator()))
2380       return InlineResult::failure("contains indirect branches");
2381 
2382     // Disallow inlining of blockaddresses which are used by non-callbr
2383     // instructions.
2384     if (BI->hasAddressTaken())
2385       for (User *U : BlockAddress::get(&*BI)->users())
2386         if (!isa<CallBrInst>(*U))
2387           return InlineResult::failure("blockaddress used outside of callbr");
2388 
2389     for (auto &II : *BI) {
2390       CallBase *Call = dyn_cast<CallBase>(&II);
2391       if (!Call)
2392         continue;
2393 
2394       // Disallow recursive calls.
2395       if (&F == Call->getCalledFunction())
2396         return InlineResult::failure("recursive call");
2397 
2398       // Disallow calls which expose returns-twice to a function not previously
2399       // attributed as such.
2400       if (!ReturnsTwice && isa<CallInst>(Call) &&
2401           cast<CallInst>(Call)->canReturnTwice())
2402         return InlineResult::failure("exposes returns-twice attribute");
2403 
2404       if (Call->getCalledFunction())
2405         switch (Call->getCalledFunction()->getIntrinsicID()) {
2406         default:
2407           break;
2408         case llvm::Intrinsic::icall_branch_funnel:
          // Disallow inlining of @llvm.icall.branch.funnel because the
          // current backend can't separate call targets from call arguments.
2411           return InlineResult::failure(
2412               "disallowed inlining of @llvm.icall.branch.funnel");
2413         case llvm::Intrinsic::localescape:
2414           // Disallow inlining functions that call @llvm.localescape. Doing this
2415           // correctly would require major changes to the inliner.
2416           return InlineResult::failure(
2417               "disallowed inlining of @llvm.localescape");
2418         case llvm::Intrinsic::vastart:
2419           // Disallow inlining of functions that initialize VarArgs with
2420           // va_start.
2421           return InlineResult::failure(
2422               "contains VarArgs initialized with va_start");
2423         }
2424     }
2425   }
2426 
2427   return InlineResult::success();
2428 }
2429 
2430 // APIs to create InlineParams based on command line flags and/or other
2431 // parameters.
2432 
2433 InlineParams llvm::getInlineParams(int Threshold) {
2434   InlineParams Params;
2435 
2436   // This field is the threshold to use for a callee by default. This is
2437   // derived from one or more of:
2438   //  * optimization or size-optimization levels,
2439   //  * a value passed to createFunctionInliningPass function, or
2440   //  * the -inline-threshold flag.
2441   //  If the -inline-threshold flag is explicitly specified, that is used
2442   //  irrespective of anything else.
2443   if (InlineThreshold.getNumOccurrences() > 0)
2444     Params.DefaultThreshold = InlineThreshold;
2445   else
2446     Params.DefaultThreshold = Threshold;
2447 
2448   // Set the HintThreshold knob from the -inlinehint-threshold.
2449   Params.HintThreshold = HintThreshold;
2450 
2451   // Set the HotCallSiteThreshold knob from the -hot-callsite-threshold.
2452   Params.HotCallSiteThreshold = HotCallSiteThreshold;
2453 
2454   // If the -locally-hot-callsite-threshold is explicitly specified, use it to
2455   // populate LocallyHotCallSiteThreshold. Later, we populate
  // Params.LocallyHotCallSiteThreshold from -locally-hot-callsite-threshold
  // if we know that the optimization level is O3 (in the getInlineParams
  // variant that takes the opt and size levels).
2459   // FIXME: Remove this check (and make the assignment unconditional) after
2460   // addressing size regression issues at O2.
2461   if (LocallyHotCallSiteThreshold.getNumOccurrences() > 0)
2462     Params.LocallyHotCallSiteThreshold = LocallyHotCallSiteThreshold;
2463 
2464   // Set the ColdCallSiteThreshold knob from the
2465   // -inline-cold-callsite-threshold.
2466   Params.ColdCallSiteThreshold = ColdCallSiteThreshold;
2467 
2468   // Set the OptMinSizeThreshold and OptSizeThreshold params only if the
2469   // -inlinehint-threshold commandline option is not explicitly given. If that
2470   // option is present, then its value applies even for callees with size and
2471   // minsize attributes.
2472   // If the -inline-threshold is not specified, set the ColdThreshold from the
2473   // -inlinecold-threshold even if it is not explicitly passed. If
2474   // -inline-threshold is specified, then -inlinecold-threshold needs to be
  // explicitly specified to set the ColdThreshold knob.
2476   if (InlineThreshold.getNumOccurrences() == 0) {
2477     Params.OptMinSizeThreshold = InlineConstants::OptMinSizeThreshold;
2478     Params.OptSizeThreshold = InlineConstants::OptSizeThreshold;
2479     Params.ColdThreshold = ColdThreshold;
2480   } else if (ColdThreshold.getNumOccurrences() > 0) {
2481     Params.ColdThreshold = ColdThreshold;
2482   }
2483   return Params;
2484 }
2485 
2486 InlineParams llvm::getInlineParams() {
2487   return getInlineParams(DefaultThreshold);
2488 }
2489 
2490 // Compute the default threshold for inlining based on the opt level and the
2491 // size opt level.
2492 static int computeThresholdFromOptLevels(unsigned OptLevel,
2493                                          unsigned SizeOptLevel) {
2494   if (OptLevel > 2)
2495     return InlineConstants::OptAggressiveThreshold;
2496   if (SizeOptLevel == 1) // -Os
2497     return InlineConstants::OptSizeThreshold;
2498   if (SizeOptLevel == 2) // -Oz
2499     return InlineConstants::OptMinSizeThreshold;
2500   return DefaultThreshold;
2501 }
2502 
2503 InlineParams llvm::getInlineParams(unsigned OptLevel, unsigned SizeOptLevel) {
2504   auto Params =
2505       getInlineParams(computeThresholdFromOptLevels(OptLevel, SizeOptLevel));
2506   // At O3, use the value of -locally-hot-callsite-threshold option to populate
2507   // Params.LocallyHotCallSiteThreshold. Below O3, this flag has effect only
2508   // when it is specified explicitly.
2509   if (OptLevel > 2)
2510     Params.LocallyHotCallSiteThreshold = LocallyHotCallSiteThreshold;
2511   return Params;
2512 }
2513