1 //===- InlineCost.cpp - Cost analysis for inliner -------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements inline cost analysis.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/Analysis/InlineCost.h"
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/ADT/SetVector.h"
16 #include "llvm/ADT/SmallPtrSet.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/ADT/Statistic.h"
19 #include "llvm/Analysis/AssumptionCache.h"
20 #include "llvm/Analysis/BlockFrequencyInfo.h"
21 #include "llvm/Analysis/CFG.h"
22 #include "llvm/Analysis/CodeMetrics.h"
23 #include "llvm/Analysis/ConstantFolding.h"
24 #include "llvm/Analysis/InstructionSimplify.h"
25 #include "llvm/Analysis/LoopInfo.h"
26 #include "llvm/Analysis/ProfileSummaryInfo.h"
27 #include "llvm/Analysis/TargetLibraryInfo.h"
28 #include "llvm/Analysis/TargetTransformInfo.h"
29 #include "llvm/Analysis/ValueTracking.h"
30 #include "llvm/Config/llvm-config.h"
31 #include "llvm/IR/AssemblyAnnotationWriter.h"
32 #include "llvm/IR/CallingConv.h"
33 #include "llvm/IR/DataLayout.h"
34 #include "llvm/IR/Dominators.h"
35 #include "llvm/IR/GetElementPtrTypeIterator.h"
36 #include "llvm/IR/GlobalAlias.h"
37 #include "llvm/IR/InstVisitor.h"
38 #include "llvm/IR/IntrinsicInst.h"
39 #include "llvm/IR/Operator.h"
40 #include "llvm/IR/PatternMatch.h"
41 #include "llvm/Support/CommandLine.h"
42 #include "llvm/Support/Debug.h"
43 #include "llvm/Support/FormattedStream.h"
44 #include "llvm/Support/raw_ostream.h"
45 
46 using namespace llvm;
47 
48 #define DEBUG_TYPE "inline-cost"
49 
50 STATISTIC(NumCallsAnalyzed, "Number of call sites analyzed");
51 
52 static cl::opt<int>
53     DefaultThreshold("inlinedefault-threshold", cl::Hidden, cl::init(225),
54                      cl::ZeroOrMore,
55                      cl::desc("Default amount of inlining to perform"));
56 
57 static cl::opt<bool> PrintInstructionComments(
58     "print-instruction-comments", cl::Hidden, cl::init(false),
59     cl::desc("Prints comments for instruction based on inline cost analysis"));
60 
61 static cl::opt<int> InlineThreshold(
62     "inline-threshold", cl::Hidden, cl::init(225), cl::ZeroOrMore,
63     cl::desc("Control the amount of inlining to perform (default = 225)"));
64 
65 static cl::opt<int> HintThreshold(
66     "inlinehint-threshold", cl::Hidden, cl::init(325), cl::ZeroOrMore,
67     cl::desc("Threshold for inlining functions with inline hint"));
68 
69 static cl::opt<int>
70     ColdCallSiteThreshold("inline-cold-callsite-threshold", cl::Hidden,
71                           cl::init(45), cl::ZeroOrMore,
72                           cl::desc("Threshold for inlining cold callsites"));
73 
74 static cl::opt<bool> InlineEnableCostBenefitAnalysis(
75     "inline-enable-cost-benefit-analysis", cl::Hidden, cl::init(false),
76     cl::desc("Enable the cost-benefit analysis for the inliner"));
77 
78 static cl::opt<int> InlineSavingsMultiplier(
79     "inline-savings-multiplier", cl::Hidden, cl::init(8), cl::ZeroOrMore,
80     cl::desc("Multiplier to multiply cycle savings by during inlining"));
81 
82 static cl::opt<int>
83     InlineSizeAllowance("inline-size-allowance", cl::Hidden, cl::init(100),
84                         cl::ZeroOrMore,
                        cl::desc("The maximum size of a callee that gets "
                                 "inlined without sufficient cycle savings"));
87 
// We introduce this threshold to help performance of instrumentation-based
// PGO before we actually hook up the inliner with analysis passes such as BPI
// and BFI.
91 static cl::opt<int> ColdThreshold(
92     "inlinecold-threshold", cl::Hidden, cl::init(45), cl::ZeroOrMore,
93     cl::desc("Threshold for inlining functions with cold attribute"));
94 
95 static cl::opt<int>
96     HotCallSiteThreshold("hot-callsite-threshold", cl::Hidden, cl::init(3000),
97                          cl::ZeroOrMore,
98                          cl::desc("Threshold for hot callsites "));
99 
100 static cl::opt<int> LocallyHotCallSiteThreshold(
101     "locally-hot-callsite-threshold", cl::Hidden, cl::init(525), cl::ZeroOrMore,
102     cl::desc("Threshold for locally hot callsites "));
103 
104 static cl::opt<int> ColdCallSiteRelFreq(
105     "cold-callsite-rel-freq", cl::Hidden, cl::init(2), cl::ZeroOrMore,
106     cl::desc("Maximum block frequency, expressed as a percentage of caller's "
107              "entry frequency, for a callsite to be cold in the absence of "
108              "profile information."));
109 
110 static cl::opt<int> HotCallSiteRelFreq(
111     "hot-callsite-rel-freq", cl::Hidden, cl::init(60), cl::ZeroOrMore,
112     cl::desc("Minimum block frequency, expressed as a multiple of caller's "
113              "entry frequency, for a callsite to be hot in the absence of "
114              "profile information."));
115 
116 static cl::opt<bool> OptComputeFullInlineCost(
117     "inline-cost-full", cl::Hidden, cl::init(false), cl::ZeroOrMore,
118     cl::desc("Compute the full inline cost of a call site even when the cost "
119              "exceeds the threshold."));
120 
121 static cl::opt<bool> InlineCallerSupersetNoBuiltin(
122     "inline-caller-superset-nobuiltin", cl::Hidden, cl::init(true),
123     cl::ZeroOrMore,
124     cl::desc("Allow inlining when caller has a superset of callee's nobuiltin "
125              "attributes."));
126 
127 static cl::opt<bool> DisableGEPConstOperand(
128     "disable-gep-const-evaluation", cl::Hidden, cl::init(false),
129     cl::desc("Disables evaluation of GetElementPtr with constant operands"));
130 
131 namespace {
132 class InlineCostCallAnalyzer;
133 
// This struct is used to store information about the inline cost of a
// particular instruction.
136 struct InstructionCostDetail {
137   int CostBefore = 0;
138   int CostAfter = 0;
139   int ThresholdBefore = 0;
140   int ThresholdAfter = 0;
141 
142   int getThresholdDelta() const { return ThresholdAfter - ThresholdBefore; }
143 
144   int getCostDelta() const { return CostAfter - CostBefore; }
145 
146   bool hasThresholdChanged() const { return ThresholdAfter != ThresholdBefore; }
147 };
148 
149 class InlineCostAnnotationWriter : public AssemblyAnnotationWriter {
150 private:
151   InlineCostCallAnalyzer *const ICCA;
152 
153 public:
154   InlineCostAnnotationWriter(InlineCostCallAnalyzer *ICCA) : ICCA(ICCA) {}
155   virtual void emitInstructionAnnot(const Instruction *I,
156                                     formatted_raw_ostream &OS) override;
157 };
158 
159 /// Carry out call site analysis, in order to evaluate inlinability.
/// NOTE: the type is currently used as an implementation detail of functions
/// such as llvm::getInlineCost. Note the function_ref constructor parameters -
/// the expectation is that they come from the outer scope, from the wrapper
/// functions. If we want to support constructing CallAnalyzer objects where
164 /// lambdas are provided inline at construction, or where the object needs to
165 /// otherwise survive past the scope of the provided functions, we need to
166 /// revisit the argument types.
167 class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
168   typedef InstVisitor<CallAnalyzer, bool> Base;
169   friend class InstVisitor<CallAnalyzer, bool>;
170 
171 protected:
172   virtual ~CallAnalyzer() {}
173   /// The TargetTransformInfo available for this compilation.
174   const TargetTransformInfo &TTI;
175 
176   /// Getter for the cache of @llvm.assume intrinsics.
177   function_ref<AssumptionCache &(Function &)> GetAssumptionCache;
178 
179   /// Getter for BlockFrequencyInfo
180   function_ref<BlockFrequencyInfo &(Function &)> GetBFI;
181 
182   /// Profile summary information.
183   ProfileSummaryInfo *PSI;
184 
185   /// The called function.
186   Function &F;
187 
188   // Cache the DataLayout since we use it a lot.
189   const DataLayout &DL;
190 
191   /// The OptimizationRemarkEmitter available for this compilation.
192   OptimizationRemarkEmitter *ORE;
193 
194   /// The candidate callsite being analyzed. Please do not use this to do
195   /// analysis in the caller function; we want the inline cost query to be
196   /// easily cacheable. Instead, use the cover function paramHasAttr.
197   CallBase &CandidateCall;
198 
199   /// Extension points for handling callsite features.
  /// Called before a basic block is analyzed.
201   virtual void onBlockStart(const BasicBlock *BB) {}
202 
203   /// Called after a basic block was analyzed.
204   virtual void onBlockAnalyzed(const BasicBlock *BB) {}
205 
  /// Called before an instruction is analyzed.
207   virtual void onInstructionAnalysisStart(const Instruction *I) {}
208 
  /// Called after an instruction has been analyzed.
210   virtual void onInstructionAnalysisFinish(const Instruction *I) {}
211 
212   /// Called at the end of the analysis of the callsite. Return the outcome of
  /// the analysis, i.e. 'InlineResult::success()' if the inlining may happen, or
214   /// the reason it can't.
215   virtual InlineResult finalizeAnalysis() { return InlineResult::success(); }
216   /// Called when we're about to start processing a basic block, and every time
217   /// we are done processing an instruction. Return true if there is no point in
218   /// continuing the analysis (e.g. we've determined already the call site is
219   /// too expensive to inline)
220   virtual bool shouldStop() { return false; }
221 
222   /// Called before the analysis of the callee body starts (with callsite
223   /// contexts propagated).  It checks callsite-specific information. Return a
224   /// reason analysis can't continue if that's the case, or 'true' if it may
225   /// continue.
226   virtual InlineResult onAnalysisStart() { return InlineResult::success(); }
227   /// Called if the analysis engine decides SROA cannot be done for the given
228   /// alloca.
229   virtual void onDisableSROA(AllocaInst *Arg) {}
230 
  /// Called when the analysis engine determines load elimination won't happen.
232   virtual void onDisableLoadElimination() {}
233 
234   /// Called to account for a call.
235   virtual void onCallPenalty() {}
236 
237   /// Called to account for the expectation the inlining would result in a load
238   /// elimination.
239   virtual void onLoadEliminationOpportunity() {}
240 
241   /// Called to account for the cost of argument setup for the Call in the
242   /// callee's body (not the callsite currently under analysis).
243   virtual void onCallArgumentSetup(const CallBase &Call) {}
244 
245   /// Called to account for a load relative intrinsic.
246   virtual void onLoadRelativeIntrinsic() {}
247 
248   /// Called to account for a lowered call.
249   virtual void onLoweredCall(Function *F, CallBase &Call, bool IsIndirectCall) {
250   }
251 
  /// Account for a jump table of the given size. Return false to stop further
  /// processing of the switch instruction.
254   virtual bool onJumpTable(unsigned JumpTableSize) { return true; }
255 
256   /// Account for a case cluster of given size. Return false to stop further
257   /// processing of the instruction.
258   virtual bool onCaseCluster(unsigned NumCaseCluster) { return true; }
259 
260   /// Called at the end of processing a switch instruction, with the given
261   /// number of case clusters.
262   virtual void onFinalizeSwitch(unsigned JumpTableSize,
263                                 unsigned NumCaseCluster) {}
264 
265   /// Called to account for any other instruction not specifically accounted
266   /// for.
267   virtual void onMissedSimplification() {}
268 
269   /// Start accounting potential benefits due to SROA for the given alloca.
270   virtual void onInitializeSROAArg(AllocaInst *Arg) {}
271 
272   /// Account SROA savings for the AllocaInst value.
273   virtual void onAggregateSROAUse(AllocaInst *V) {}
274 
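  /// If \p V maps to an SROA candidate alloca, either credit another SROA use
  /// (when \p DoNotDisable is true) or disable SROA for that alloca. Returns
  /// true only when an SROA use was credited.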
275   bool handleSROA(Value *V, bool DoNotDisable) {
276     // Check for SROA candidates in comparisons.
277     if (auto *SROAArg = getSROAArgForValueOrNull(V)) {
278       if (DoNotDisable) {
279         onAggregateSROAUse(SROAArg);
280         return true;
281       }
282       disableSROAForArg(SROAArg);
283     }
284     return false;
285   }
286 
287   bool IsCallerRecursive = false;
288   bool IsRecursiveCall = false;
289   bool ExposesReturnsTwice = false;
290   bool HasDynamicAlloca = false;
291   bool ContainsNoDuplicateCall = false;
292   bool HasReturn = false;
293   bool HasIndirectBr = false;
294   bool HasUninlineableIntrinsic = false;
295   bool InitsVargArgs = false;
296 
297   /// Number of bytes allocated statically by the callee.
298   uint64_t AllocatedSize = 0;
299   unsigned NumInstructions = 0;
300   unsigned NumVectorInstructions = 0;
301 
302   /// While we walk the potentially-inlined instructions, we build up and
303   /// maintain a mapping of simplified values specific to this callsite. The
304   /// idea is to propagate any special information we have about arguments to
305   /// this call through the inlinable section of the function, and account for
306   /// likely simplifications post-inlining. The most important aspect we track
307   /// is CFG altering simplifications -- when we prove a basic block dead, that
308   /// can cause dramatic shifts in the cost of inlining a function.
309   DenseMap<Value *, Constant *> SimplifiedValues;
310 
311   /// Keep track of the values which map back (through function arguments) to
312   /// allocas on the caller stack which could be simplified through SROA.
313   DenseMap<Value *, AllocaInst *> SROAArgValues;
314 
315   /// Keep track of Allocas for which we believe we may get SROA optimization.
316   DenseSet<AllocaInst *> EnabledSROAAllocas;
317 
318   /// Keep track of values which map to a pointer base and constant offset.
319   DenseMap<Value *, std::pair<Value *, APInt>> ConstantOffsetPtrs;
320 
321   /// Keep track of dead blocks due to the constant arguments.
322   SetVector<BasicBlock *> DeadBlocks;
323 
324   /// The mapping of the blocks to their known unique successors due to the
325   /// constant arguments.
326   DenseMap<BasicBlock *, BasicBlock *> KnownSuccessors;
327 
  /// Model the elimination of repeated loads that is expected to happen
  /// whenever we simplify away the stores that would otherwise force those
  /// values to be re-loaded.
331   bool EnableLoadElimination;
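  /// Addresses of loads seen so far; a repeated address suggests a load that
  /// is likely to be eliminated after inlining.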
332   SmallPtrSet<Value *, 16> LoadAddrSet;
333 
334   AllocaInst *getSROAArgForValueOrNull(Value *V) const {
335     auto It = SROAArgValues.find(V);
336     if (It == SROAArgValues.end() || EnabledSROAAllocas.count(It->second) == 0)
337       return nullptr;
338     return It->second;
339   }
340 
341   // Custom simplification helper routines.
342   bool isAllocaDerivedArg(Value *V);
343   void disableSROAForArg(AllocaInst *SROAArg);
344   void disableSROA(Value *V);
345   void findDeadBlocks(BasicBlock *CurrBB, BasicBlock *NextBB);
346   void disableLoadElimination();
347   bool isGEPFree(GetElementPtrInst &GEP);
348   bool canFoldInboundsGEP(GetElementPtrInst &I);
349   bool accumulateGEPOffset(GEPOperator &GEP, APInt &Offset);
350   bool simplifyCallSite(Function *F, CallBase &Call);
351   template <typename Callable>
352   bool simplifyInstruction(Instruction &I, Callable Evaluate);
353   ConstantInt *stripAndComputeInBoundsConstantOffsets(Value *&V);
354 
355   /// Return true if the given argument to the function being considered for
356   /// inlining has the given attribute set either at the call site or the
357   /// function declaration.  Primarily used to inspect call site specific
358   /// attributes since these can be more precise than the ones on the callee
359   /// itself.
360   bool paramHasAttr(Argument *A, Attribute::AttrKind Attr);
361 
362   /// Return true if the given value is known non null within the callee if
363   /// inlined through this particular callsite.
364   bool isKnownNonNullInCallee(Value *V);
365 
366   /// Return true if size growth is allowed when inlining the callee at \p Call.
367   bool allowSizeGrowth(CallBase &Call);
368 
369   // Custom analysis routines.
370   InlineResult analyzeBlock(BasicBlock *BB,
371                             SmallPtrSetImpl<const Value *> &EphValues);
372 
373   // Disable several entry points to the visitor so we don't accidentally use
374   // them by declaring but not defining them here.
375   void visit(Module *);
376   void visit(Module &);
377   void visit(Function *);
378   void visit(Function &);
379   void visit(BasicBlock *);
380   void visit(BasicBlock &);
381 
382   // Provide base case for our instruction visit.
383   bool visitInstruction(Instruction &I);
384 
385   // Our visit overrides.
386   bool visitAlloca(AllocaInst &I);
387   bool visitPHI(PHINode &I);
388   bool visitGetElementPtr(GetElementPtrInst &I);
389   bool visitBitCast(BitCastInst &I);
390   bool visitPtrToInt(PtrToIntInst &I);
391   bool visitIntToPtr(IntToPtrInst &I);
392   bool visitCastInst(CastInst &I);
393   bool visitUnaryInstruction(UnaryInstruction &I);
394   bool visitCmpInst(CmpInst &I);
395   bool visitSub(BinaryOperator &I);
396   bool visitBinaryOperator(BinaryOperator &I);
397   bool visitFNeg(UnaryOperator &I);
398   bool visitLoad(LoadInst &I);
399   bool visitStore(StoreInst &I);
400   bool visitExtractValue(ExtractValueInst &I);
401   bool visitInsertValue(InsertValueInst &I);
402   bool visitCallBase(CallBase &Call);
403   bool visitReturnInst(ReturnInst &RI);
404   bool visitBranchInst(BranchInst &BI);
405   bool visitSelectInst(SelectInst &SI);
406   bool visitSwitchInst(SwitchInst &SI);
407   bool visitIndirectBrInst(IndirectBrInst &IBI);
408   bool visitResumeInst(ResumeInst &RI);
409   bool visitCleanupReturnInst(CleanupReturnInst &RI);
410   bool visitCatchReturnInst(CatchReturnInst &RI);
411   bool visitUnreachableInst(UnreachableInst &I);
412 
413 public:
414   CallAnalyzer(
415       Function &Callee, CallBase &Call, const TargetTransformInfo &TTI,
416       function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
417       function_ref<BlockFrequencyInfo &(Function &)> GetBFI = nullptr,
418       ProfileSummaryInfo *PSI = nullptr,
419       OptimizationRemarkEmitter *ORE = nullptr)
420       : TTI(TTI), GetAssumptionCache(GetAssumptionCache), GetBFI(GetBFI),
421         PSI(PSI), F(Callee), DL(F.getParent()->getDataLayout()), ORE(ORE),
422         CandidateCall(Call), EnableLoadElimination(true) {}
423 
424   InlineResult analyze();
425 
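  /// Return the constant that \p I was simplified to during the analysis, if
  /// any.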
  Optional<Constant *> getSimplifiedValue(Instruction *I) {
    auto It = SimplifiedValues.find(I);
    if (It != SimplifiedValues.end())
      return It->second;
    return None;
  }
431 
432   // Keep a bunch of stats about the cost savings found so we can print them
433   // out when debugging.
434   unsigned NumConstantArgs = 0;
435   unsigned NumConstantOffsetPtrArgs = 0;
436   unsigned NumAllocaArgs = 0;
437   unsigned NumConstantPtrCmps = 0;
438   unsigned NumConstantPtrDiffs = 0;
439   unsigned NumInstructionsSimplified = 0;
440 
441   void dump();
442 };
443 
/// FIXME: if it becomes necessary to derive from InlineCostCallAnalyzer, note
/// the FIXME in onLoweredCall, where an InlineCostCallAnalyzer is instantiated.
446 class InlineCostCallAnalyzer final : public CallAnalyzer {
447   const int CostUpperBound = INT_MAX - InlineConstants::InstrCost - 1;
448   const bool ComputeFullInlineCost;
449   int LoadEliminationCost = 0;
450   /// Bonus to be applied when percentage of vector instructions in callee is
451   /// high (see more details in updateThreshold).
452   int VectorBonus = 0;
453   /// Bonus to be applied when the callee has only one reachable basic block.
454   int SingleBBBonus = 0;
455 
456   /// Tunable parameters that control the analysis.
457   const InlineParams &Params;
458 
459   // This DenseMap stores the delta change in cost and threshold after
460   // accounting for the given instruction. The map is filled only with the
461   // flag PrintInstructionComments on.
462   DenseMap<const Instruction *, InstructionCostDetail> InstructionCostDetailMap;
463 
464   /// Upper bound for the inlining cost. Bonuses are being applied to account
465   /// for speculative "expected profit" of the inlining decision.
466   int Threshold = 0;
467 
  /// Attempt to evaluate indirect calls to boost their inline cost.
469   const bool BoostIndirectCalls;
470 
471   /// Ignore the threshold when finalizing analysis.
472   const bool IgnoreThreshold;
473 
474   // True if the cost-benefit-analysis-based inliner is enabled.
475   const bool CostBenefitAnalysisEnabled;
476 
477   /// Inlining cost measured in abstract units, accounts for all the
478   /// instructions expected to be executed for a given function invocation.
479   /// Instructions that are statically proven to be dead based on call-site
480   /// arguments are not counted here.
481   int Cost = 0;
482 
483   // The cumulative cost at the beginning of the basic block being analyzed.  At
484   // the end of analyzing each basic block, "Cost - CostAtBBStart" represents
485   // the size of that basic block.
486   int CostAtBBStart = 0;
487 
488   // The static size of live but cold basic blocks.  This is "static" in the
489   // sense that it's not weighted by profile counts at all.
490   int ColdSize = 0;
491 
492   bool SingleBB = true;
493 
494   unsigned SROACostSavings = 0;
495   unsigned SROACostSavingsLost = 0;
496 
497   /// The mapping of caller Alloca values to their accumulated cost savings. If
498   /// we have to disable SROA for one of the allocas, this tells us how much
499   /// cost must be added.
500   DenseMap<AllocaInst *, int> SROAArgCosts;
501 
502   /// Return true if \p Call is a cold callsite.
503   bool isColdCallSite(CallBase &Call, BlockFrequencyInfo *CallerBFI);
504 
505   /// Update Threshold based on callsite properties such as callee
506   /// attributes and callee hotness for PGO builds. The Callee is explicitly
507   /// passed to support analyzing indirect calls whose target is inferred by
508   /// analysis.
509   void updateThreshold(CallBase &Call, Function &Callee);
510   /// Return a higher threshold if \p Call is a hot callsite.
511   Optional<int> getHotCallSiteThreshold(CallBase &Call,
512                                         BlockFrequencyInfo *CallerBFI);
513 
514   /// Handle a capped 'int' increment for Cost.
515   void addCost(int64_t Inc, int64_t UpperBound = INT_MAX) {
516     assert(UpperBound > 0 && UpperBound <= INT_MAX && "invalid upper bound");
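    // Clamp at UpperBound (at most INT_MAX) so that accumulating increments
    // cannot overflow the 'int' Cost.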
517     Cost = (int)std::min(UpperBound, Cost + Inc);
518   }
519 
520   void onDisableSROA(AllocaInst *Arg) override {
521     auto CostIt = SROAArgCosts.find(Arg);
522     if (CostIt == SROAArgCosts.end())
523       return;
524     addCost(CostIt->second);
525     SROACostSavings -= CostIt->second;
526     SROACostSavingsLost += CostIt->second;
527     SROAArgCosts.erase(CostIt);
528   }
529 
530   void onDisableLoadElimination() override {
531     addCost(LoadEliminationCost);
532     LoadEliminationCost = 0;
533   }
534   void onCallPenalty() override { addCost(InlineConstants::CallPenalty); }
535   void onCallArgumentSetup(const CallBase &Call) override {
536     // Pay the price of the argument setup. We account for the average 1
537     // instruction per call argument setup here.
538     addCost(Call.arg_size() * InlineConstants::InstrCost);
539   }
540   void onLoadRelativeIntrinsic() override {
541     // This is normally lowered to 4 LLVM instructions.
542     addCost(3 * InlineConstants::InstrCost);
543   }
544   void onLoweredCall(Function *F, CallBase &Call,
545                      bool IsIndirectCall) override {
546     // We account for the average 1 instruction per call argument setup here.
547     addCost(Call.arg_size() * InlineConstants::InstrCost);
548 
549     // If we have a constant that we are calling as a function, we can peer
550     // through it and see the function target. This happens not infrequently
551     // during devirtualization and so we want to give it a hefty bonus for
552     // inlining, but cap that bonus in the event that inlining wouldn't pan out.
553     // Pretend to inline the function, with a custom threshold.
554     if (IsIndirectCall && BoostIndirectCalls) {
555       auto IndirectCallParams = Params;
556       IndirectCallParams.DefaultThreshold =
557           InlineConstants::IndirectCallThreshold;
558       /// FIXME: if InlineCostCallAnalyzer is derived from, this may need
559       /// to instantiate the derived class.
560       InlineCostCallAnalyzer CA(*F, Call, IndirectCallParams, TTI,
561                                 GetAssumptionCache, GetBFI, PSI, ORE, false);
562       if (CA.analyze().isSuccess()) {
563         // We were able to inline the indirect call! Subtract the cost from the
564         // threshold to get the bonus we want to apply, but don't go below zero.
565         Cost -= std::max(0, CA.getThreshold() - CA.getCost());
566       }
567     } else
568       // Otherwise simply add the cost for merely making the call.
569       addCost(InlineConstants::CallPenalty);
570   }
571 
572   void onFinalizeSwitch(unsigned JumpTableSize,
573                         unsigned NumCaseCluster) override {
    // If suitable for a jump table, consider the cost for the table size and
    // the branch to the destination.
    // The maximum valid cost is increased in this function.
577     if (JumpTableSize) {
578       int64_t JTCost = (int64_t)JumpTableSize * InlineConstants::InstrCost +
579                        4 * InlineConstants::InstrCost;
580 
581       addCost(JTCost, (int64_t)CostUpperBound);
582       return;
583     }
    // Considering forming a binary search tree, we should find the number of
    // nodes, which is the same as the number of comparisons when lowered. For a
    // given number of clusters, n, we can define a recursive function, f(n), to
    // find the number of nodes in the tree. The recursion is:
    //   f(n) = 1 + f(n/2) + f(n - n/2), when n > 3,
    //   f(n) = n, when n <= 3.
    // This leads to a binary tree whose leaves are either f(2) or f(3) when
    // n > 3.  So, the number of comparisons from the leaves should be n, while
    // the number of comparisons from the non-leaf nodes should be:
    //   2^(log2(n) - 1) - 1
    //   = 2^log2(n) * 2^-1 - 1
    //   = n / 2 - 1.
    // Adding the comparisons from leaf and non-leaf nodes gives a simple closed
    // form:
    //   n + n / 2 - 1 = n * 3 / 2 - 1.
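    // For example, with 8 case clusters this estimates 8 * 3 / 2 - 1 = 11
    // comparisons, i.e. a cost of 22 instructions.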
599     if (NumCaseCluster <= 3) {
600       // Suppose a comparison includes one compare and one conditional branch.
601       addCost(NumCaseCluster * 2 * InlineConstants::InstrCost);
602       return;
603     }
604 
605     int64_t ExpectedNumberOfCompare = 3 * (int64_t)NumCaseCluster / 2 - 1;
606     int64_t SwitchCost =
607         ExpectedNumberOfCompare * 2 * InlineConstants::InstrCost;
608 
609     addCost(SwitchCost, (int64_t)CostUpperBound);
610   }
611   void onMissedSimplification() override {
612     addCost(InlineConstants::InstrCost);
613   }
614 
615   void onInitializeSROAArg(AllocaInst *Arg) override {
616     assert(Arg != nullptr &&
617            "Should not initialize SROA costs for null value.");
618     SROAArgCosts[Arg] = 0;
619   }
620 
621   void onAggregateSROAUse(AllocaInst *SROAArg) override {
622     auto CostIt = SROAArgCosts.find(SROAArg);
623     assert(CostIt != SROAArgCosts.end() &&
624            "expected this argument to have a cost");
625     CostIt->second += InlineConstants::InstrCost;
626     SROACostSavings += InlineConstants::InstrCost;
627   }
628 
629   void onBlockStart(const BasicBlock *BB) override { CostAtBBStart = Cost; }
630 
631   void onBlockAnalyzed(const BasicBlock *BB) override {
632     if (CostBenefitAnalysisEnabled) {
633       // Keep track of the static size of live but cold basic blocks.  For now,
634       // we define a cold basic block to be one that's never executed.
635       assert(GetBFI && "GetBFI must be available");
636       BlockFrequencyInfo *BFI = &(GetBFI(F));
637       assert(BFI && "BFI must be available");
638       auto ProfileCount = BFI->getBlockProfileCount(BB);
639       assert(ProfileCount.hasValue());
640       if (ProfileCount.getValue() == 0)
641         ColdSize += Cost - CostAtBBStart;
642     }
643 
644     auto *TI = BB->getTerminator();
    // If we had any successors at this point, then post-inlining is likely to
646     // have them as well. Note that we assume any basic blocks which existed
647     // due to branches or switches which folded above will also fold after
648     // inlining.
649     if (SingleBB && TI->getNumSuccessors() > 1) {
650       // Take off the bonus we applied to the threshold.
651       Threshold -= SingleBBBonus;
652       SingleBB = false;
653     }
654   }
655 
656   void onInstructionAnalysisStart(const Instruction *I) override {
657     // This function is called to store the initial cost of inlining before
658     // the given instruction was assessed.
659     if (!PrintInstructionComments)
660       return;
661     InstructionCostDetailMap[I].CostBefore = Cost;
662     InstructionCostDetailMap[I].ThresholdBefore = Threshold;
663   }
664 
665   void onInstructionAnalysisFinish(const Instruction *I) override {
666     // This function is called to find new values of cost and threshold after
667     // the instruction has been assessed.
668     if (!PrintInstructionComments)
669       return;
670     InstructionCostDetailMap[I].CostAfter = Cost;
671     InstructionCostDetailMap[I].ThresholdAfter = Threshold;
672   }
673 
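  // Return true only when all prerequisites for the cost-benefit analysis are
  // met: the -inline-enable-cost-benefit-analysis flag is set, a profile
  // summary and BFI are available, both the caller and the callee have entry
  // counts, and the call site is hot.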
674   bool isCostBenefitAnalysisEnabled() {
675     if (!InlineEnableCostBenefitAnalysis)
676       return false;
677 
678     if (!PSI || !PSI->hasProfileSummary())
679       return false;
680 
681     if (!GetBFI)
682       return false;
683 
684     auto *Caller = CandidateCall.getParent()->getParent();
685     if (!Caller->getEntryCount())
686       return false;
687 
688     BlockFrequencyInfo *CallerBFI = &(GetBFI(*Caller));
689     if (!CallerBFI)
690       return false;
691 
692     // For now, limit to hot call site.
693     if (!PSI->isHotCallSite(CandidateCall, CallerBFI))
694       return false;
695 
696     if (!F.getEntryCount())
697       return false;
698 
699     BlockFrequencyInfo *CalleeBFI = &(GetBFI(F));
700     if (!CalleeBFI)
701       return false;
702 
703     return true;
704   }
705 
706   // Determine whether we should inline the given call site, taking into account
  // both the size cost and the cycle savings.  Return None if we don't have
  // sufficient profiling information to decide.
709   Optional<bool> costBenefitAnalysis() {
710     if (!CostBenefitAnalysisEnabled)
711       return None;
712 
713     // buildInlinerPipeline in the pass builder sets HotCallSiteThreshold to 0
714     // for the prelink phase of the AutoFDO + ThinLTO build.  Honor the logic by
715     // falling back to the cost-based metric.
716     // TODO: Improve this hacky condition.
717     if (Threshold == 0)
718       return None;
719 
720     assert(GetBFI);
721     BlockFrequencyInfo *CalleeBFI = &(GetBFI(F));
722     assert(CalleeBFI);
723 
724     // The cycle savings expressed as the sum of InlineConstants::InstrCost
725     // multiplied by the estimated dynamic count of each instruction we can
726     // avoid.  Savings come from the call site cost, such as argument setup and
727     // the call instruction, as well as the instructions that are folded.
728     //
729     // We use 128-bit APInt here to avoid potential overflow.  This variable
    // should stay well below 10^24 (or 2^80) in practice.  This "worst" case
    // assumes that we can avoid or fold a billion instructions, each with a
    // profile count of 10^15 -- roughly the number of cycles for a 24-hour
733     // period on a 4GHz machine.
734     APInt CycleSavings(128, 0);
735 
736     for (auto &BB : F) {
737       APInt CurrentSavings(128, 0);
738       for (auto &I : BB) {
739         if (BranchInst *BI = dyn_cast<BranchInst>(&I)) {
740           // Count a conditional branch as savings if it becomes unconditional.
741           if (BI->isConditional() &&
742               dyn_cast_or_null<ConstantInt>(
743                   SimplifiedValues.lookup(BI->getCondition()))) {
744             CurrentSavings += InlineConstants::InstrCost;
745           }
746         } else if (Value *V = dyn_cast<Value>(&I)) {
747           // Count an instruction as savings if we can fold it.
748           if (SimplifiedValues.count(V)) {
749             CurrentSavings += InlineConstants::InstrCost;
750           }
751         }
752         // TODO: Consider other forms of savings like switch statements,
753         // indirect calls becoming direct, SROACostSavings, LoadEliminationCost,
754         // etc.
755       }
756 
757       auto ProfileCount = CalleeBFI->getBlockProfileCount(&BB);
758       assert(ProfileCount.hasValue());
759       CurrentSavings *= ProfileCount.getValue();
760       CycleSavings += CurrentSavings;
761     }
762 
763     // Compute the cycle savings per call.
764     auto EntryProfileCount = F.getEntryCount();
765     assert(EntryProfileCount.hasValue());
766     auto EntryCount = EntryProfileCount.getCount();
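    // Adding EntryCount / 2 before the division rounds to the nearest integer
    // rather than truncating.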
767     CycleSavings += EntryCount / 2;
768     CycleSavings = CycleSavings.udiv(EntryCount);
769 
770     // Compute the total savings for the call site.
771     auto *CallerBB = CandidateCall.getParent();
772     BlockFrequencyInfo *CallerBFI = &(GetBFI(*(CallerBB->getParent())));
773     CycleSavings += getCallsiteCost(this->CandidateCall, DL);
774     CycleSavings *= CallerBFI->getBlockProfileCount(CallerBB).getValue();
775 
776     // Remove the cost of the cold basic blocks.
777     int Size = Cost - ColdSize;
778 
779     // Allow tiny callees to be inlined regardless of whether they meet the
780     // savings threshold.
781     Size = Size > InlineSizeAllowance ? Size - InlineSizeAllowance : 1;
782 
783     // Return true if the savings justify the cost of inlining.  Specifically,
784     // we evaluate the following inequality:
785     //
786     //  CycleSavings      PSI->getOrCompHotCountThreshold()
787     // -------------- >= -----------------------------------
788     //       Size              InlineSavingsMultiplier
789     //
790     // Note that the left hand side is specific to a call site.  The right hand
791     // side is a constant for the entire executable.
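    // Compare by cross-multiplying so that no division is needed.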
792     APInt LHS = CycleSavings;
793     LHS *= InlineSavingsMultiplier;
794     APInt RHS(128, PSI->getOrCompHotCountThreshold());
795     RHS *= Size;
796     return LHS.uge(RHS);
797   }
798 
799   InlineResult finalizeAnalysis() override {
    // Loops generally act a lot like calls in that they act like barriers to
    // movement, require a certain amount of setup, etc. So when optimizing for
    // size, we penalize any call sites whose callees contain loops. We do this
    // after all other costs here, so we will likely only be dealing with
    // relatively small functions (and hence DT and LI will hopefully be cheap).
805     auto *Caller = CandidateCall.getFunction();
806     if (Caller->hasMinSize()) {
807       DominatorTree DT(F);
808       LoopInfo LI(DT);
809       int NumLoops = 0;
810       for (Loop *L : LI) {
811         // Ignore loops that will not be executed
812         if (DeadBlocks.count(L->getHeader()))
813           continue;
814         NumLoops++;
815       }
816       addCost(NumLoops * InlineConstants::CallPenalty);
817     }
818 
819     // We applied the maximum possible vector bonus at the beginning. Now,
820     // subtract the excess bonus, if any, from the Threshold before
821     // comparing against Cost.
822     if (NumVectorInstructions <= NumInstructions / 10)
823       Threshold -= VectorBonus;
824     else if (NumVectorInstructions <= NumInstructions / 2)
825       Threshold -= VectorBonus / 2;
826 
827     if (auto Result = costBenefitAnalysis()) {
828       if (Result.getValue())
829         return InlineResult::success();
830       else
831         return InlineResult::failure("Cost over threshold.");
832     }
833 
834     if (IgnoreThreshold || Cost < std::max(1, Threshold))
835       return InlineResult::success();
836     return InlineResult::failure("Cost over threshold.");
837   }
838   bool shouldStop() override {
839     // Bail out the moment we cross the threshold. This means we'll under-count
840     // the cost, but only when undercounting doesn't matter.
841     return !IgnoreThreshold && Cost >= Threshold && !ComputeFullInlineCost;
842   }
843 
844   void onLoadEliminationOpportunity() override {
845     LoadEliminationCost += InlineConstants::InstrCost;
846   }
847 
848   InlineResult onAnalysisStart() override {
849     // Perform some tweaks to the cost and threshold based on the direct
850     // callsite information.
851 
852     // We want to more aggressively inline vector-dense kernels, so up the
853     // threshold, and we'll lower it if the % of vector instructions gets too
    // low. Note that these bonuses are somewhat arbitrary and evolved over
855     // time by accident as much as because they are principled bonuses.
856     //
857     // FIXME: It would be nice to remove all such bonuses. At least it would be
858     // nice to base the bonus values on something more scientific.
859     assert(NumInstructions == 0);
860     assert(NumVectorInstructions == 0);
861 
862     // Update the threshold based on callsite properties
863     updateThreshold(CandidateCall, F);
864 
865     // While Threshold depends on commandline options that can take negative
866     // values, we want to enforce the invariant that the computed threshold and
867     // bonuses are non-negative.
868     assert(Threshold >= 0);
869     assert(SingleBBBonus >= 0);
870     assert(VectorBonus >= 0);
871 
872     // Speculatively apply all possible bonuses to Threshold. If cost exceeds
873     // this Threshold any time, and cost cannot decrease, we can stop processing
874     // the rest of the function body.
875     Threshold += (SingleBBBonus + VectorBonus);
876 
    // Give out bonuses for the callsite, as the instructions setting it up
878     // will be gone after inlining.
879     addCost(-getCallsiteCost(this->CandidateCall, DL));
880 
881     // If this function uses the coldcc calling convention, prefer not to inline
882     // it.
883     if (F.getCallingConv() == CallingConv::Cold)
884       Cost += InlineConstants::ColdccPenalty;
885 
886     // Check if we're done. This can happen due to bonuses and penalties.
887     if (Cost >= Threshold && !ComputeFullInlineCost)
888       return InlineResult::failure("high cost");
889 
890     return InlineResult::success();
891   }
892 
893 public:
894   InlineCostCallAnalyzer(
895       Function &Callee, CallBase &Call, const InlineParams &Params,
896       const TargetTransformInfo &TTI,
897       function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
898       function_ref<BlockFrequencyInfo &(Function &)> GetBFI = nullptr,
899       ProfileSummaryInfo *PSI = nullptr,
900       OptimizationRemarkEmitter *ORE = nullptr, bool BoostIndirect = true,
901       bool IgnoreThreshold = false)
902       : CallAnalyzer(Callee, Call, TTI, GetAssumptionCache, GetBFI, PSI, ORE),
903         ComputeFullInlineCost(OptComputeFullInlineCost ||
904                               Params.ComputeFullInlineCost || ORE),
905         Params(Params), Threshold(Params.DefaultThreshold),
906         BoostIndirectCalls(BoostIndirect), IgnoreThreshold(IgnoreThreshold),
907         CostBenefitAnalysisEnabled(isCostBenefitAnalysisEnabled()),
908         Writer(this) {}
909 
910   /// Annotation Writer for instruction details
911   InlineCostAnnotationWriter Writer;
912 
913   void dump();
914 
  // Prints the same analysis as dump(), but its definition does not depend on
  // the build configuration.
917   void print();
918 
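  /// Return the recorded cost and threshold deltas for \p I, if available.
  /// The map is only populated when -print-instruction-comments is enabled.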
  Optional<InstructionCostDetail> getCostDetails(const Instruction *I) {
    auto It = InstructionCostDetailMap.find(I);
    if (It != InstructionCostDetailMap.end())
      return It->second;
    return None;
  }
924 
925   virtual ~InlineCostCallAnalyzer() {}
926   int getThreshold() { return Threshold; }
927   int getCost() { return Cost; }
928 };
929 } // namespace
930 
931 /// Test whether the given value is an Alloca-derived function argument.
932 bool CallAnalyzer::isAllocaDerivedArg(Value *V) {
933   return SROAArgValues.count(V);
934 }
935 
936 void CallAnalyzer::disableSROAForArg(AllocaInst *SROAArg) {
937   onDisableSROA(SROAArg);
938   EnabledSROAAllocas.erase(SROAArg);
939   disableLoadElimination();
940 }
941 
942 void InlineCostAnnotationWriter::emitInstructionAnnot(const Instruction *I,
943                                                 formatted_raw_ostream &OS) {
  // The cost of inlining the given instruction is always printed.
  // The threshold delta is printed only when it is non-zero, which happens
  // when we decide to give a bonus at a particular instruction.
947   Optional<InstructionCostDetail> Record = ICCA->getCostDetails(I);
948   if (!Record)
949     OS << "; No analysis for the instruction";
950   else {
951     OS << "; cost before = " << Record->CostBefore
952        << ", cost after = " << Record->CostAfter
953        << ", threshold before = " << Record->ThresholdBefore
954        << ", threshold after = " << Record->ThresholdAfter << ", ";
955     OS << "cost delta = " << Record->getCostDelta();
956     if (Record->hasThresholdChanged())
957       OS << ", threshold delta = " << Record->getThresholdDelta();
958   }
959   auto C = ICCA->getSimplifiedValue(const_cast<Instruction *>(I));
960   if (C) {
961     OS << ", simplified to ";
962     C.getValue()->print(OS, true);
963   }
964   OS << "\n";
965 }
966 
967 /// If 'V' maps to a SROA candidate, disable SROA for it.
968 void CallAnalyzer::disableSROA(Value *V) {
969   if (auto *SROAArg = getSROAArgForValueOrNull(V)) {
970     disableSROAForArg(SROAArg);
971   }
972 }
973 
974 void CallAnalyzer::disableLoadElimination() {
975   if (EnableLoadElimination) {
976     onDisableLoadElimination();
977     EnableLoadElimination = false;
978   }
979 }
980 
981 /// Accumulate a constant GEP offset into an APInt if possible.
982 ///
983 /// Returns false if unable to compute the offset for any reason. Respects any
984 /// simplified values known during the analysis of this callsite.
985 bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
986   unsigned IntPtrWidth = DL.getIndexTypeSizeInBits(GEP.getType());
987   assert(IntPtrWidth == Offset.getBitWidth());
988 
989   for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
990        GTI != GTE; ++GTI) {
991     ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
992     if (!OpC)
993       if (Constant *SimpleOp = SimplifiedValues.lookup(GTI.getOperand()))
994         OpC = dyn_cast<ConstantInt>(SimpleOp);
995     if (!OpC)
996       return false;
997     if (OpC->isZero())
998       continue;
999 
1000     // Handle a struct index, which adds its field offset to the pointer.
1001     if (StructType *STy = GTI.getStructTypeOrNull()) {
1002       unsigned ElementIdx = OpC->getZExtValue();
1003       const StructLayout *SL = DL.getStructLayout(STy);
1004       Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
1005       continue;
1006     }
1007 
1008     APInt TypeSize(IntPtrWidth, DL.getTypeAllocSize(GTI.getIndexedType()));
1009     Offset += OpC->getValue().sextOrTrunc(IntPtrWidth) * TypeSize;
1010   }
1011   return true;
1012 }
1013 
1014 /// Use TTI to check whether a GEP is free.
1015 ///
1016 /// Respects any simplified values known during the analysis of this callsite.
1017 bool CallAnalyzer::isGEPFree(GetElementPtrInst &GEP) {
1018   SmallVector<Value *, 4> Operands;
1019   Operands.push_back(GEP.getOperand(0));
1020   for (const Use &Op : GEP.indices())
1021     if (Constant *SimpleOp = SimplifiedValues.lookup(Op))
1022       Operands.push_back(SimpleOp);
1023     else
1024       Operands.push_back(Op);
1025   return TargetTransformInfo::TCC_Free ==
1026          TTI.getUserCost(&GEP, Operands,
1027                          TargetTransformInfo::TCK_SizeAndLatency);
1028 }
1029 
1030 bool CallAnalyzer::visitAlloca(AllocaInst &I) {
1031   // Check whether inlining will turn a dynamic alloca into a static
1032   // alloca and handle that case.
1033   if (I.isArrayAllocation()) {
1034     Constant *Size = SimplifiedValues.lookup(I.getArraySize());
1035     if (auto *AllocSize = dyn_cast_or_null<ConstantInt>(Size)) {
1036       // Sometimes a dynamic alloca could be converted into a static alloca
1037       // after this constant prop, and become a huge static alloca on an
1038       // unconditional CFG path. Avoid inlining if this is going to happen above
1039       // a threshold.
1040       // FIXME: If the threshold is removed or lowered too much, we could end up
1041       // being too pessimistic and prevent inlining non-problematic code. This
1042       // could result in unintended perf regressions. A better overall strategy
1043       // is needed to track stack usage during inlining.
1044       Type *Ty = I.getAllocatedType();
1045       AllocatedSize = SaturatingMultiplyAdd(
1046           AllocSize->getLimitedValue(), DL.getTypeAllocSize(Ty).getKnownMinSize(),
1047           AllocatedSize);
1048       if (AllocatedSize > InlineConstants::MaxSimplifiedDynamicAllocaToInline) {
1049         HasDynamicAlloca = true;
1050         return false;
1051       }
1052       return Base::visitAlloca(I);
1053     }
1054   }
1055 
1056   // Accumulate the allocated size.
1057   if (I.isStaticAlloca()) {
1058     Type *Ty = I.getAllocatedType();
1059     AllocatedSize =
1060         SaturatingAdd(DL.getTypeAllocSize(Ty).getKnownMinSize(), AllocatedSize);
1061   }
1062 
1063   // We will happily inline static alloca instructions.
1064   if (I.isStaticAlloca())
1065     return Base::visitAlloca(I);
1066 
1067   // FIXME: This is overly conservative. Dynamic allocas are inefficient for
1068   // a variety of reasons, and so we would like to not inline them into
1069   // functions which don't currently have a dynamic alloca. This simply
1070   // disables inlining altogether in the presence of a dynamic alloca.
1071   HasDynamicAlloca = true;
1072   return false;
1073 }
1074 
1075 bool CallAnalyzer::visitPHI(PHINode &I) {
1076   // FIXME: We need to propagate SROA *disabling* through phi nodes, even
  // though we don't want to propagate its bonuses. The idea is to disable
1078   // SROA if it *might* be used in an inappropriate manner.
1079 
1080   // Phi nodes are always zero-cost.
1081   // FIXME: Pointer sizes may differ between different address spaces, so do we
1082   // need to use correct address space in the call to getPointerSizeInBits here?
1083   // Or could we skip the getPointerSizeInBits call completely? As far as I can
1084   // see the ZeroOffset is used as a dummy value, so we can probably use any
1085   // bit width for the ZeroOffset?
1086   APInt ZeroOffset = APInt::getNullValue(DL.getPointerSizeInBits(0));
1087   bool CheckSROA = I.getType()->isPointerTy();
1088 
1089   // Track the constant or pointer with constant offset we've seen so far.
1090   Constant *FirstC = nullptr;
1091   std::pair<Value *, APInt> FirstBaseAndOffset = {nullptr, ZeroOffset};
1092   Value *FirstV = nullptr;
1093 
1094   for (unsigned i = 0, e = I.getNumIncomingValues(); i != e; ++i) {
1095     BasicBlock *Pred = I.getIncomingBlock(i);
1096     // If the incoming block is dead, skip the incoming block.
1097     if (DeadBlocks.count(Pred))
1098       continue;
1099     // If the parent block of phi is not the known successor of the incoming
1100     // block, skip the incoming block.
1101     BasicBlock *KnownSuccessor = KnownSuccessors[Pred];
1102     if (KnownSuccessor && KnownSuccessor != I.getParent())
1103       continue;
1104 
1105     Value *V = I.getIncomingValue(i);
1106     // If the incoming value is this phi itself, skip the incoming value.
1107     if (&I == V)
1108       continue;
1109 
1110     Constant *C = dyn_cast<Constant>(V);
1111     if (!C)
1112       C = SimplifiedValues.lookup(V);
1113 
1114     std::pair<Value *, APInt> BaseAndOffset = {nullptr, ZeroOffset};
1115     if (!C && CheckSROA)
1116       BaseAndOffset = ConstantOffsetPtrs.lookup(V);
1117 
1118     if (!C && !BaseAndOffset.first)
1119       // The incoming value is neither a constant nor a pointer with constant
1120       // offset, exit early.
1121       return true;
1122 
1123     if (FirstC) {
1124       if (FirstC == C)
1125         // If we've seen a constant incoming value before and it is the same
1126         // constant we see this time, continue checking the next incoming value.
1127         continue;
1128       // Otherwise early exit because we either see a different constant or saw
1129       // a constant before but we have a pointer with constant offset this time.
1130       return true;
1131     }
1132 
1133     if (FirstV) {
1134       // The same logic as above, but check pointer with constant offset here.
1135       if (FirstBaseAndOffset == BaseAndOffset)
1136         continue;
1137       return true;
1138     }
1139 
1140     if (C) {
1141       // This is the 1st time we've seen a constant, record it.
1142       FirstC = C;
1143       continue;
1144     }
1145 
1146     // The remaining case is that this is the 1st time we've seen a pointer with
1147     // constant offset, record it.
1148     FirstV = V;
1149     FirstBaseAndOffset = BaseAndOffset;
1150   }
1151 
1152   // Check if we can map phi to a constant.
1153   if (FirstC) {
1154     SimplifiedValues[&I] = FirstC;
1155     return true;
1156   }
1157 
1158   // Check if we can map phi to a pointer with constant offset.
1159   if (FirstBaseAndOffset.first) {
1160     ConstantOffsetPtrs[&I] = FirstBaseAndOffset;
1161 
1162     if (auto *SROAArg = getSROAArgForValueOrNull(FirstV))
1163       SROAArgValues[&I] = SROAArg;
1164   }
1165 
1166   return true;
1167 }
1168 
/// Check whether we can fold GEPs of constant-offset call site argument
/// pointers. This requires target data and inbounds GEPs.
1171 ///
1172 /// \return true if the specified GEP can be folded.
1173 bool CallAnalyzer::canFoldInboundsGEP(GetElementPtrInst &I) {
1174   // Check if we have a base + offset for the pointer.
1175   std::pair<Value *, APInt> BaseAndOffset =
1176       ConstantOffsetPtrs.lookup(I.getPointerOperand());
1177   if (!BaseAndOffset.first)
1178     return false;
1179 
1180   // Check if the offset of this GEP is constant, and if so accumulate it
1181   // into Offset.
1182   if (!accumulateGEPOffset(cast<GEPOperator>(I), BaseAndOffset.second))
1183     return false;
1184 
1185   // Add the result as a new mapping to Base + Offset.
1186   ConstantOffsetPtrs[&I] = BaseAndOffset;
1187 
1188   return true;
1189 }
1190 
1191 bool CallAnalyzer::visitGetElementPtr(GetElementPtrInst &I) {
1192   auto *SROAArg = getSROAArgForValueOrNull(I.getPointerOperand());
1193 
1194   // Lambda to check whether a GEP's indices are all constant.
1195   auto IsGEPOffsetConstant = [&](GetElementPtrInst &GEP) {
1196     for (const Use &Op : GEP.indices())
1197       if (!isa<Constant>(Op) && !SimplifiedValues.lookup(Op))
1198         return false;
1199     return true;
1200   };
1201 
1202   if (!DisableGEPConstOperand)
    if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
          SmallVector<Constant *, 2> Indices;
          for (unsigned Index = 1; Index < COps.size(); ++Index)
            Indices.push_back(COps[Index]);
          return ConstantExpr::getGetElementPtr(I.getSourceElementType(),
                                                COps[0], Indices,
                                                I.isInBounds());
        }))
1210       return true;
1211 
1212   if ((I.isInBounds() && canFoldInboundsGEP(I)) || IsGEPOffsetConstant(I)) {
1213     if (SROAArg)
1214       SROAArgValues[&I] = SROAArg;
1215 
1216     // Constant GEPs are modeled as free.
1217     return true;
1218   }
1219 
1220   // Variable GEPs will require math and will disable SROA.
1221   if (SROAArg)
1222     disableSROAForArg(SROAArg);
1223   return isGEPFree(I);
1224 }
1225 
1226 /// Simplify \p I if its operands are constants and update SimplifiedValues.
1227 /// \p Evaluate is a callable specific to instruction type that evaluates the
1228 /// instruction when all the operands are constants.
1229 template <typename Callable>
1230 bool CallAnalyzer::simplifyInstruction(Instruction &I, Callable Evaluate) {
1231   SmallVector<Constant *, 2> COps;
1232   for (Value *Op : I.operands()) {
1233     Constant *COp = dyn_cast<Constant>(Op);
1234     if (!COp)
1235       COp = SimplifiedValues.lookup(Op);
1236     if (!COp)
1237       return false;
1238     COps.push_back(COp);
1239   }
1240   auto *C = Evaluate(COps);
1241   if (!C)
1242     return false;
1243   SimplifiedValues[&I] = C;
1244   return true;
1245 }
1246 
1247 bool CallAnalyzer::visitBitCast(BitCastInst &I) {
1248   // Propagate constants through bitcasts.
1249   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1250         return ConstantExpr::getBitCast(COps[0], I.getType());
1251       }))
1252     return true;
1253 
1254   // Track base/offsets through casts
1255   std::pair<Value *, APInt> BaseAndOffset =
1256       ConstantOffsetPtrs.lookup(I.getOperand(0));
1257   // Casts don't change the offset, just wrap it up.
1258   if (BaseAndOffset.first)
1259     ConstantOffsetPtrs[&I] = BaseAndOffset;
1260 
1261   // Also look for SROA candidates here.
1262   if (auto *SROAArg = getSROAArgForValueOrNull(I.getOperand(0)))
1263     SROAArgValues[&I] = SROAArg;
1264 
1265   // Bitcasts are always zero cost.
1266   return true;
1267 }
1268 
1269 bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
1270   // Propagate constants through ptrtoint.
1271   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1272         return ConstantExpr::getPtrToInt(COps[0], I.getType());
1273       }))
1274     return true;
1275 
1276   // Track base/offset pairs when converted to a plain integer provided the
1277   // integer is large enough to represent the pointer.
1278   unsigned IntegerSize = I.getType()->getScalarSizeInBits();
1279   unsigned AS = I.getOperand(0)->getType()->getPointerAddressSpace();
1280   if (IntegerSize == DL.getPointerSizeInBits(AS)) {
1281     std::pair<Value *, APInt> BaseAndOffset =
1282         ConstantOffsetPtrs.lookup(I.getOperand(0));
1283     if (BaseAndOffset.first)
1284       ConstantOffsetPtrs[&I] = BaseAndOffset;
1285   }
1286 
1287   // This is really weird. Technically, ptrtoint will disable SROA. However,
1288   // unless that ptrtoint is *used* somewhere in the live basic blocks after
1289   // inlining, it will be nuked, and SROA should proceed. All of the uses which
1290   // would block SROA would also block SROA if applied directly to a pointer,
1291   // and so we can just add the integer in here. The only places where SROA is
1292   // preserved either cannot fire on an integer, or won't in-and-of themselves
1293   // disable SROA (ext) w/o some later use that we would see and disable.
1294   if (auto *SROAArg = getSROAArgForValueOrNull(I.getOperand(0)))
1295     SROAArgValues[&I] = SROAArg;
1296 
1297   return TargetTransformInfo::TCC_Free ==
1298          TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
1299 }
1300 
1301 bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
  // Propagate constants through inttoptr.
1303   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1304         return ConstantExpr::getIntToPtr(COps[0], I.getType());
1305       }))
1306     return true;
1307 
1308   // Track base/offset pairs when round-tripped through a pointer without
1309   // modifications provided the integer is not too large.
1310   Value *Op = I.getOperand(0);
1311   unsigned IntegerSize = Op->getType()->getScalarSizeInBits();
1312   if (IntegerSize <= DL.getPointerTypeSizeInBits(I.getType())) {
1313     std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op);
1314     if (BaseAndOffset.first)
1315       ConstantOffsetPtrs[&I] = BaseAndOffset;
1316   }
1317 
1318   // "Propagate" SROA here in the same manner as we do for ptrtoint above.
1319   if (auto *SROAArg = getSROAArgForValueOrNull(Op))
1320     SROAArgValues[&I] = SROAArg;
1321 
1322   return TargetTransformInfo::TCC_Free ==
1323          TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
1324 }
1325 
1326 bool CallAnalyzer::visitCastInst(CastInst &I) {
1327   // Propagate constants through casts.
1328   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1329         return ConstantExpr::getCast(I.getOpcode(), COps[0], I.getType());
1330       }))
1331     return true;
1332 
1333   // Disable SROA in the face of arbitrary casts we don't explicitly list
1334   // elsewhere.
1335   disableSROA(I.getOperand(0));
1336 
1337   // If this is a floating-point cast, and the target says this operation
1338   // is expensive, this may eventually become a library call. Treat the cost
1339   // as such.
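  // For example (illustrative), on a soft-float target an fptrunc from double
  // to float may be lowered to a call to a runtime routine such as
  // __truncdfsf2, so charging a call penalty is a closer approximation.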
1340   switch (I.getOpcode()) {
1341   case Instruction::FPTrunc:
1342   case Instruction::FPExt:
1343   case Instruction::UIToFP:
1344   case Instruction::SIToFP:
1345   case Instruction::FPToUI:
1346   case Instruction::FPToSI:
1347     if (TTI.getFPOpCost(I.getType()) == TargetTransformInfo::TCC_Expensive)
1348       onCallPenalty();
1349     break;
1350   default:
1351     break;
1352   }
1353 
1354   return TargetTransformInfo::TCC_Free ==
1355          TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
1356 }
1357 
1358 bool CallAnalyzer::visitUnaryInstruction(UnaryInstruction &I) {
1359   Value *Operand = I.getOperand(0);
1360   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1361         return ConstantFoldInstOperands(&I, COps[0], DL);
1362       }))
1363     return true;
1364 
1365   // Disable any SROA on the argument to arbitrary unary instructions.
1366   disableSROA(Operand);
1367 
1368   return false;
1369 }
1370 
1371 bool CallAnalyzer::paramHasAttr(Argument *A, Attribute::AttrKind Attr) {
1372   return CandidateCall.paramHasAttr(A->getArgNo(), Attr);
1373 }
1374 
1375 bool CallAnalyzer::isKnownNonNullInCallee(Value *V) {
1376   // Does the *call site* have the NonNull attribute set on an argument?  We
1377   // use the attribute on the call site to memoize any analysis done in the
1378   // caller. This will also trip if the callee function has a non-null
1379   // parameter attribute, but that's a less interesting case because hopefully
1380   // the callee would already have been simplified based on that.
1381   if (Argument *A = dyn_cast<Argument>(V))
1382     if (paramHasAttr(A, Attribute::NonNull))
1383       return true;
1384 
1385   // Is this an alloca in the caller?  This is distinct from the attribute case
1386   // above because attributes aren't updated within the inliner itself and we
1387   // always want to catch the alloca derived case.
1388   if (isAllocaDerivedArg(V))
1389     // We can actually predict the result of comparisons between an
1390     // alloca-derived value and null. Note that this fires regardless of
1391     // SROA firing.
1392     return true;
1393 
1394   return false;
1395 }
1396 
1397 bool CallAnalyzer::allowSizeGrowth(CallBase &Call) {
1398   // If the normal destination of the invoke or the parent block of the call
1399   // site is unreachable-terminated, there is little point in inlining this
1400   // unless there is literally zero cost.
1401   // FIXME: Note that it is possible that an unreachable-terminated block has a
  // hot entry. For example, in the scenario below, inlining hot_call_X() may
  // be beneficial:
1404   // main() {
1405   //   hot_call_1();
1406   //   ...
1407   //   hot_call_N()
1408   //   exit(0);
1409   // }
  // For now, we are not handling this corner case here as it is rare in real
  // code. In the future, we should elaborate on this based on BPI and BFI in
  // more general threshold-adjusting heuristics in updateThreshold().
1413   if (InvokeInst *II = dyn_cast<InvokeInst>(&Call)) {
1414     if (isa<UnreachableInst>(II->getNormalDest()->getTerminator()))
1415       return false;
1416   } else if (isa<UnreachableInst>(Call.getParent()->getTerminator()))
1417     return false;
1418 
1419   return true;
1420 }
1421 
1422 bool InlineCostCallAnalyzer::isColdCallSite(CallBase &Call,
1423                                             BlockFrequencyInfo *CallerBFI) {
1424   // If global profile summary is available, then callsite's coldness is
1425   // determined based on that.
1426   if (PSI && PSI->hasProfileSummary())
1427     return PSI->isColdCallSite(Call, CallerBFI);
1428 
1429   // Otherwise we need BFI to be available.
1430   if (!CallerBFI)
1431     return false;
1432 
1433   // Determine if the callsite is cold relative to caller's entry. We could
1434   // potentially cache the computation of scaled entry frequency, but the added
1435   // complexity is not worth it unless this scaling shows up high in the
1436   // profiles.
1437   const BranchProbability ColdProb(ColdCallSiteRelFreq, 100);
1438   auto CallSiteBB = Call.getParent();
1439   auto CallSiteFreq = CallerBFI->getBlockFreq(CallSiteBB);
1440   auto CallerEntryFreq =
1441       CallerBFI->getBlockFreq(&(Call.getCaller()->getEntryBlock()));
1442   return CallSiteFreq < CallerEntryFreq * ColdProb;
1443 }
1444 
1445 Optional<int>
1446 InlineCostCallAnalyzer::getHotCallSiteThreshold(CallBase &Call,
1447                                                 BlockFrequencyInfo *CallerBFI) {
1448 
1449   // If global profile summary is available, then callsite's hotness is
1450   // determined based on that.
1451   if (PSI && PSI->hasProfileSummary() && PSI->isHotCallSite(Call, CallerBFI))
1452     return Params.HotCallSiteThreshold;
1453 
1454   // Otherwise we need BFI to be available and to have a locally hot callsite
1455   // threshold.
1456   if (!CallerBFI || !Params.LocallyHotCallSiteThreshold)
1457     return None;
1458 
1459   // Determine if the callsite is hot relative to caller's entry. We could
1460   // potentially cache the computation of scaled entry frequency, but the added
1461   // complexity is not worth it unless this scaling shows up high in the
1462   // profiles.
1463   auto CallSiteBB = Call.getParent();
1464   auto CallSiteFreq = CallerBFI->getBlockFreq(CallSiteBB).getFrequency();
1465   auto CallerEntryFreq = CallerBFI->getEntryFreq();
1466   if (CallSiteFreq >= CallerEntryFreq * HotCallSiteRelFreq)
1467     return Params.LocallyHotCallSiteThreshold;
1468 
1469   // Otherwise treat it normally.
1470   return None;
1471 }
1472 
1473 void InlineCostCallAnalyzer::updateThreshold(CallBase &Call, Function &Callee) {
1474   // If no size growth is allowed for this inlining, set Threshold to 0.
1475   if (!allowSizeGrowth(Call)) {
1476     Threshold = 0;
1477     return;
1478   }
1479 
1480   Function *Caller = Call.getCaller();
1481 
1482   // return min(A, B) if B is valid.
1483   auto MinIfValid = [](int A, Optional<int> B) {
1484     return B ? std::min(A, B.getValue()) : A;
1485   };
1486 
1487   // return max(A, B) if B is valid.
1488   auto MaxIfValid = [](int A, Optional<int> B) {
1489     return B ? std::max(A, B.getValue()) : A;
1490   };
1491 
1492   // Various bonus percentages. These are multiplied by Threshold to get the
1493   // bonus values.
1494   // SingleBBBonus: This bonus is applied if the callee has a single reachable
1495   // basic block at the given callsite context. This is speculatively applied
1496   // and withdrawn if more than one basic block is seen.
1497   //
  // LastCallToStaticBonus: This large bonus is applied to ensure the inlining
1499   // of the last call to a static function as inlining such functions is
1500   // guaranteed to reduce code size.
1501   //
1502   // These bonus percentages may be set to 0 based on properties of the caller
1503   // and the callsite.
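  // For example (illustrative), with a final threshold of 225 and a
  // SingleBBBonusPercent of 50, the single-BB bonus computed below comes out
  // to 225 * 50 / 100 = 112.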
1504   int SingleBBBonusPercent = 50;
1505   int VectorBonusPercent = TTI.getInlinerVectorBonusPercent();
1506   int LastCallToStaticBonus = InlineConstants::LastCallToStaticBonus;
1507 
1508   // Lambda to set all the above bonus and bonus percentages to 0.
1509   auto DisallowAllBonuses = [&]() {
1510     SingleBBBonusPercent = 0;
1511     VectorBonusPercent = 0;
1512     LastCallToStaticBonus = 0;
1513   };
1514 
1515   // Use the OptMinSizeThreshold or OptSizeThreshold knob if they are available
1516   // and reduce the threshold if the caller has the necessary attribute.
1517   if (Caller->hasMinSize()) {
1518     Threshold = MinIfValid(Threshold, Params.OptMinSizeThreshold);
1519     // For minsize, we want to disable the single BB bonus and the vector
1520     // bonuses, but not the last-call-to-static bonus. Inlining the last call to
1521     // a static function will, at the minimum, eliminate the parameter setup and
1522     // call/return instructions.
1523     SingleBBBonusPercent = 0;
1524     VectorBonusPercent = 0;
1525   } else if (Caller->hasOptSize())
1526     Threshold = MinIfValid(Threshold, Params.OptSizeThreshold);
1527 
1528   // Adjust the threshold based on inlinehint attribute and profile based
1529   // hotness information if the caller does not have MinSize attribute.
1530   if (!Caller->hasMinSize()) {
1531     if (Callee.hasFnAttribute(Attribute::InlineHint))
1532       Threshold = MaxIfValid(Threshold, Params.HintThreshold);
1533 
1534     // FIXME: After switching to the new passmanager, simplify the logic below
1535     // by checking only the callsite hotness/coldness as we will reliably
1536     // have local profile information.
1537     //
1538     // Callsite hotness and coldness can be determined if sample profile is
1539     // used (which adds hotness metadata to calls) or if caller's
1540     // BlockFrequencyInfo is available.
1541     BlockFrequencyInfo *CallerBFI = GetBFI ? &(GetBFI(*Caller)) : nullptr;
1542     auto HotCallSiteThreshold = getHotCallSiteThreshold(Call, CallerBFI);
1543     if (!Caller->hasOptSize() && HotCallSiteThreshold) {
1544       LLVM_DEBUG(dbgs() << "Hot callsite.\n");
1545       // FIXME: This should update the threshold only if it exceeds the
1546       // current threshold, but AutoFDO + ThinLTO currently relies on this
1547       // behavior to prevent inlining of hot callsites during ThinLTO
1548       // compile phase.
1549       Threshold = HotCallSiteThreshold.getValue();
1550     } else if (isColdCallSite(Call, CallerBFI)) {
1551       LLVM_DEBUG(dbgs() << "Cold callsite.\n");
1552       // Do not apply bonuses for a cold callsite including the
1553       // LastCallToStatic bonus. While this bonus might result in code size
1554       // reduction, it can cause the size of a non-cold caller to increase
1555       // preventing it from being inlined.
1556       DisallowAllBonuses();
1557       Threshold = MinIfValid(Threshold, Params.ColdCallSiteThreshold);
1558     } else if (PSI) {
1559       // Use callee's global profile information only if we have no way of
1560       // determining this via callsite information.
1561       if (PSI->isFunctionEntryHot(&Callee)) {
1562         LLVM_DEBUG(dbgs() << "Hot callee.\n");
1563         // If callsite hotness can not be determined, we may still know
1564         // that the callee is hot and treat it as a weaker hint for threshold
1565         // increase.
1566         Threshold = MaxIfValid(Threshold, Params.HintThreshold);
1567       } else if (PSI->isFunctionEntryCold(&Callee)) {
1568         LLVM_DEBUG(dbgs() << "Cold callee.\n");
1569         // Do not apply bonuses for a cold callee including the
1570         // LastCallToStatic bonus. While this bonus might result in code size
1571         // reduction, it can cause the size of a non-cold caller to increase
1572         // preventing it from being inlined.
1573         DisallowAllBonuses();
1574         Threshold = MinIfValid(Threshold, Params.ColdThreshold);
1575       }
1576     }
1577   }
1578 
1579   // Finally, take the target-specific inlining threshold multiplier into
1580   // account.
1581   Threshold *= TTI.getInliningThresholdMultiplier();
1582 
1583   SingleBBBonus = Threshold * SingleBBBonusPercent / 100;
1584   VectorBonus = Threshold * VectorBonusPercent / 100;
1585 
1586   bool OnlyOneCallAndLocalLinkage =
1587       F.hasLocalLinkage() && F.hasOneUse() && &F == Call.getCalledFunction();
1588   // If there is only one call of the function, and it has internal linkage,
1589   // the cost of inlining it drops dramatically. It may seem odd to update
1590   // Cost in updateThreshold, but the bonus depends on the logic in this method.
1591   if (OnlyOneCallAndLocalLinkage)
1592     Cost -= LastCallToStaticBonus;
1593 }
1594 
1595 bool CallAnalyzer::visitCmpInst(CmpInst &I) {
1596   Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1597   // First try to handle simplified comparisons.
1598   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1599         return ConstantExpr::getCompare(I.getPredicate(), COps[0], COps[1]);
1600       }))
1601     return true;
1602 
1603   if (I.getOpcode() == Instruction::FCmp)
1604     return false;
1605 
1606   // Otherwise look for a comparison between constant offset pointers with
1607   // a common base.
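  // For example (illustrative IR), given
  //   %a = getelementptr inbounds i8, i8* %p, i64 4
  //   %b = getelementptr inbounds i8, i8* %p, i64 8
  // an icmp between %a and %b can be folded to a constant from the two
  // offsets alone.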
1608   Value *LHSBase, *RHSBase;
1609   APInt LHSOffset, RHSOffset;
1610   std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
1611   if (LHSBase) {
1612     std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
1613     if (RHSBase && LHSBase == RHSBase) {
1614       // We have common bases, fold the icmp to a constant based on the
1615       // offsets.
1616       Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
1617       Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
1618       if (Constant *C = ConstantExpr::getICmp(I.getPredicate(), CLHS, CRHS)) {
1619         SimplifiedValues[&I] = C;
1620         ++NumConstantPtrCmps;
1621         return true;
1622       }
1623     }
1624   }
1625 
1626   // If the comparison is an equality comparison with null, we can simplify it
  // if we know the value (argument) can't be null.
1628   if (I.isEquality() && isa<ConstantPointerNull>(I.getOperand(1)) &&
1629       isKnownNonNullInCallee(I.getOperand(0))) {
1630     bool IsNotEqual = I.getPredicate() == CmpInst::ICMP_NE;
1631     SimplifiedValues[&I] = IsNotEqual ? ConstantInt::getTrue(I.getType())
1632                                       : ConstantInt::getFalse(I.getType());
1633     return true;
1634   }
1635   return handleSROA(I.getOperand(0), isa<ConstantPointerNull>(I.getOperand(1)));
1636 }
1637 
1638 bool CallAnalyzer::visitSub(BinaryOperator &I) {
1639   // Try to handle a special case: we can fold computing the difference of two
1640   // constant-related pointers.
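  // For example (illustrative), when the operands are ptrtoint values of two
  // pointers known to be (%base, 12) and (%base, 4), the subtraction folds to
  // the constant 8 using only the tracked offsets.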
1641   Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1642   Value *LHSBase, *RHSBase;
1643   APInt LHSOffset, RHSOffset;
1644   std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
1645   if (LHSBase) {
1646     std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
1647     if (RHSBase && LHSBase == RHSBase) {
1648       // We have common bases, fold the subtract to a constant based on the
1649       // offsets.
1650       Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
1651       Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
1652       if (Constant *C = ConstantExpr::getSub(CLHS, CRHS)) {
1653         SimplifiedValues[&I] = C;
1654         ++NumConstantPtrDiffs;
1655         return true;
1656       }
1657     }
1658   }
1659 
1660   // Otherwise, fall back to the generic logic for simplifying and handling
1661   // instructions.
1662   return Base::visitSub(I);
1663 }
1664 
1665 bool CallAnalyzer::visitBinaryOperator(BinaryOperator &I) {
1666   Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1667   Constant *CLHS = dyn_cast<Constant>(LHS);
1668   if (!CLHS)
1669     CLHS = SimplifiedValues.lookup(LHS);
1670   Constant *CRHS = dyn_cast<Constant>(RHS);
1671   if (!CRHS)
1672     CRHS = SimplifiedValues.lookup(RHS);
1673 
1674   Value *SimpleV = nullptr;
1675   if (auto FI = dyn_cast<FPMathOperator>(&I))
1676     SimpleV = SimplifyBinOp(I.getOpcode(), CLHS ? CLHS : LHS, CRHS ? CRHS : RHS,
1677                             FI->getFastMathFlags(), DL);
1678   else
1679     SimpleV =
1680         SimplifyBinOp(I.getOpcode(), CLHS ? CLHS : LHS, CRHS ? CRHS : RHS, DL);
1681 
1682   if (Constant *C = dyn_cast_or_null<Constant>(SimpleV))
1683     SimplifiedValues[&I] = C;
1684 
1685   if (SimpleV)
1686     return true;
1687 
1688   // Disable any SROA on arguments to arbitrary, unsimplified binary operators.
1689   disableSROA(LHS);
1690   disableSROA(RHS);
1691 
  // If the instruction is floating point, and the target says this operation
  // is expensive, this may eventually become a library call. Treat the cost
  // as such, unless it's an fneg, which can be implemented with an xor.
1695   using namespace llvm::PatternMatch;
1696   if (I.getType()->isFloatingPointTy() &&
1697       TTI.getFPOpCost(I.getType()) == TargetTransformInfo::TCC_Expensive &&
1698       !match(&I, m_FNeg(m_Value())))
1699     onCallPenalty();
1700 
1701   return false;
1702 }
1703 
1704 bool CallAnalyzer::visitFNeg(UnaryOperator &I) {
1705   Value *Op = I.getOperand(0);
1706   Constant *COp = dyn_cast<Constant>(Op);
1707   if (!COp)
1708     COp = SimplifiedValues.lookup(Op);
1709 
1710   Value *SimpleV = SimplifyFNegInst(
1711       COp ? COp : Op, cast<FPMathOperator>(I).getFastMathFlags(), DL);
1712 
1713   if (Constant *C = dyn_cast_or_null<Constant>(SimpleV))
1714     SimplifiedValues[&I] = C;
1715 
1716   if (SimpleV)
1717     return true;
1718 
1719   // Disable any SROA on arguments to arbitrary, unsimplified fneg.
1720   disableSROA(Op);
1721 
1722   return false;
1723 }
1724 
1725 bool CallAnalyzer::visitLoad(LoadInst &I) {
1726   if (handleSROA(I.getPointerOperand(), I.isSimple()))
1727     return true;
1728 
1729   // If the data is already loaded from this address and hasn't been clobbered
1730   // by any stores or calls, this load is likely to be redundant and can be
1731   // eliminated.
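  // For example (illustrative IR):
  //   %x = load i32, i32* %p
  //   %y = load i32, i32* %p   ; likely redundant after inlining
  // The second load is credited as an elimination opportunity below.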
1732   if (EnableLoadElimination &&
1733       !LoadAddrSet.insert(I.getPointerOperand()).second && I.isUnordered()) {
1734     onLoadEliminationOpportunity();
1735     return true;
1736   }
1737 
1738   return false;
1739 }
1740 
1741 bool CallAnalyzer::visitStore(StoreInst &I) {
1742   if (handleSROA(I.getPointerOperand(), I.isSimple()))
1743     return true;
1744 
1745   // The store can potentially clobber loads and prevent repeated loads from
1746   // being eliminated.
1747   // FIXME:
  // 1. We can probably keep an initial set of eliminatable loads subtracted
1749   // from the cost even when we finally see a store. We just need to disable
1750   // *further* accumulation of elimination savings.
1751   // 2. We should probably at some point thread MemorySSA for the callee into
1752   // this and then use that to actually compute *really* precise savings.
1753   disableLoadElimination();
1754   return false;
1755 }
1756 
1757 bool CallAnalyzer::visitExtractValue(ExtractValueInst &I) {
1758   // Constant folding for extract value is trivial.
1759   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1760         return ConstantExpr::getExtractValue(COps[0], I.getIndices());
1761       }))
1762     return true;
1763 
1764   // SROA can look through these but give them a cost.
1765   return false;
1766 }
1767 
1768 bool CallAnalyzer::visitInsertValue(InsertValueInst &I) {
1769   // Constant folding for insert value is trivial.
1770   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1771         return ConstantExpr::getInsertValue(/*AggregateOperand*/ COps[0],
1772                                             /*InsertedValueOperand*/ COps[1],
1773                                             I.getIndices());
1774       }))
1775     return true;
1776 
1777   // SROA can look through these but give them a cost.
1778   return false;
1779 }
1780 
1781 /// Try to simplify a call site.
1782 ///
1783 /// Takes a concrete function and callsite and tries to actually simplify it by
1784 /// analyzing the arguments and call itself with instsimplify. Returns true if
1785 /// it has simplified the callsite to some other entity (a constant), making it
1786 /// free.
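///
/// For example (illustrative), a call such as @llvm.ctlz.i32 whose arguments
/// have all simplified to constants is folded here, recorded in
/// SimplifiedValues, and therefore contributes no cost.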
1787 bool CallAnalyzer::simplifyCallSite(Function *F, CallBase &Call) {
1788   // FIXME: Using the instsimplify logic directly for this is inefficient
1789   // because we have to continually rebuild the argument list even when no
1790   // simplifications can be performed. Until that is fixed with remapping
1791   // inside of instsimplify, directly constant fold calls here.
1792   if (!canConstantFoldCallTo(&Call, F))
1793     return false;
1794 
1795   // Try to re-map the arguments to constants.
1796   SmallVector<Constant *, 4> ConstantArgs;
1797   ConstantArgs.reserve(Call.arg_size());
1798   for (Value *I : Call.args()) {
1799     Constant *C = dyn_cast<Constant>(I);
1800     if (!C)
1801       C = dyn_cast_or_null<Constant>(SimplifiedValues.lookup(I));
1802     if (!C)
1803       return false; // This argument doesn't map to a constant.
1804 
1805     ConstantArgs.push_back(C);
1806   }
1807   if (Constant *C = ConstantFoldCall(&Call, F, ConstantArgs)) {
1808     SimplifiedValues[&Call] = C;
1809     return true;
1810   }
1811 
1812   return false;
1813 }
1814 
1815 bool CallAnalyzer::visitCallBase(CallBase &Call) {
1816   if (Call.hasFnAttr(Attribute::ReturnsTwice) &&
1817       !F.hasFnAttribute(Attribute::ReturnsTwice)) {
1818     // This aborts the entire analysis.
1819     ExposesReturnsTwice = true;
1820     return false;
1821   }
1822   if (isa<CallInst>(Call) && cast<CallInst>(Call).cannotDuplicate())
1823     ContainsNoDuplicateCall = true;
1824 
1825   Value *Callee = Call.getCalledOperand();
1826   Function *F = dyn_cast_or_null<Function>(Callee);
1827   bool IsIndirectCall = !F;
1828   if (IsIndirectCall) {
    // Check if this happens to be an indirect function call to a known
    // function in this inline context. If not, we've done all we can.
1831     F = dyn_cast_or_null<Function>(SimplifiedValues.lookup(Callee));
1832     if (!F) {
1833       onCallArgumentSetup(Call);
1834 
1835       if (!Call.onlyReadsMemory())
1836         disableLoadElimination();
1837       return Base::visitCallBase(Call);
1838     }
1839   }
1840 
1841   assert(F && "Expected a call to a known function");
1842 
1843   // When we have a concrete function, first try to simplify it directly.
1844   if (simplifyCallSite(F, Call))
1845     return true;
1846 
1847   // Next check if it is an intrinsic we know about.
1848   // FIXME: Lift this into part of the InstVisitor.
1849   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&Call)) {
1850     switch (II->getIntrinsicID()) {
1851     default:
1852       if (!Call.onlyReadsMemory() && !isAssumeLikeIntrinsic(II))
1853         disableLoadElimination();
1854       return Base::visitCallBase(Call);
1855 
1856     case Intrinsic::load_relative:
1857       onLoadRelativeIntrinsic();
1858       return false;
1859 
1860     case Intrinsic::memset:
1861     case Intrinsic::memcpy:
1862     case Intrinsic::memmove:
1863       disableLoadElimination();
1864       // SROA can usually chew through these intrinsics, but they aren't free.
1865       return false;
1866     case Intrinsic::icall_branch_funnel:
1867     case Intrinsic::localescape:
1868       HasUninlineableIntrinsic = true;
1869       return false;
1870     case Intrinsic::vastart:
1871       InitsVargArgs = true;
1872       return false;
1873     }
1874   }
1875 
1876   if (F == Call.getFunction()) {
1877     // This flag will fully abort the analysis, so don't bother with anything
1878     // else.
1879     IsRecursiveCall = true;
1880     return false;
1881   }
1882 
1883   if (TTI.isLoweredToCall(F)) {
1884     onLoweredCall(F, Call, IsIndirectCall);
1885   }
1886 
1887   if (!(Call.onlyReadsMemory() || (IsIndirectCall && F->onlyReadsMemory())))
1888     disableLoadElimination();
1889   return Base::visitCallBase(Call);
1890 }
1891 
1892 bool CallAnalyzer::visitReturnInst(ReturnInst &RI) {
1893   // At least one return instruction will be free after inlining.
1894   bool Free = !HasReturn;
1895   HasReturn = true;
1896   return Free;
1897 }
1898 
1899 bool CallAnalyzer::visitBranchInst(BranchInst &BI) {
1900   // We model unconditional branches as essentially free -- they really
1901   // shouldn't exist at all, but handling them makes the behavior of the
1902   // inliner more regular and predictable. Interestingly, conditional branches
1903   // which will fold away are also free.
1904   return BI.isUnconditional() || isa<ConstantInt>(BI.getCondition()) ||
1905          dyn_cast_or_null<ConstantInt>(
1906              SimplifiedValues.lookup(BI.getCondition()));
1907 }
1908 
1909 bool CallAnalyzer::visitSelectInst(SelectInst &SI) {
1910   bool CheckSROA = SI.getType()->isPointerTy();
1911   Value *TrueVal = SI.getTrueValue();
1912   Value *FalseVal = SI.getFalseValue();
1913 
1914   Constant *TrueC = dyn_cast<Constant>(TrueVal);
1915   if (!TrueC)
1916     TrueC = SimplifiedValues.lookup(TrueVal);
1917   Constant *FalseC = dyn_cast<Constant>(FalseVal);
1918   if (!FalseC)
1919     FalseC = SimplifiedValues.lookup(FalseVal);
1920   Constant *CondC =
1921       dyn_cast_or_null<Constant>(SimplifiedValues.lookup(SI.getCondition()));
1922 
1923   if (!CondC) {
1924     // Select C, X, X => X
1925     if (TrueC == FalseC && TrueC) {
1926       SimplifiedValues[&SI] = TrueC;
1927       return true;
1928     }
1929 
1930     if (!CheckSROA)
1931       return Base::visitSelectInst(SI);
1932 
1933     std::pair<Value *, APInt> TrueBaseAndOffset =
1934         ConstantOffsetPtrs.lookup(TrueVal);
1935     std::pair<Value *, APInt> FalseBaseAndOffset =
1936         ConstantOffsetPtrs.lookup(FalseVal);
1937     if (TrueBaseAndOffset == FalseBaseAndOffset && TrueBaseAndOffset.first) {
1938       ConstantOffsetPtrs[&SI] = TrueBaseAndOffset;
1939 
1940       if (auto *SROAArg = getSROAArgForValueOrNull(TrueVal))
1941         SROAArgValues[&SI] = SROAArg;
1942       return true;
1943     }
1944 
1945     return Base::visitSelectInst(SI);
1946   }
1947 
1948   // Select condition is a constant.
1949   Value *SelectedV = CondC->isAllOnesValue()
1950                          ? TrueVal
1951                          : (CondC->isNullValue()) ? FalseVal : nullptr;
1952   if (!SelectedV) {
    // Condition is a vector constant that is not all 1s or all 0s. If all
    // operands are constants, ConstantExpr::getSelect() can handle cases such
    // as vector selects.
1956     if (TrueC && FalseC) {
1957       if (auto *C = ConstantExpr::getSelect(CondC, TrueC, FalseC)) {
1958         SimplifiedValues[&SI] = C;
1959         return true;
1960       }
1961     }
1962     return Base::visitSelectInst(SI);
1963   }
1964 
1965   // Condition is either all 1s or all 0s. SI can be simplified.
1966   if (Constant *SelectedC = dyn_cast<Constant>(SelectedV)) {
1967     SimplifiedValues[&SI] = SelectedC;
1968     return true;
1969   }
1970 
1971   if (!CheckSROA)
1972     return true;
1973 
1974   std::pair<Value *, APInt> BaseAndOffset =
1975       ConstantOffsetPtrs.lookup(SelectedV);
1976   if (BaseAndOffset.first) {
1977     ConstantOffsetPtrs[&SI] = BaseAndOffset;
1978 
1979     if (auto *SROAArg = getSROAArgForValueOrNull(SelectedV))
1980       SROAArgValues[&SI] = SROAArg;
1981   }
1982 
1983   return true;
1984 }
1985 
1986 bool CallAnalyzer::visitSwitchInst(SwitchInst &SI) {
1987   // We model unconditional switches as free, see the comments on handling
1988   // branches.
1989   if (isa<ConstantInt>(SI.getCondition()))
1990     return true;
1991   if (Value *V = SimplifiedValues.lookup(SI.getCondition()))
1992     if (isa<ConstantInt>(V))
1993       return true;
1994 
  // Assume the most general case where the switch is lowered into either a
  // jump table, a bit test, or a balanced binary tree consisting of case
  // clusters without merging adjacent clusters with the same destination. We
  // do not consider switches that are lowered with a mix of jump table / bit
  // test / binary search tree. The cost of the switch is proportional to the
  // size of the tree or the size of the jump table range.
2001   //
2002   // NB: We convert large switches which are just used to initialize large phi
2003   // nodes to lookup tables instead in simplify-cfg, so this shouldn't prevent
2004   // inlining those. It will prevent inlining in cases where the optimization
2005   // does not (yet) fire.
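  //
  // For example (illustrative), a switch over a dense range of case values is
  // typically modeled through the jump table size reported by TTI, while a
  // sparse switch over scattered values is modeled by its number of case
  // clusters.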
2006 
2007   unsigned JumpTableSize = 0;
2008   BlockFrequencyInfo *BFI = GetBFI ? &(GetBFI(F)) : nullptr;
2009   unsigned NumCaseCluster =
2010       TTI.getEstimatedNumberOfCaseClusters(SI, JumpTableSize, PSI, BFI);
2011 
2012   onFinalizeSwitch(JumpTableSize, NumCaseCluster);
2013   return false;
2014 }
2015 
2016 bool CallAnalyzer::visitIndirectBrInst(IndirectBrInst &IBI) {
2017   // We never want to inline functions that contain an indirectbr.  This is
2018   // incorrect because all the blockaddress's (in static global initializers
2019   // for example) would be referring to the original function, and this
2020   // indirect jump would jump from the inlined copy of the function into the
2021   // original function which is extremely undefined behavior.
2022   // FIXME: This logic isn't really right; we can safely inline functions with
2023   // indirectbr's as long as no other function or global references the
2024   // blockaddress of a block within the current function.
2025   HasIndirectBr = true;
2026   return false;
2027 }
2028 
2029 bool CallAnalyzer::visitResumeInst(ResumeInst &RI) {
2030   // FIXME: It's not clear that a single instruction is an accurate model for
2031   // the inline cost of a resume instruction.
2032   return false;
2033 }
2034 
2035 bool CallAnalyzer::visitCleanupReturnInst(CleanupReturnInst &CRI) {
2036   // FIXME: It's not clear that a single instruction is an accurate model for
2037   // the inline cost of a cleanupret instruction.
2038   return false;
2039 }
2040 
2041 bool CallAnalyzer::visitCatchReturnInst(CatchReturnInst &CRI) {
2042   // FIXME: It's not clear that a single instruction is an accurate model for
2043   // the inline cost of a catchret instruction.
2044   return false;
2045 }
2046 
2047 bool CallAnalyzer::visitUnreachableInst(UnreachableInst &I) {
  // FIXME: It might be reasonable to discount the cost of instructions leading
  // to unreachable as they have the lowest possible impact on both runtime and
  // code size.
2051   return true; // No actual code is needed for unreachable.
2052 }
2053 
2054 bool CallAnalyzer::visitInstruction(Instruction &I) {
2055   // Some instructions are free. All of the free intrinsics can also be
2056   // handled by SROA, etc.
2057   if (TargetTransformInfo::TCC_Free ==
2058       TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency))
2059     return true;
2060 
2061   // We found something we don't understand or can't handle. Mark any SROA-able
2062   // values in the operand list as no longer viable.
2063   for (const Use &Op : I.operands())
2064     disableSROA(Op);
2065 
2066   return false;
2067 }
2068 
2069 /// Analyze a basic block for its contribution to the inline cost.
2070 ///
2071 /// This method walks the analyzer over every instruction in the given basic
2072 /// block and accounts for their cost during inlining at this callsite. It
2073 /// aborts early if the threshold has been exceeded or an impossible to inline
2074 /// construct has been detected. It returns false if inlining is no longer
2075 /// viable, and true if inlining remains viable.
2076 InlineResult
2077 CallAnalyzer::analyzeBlock(BasicBlock *BB,
2078                            SmallPtrSetImpl<const Value *> &EphValues) {
2079   for (Instruction &I : *BB) {
    // FIXME: Currently, the number of instructions in a function, regardless
    // of our ability to simplify them during inlining to constants or dead
    // code, is actually used by the vector bonus heuristic. As long as that's
    // true, we have to special case debug intrinsics here to prevent
    // differences in inlining due to debug symbols. Eventually, the number of
    // unsimplified instructions shouldn't factor into the cost computation,
    // but until then, hack around it here.
2087     if (isa<DbgInfoIntrinsic>(I))
2088       continue;
2089 
2090     // Skip pseudo-probes.
2091     if (isa<PseudoProbeInst>(I))
2092       continue;
2093 
2094     // Skip ephemeral values.
2095     if (EphValues.count(&I))
2096       continue;
2097 
2098     ++NumInstructions;
2099     if (isa<ExtractElementInst>(I) || I.getType()->isVectorTy())
2100       ++NumVectorInstructions;
2101 
2102     // If the instruction simplified to a constant, there is no cost to this
2103     // instruction. Visit the instructions using our InstVisitor to account for
2104     // all of the per-instruction logic. The visit tree returns true if we
2105     // consumed the instruction in any way, and false if the instruction's base
2106     // cost should count against inlining.
2107     onInstructionAnalysisStart(&I);
2108 
2109     if (Base::visit(&I))
2110       ++NumInstructionsSimplified;
2111     else
2112       onMissedSimplification();
2113 
2114     onInstructionAnalysisFinish(&I);
2115     using namespace ore;
    // If visiting this instruction detected an uninlinable pattern, abort.
2117     InlineResult IR = InlineResult::success();
2118     if (IsRecursiveCall)
2119       IR = InlineResult::failure("recursive");
2120     else if (ExposesReturnsTwice)
2121       IR = InlineResult::failure("exposes returns twice");
2122     else if (HasDynamicAlloca)
2123       IR = InlineResult::failure("dynamic alloca");
2124     else if (HasIndirectBr)
2125       IR = InlineResult::failure("indirect branch");
2126     else if (HasUninlineableIntrinsic)
2127       IR = InlineResult::failure("uninlinable intrinsic");
2128     else if (InitsVargArgs)
2129       IR = InlineResult::failure("varargs");
2130     if (!IR.isSuccess()) {
2131       if (ORE)
2132         ORE->emit([&]() {
2133           return OptimizationRemarkMissed(DEBUG_TYPE, "NeverInline",
2134                                           &CandidateCall)
2135                  << NV("Callee", &F) << " has uninlinable pattern ("
2136                  << NV("InlineResult", IR.getFailureReason())
2137                  << ") and cost is not fully computed";
2138         });
2139       return IR;
2140     }
2141 
2142     // If the caller is a recursive function then we don't want to inline
2143     // functions which allocate a lot of stack space because it would increase
2144     // the caller stack usage dramatically.
2145     if (IsCallerRecursive &&
2146         AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller) {
2147       auto IR =
2148           InlineResult::failure("recursive and allocates too much stack space");
2149       if (ORE)
2150         ORE->emit([&]() {
2151           return OptimizationRemarkMissed(DEBUG_TYPE, "NeverInline",
2152                                           &CandidateCall)
2153                  << NV("Callee", &F) << " is "
2154                  << NV("InlineResult", IR.getFailureReason())
2155                  << ". Cost is not fully computed";
2156         });
2157       return IR;
2158     }
2159 
2160     if (shouldStop())
2161       return InlineResult::failure(
2162           "Call site analysis is not favorable to inlining.");
2163   }
2164 
2165   return InlineResult::success();
2166 }
2167 
2168 /// Compute the base pointer and cumulative constant offsets for V.
2169 ///
2170 /// This strips all constant offsets off of V, leaving it the base pointer, and
2171 /// accumulates the total constant offset applied in the returned constant. It
/// returns nullptr if V is not a pointer, and returns the constant '0' if
/// there are no constant offsets applied.
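///
/// For example (illustrative), for
///   %p = getelementptr inbounds [4 x i32], [4 x i32]* %a, i64 0, i64 2
/// V is rewritten to %a and the returned constant is 8 on a target where i32
/// occupies 4 bytes.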
2174 ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
2175   if (!V->getType()->isPointerTy())
2176     return nullptr;
2177 
2178   unsigned AS = V->getType()->getPointerAddressSpace();
2179   unsigned IntPtrWidth = DL.getIndexSizeInBits(AS);
2180   APInt Offset = APInt::getNullValue(IntPtrWidth);
2181 
2182   // Even though we don't look through PHI nodes, we could be called on an
2183   // instruction in an unreachable block, which may be on a cycle.
2184   SmallPtrSet<Value *, 4> Visited;
2185   Visited.insert(V);
2186   do {
2187     if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
2188       if (!GEP->isInBounds() || !accumulateGEPOffset(*GEP, Offset))
2189         return nullptr;
2190       V = GEP->getPointerOperand();
2191     } else if (Operator::getOpcode(V) == Instruction::BitCast) {
2192       V = cast<Operator>(V)->getOperand(0);
2193     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
2194       if (GA->isInterposable())
2195         break;
2196       V = GA->getAliasee();
2197     } else {
2198       break;
2199     }
2200     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
2201   } while (Visited.insert(V).second);
2202 
2203   Type *IdxPtrTy = DL.getIndexType(V->getType());
2204   return cast<ConstantInt>(ConstantInt::get(IdxPtrTy, Offset));
2205 }
2206 
2207 /// Find dead blocks due to deleted CFG edges during inlining.
2208 ///
2209 /// If we know the successor of the current block, \p CurrBB, has to be \p
2210 /// NextBB, the other successors of \p CurrBB are dead if these successors have
2211 /// no live incoming CFG edges.  If one block is found to be dead, we can
2212 /// continue growing the dead block list by checking the successors of the dead
2213 /// blocks to see if all their incoming edges are dead or not.
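///
/// For example (illustrative), if the conditional branch terminating \p CurrBB
/// is known to always take \p NextBB, the other successor is dead unless some
/// other live block still branches to it; its successors are then examined in
/// the same way.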
2214 void CallAnalyzer::findDeadBlocks(BasicBlock *CurrBB, BasicBlock *NextBB) {
2215   auto IsEdgeDead = [&](BasicBlock *Pred, BasicBlock *Succ) {
    // A CFG edge is dead if the predecessor is dead or the predecessor has a
    // known successor which is not the one being examined.
2218     return (DeadBlocks.count(Pred) ||
2219             (KnownSuccessors[Pred] && KnownSuccessors[Pred] != Succ));
2220   };
2221 
2222   auto IsNewlyDead = [&](BasicBlock *BB) {
2223     // If all the edges to a block are dead, the block is also dead.
2224     return (!DeadBlocks.count(BB) &&
2225             llvm::all_of(predecessors(BB),
2226                          [&](BasicBlock *P) { return IsEdgeDead(P, BB); }));
2227   };
2228 
2229   for (BasicBlock *Succ : successors(CurrBB)) {
2230     if (Succ == NextBB || !IsNewlyDead(Succ))
2231       continue;
2232     SmallVector<BasicBlock *, 4> NewDead;
2233     NewDead.push_back(Succ);
2234     while (!NewDead.empty()) {
2235       BasicBlock *Dead = NewDead.pop_back_val();
2236       if (DeadBlocks.insert(Dead))
2237         // Continue growing the dead block lists.
2238         for (BasicBlock *S : successors(Dead))
2239           if (IsNewlyDead(S))
2240             NewDead.push_back(S);
2241     }
2242   }
2243 }
2244 
2245 /// Analyze a call site for potential inlining.
2246 ///
2247 /// Returns true if inlining this call is viable, and false if it is not
2248 /// viable. It computes the cost and adjusts the threshold based on numerous
2249 /// factors and heuristics. If this method returns false but the computed cost
2250 /// is below the computed threshold, then inlining was forcibly disabled by
2251 /// some artifact of the routine.
2252 InlineResult CallAnalyzer::analyze() {
2253   ++NumCallsAnalyzed;
2254 
2255   auto Result = onAnalysisStart();
2256   if (!Result.isSuccess())
2257     return Result;
2258 
2259   if (F.empty())
2260     return InlineResult::success();
2261 
2262   Function *Caller = CandidateCall.getFunction();
2263   // Check if the caller function is recursive itself.
2264   for (User *U : Caller->users()) {
2265     CallBase *Call = dyn_cast<CallBase>(U);
2266     if (Call && Call->getFunction() == Caller) {
2267       IsCallerRecursive = true;
2268       break;
2269     }
2270   }
2271 
2272   // Populate our simplified values by mapping from function arguments to call
2273   // arguments with known important simplifications.
2274   auto CAI = CandidateCall.arg_begin();
2275   for (Argument &FAI : F.args()) {
2276     assert(CAI != CandidateCall.arg_end());
2277     if (Constant *C = dyn_cast<Constant>(CAI))
2278       SimplifiedValues[&FAI] = C;
2279 
2280     Value *PtrArg = *CAI;
2281     if (ConstantInt *C = stripAndComputeInBoundsConstantOffsets(PtrArg)) {
2282       ConstantOffsetPtrs[&FAI] = std::make_pair(PtrArg, C->getValue());
2283 
2284       // We can SROA any pointer arguments derived from alloca instructions.
2285       if (auto *SROAArg = dyn_cast<AllocaInst>(PtrArg)) {
2286         SROAArgValues[&FAI] = SROAArg;
2287         onInitializeSROAArg(SROAArg);
2288         EnabledSROAAllocas.insert(SROAArg);
2289       }
2290     }
2291     ++CAI;
2292   }
2293   NumConstantArgs = SimplifiedValues.size();
2294   NumConstantOffsetPtrArgs = ConstantOffsetPtrs.size();
2295   NumAllocaArgs = SROAArgValues.size();
2296 
2297   // FIXME: If a caller has multiple calls to a callee, we end up recomputing
2298   // the ephemeral values multiple times (and they're completely determined by
2299   // the callee, so this is purely duplicate work).
2300   SmallPtrSet<const Value *, 32> EphValues;
2301   CodeMetrics::collectEphemeralValues(&F, &GetAssumptionCache(F), EphValues);
2302 
2303   // The worklist of live basic blocks in the callee *after* inlining. We avoid
2304   // adding basic blocks of the callee which can be proven to be dead for this
2305   // particular call site in order to get more accurate cost estimates. This
2306   // requires a somewhat heavyweight iteration pattern: we need to walk the
2307   // basic blocks in a breadth-first order as we insert live successors. To
  // accomplish this, and because we exit early once we cross our threshold,
  // we use a small-size optimized SetVector.
2310   typedef SetVector<BasicBlock *, SmallVector<BasicBlock *, 16>,
2311                     SmallPtrSet<BasicBlock *, 16>>
2312       BBSetVector;
2313   BBSetVector BBWorklist;
2314   BBWorklist.insert(&F.getEntryBlock());
2315 
2316   // Note that we *must not* cache the size, this loop grows the worklist.
2317   for (unsigned Idx = 0; Idx != BBWorklist.size(); ++Idx) {
2318     if (shouldStop())
2319       break;
2320 
2321     BasicBlock *BB = BBWorklist[Idx];
2322     if (BB->empty())
2323       continue;
2324 
2325     onBlockStart(BB);
2326 
2327     // Disallow inlining a blockaddress with uses other than strictly callbr.
2328     // A blockaddress only has defined behavior for an indirect branch in the
2329     // same function, and we do not currently support inlining indirect
2330     // branches.  But, the inliner may not see an indirect branch that ends up
2331     // being dead code at a particular call site. If the blockaddress escapes
2332     // the function, e.g., via a global variable, inlining may lead to an
2333     // invalid cross-function reference.
2334     // FIXME: pr/39560: continue relaxing this overt restriction.
2335     if (BB->hasAddressTaken())
2336       for (User *U : BlockAddress::get(&*BB)->users())
2337         if (!isa<CallBrInst>(*U))
2338           return InlineResult::failure("blockaddress used outside of callbr");
2339 
2340     // Analyze the cost of this block. If we blow through the threshold, this
    // returns false, and we can bail out.
2342     InlineResult IR = analyzeBlock(BB, EphValues);
2343     if (!IR.isSuccess())
2344       return IR;
2345 
2346     Instruction *TI = BB->getTerminator();
2347 
2348     // Add in the live successors by first checking whether we have terminator
2349     // that may be simplified based on the values simplified by this call.
2350     if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
2351       if (BI->isConditional()) {
2352         Value *Cond = BI->getCondition();
2353         if (ConstantInt *SimpleCond =
2354                 dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
2355           BasicBlock *NextBB = BI->getSuccessor(SimpleCond->isZero() ? 1 : 0);
2356           BBWorklist.insert(NextBB);
2357           KnownSuccessors[BB] = NextBB;
2358           findDeadBlocks(BB, NextBB);
2359           continue;
2360         }
2361       }
2362     } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
2363       Value *Cond = SI->getCondition();
2364       if (ConstantInt *SimpleCond =
2365               dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
2366         BasicBlock *NextBB = SI->findCaseValue(SimpleCond)->getCaseSuccessor();
2367         BBWorklist.insert(NextBB);
2368         KnownSuccessors[BB] = NextBB;
2369         findDeadBlocks(BB, NextBB);
2370         continue;
2371       }
2372     }
2373 
2374     // If we're unable to select a particular successor, just count all of
2375     // them.
2376     for (unsigned TIdx = 0, TSize = TI->getNumSuccessors(); TIdx != TSize;
2377          ++TIdx)
2378       BBWorklist.insert(TI->getSuccessor(TIdx));
2379 
2380     onBlockAnalyzed(BB);
2381   }
2382 
2383   bool OnlyOneCallAndLocalLinkage = F.hasLocalLinkage() && F.hasOneUse() &&
2384                                     &F == CandidateCall.getCalledFunction();
2385   // If this is a noduplicate call, we can still inline as long as
2386   // inlining this would cause the removal of the caller (so the instruction
2387   // is not actually duplicated, just moved).
2388   if (!OnlyOneCallAndLocalLinkage && ContainsNoDuplicateCall)
2389     return InlineResult::failure("noduplicate");
2390 
2391   return finalizeAnalysis();
2392 }
2393 
2394 void InlineCostCallAnalyzer::print() {
2395 #define DEBUG_PRINT_STAT(x) dbgs() << "      " #x ": " << x << "\n"
2396   if (PrintInstructionComments)
2397     F.print(dbgs(), &Writer);
2398   DEBUG_PRINT_STAT(NumConstantArgs);
2399   DEBUG_PRINT_STAT(NumConstantOffsetPtrArgs);
2400   DEBUG_PRINT_STAT(NumAllocaArgs);
2401   DEBUG_PRINT_STAT(NumConstantPtrCmps);
2402   DEBUG_PRINT_STAT(NumConstantPtrDiffs);
2403   DEBUG_PRINT_STAT(NumInstructionsSimplified);
2404   DEBUG_PRINT_STAT(NumInstructions);
2405   DEBUG_PRINT_STAT(SROACostSavings);
2406   DEBUG_PRINT_STAT(SROACostSavingsLost);
2407   DEBUG_PRINT_STAT(LoadEliminationCost);
2408   DEBUG_PRINT_STAT(ContainsNoDuplicateCall);
2409   DEBUG_PRINT_STAT(Cost);
2410   DEBUG_PRINT_STAT(Threshold);
2411 #undef DEBUG_PRINT_STAT
2412 }
2413 
2414 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2415 /// Dump stats about this call's analysis.
2416 LLVM_DUMP_METHOD void InlineCostCallAnalyzer::dump() {
2417   print();
2418 }
2419 #endif
2420 
/// Test that there are no attribute conflicts between Caller and Callee that
/// prevent inlining.
2423 static bool functionsHaveCompatibleAttributes(
2424     Function *Caller, Function *Callee, TargetTransformInfo &TTI,
2425     function_ref<const TargetLibraryInfo &(Function &)> &GetTLI) {
2426   // Note that CalleeTLI must be a copy not a reference. The legacy pass manager
2427   // caches the most recently created TLI in the TargetLibraryInfoWrapperPass
2428   // object, and always returns the same object (which is overwritten on each
2429   // GetTLI call). Therefore we copy the first result.
2430   auto CalleeTLI = GetTLI(*Callee);
2431   return TTI.areInlineCompatible(Caller, Callee) &&
2432          GetTLI(*Caller).areInlineCompatible(CalleeTLI,
2433                                              InlineCallerSupersetNoBuiltin) &&
2434          AttributeFuncs::areInlineCompatible(*Caller, *Callee);
2435 }
2436 
2437 int llvm::getCallsiteCost(CallBase &Call, const DataLayout &DL) {
2438   int Cost = 0;
2439   for (unsigned I = 0, E = Call.arg_size(); I != E; ++I) {
2440     if (Call.isByValArgument(I)) {
2441       // We approximate the number of loads and stores needed by dividing the
2442       // size of the byval type by the target's pointer size.
2443       PointerType *PTy = cast<PointerType>(Call.getArgOperand(I)->getType());
2444       unsigned TypeSize = DL.getTypeSizeInBits(PTy->getElementType());
2445       unsigned AS = PTy->getAddressSpace();
2446       unsigned PointerSize = DL.getPointerSizeInBits(AS);
2447       // Ceiling division.
2448       unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;
2449 
2450       // If it generates more than 8 stores it is likely to be expanded as an
2451       // inline memcpy so we take that as an upper bound. Otherwise we assume
2452       // one load and one store per word copied.
2453       // FIXME: The maxStoresPerMemcpy setting from the target should be used
2454       // here instead of a magic number of 8, but it's not available via
2455       // DataLayout.
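      // For example (illustrative), a 64-byte byval aggregate on a target
      // with 64-bit pointers needs ceil(512 / 64) = 8 word copies, charged
      // below as 2 * 8 * InstrCost.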
2456       NumStores = std::min(NumStores, 8U);
2457 
2458       Cost += 2 * NumStores * InlineConstants::InstrCost;
2459     } else {
2460       // For non-byval arguments subtract off one instruction per call
2461       // argument.
2462       Cost += InlineConstants::InstrCost;
2463     }
2464   }
2465   // The call instruction also disappears after inlining.
2466   Cost += InlineConstants::InstrCost + InlineConstants::CallPenalty;
2467   return Cost;
2468 }
2469 
2470 InlineCost llvm::getInlineCost(
2471     CallBase &Call, const InlineParams &Params, TargetTransformInfo &CalleeTTI,
2472     function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
2473     function_ref<const TargetLibraryInfo &(Function &)> GetTLI,
2474     function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
2475     ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE) {
2476   return getInlineCost(Call, Call.getCalledFunction(), Params, CalleeTTI,
2477                        GetAssumptionCache, GetTLI, GetBFI, PSI, ORE);
2478 }
2479 
2480 Optional<int> llvm::getInliningCostEstimate(
2481     CallBase &Call, TargetTransformInfo &CalleeTTI,
2482     function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
2483     function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
2484     ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE) {
2485   const InlineParams Params = {/* DefaultThreshold*/ 0,
2486                                /*HintThreshold*/ {},
2487                                /*ColdThreshold*/ {},
2488                                /*OptSizeThreshold*/ {},
2489                                /*OptMinSizeThreshold*/ {},
2490                                /*HotCallSiteThreshold*/ {},
2491                                /*LocallyHotCallSiteThreshold*/ {},
2492                                /*ColdCallSiteThreshold*/ {},
2493                                /*ComputeFullInlineCost*/ true,
2494                                /*EnableDeferral*/ true};
2495 
2496   InlineCostCallAnalyzer CA(*Call.getCalledFunction(), Call, Params, CalleeTTI,
2497                             GetAssumptionCache, GetBFI, PSI, ORE, true,
2498                             /*IgnoreThreshold*/ true);
2499   auto R = CA.analyze();
2500   if (!R.isSuccess())
2501     return None;
2502   return CA.getCost();
2503 }
2504 
2505 Optional<InlineResult> llvm::getAttributeBasedInliningDecision(
2506     CallBase &Call, Function *Callee, TargetTransformInfo &CalleeTTI,
2507     function_ref<const TargetLibraryInfo &(Function &)> GetTLI) {
2508 
2509   // Cannot inline indirect calls.
2510   if (!Callee)
2511     return InlineResult::failure("indirect call");
2512 
  // When a callee coroutine function is inlined into a caller coroutine
  // function before the coro-split pass, the coro-early pass cannot handle
  // this well. So we don't inline a coroutine function that has not yet been
  // split.
2517   if (Callee->isPresplitCoroutine())
2518     return InlineResult::failure("unsplited coroutine call");
2519 
  // Never inline calls with byval arguments that do not have the alloca
  // address space. Since byval arguments can be replaced with a copy to an
  // alloca, the inlined code would need to be adjusted to account for the
  // argument now being in the alloca address space, which is a little bit
  // complicated to solve.
2525   unsigned AllocaAS = Callee->getParent()->getDataLayout().getAllocaAddrSpace();
2526   for (unsigned I = 0, E = Call.arg_size(); I != E; ++I)
2527     if (Call.isByValArgument(I)) {
2528       PointerType *PTy = cast<PointerType>(Call.getArgOperand(I)->getType());
2529       if (PTy->getAddressSpace() != AllocaAS)
2530         return InlineResult::failure("byval arguments without alloca"
2531                                      " address space");
2532     }
2533 
2534   // Calls to functions with always-inline attributes should be inlined
2535   // whenever possible.
2536   if (Call.hasFnAttr(Attribute::AlwaysInline)) {
2537     auto IsViable = isInlineViable(*Callee);
2538     if (IsViable.isSuccess())
2539       return InlineResult::success();
2540     return InlineResult::failure(IsViable.getFailureReason());
2541   }
2542 
2543   // Never inline functions with conflicting attributes (unless callee has
2544   // always-inline attribute).
2545   Function *Caller = Call.getCaller();
2546   if (!functionsHaveCompatibleAttributes(Caller, Callee, CalleeTTI, GetTLI))
2547     return InlineResult::failure("conflicting attributes");
2548 
2549   // Don't inline this call if the caller has the optnone attribute.
2550   if (Caller->hasOptNone())
2551     return InlineResult::failure("optnone attribute");
2552 
2553   // Don't inline a function that treats null pointer as valid into a caller
2554   // that does not have this attribute.
2555   if (!Caller->nullPointerIsDefined() && Callee->nullPointerIsDefined())
2556     return InlineResult::failure("nullptr definitions incompatible");
2557 
2558   // Don't inline functions which can be interposed at link-time.
2559   if (Callee->isInterposable())
2560     return InlineResult::failure("interposable");
2561 
2562   // Don't inline functions marked noinline.
2563   if (Callee->hasFnAttribute(Attribute::NoInline))
2564     return InlineResult::failure("noinline function attribute");
2565 
2566   // Don't inline call sites marked noinline.
2567   if (Call.isNoInline())
2568     return InlineResult::failure("noinline call site attribute");
2569 
2570   // Don't inline functions if one does not have any stack protector attribute
2571   // but the other does.
2572   if (Caller->hasStackProtectorFnAttr() && !Callee->hasStackProtectorFnAttr())
2573     return InlineResult::failure(
2574         "stack protected caller but callee requested no stack protector");
2575   if (Callee->hasStackProtectorFnAttr() && !Caller->hasStackProtectorFnAttr())
2576     return InlineResult::failure(
2577         "stack protected callee but caller requested no stack protector");
2578 
2579   return None;
2580 }

InlineCost llvm::getInlineCost(
    CallBase &Call, Function *Callee, const InlineParams &Params,
    TargetTransformInfo &CalleeTTI,
    function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
    function_ref<const TargetLibraryInfo &(Function &)> GetTLI,
    function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
    ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE) {

  auto UserDecision =
      llvm::getAttributeBasedInliningDecision(Call, Callee, CalleeTTI, GetTLI);

  if (UserDecision.hasValue()) {
    if (UserDecision->isSuccess())
      return llvm::InlineCost::getAlways("always inline attribute");
    return llvm::InlineCost::getNever(UserDecision->getFailureReason());
  }

  LLVM_DEBUG(llvm::dbgs() << "      Analyzing call of " << Callee->getName()
                          << "... (caller:" << Call.getCaller()->getName()
                          << ")\n");

  InlineCostCallAnalyzer CA(*Callee, Call, Params, CalleeTTI,
                            GetAssumptionCache, GetBFI, PSI, ORE);
  InlineResult ShouldInline = CA.analyze();

  LLVM_DEBUG(CA.dump());

  // Check if there was a reason to force inlining or no inlining.
  if (!ShouldInline.isSuccess() && CA.getCost() < CA.getThreshold())
    return InlineCost::getNever(ShouldInline.getFailureReason());
  if (ShouldInline.isSuccess() && CA.getCost() >= CA.getThreshold())
    return InlineCost::getAlways("empty function");

  return llvm::InlineCost::get(CA.getCost(), CA.getThreshold());
}
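
// Usage sketch (assumptions: the caller already has the analysis getters named
// below; none of them are defined in this function):
//
//   InlineCost IC = getInlineCost(CB, CB.getCalledFunction(), Params,
//                                 CalleeTTI, GetAssumptionCache, GetTLI,
//                                 GetBFI, PSI, ORE);
//   if (IC.isAlways())
//     ...   // inlining forced (e.g. always_inline and viable)
//   else if (IC.isNever())
//     ...   // inlining forbidden or never profitable
//   else if (IC.getCost() < IC.getThreshold())
//     ...   // variable cost below threshold: profitable to inline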

InlineResult llvm::isInlineViable(Function &F) {
  bool ReturnsTwice = F.hasFnAttribute(Attribute::ReturnsTwice);
  for (BasicBlock &BB : F) {
    // Disallow inlining of functions which contain indirect branches.
    if (isa<IndirectBrInst>(BB.getTerminator()))
      return InlineResult::failure("contains indirect branches");

    // Disallow inlining of functions whose blockaddress is used by
    // instructions other than callbr.
    if (BB.hasAddressTaken())
      for (User *U : BlockAddress::get(&BB)->users())
        if (!isa<CallBrInst>(*U))
          return InlineResult::failure("blockaddress used outside of callbr");

    for (auto &II : BB) {
      CallBase *Call = dyn_cast<CallBase>(&II);
      if (!Call)
        continue;

      // Disallow recursive calls.
      Function *Callee = Call->getCalledFunction();
      if (&F == Callee)
        return InlineResult::failure("recursive call");

      // Disallow calls which expose returns-twice to a function not previously
      // attributed as such.
      if (!ReturnsTwice && isa<CallInst>(Call) &&
          cast<CallInst>(Call)->canReturnTwice())
        return InlineResult::failure("exposes returns-twice attribute");

      if (Callee)
        switch (Callee->getIntrinsicID()) {
        default:
          break;
        case llvm::Intrinsic::icall_branch_funnel:
          // Disallow inlining of @llvm.icall.branch.funnel because the current
          // backend can't separate call targets from call arguments.
          return InlineResult::failure(
              "disallowed inlining of @llvm.icall.branch.funnel");
        case llvm::Intrinsic::localescape:
          // Disallow inlining functions that call @llvm.localescape. Doing
          // this correctly would require major changes to the inliner.
          return InlineResult::failure(
              "disallowed inlining of @llvm.localescape");
        case llvm::Intrinsic::vastart:
          // Disallow inlining of functions that initialize VarArgs with
          // va_start.
          return InlineResult::failure(
              "contains VarArgs initialized with va_start");
        }
    }
  }

  return InlineResult::success();
}
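
// For illustration, a callee like the following IR sketch would be rejected by
// the loop above with "contains VarArgs initialized with va_start"
// (hand-written example, not taken from a test file):
//
//   declare void @llvm.va_start(i8*)
//
//   define void @vararg_callee(...) {
//     %ap = alloca i8*
//     %ap.cast = bitcast i8** %ap to i8*
//     call void @llvm.va_start(i8* %ap.cast)
//     ret void
//   }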

// APIs to create InlineParams based on command line flags and/or other
// parameters.

InlineParams llvm::getInlineParams(int Threshold) {
  InlineParams Params;

  // This field is the threshold to use for a callee by default. This is
  // derived from one or more of:
  //  * optimization or size-optimization levels,
  //  * a value passed to the createFunctionInliningPass function, or
  //  * the -inline-threshold flag.
  // If the -inline-threshold flag is explicitly specified, that is used
  // irrespective of anything else.
  if (InlineThreshold.getNumOccurrences() > 0)
    Params.DefaultThreshold = InlineThreshold;
  else
    Params.DefaultThreshold = Threshold;

  // Set the HintThreshold knob from the -inlinehint-threshold.
  Params.HintThreshold = HintThreshold;

  // Set the HotCallSiteThreshold knob from the -hot-callsite-threshold.
  Params.HotCallSiteThreshold = HotCallSiteThreshold;

  // If the -locally-hot-callsite-threshold is explicitly specified, use it to
  // populate LocallyHotCallSiteThreshold. Later, we populate
  // Params.LocallyHotCallSiteThreshold from -locally-hot-callsite-threshold if
  // we know that the optimization level is O3 (in the getInlineParams variant
  // that takes the opt and size levels).
  // FIXME: Remove this check (and make the assignment unconditional) after
  // addressing size regression issues at O2.
  if (LocallyHotCallSiteThreshold.getNumOccurrences() > 0)
    Params.LocallyHotCallSiteThreshold = LocallyHotCallSiteThreshold;

  // Set the ColdCallSiteThreshold knob from the
  // -inline-cold-callsite-threshold.
  Params.ColdCallSiteThreshold = ColdCallSiteThreshold;

  // Set the OptMinSizeThreshold and OptSizeThreshold params only if the
  // -inline-threshold commandline option is not explicitly given. If that
  // option is present, then its value applies even for callees with size and
  // minsize attributes.
  // If -inline-threshold is not specified, set the ColdThreshold from the
  // -inlinecold-threshold even if it is not explicitly passed. If
  // -inline-threshold is specified, then -inlinecold-threshold needs to be
  // explicitly specified to set the ColdThreshold knob.
  if (InlineThreshold.getNumOccurrences() == 0) {
    Params.OptMinSizeThreshold = InlineConstants::OptMinSizeThreshold;
    Params.OptSizeThreshold = InlineConstants::OptSizeThreshold;
    Params.ColdThreshold = ColdThreshold;
  } else if (ColdThreshold.getNumOccurrences() > 0) {
    Params.ColdThreshold = ColdThreshold;
  }
  return Params;
}
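
// Worked example of the precedence implemented above (a sketch; 300 is an
// arbitrary value chosen for the example):
//
//   InlineParams P = getInlineParams(/*Threshold=*/300);
//   // Without -inline-threshold on the command line:
//   //   P.DefaultThreshold == 300, and OptSizeThreshold/OptMinSizeThreshold
//   //   are populated so optsize/minsize callees get the smaller limits.
//   // With -inline-threshold=500:
//   //   P.DefaultThreshold == 500, and the size-attribute thresholds are left
//   //   unset, so the explicitly requested value also governs optsize and
//   //   minsize callees.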

InlineParams llvm::getInlineParams() {
  return getInlineParams(DefaultThreshold);
}

// Compute the default threshold for inlining based on the opt level and the
// size opt level.
static int computeThresholdFromOptLevels(unsigned OptLevel,
                                         unsigned SizeOptLevel) {
  if (OptLevel > 2)
    return InlineConstants::OptAggressiveThreshold;
  if (SizeOptLevel == 1) // -Os
    return InlineConstants::OptSizeThreshold;
  if (SizeOptLevel == 2) // -Oz
    return InlineConstants::OptMinSizeThreshold;
  return DefaultThreshold;
}
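
// For example (expressed in terms of the constants above, not their numeric
// values):
//   computeThresholdFromOptLevels(3, 0) == OptAggressiveThreshold  // -O3
//   computeThresholdFromOptLevels(2, 1) == OptSizeThreshold        // -Os
//   computeThresholdFromOptLevels(2, 2) == OptMinSizeThreshold     // -Oz
//   computeThresholdFromOptLevels(2, 0) == DefaultThreshold        // -O2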

InlineParams llvm::getInlineParams(unsigned OptLevel, unsigned SizeOptLevel) {
  auto Params =
      getInlineParams(computeThresholdFromOptLevels(OptLevel, SizeOptLevel));
  // At O3, use the value of the -locally-hot-callsite-threshold option to
  // populate Params.LocallyHotCallSiteThreshold. Below O3, this flag has an
  // effect only when it is specified explicitly.
  if (OptLevel > 2)
    Params.LocallyHotCallSiteThreshold = LocallyHotCallSiteThreshold;
  return Params;
}
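
// Usage sketch (the pipeline-construction context is assumed, not shown here):
//
//   InlineParams Params = getInlineParams(/*OptLevel=*/3, /*SizeOptLevel=*/0);
//   // Because OptLevel > 2, Params.LocallyHotCallSiteThreshold is populated
//   // from -locally-hot-callsite-threshold even when that flag is left at
//   // its default.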

PreservedAnalyses
InlineCostAnnotationPrinterPass::run(Function &F,
                                     FunctionAnalysisManager &FAM) {
  PrintInstructionComments = true;
  std::function<AssumptionCache &(Function &)> GetAssumptionCache =
      [&](Function &F) -> AssumptionCache & {
    return FAM.getResult<AssumptionAnalysis>(F);
  };
  Module *M = F.getParent();
  ProfileSummaryInfo PSI(*M);
  DataLayout DL(M);
  TargetTransformInfo TTI(DL);
  // FIXME: Redesign the usage of InlineParams to expand the scope of this pass.
  // In the current implementation, the type of InlineParams doesn't matter as
  // the pass serves only for verification of the inliner's decisions.
  // We could add a flag which determines the InlineParams for this run. Right
  // now, the default InlineParams are used.
  const InlineParams Params = llvm::getInlineParams();
  for (BasicBlock &BB : F) {
    for (Instruction &I : BB) {
      if (CallInst *CI = dyn_cast<CallInst>(&I)) {
        Function *CalledFunction = CI->getCalledFunction();
        if (!CalledFunction || CalledFunction->isDeclaration())
          continue;
        OptimizationRemarkEmitter ORE(CalledFunction);
        InlineCostCallAnalyzer ICCA(*CalledFunction, *CI, Params, TTI,
                                    GetAssumptionCache, nullptr, &PSI, &ORE);
        ICCA.analyze();
        OS << "      Analyzing call of " << CalledFunction->getName()
           << "... (caller:" << CI->getCaller()->getName() << ")\n";
        ICCA.print();
      }
    }
  }
  return PreservedAnalyses::all();
}
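
// Usage sketch: this printer pass is normally driven from opt. The pass name
// below is assumed to match its registration in PassRegistry.def; check there
// if the invocation fails:
//
//   opt -passes='print<inline-cost>' -disable-output input.ll
//
// PrintInstructionComments is forced on above, so the dump includes
// per-instruction cost annotations for each analyzed call site.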