1 //===- InlineCost.cpp - Cost analysis for inliner -------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements inline cost analysis.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/Analysis/InlineCost.h"
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/ADT/SetVector.h"
16 #include "llvm/ADT/SmallPtrSet.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/ADT/Statistic.h"
19 #include "llvm/Analysis/AssumptionCache.h"
20 #include "llvm/Analysis/BlockFrequencyInfo.h"
21 #include "llvm/Analysis/CFG.h"
22 #include "llvm/Analysis/CodeMetrics.h"
23 #include "llvm/Analysis/ConstantFolding.h"
24 #include "llvm/Analysis/InstructionSimplify.h"
25 #include "llvm/Analysis/LoopInfo.h"
26 #include "llvm/Analysis/ProfileSummaryInfo.h"
27 #include "llvm/Analysis/TargetLibraryInfo.h"
28 #include "llvm/Analysis/TargetTransformInfo.h"
29 #include "llvm/Analysis/ValueTracking.h"
30 #include "llvm/Config/llvm-config.h"
31 #include "llvm/IR/AssemblyAnnotationWriter.h"
32 #include "llvm/IR/CallingConv.h"
33 #include "llvm/IR/DataLayout.h"
34 #include "llvm/IR/Dominators.h"
35 #include "llvm/IR/GetElementPtrTypeIterator.h"
36 #include "llvm/IR/GlobalAlias.h"
37 #include "llvm/IR/InstVisitor.h"
38 #include "llvm/IR/IntrinsicInst.h"
39 #include "llvm/IR/Operator.h"
40 #include "llvm/IR/PatternMatch.h"
41 #include "llvm/Support/CommandLine.h"
42 #include "llvm/Support/Debug.h"
43 #include "llvm/Support/FormattedStream.h"
44 #include "llvm/Support/raw_ostream.h"
45 
46 using namespace llvm;
47 
48 #define DEBUG_TYPE "inline-cost"
49 
50 STATISTIC(NumCallsAnalyzed, "Number of call sites analyzed");
51 
52 static cl::opt<int>
53     DefaultThreshold("inlinedefault-threshold", cl::Hidden, cl::init(225),
54                      cl::ZeroOrMore,
55                      cl::desc("Default amount of inlining to perform"));
56 
57 static cl::opt<bool> PrintInstructionComments(
58     "print-instruction-comments", cl::Hidden, cl::init(false),
59     cl::desc("Prints comments for instruction based on inline cost analysis"));
60 
61 static cl::opt<int> InlineThreshold(
62     "inline-threshold", cl::Hidden, cl::init(225), cl::ZeroOrMore,
63     cl::desc("Control the amount of inlining to perform (default = 225)"));
64 
65 static cl::opt<int> HintThreshold(
66     "inlinehint-threshold", cl::Hidden, cl::init(325), cl::ZeroOrMore,
67     cl::desc("Threshold for inlining functions with inline hint"));
68 
69 static cl::opt<int>
70     ColdCallSiteThreshold("inline-cold-callsite-threshold", cl::Hidden,
71                           cl::init(45), cl::ZeroOrMore,
72                           cl::desc("Threshold for inlining cold callsites"));
73 
74 static cl::opt<bool> InlineEnableCostBenefitAnalysis(
75     "inline-enable-cost-benefit-analysis", cl::Hidden, cl::init(false),
76     cl::desc("Enable the cost-benefit analysis for the inliner"));
77 
78 static cl::opt<int> InlineSavingsMultiplier(
79     "inline-savings-multiplier", cl::Hidden, cl::init(8), cl::ZeroOrMore,
80     cl::desc("Multiplier to multiply cycle savings by during inlining"));
81 
82 static cl::opt<int>
83     InlineSizeAllowance("inline-size-allowance", cl::Hidden, cl::init(100),
84                         cl::ZeroOrMore,
85                         cl::desc("The maximum size of a callee that get's "
86                                  "inlined without sufficient cycle savings"));
87 
88 // We introduce this threshold to help performance of instrumentation based
89 // PGO before we actually hook up inliner with analysis passes such as BPI and
90 // BFI.
91 static cl::opt<int> ColdThreshold(
92     "inlinecold-threshold", cl::Hidden, cl::init(45), cl::ZeroOrMore,
93     cl::desc("Threshold for inlining functions with cold attribute"));
94 
95 static cl::opt<int>
96     HotCallSiteThreshold("hot-callsite-threshold", cl::Hidden, cl::init(3000),
97                          cl::ZeroOrMore,
98                          cl::desc("Threshold for hot callsites "));
99 
100 static cl::opt<int> LocallyHotCallSiteThreshold(
101     "locally-hot-callsite-threshold", cl::Hidden, cl::init(525), cl::ZeroOrMore,
102     cl::desc("Threshold for locally hot callsites "));
103 
104 static cl::opt<int> ColdCallSiteRelFreq(
105     "cold-callsite-rel-freq", cl::Hidden, cl::init(2), cl::ZeroOrMore,
106     cl::desc("Maximum block frequency, expressed as a percentage of caller's "
107              "entry frequency, for a callsite to be cold in the absence of "
108              "profile information."));
109 
110 static cl::opt<int> HotCallSiteRelFreq(
111     "hot-callsite-rel-freq", cl::Hidden, cl::init(60), cl::ZeroOrMore,
112     cl::desc("Minimum block frequency, expressed as a multiple of caller's "
113              "entry frequency, for a callsite to be hot in the absence of "
114              "profile information."));
115 
116 static cl::opt<bool> OptComputeFullInlineCost(
117     "inline-cost-full", cl::Hidden, cl::init(false), cl::ZeroOrMore,
118     cl::desc("Compute the full inline cost of a call site even when the cost "
119              "exceeds the threshold."));
120 
121 static cl::opt<bool> InlineCallerSupersetNoBuiltin(
122     "inline-caller-superset-nobuiltin", cl::Hidden, cl::init(true),
123     cl::ZeroOrMore,
124     cl::desc("Allow inlining when caller has a superset of callee's nobuiltin "
125              "attributes."));
126 
127 static cl::opt<bool> DisableGEPConstOperand(
128     "disable-gep-const-evaluation", cl::Hidden, cl::init(false),
129     cl::desc("Disables evaluation of GetElementPtr with constant operands"));
130 
131 namespace {
132 class InlineCostCallAnalyzer;
133 
134 // This struct is used to store information about inline cost of a
135 // particular instruction
136 struct InstructionCostDetail {
137   int CostBefore = 0;
138   int CostAfter = 0;
139   int ThresholdBefore = 0;
140   int ThresholdAfter = 0;
141 
142   int getThresholdDelta() const { return ThresholdAfter - ThresholdBefore; }
143 
144   int getCostDelta() const { return CostAfter - CostBefore; }
145 
146   bool hasThresholdChanged() const { return ThresholdAfter != ThresholdBefore; }
147 };
148 
149 class InlineCostAnnotationWriter : public AssemblyAnnotationWriter {
150 private:
151   InlineCostCallAnalyzer *const ICCA;
152 
153 public:
154   InlineCostAnnotationWriter(InlineCostCallAnalyzer *ICCA) : ICCA(ICCA) {}
155   virtual void emitInstructionAnnot(const Instruction *I,
156                                     formatted_raw_ostream &OS) override;
157 };
158 
159 /// Carry out call site analysis, in order to evaluate inlinability.
160 /// NOTE: the type is currently used as implementation detail of functions such
161 /// as llvm::getInlineCost. Note the function_ref constructor parameters - the
162 /// expectation is that they come from the outer scope, from the wrapper
163 /// functions. If we want to support constructing CallAnalyzer objects where
164 /// lambdas are provided inline at construction, or where the object needs to
165 /// otherwise survive past the scope of the provided functions, we need to
166 /// revisit the argument types.
167 class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
168   typedef InstVisitor<CallAnalyzer, bool> Base;
169   friend class InstVisitor<CallAnalyzer, bool>;
170 
171 protected:
172   virtual ~CallAnalyzer() {}
173   /// The TargetTransformInfo available for this compilation.
174   const TargetTransformInfo &TTI;
175 
176   /// Getter for the cache of @llvm.assume intrinsics.
177   function_ref<AssumptionCache &(Function &)> GetAssumptionCache;
178 
179   /// Getter for BlockFrequencyInfo
180   function_ref<BlockFrequencyInfo &(Function &)> GetBFI;
181 
182   /// Profile summary information.
183   ProfileSummaryInfo *PSI;
184 
185   /// The called function.
186   Function &F;
187 
188   // Cache the DataLayout since we use it a lot.
189   const DataLayout &DL;
190 
191   /// The OptimizationRemarkEmitter available for this compilation.
192   OptimizationRemarkEmitter *ORE;
193 
194   /// The candidate callsite being analyzed. Please do not use this to do
195   /// analysis in the caller function; we want the inline cost query to be
196   /// easily cacheable. Instead, use the cover function paramHasAttr.
197   CallBase &CandidateCall;
198 
199   /// Extension points for handling callsite features.
200   // Called before a basic block was analyzed.
201   virtual void onBlockStart(const BasicBlock *BB) {}
202 
203   /// Called after a basic block was analyzed.
204   virtual void onBlockAnalyzed(const BasicBlock *BB) {}
205 
206   /// Called before an instruction was analyzed
207   virtual void onInstructionAnalysisStart(const Instruction *I) {}
208 
209   /// Called after an instruction was analyzed
210   virtual void onInstructionAnalysisFinish(const Instruction *I) {}
211 
212   /// Called at the end of the analysis of the callsite. Return the outcome of
213   /// the analysis, i.e. 'InlineResult(true)' if the inlining may happen, or
214   /// the reason it can't.
215   virtual InlineResult finalizeAnalysis() { return InlineResult::success(); }
216   /// Called when we're about to start processing a basic block, and every time
217   /// we are done processing an instruction. Return true if there is no point in
218   /// continuing the analysis (e.g. we've determined already the call site is
219   /// too expensive to inline)
220   virtual bool shouldStop() { return false; }
221 
222   /// Called before the analysis of the callee body starts (with callsite
223   /// contexts propagated).  It checks callsite-specific information. Return a
224   /// reason analysis can't continue if that's the case, or 'true' if it may
225   /// continue.
226   virtual InlineResult onAnalysisStart() { return InlineResult::success(); }
227   /// Called if the analysis engine decides SROA cannot be done for the given
228   /// alloca.
229   virtual void onDisableSROA(AllocaInst *Arg) {}
230 
231   /// Called the analysis engine determines load elimination won't happen.
232   virtual void onDisableLoadElimination() {}
233 
234   /// Called to account for a call.
235   virtual void onCallPenalty() {}
236 
237   /// Called to account for the expectation the inlining would result in a load
238   /// elimination.
239   virtual void onLoadEliminationOpportunity() {}
240 
241   /// Called to account for the cost of argument setup for the Call in the
242   /// callee's body (not the callsite currently under analysis).
243   virtual void onCallArgumentSetup(const CallBase &Call) {}
244 
245   /// Called to account for a load relative intrinsic.
246   virtual void onLoadRelativeIntrinsic() {}
247 
248   /// Called to account for a lowered call.
249   virtual void onLoweredCall(Function *F, CallBase &Call, bool IsIndirectCall) {
250   }
251 
252   /// Account for a jump table of given size. Return false to stop further
253   /// processing the switch instruction
254   virtual bool onJumpTable(unsigned JumpTableSize) { return true; }
255 
256   /// Account for a case cluster of given size. Return false to stop further
257   /// processing of the instruction.
258   virtual bool onCaseCluster(unsigned NumCaseCluster) { return true; }
259 
260   /// Called at the end of processing a switch instruction, with the given
261   /// number of case clusters.
262   virtual void onFinalizeSwitch(unsigned JumpTableSize,
263                                 unsigned NumCaseCluster) {}
264 
265   /// Called to account for any other instruction not specifically accounted
266   /// for.
267   virtual void onMissedSimplification() {}
268 
269   /// Start accounting potential benefits due to SROA for the given alloca.
270   virtual void onInitializeSROAArg(AllocaInst *Arg) {}
271 
272   /// Account SROA savings for the AllocaInst value.
273   virtual void onAggregateSROAUse(AllocaInst *V) {}
274 
275   bool handleSROA(Value *V, bool DoNotDisable) {
276     // Check for SROA candidates in comparisons.
277     if (auto *SROAArg = getSROAArgForValueOrNull(V)) {
278       if (DoNotDisable) {
279         onAggregateSROAUse(SROAArg);
280         return true;
281       }
282       disableSROAForArg(SROAArg);
283     }
284     return false;
285   }
286 
287   bool IsCallerRecursive = false;
288   bool IsRecursiveCall = false;
289   bool ExposesReturnsTwice = false;
290   bool HasDynamicAlloca = false;
291   bool ContainsNoDuplicateCall = false;
292   bool HasReturn = false;
293   bool HasIndirectBr = false;
294   bool HasUninlineableIntrinsic = false;
295   bool InitsVargArgs = false;
296 
297   /// Number of bytes allocated statically by the callee.
298   uint64_t AllocatedSize = 0;
299   unsigned NumInstructions = 0;
300   unsigned NumVectorInstructions = 0;
301 
302   /// While we walk the potentially-inlined instructions, we build up and
303   /// maintain a mapping of simplified values specific to this callsite. The
304   /// idea is to propagate any special information we have about arguments to
305   /// this call through the inlinable section of the function, and account for
306   /// likely simplifications post-inlining. The most important aspect we track
307   /// is CFG altering simplifications -- when we prove a basic block dead, that
308   /// can cause dramatic shifts in the cost of inlining a function.
309   DenseMap<Value *, Constant *> SimplifiedValues;
310 
311   /// Keep track of the values which map back (through function arguments) to
312   /// allocas on the caller stack which could be simplified through SROA.
313   DenseMap<Value *, AllocaInst *> SROAArgValues;
314 
315   /// Keep track of Allocas for which we believe we may get SROA optimization.
316   DenseSet<AllocaInst *> EnabledSROAAllocas;
317 
318   /// Keep track of values which map to a pointer base and constant offset.
319   DenseMap<Value *, std::pair<Value *, APInt>> ConstantOffsetPtrs;
320 
321   /// Keep track of dead blocks due to the constant arguments.
322   SetVector<BasicBlock *> DeadBlocks;
323 
324   /// The mapping of the blocks to their known unique successors due to the
325   /// constant arguments.
326   DenseMap<BasicBlock *, BasicBlock *> KnownSuccessors;
327 
328   /// Model the elimination of repeated loads that is expected to happen
329   /// whenever we simplify away the stores that would otherwise cause them to be
330   /// loads.
331   bool EnableLoadElimination;
332   SmallPtrSet<Value *, 16> LoadAddrSet;
333 
334   AllocaInst *getSROAArgForValueOrNull(Value *V) const {
335     auto It = SROAArgValues.find(V);
336     if (It == SROAArgValues.end() || EnabledSROAAllocas.count(It->second) == 0)
337       return nullptr;
338     return It->second;
339   }
340 
341   // Custom simplification helper routines.
342   bool isAllocaDerivedArg(Value *V);
343   void disableSROAForArg(AllocaInst *SROAArg);
344   void disableSROA(Value *V);
345   void findDeadBlocks(BasicBlock *CurrBB, BasicBlock *NextBB);
346   void disableLoadElimination();
347   bool isGEPFree(GetElementPtrInst &GEP);
348   bool canFoldInboundsGEP(GetElementPtrInst &I);
349   bool accumulateGEPOffset(GEPOperator &GEP, APInt &Offset);
350   bool simplifyCallSite(Function *F, CallBase &Call);
351   template <typename Callable>
352   bool simplifyInstruction(Instruction &I, Callable Evaluate);
353   ConstantInt *stripAndComputeInBoundsConstantOffsets(Value *&V);
354 
355   /// Return true if the given argument to the function being considered for
356   /// inlining has the given attribute set either at the call site or the
357   /// function declaration.  Primarily used to inspect call site specific
358   /// attributes since these can be more precise than the ones on the callee
359   /// itself.
360   bool paramHasAttr(Argument *A, Attribute::AttrKind Attr);
361 
362   /// Return true if the given value is known non null within the callee if
363   /// inlined through this particular callsite.
364   bool isKnownNonNullInCallee(Value *V);
365 
366   /// Return true if size growth is allowed when inlining the callee at \p Call.
367   bool allowSizeGrowth(CallBase &Call);
368 
369   // Custom analysis routines.
370   InlineResult analyzeBlock(BasicBlock *BB,
371                             SmallPtrSetImpl<const Value *> &EphValues);
372 
373   // Disable several entry points to the visitor so we don't accidentally use
374   // them by declaring but not defining them here.
375   void visit(Module *);
376   void visit(Module &);
377   void visit(Function *);
378   void visit(Function &);
379   void visit(BasicBlock *);
380   void visit(BasicBlock &);
381 
382   // Provide base case for our instruction visit.
383   bool visitInstruction(Instruction &I);
384 
385   // Our visit overrides.
386   bool visitAlloca(AllocaInst &I);
387   bool visitPHI(PHINode &I);
388   bool visitGetElementPtr(GetElementPtrInst &I);
389   bool visitBitCast(BitCastInst &I);
390   bool visitPtrToInt(PtrToIntInst &I);
391   bool visitIntToPtr(IntToPtrInst &I);
392   bool visitCastInst(CastInst &I);
393   bool visitUnaryInstruction(UnaryInstruction &I);
394   bool visitCmpInst(CmpInst &I);
395   bool visitSub(BinaryOperator &I);
396   bool visitBinaryOperator(BinaryOperator &I);
397   bool visitFNeg(UnaryOperator &I);
398   bool visitLoad(LoadInst &I);
399   bool visitStore(StoreInst &I);
400   bool visitExtractValue(ExtractValueInst &I);
401   bool visitInsertValue(InsertValueInst &I);
402   bool visitCallBase(CallBase &Call);
403   bool visitReturnInst(ReturnInst &RI);
404   bool visitBranchInst(BranchInst &BI);
405   bool visitSelectInst(SelectInst &SI);
406   bool visitSwitchInst(SwitchInst &SI);
407   bool visitIndirectBrInst(IndirectBrInst &IBI);
408   bool visitResumeInst(ResumeInst &RI);
409   bool visitCleanupReturnInst(CleanupReturnInst &RI);
410   bool visitCatchReturnInst(CatchReturnInst &RI);
411   bool visitUnreachableInst(UnreachableInst &I);
412 
413 public:
414   CallAnalyzer(
415       Function &Callee, CallBase &Call, const TargetTransformInfo &TTI,
416       function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
417       function_ref<BlockFrequencyInfo &(Function &)> GetBFI = nullptr,
418       ProfileSummaryInfo *PSI = nullptr,
419       OptimizationRemarkEmitter *ORE = nullptr)
420       : TTI(TTI), GetAssumptionCache(GetAssumptionCache), GetBFI(GetBFI),
421         PSI(PSI), F(Callee), DL(F.getParent()->getDataLayout()), ORE(ORE),
422         CandidateCall(Call), EnableLoadElimination(true) {}
423 
424   InlineResult analyze();
425 
426   Optional<Constant*> getSimplifiedValue(Instruction *I) {
427     if (SimplifiedValues.find(I) != SimplifiedValues.end())
428       return SimplifiedValues[I];
429     return None;
430   }
431 
432   // Keep a bunch of stats about the cost savings found so we can print them
433   // out when debugging.
434   unsigned NumConstantArgs = 0;
435   unsigned NumConstantOffsetPtrArgs = 0;
436   unsigned NumAllocaArgs = 0;
437   unsigned NumConstantPtrCmps = 0;
438   unsigned NumConstantPtrDiffs = 0;
439   unsigned NumInstructionsSimplified = 0;
440 
441   void dump();
442 };
443 
444 /// FIXME: if it is necessary to derive from InlineCostCallAnalyzer, note
445 /// the FIXME in onLoweredCall, when instantiating an InlineCostCallAnalyzer
446 class InlineCostCallAnalyzer final : public CallAnalyzer {
447   const int CostUpperBound = INT_MAX - InlineConstants::InstrCost - 1;
448   const bool ComputeFullInlineCost;
449   int LoadEliminationCost = 0;
450   /// Bonus to be applied when percentage of vector instructions in callee is
451   /// high (see more details in updateThreshold).
452   int VectorBonus = 0;
453   /// Bonus to be applied when the callee has only one reachable basic block.
454   int SingleBBBonus = 0;
455 
456   /// Tunable parameters that control the analysis.
457   const InlineParams &Params;
458 
459   // This DenseMap stores the delta change in cost and threshold after
460   // accounting for the given instruction. The map is filled only with the
461   // flag PrintInstructionComments on.
462   DenseMap<const Instruction *, InstructionCostDetail> InstructionCostDetailMap;
463 
464   /// Upper bound for the inlining cost. Bonuses are being applied to account
465   /// for speculative "expected profit" of the inlining decision.
466   int Threshold = 0;
467 
468   /// Attempt to evaluate indirect calls to boost its inline cost.
469   const bool BoostIndirectCalls;
470 
471   /// Ignore the threshold when finalizing analysis.
472   const bool IgnoreThreshold;
473 
474   // True if the cost-benefit-analysis-based inliner is enabled.
475   const bool CostBenefitAnalysisEnabled;
476 
477   /// Inlining cost measured in abstract units, accounts for all the
478   /// instructions expected to be executed for a given function invocation.
479   /// Instructions that are statically proven to be dead based on call-site
480   /// arguments are not counted here.
481   int Cost = 0;
482 
483   // The cumulative cost at the beginning of the basic block being analyzed.  At
484   // the end of analyzing each basic block, "Cost - CostAtBBStart" represents
485   // the size of that basic block.
486   int CostAtBBStart = 0;
487 
488   // The static size of live but cold basic blocks.  This is "static" in the
489   // sense that it's not weighted by profile counts at all.
490   int ColdSize = 0;
491 
492   // Whether inlining is decided by cost-benefit analysis.
493   bool DecidedByCostBenefit = false;
494 
495   bool SingleBB = true;
496 
497   unsigned SROACostSavings = 0;
498   unsigned SROACostSavingsLost = 0;
499 
500   /// The mapping of caller Alloca values to their accumulated cost savings. If
501   /// we have to disable SROA for one of the allocas, this tells us how much
502   /// cost must be added.
503   DenseMap<AllocaInst *, int> SROAArgCosts;
504 
505   /// Return true if \p Call is a cold callsite.
506   bool isColdCallSite(CallBase &Call, BlockFrequencyInfo *CallerBFI);
507 
508   /// Update Threshold based on callsite properties such as callee
509   /// attributes and callee hotness for PGO builds. The Callee is explicitly
510   /// passed to support analyzing indirect calls whose target is inferred by
511   /// analysis.
512   void updateThreshold(CallBase &Call, Function &Callee);
513   /// Return a higher threshold if \p Call is a hot callsite.
514   Optional<int> getHotCallSiteThreshold(CallBase &Call,
515                                         BlockFrequencyInfo *CallerBFI);
516 
517   /// Handle a capped 'int' increment for Cost.
518   void addCost(int64_t Inc, int64_t UpperBound = INT_MAX) {
519     assert(UpperBound > 0 && UpperBound <= INT_MAX && "invalid upper bound");
520     Cost = (int)std::min(UpperBound, Cost + Inc);
521   }
522 
523   void onDisableSROA(AllocaInst *Arg) override {
524     auto CostIt = SROAArgCosts.find(Arg);
525     if (CostIt == SROAArgCosts.end())
526       return;
527     addCost(CostIt->second);
528     SROACostSavings -= CostIt->second;
529     SROACostSavingsLost += CostIt->second;
530     SROAArgCosts.erase(CostIt);
531   }
532 
533   void onDisableLoadElimination() override {
534     addCost(LoadEliminationCost);
535     LoadEliminationCost = 0;
536   }
537   void onCallPenalty() override { addCost(InlineConstants::CallPenalty); }
538   void onCallArgumentSetup(const CallBase &Call) override {
539     // Pay the price of the argument setup. We account for the average 1
540     // instruction per call argument setup here.
541     addCost(Call.arg_size() * InlineConstants::InstrCost);
542   }
543   void onLoadRelativeIntrinsic() override {
544     // This is normally lowered to 4 LLVM instructions.
545     addCost(3 * InlineConstants::InstrCost);
546   }
547   void onLoweredCall(Function *F, CallBase &Call,
548                      bool IsIndirectCall) override {
549     // We account for the average 1 instruction per call argument setup here.
550     addCost(Call.arg_size() * InlineConstants::InstrCost);
551 
552     // If we have a constant that we are calling as a function, we can peer
553     // through it and see the function target. This happens not infrequently
554     // during devirtualization and so we want to give it a hefty bonus for
555     // inlining, but cap that bonus in the event that inlining wouldn't pan out.
556     // Pretend to inline the function, with a custom threshold.
557     if (IsIndirectCall && BoostIndirectCalls) {
558       auto IndirectCallParams = Params;
559       IndirectCallParams.DefaultThreshold =
560           InlineConstants::IndirectCallThreshold;
561       /// FIXME: if InlineCostCallAnalyzer is derived from, this may need
562       /// to instantiate the derived class.
563       InlineCostCallAnalyzer CA(*F, Call, IndirectCallParams, TTI,
564                                 GetAssumptionCache, GetBFI, PSI, ORE, false);
565       if (CA.analyze().isSuccess()) {
566         // We were able to inline the indirect call! Subtract the cost from the
567         // threshold to get the bonus we want to apply, but don't go below zero.
568         Cost -= std::max(0, CA.getThreshold() - CA.getCost());
569       }
570     } else
571       // Otherwise simply add the cost for merely making the call.
572       addCost(InlineConstants::CallPenalty);
573   }
574 
575   void onFinalizeSwitch(unsigned JumpTableSize,
576                         unsigned NumCaseCluster) override {
577     // If suitable for a jump table, consider the cost for the table size and
578     // branch to destination.
579     // Maximum valid cost increased in this function.
580     if (JumpTableSize) {
581       int64_t JTCost = (int64_t)JumpTableSize * InlineConstants::InstrCost +
582                        4 * InlineConstants::InstrCost;
583 
584       addCost(JTCost, (int64_t)CostUpperBound);
585       return;
586     }
587     // Considering forming a binary search, we should find the number of nodes
588     // which is same as the number of comparisons when lowered. For a given
589     // number of clusters, n, we can define a recursive function, f(n), to find
590     // the number of nodes in the tree. The recursion is :
591     // f(n) = 1 + f(n/2) + f (n - n/2), when n > 3,
592     // and f(n) = n, when n <= 3.
593     // This will lead a binary tree where the leaf should be either f(2) or f(3)
594     // when n > 3.  So, the number of comparisons from leaves should be n, while
595     // the number of non-leaf should be :
596     //   2^(log2(n) - 1) - 1
597     //   = 2^log2(n) * 2^-1 - 1
598     //   = n / 2 - 1.
599     // Considering comparisons from leaf and non-leaf nodes, we can estimate the
600     // number of comparisons in a simple closed form :
601     //   n + n / 2 - 1 = n * 3 / 2 - 1
602     if (NumCaseCluster <= 3) {
603       // Suppose a comparison includes one compare and one conditional branch.
604       addCost(NumCaseCluster * 2 * InlineConstants::InstrCost);
605       return;
606     }
607 
608     int64_t ExpectedNumberOfCompare = 3 * (int64_t)NumCaseCluster / 2 - 1;
609     int64_t SwitchCost =
610         ExpectedNumberOfCompare * 2 * InlineConstants::InstrCost;
611 
612     addCost(SwitchCost, (int64_t)CostUpperBound);
613   }
614   void onMissedSimplification() override {
615     addCost(InlineConstants::InstrCost);
616   }
617 
618   void onInitializeSROAArg(AllocaInst *Arg) override {
619     assert(Arg != nullptr &&
620            "Should not initialize SROA costs for null value.");
621     SROAArgCosts[Arg] = 0;
622   }
623 
624   void onAggregateSROAUse(AllocaInst *SROAArg) override {
625     auto CostIt = SROAArgCosts.find(SROAArg);
626     assert(CostIt != SROAArgCosts.end() &&
627            "expected this argument to have a cost");
628     CostIt->second += InlineConstants::InstrCost;
629     SROACostSavings += InlineConstants::InstrCost;
630   }
631 
632   void onBlockStart(const BasicBlock *BB) override { CostAtBBStart = Cost; }
633 
634   void onBlockAnalyzed(const BasicBlock *BB) override {
635     if (CostBenefitAnalysisEnabled) {
636       // Keep track of the static size of live but cold basic blocks.  For now,
637       // we define a cold basic block to be one that's never executed.
638       assert(GetBFI && "GetBFI must be available");
639       BlockFrequencyInfo *BFI = &(GetBFI(F));
640       assert(BFI && "BFI must be available");
641       auto ProfileCount = BFI->getBlockProfileCount(BB);
642       assert(ProfileCount.hasValue());
643       if (ProfileCount.getValue() == 0)
644         ColdSize += Cost - CostAtBBStart;
645     }
646 
647     auto *TI = BB->getTerminator();
648     // If we had any successors at this point, than post-inlining is likely to
649     // have them as well. Note that we assume any basic blocks which existed
650     // due to branches or switches which folded above will also fold after
651     // inlining.
652     if (SingleBB && TI->getNumSuccessors() > 1) {
653       // Take off the bonus we applied to the threshold.
654       Threshold -= SingleBBBonus;
655       SingleBB = false;
656     }
657   }
658 
659   void onInstructionAnalysisStart(const Instruction *I) override {
660     // This function is called to store the initial cost of inlining before
661     // the given instruction was assessed.
662     if (!PrintInstructionComments)
663       return;
664     InstructionCostDetailMap[I].CostBefore = Cost;
665     InstructionCostDetailMap[I].ThresholdBefore = Threshold;
666   }
667 
668   void onInstructionAnalysisFinish(const Instruction *I) override {
669     // This function is called to find new values of cost and threshold after
670     // the instruction has been assessed.
671     if (!PrintInstructionComments)
672       return;
673     InstructionCostDetailMap[I].CostAfter = Cost;
674     InstructionCostDetailMap[I].ThresholdAfter = Threshold;
675   }
676 
677   bool isCostBenefitAnalysisEnabled() {
678     if (!PSI || !PSI->hasProfileSummary())
679       return false;
680 
681     if (!GetBFI)
682       return false;
683 
684     if (InlineEnableCostBenefitAnalysis.getNumOccurrences()) {
685       // Honor the explicit request from the user.
686       if (!InlineEnableCostBenefitAnalysis)
687         return false;
688     } else {
689       // Otherwise, require instrumentation profile.
690       if (!PSI->hasInstrumentationProfile())
691         return false;
692     }
693 
694     auto *Caller = CandidateCall.getParent()->getParent();
695     if (!Caller->getEntryCount())
696       return false;
697 
698     BlockFrequencyInfo *CallerBFI = &(GetBFI(*Caller));
699     if (!CallerBFI)
700       return false;
701 
702     // For now, limit to hot call site.
703     if (!PSI->isHotCallSite(CandidateCall, CallerBFI))
704       return false;
705 
706     // Make sure we have a nonzero entry count.
707     auto EntryCount = F.getEntryCount();
708     if (!EntryCount || !EntryCount.getCount())
709       return false;
710 
711     BlockFrequencyInfo *CalleeBFI = &(GetBFI(F));
712     if (!CalleeBFI)
713       return false;
714 
715     return true;
716   }
717 
718   // Determine whether we should inline the given call site, taking into account
719   // both the size cost and the cycle savings.  Return None if we don't have
720   // suficient profiling information to determine.
721   Optional<bool> costBenefitAnalysis() {
722     if (!CostBenefitAnalysisEnabled)
723       return None;
724 
725     // buildInlinerPipeline in the pass builder sets HotCallSiteThreshold to 0
726     // for the prelink phase of the AutoFDO + ThinLTO build.  Honor the logic by
727     // falling back to the cost-based metric.
728     // TODO: Improve this hacky condition.
729     if (Threshold == 0)
730       return None;
731 
732     assert(GetBFI);
733     BlockFrequencyInfo *CalleeBFI = &(GetBFI(F));
734     assert(CalleeBFI);
735 
736     // The cycle savings expressed as the sum of InlineConstants::InstrCost
737     // multiplied by the estimated dynamic count of each instruction we can
738     // avoid.  Savings come from the call site cost, such as argument setup and
739     // the call instruction, as well as the instructions that are folded.
740     //
741     // We use 128-bit APInt here to avoid potential overflow.  This variable
742     // should stay well below 10^^24 (or 2^^80) in practice.  This "worst" case
743     // assumes that we can avoid or fold a billion instructions, each with a
744     // profile count of 10^^15 -- roughly the number of cycles for a 24-hour
745     // period on a 4GHz machine.
746     APInt CycleSavings(128, 0);
747 
748     for (auto &BB : F) {
749       APInt CurrentSavings(128, 0);
750       for (auto &I : BB) {
751         if (BranchInst *BI = dyn_cast<BranchInst>(&I)) {
752           // Count a conditional branch as savings if it becomes unconditional.
753           if (BI->isConditional() &&
754               dyn_cast_or_null<ConstantInt>(
755                   SimplifiedValues.lookup(BI->getCondition()))) {
756             CurrentSavings += InlineConstants::InstrCost;
757           }
758         } else if (Value *V = dyn_cast<Value>(&I)) {
759           // Count an instruction as savings if we can fold it.
760           if (SimplifiedValues.count(V)) {
761             CurrentSavings += InlineConstants::InstrCost;
762           }
763         }
764         // TODO: Consider other forms of savings like switch statements,
765         // indirect calls becoming direct, SROACostSavings, LoadEliminationCost,
766         // etc.
767       }
768 
769       auto ProfileCount = CalleeBFI->getBlockProfileCount(&BB);
770       assert(ProfileCount.hasValue());
771       CurrentSavings *= ProfileCount.getValue();
772       CycleSavings += CurrentSavings;
773     }
774 
775     // Compute the cycle savings per call.
776     auto EntryProfileCount = F.getEntryCount();
777     assert(EntryProfileCount.hasValue() && EntryProfileCount.getCount());
778     auto EntryCount = EntryProfileCount.getCount();
779     CycleSavings += EntryCount / 2;
780     CycleSavings = CycleSavings.udiv(EntryCount);
781 
782     // Compute the total savings for the call site.
783     auto *CallerBB = CandidateCall.getParent();
784     BlockFrequencyInfo *CallerBFI = &(GetBFI(*(CallerBB->getParent())));
785     CycleSavings += getCallsiteCost(this->CandidateCall, DL);
786     CycleSavings *= CallerBFI->getBlockProfileCount(CallerBB).getValue();
787 
788     // Remove the cost of the cold basic blocks.
789     int Size = Cost - ColdSize;
790 
791     // Allow tiny callees to be inlined regardless of whether they meet the
792     // savings threshold.
793     Size = Size > InlineSizeAllowance ? Size - InlineSizeAllowance : 1;
794 
795     // Return true if the savings justify the cost of inlining.  Specifically,
796     // we evaluate the following inequality:
797     //
798     //  CycleSavings      PSI->getOrCompHotCountThreshold()
799     // -------------- >= -----------------------------------
800     //       Size              InlineSavingsMultiplier
801     //
802     // Note that the left hand side is specific to a call site.  The right hand
803     // side is a constant for the entire executable.
804     APInt LHS = CycleSavings;
805     LHS *= InlineSavingsMultiplier;
806     APInt RHS(128, PSI->getOrCompHotCountThreshold());
807     RHS *= Size;
808     return LHS.uge(RHS);
809   }
810 
811   InlineResult finalizeAnalysis() override {
812     // Loops generally act a lot like calls in that they act like barriers to
813     // movement, require a certain amount of setup, etc. So when optimising for
814     // size, we penalise any call sites that perform loops. We do this after all
815     // other costs here, so will likely only be dealing with relatively small
816     // functions (and hence DT and LI will hopefully be cheap).
817     auto *Caller = CandidateCall.getFunction();
818     if (Caller->hasMinSize()) {
819       DominatorTree DT(F);
820       LoopInfo LI(DT);
821       int NumLoops = 0;
822       for (Loop *L : LI) {
823         // Ignore loops that will not be executed
824         if (DeadBlocks.count(L->getHeader()))
825           continue;
826         NumLoops++;
827       }
828       addCost(NumLoops * InlineConstants::CallPenalty);
829     }
830 
831     // We applied the maximum possible vector bonus at the beginning. Now,
832     // subtract the excess bonus, if any, from the Threshold before
833     // comparing against Cost.
834     if (NumVectorInstructions <= NumInstructions / 10)
835       Threshold -= VectorBonus;
836     else if (NumVectorInstructions <= NumInstructions / 2)
837       Threshold -= VectorBonus / 2;
838 
839     if (auto Result = costBenefitAnalysis()) {
840       DecidedByCostBenefit = true;
841       if (Result.getValue())
842         return InlineResult::success();
843       else
844         return InlineResult::failure("Cost over threshold.");
845     }
846 
847     if (IgnoreThreshold || Cost < std::max(1, Threshold))
848       return InlineResult::success();
849     return InlineResult::failure("Cost over threshold.");
850   }
851   bool shouldStop() override {
852     // Bail out the moment we cross the threshold. This means we'll under-count
853     // the cost, but only when undercounting doesn't matter.
854     return !IgnoreThreshold && Cost >= Threshold && !ComputeFullInlineCost;
855   }
856 
857   void onLoadEliminationOpportunity() override {
858     LoadEliminationCost += InlineConstants::InstrCost;
859   }
860 
861   InlineResult onAnalysisStart() override {
862     // Perform some tweaks to the cost and threshold based on the direct
863     // callsite information.
864 
865     // We want to more aggressively inline vector-dense kernels, so up the
866     // threshold, and we'll lower it if the % of vector instructions gets too
867     // low. Note that these bonuses are some what arbitrary and evolved over
868     // time by accident as much as because they are principled bonuses.
869     //
870     // FIXME: It would be nice to remove all such bonuses. At least it would be
871     // nice to base the bonus values on something more scientific.
872     assert(NumInstructions == 0);
873     assert(NumVectorInstructions == 0);
874 
875     // Update the threshold based on callsite properties
876     updateThreshold(CandidateCall, F);
877 
878     // While Threshold depends on commandline options that can take negative
879     // values, we want to enforce the invariant that the computed threshold and
880     // bonuses are non-negative.
881     assert(Threshold >= 0);
882     assert(SingleBBBonus >= 0);
883     assert(VectorBonus >= 0);
884 
885     // Speculatively apply all possible bonuses to Threshold. If cost exceeds
886     // this Threshold any time, and cost cannot decrease, we can stop processing
887     // the rest of the function body.
888     Threshold += (SingleBBBonus + VectorBonus);
889 
890     // Give out bonuses for the callsite, as the instructions setting them up
891     // will be gone after inlining.
892     addCost(-getCallsiteCost(this->CandidateCall, DL));
893 
894     // If this function uses the coldcc calling convention, prefer not to inline
895     // it.
896     if (F.getCallingConv() == CallingConv::Cold)
897       Cost += InlineConstants::ColdccPenalty;
898 
899     // Check if we're done. This can happen due to bonuses and penalties.
900     if (Cost >= Threshold && !ComputeFullInlineCost)
901       return InlineResult::failure("high cost");
902 
903     return InlineResult::success();
904   }
905 
906 public:
907   InlineCostCallAnalyzer(
908       Function &Callee, CallBase &Call, const InlineParams &Params,
909       const TargetTransformInfo &TTI,
910       function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
911       function_ref<BlockFrequencyInfo &(Function &)> GetBFI = nullptr,
912       ProfileSummaryInfo *PSI = nullptr,
913       OptimizationRemarkEmitter *ORE = nullptr, bool BoostIndirect = true,
914       bool IgnoreThreshold = false)
915       : CallAnalyzer(Callee, Call, TTI, GetAssumptionCache, GetBFI, PSI, ORE),
916         ComputeFullInlineCost(OptComputeFullInlineCost ||
917                               Params.ComputeFullInlineCost || ORE ||
918                               isCostBenefitAnalysisEnabled()),
919         Params(Params), Threshold(Params.DefaultThreshold),
920         BoostIndirectCalls(BoostIndirect), IgnoreThreshold(IgnoreThreshold),
921         CostBenefitAnalysisEnabled(isCostBenefitAnalysisEnabled()),
922         Writer(this) {}
923 
924   /// Annotation Writer for instruction details
925   InlineCostAnnotationWriter Writer;
926 
927   void dump();
928 
929   // Prints the same analysis as dump(), but its definition is not dependent
930   // on the build.
931   void print();
932 
933   Optional<InstructionCostDetail> getCostDetails(const Instruction *I) {
934     if (InstructionCostDetailMap.find(I) != InstructionCostDetailMap.end())
935       return InstructionCostDetailMap[I];
936     return None;
937   }
938 
939   virtual ~InlineCostCallAnalyzer() {}
940   int getThreshold() { return Threshold; }
941   int getCost() { return Cost; }
942   bool wasDecidedByCostBenefit() { return DecidedByCostBenefit; }
943 };
944 } // namespace
945 
946 /// Test whether the given value is an Alloca-derived function argument.
947 bool CallAnalyzer::isAllocaDerivedArg(Value *V) {
948   return SROAArgValues.count(V);
949 }
950 
951 void CallAnalyzer::disableSROAForArg(AllocaInst *SROAArg) {
952   onDisableSROA(SROAArg);
953   EnabledSROAAllocas.erase(SROAArg);
954   disableLoadElimination();
955 }
956 
957 void InlineCostAnnotationWriter::emitInstructionAnnot(const Instruction *I,
958                                                 formatted_raw_ostream &OS) {
959   // The cost of inlining of the given instruction is printed always.
960   // The threshold delta is printed only when it is non-zero. It happens
961   // when we decided to give a bonus at a particular instruction.
962   Optional<InstructionCostDetail> Record = ICCA->getCostDetails(I);
963   if (!Record)
964     OS << "; No analysis for the instruction";
965   else {
966     OS << "; cost before = " << Record->CostBefore
967        << ", cost after = " << Record->CostAfter
968        << ", threshold before = " << Record->ThresholdBefore
969        << ", threshold after = " << Record->ThresholdAfter << ", ";
970     OS << "cost delta = " << Record->getCostDelta();
971     if (Record->hasThresholdChanged())
972       OS << ", threshold delta = " << Record->getThresholdDelta();
973   }
974   auto C = ICCA->getSimplifiedValue(const_cast<Instruction *>(I));
975   if (C) {
976     OS << ", simplified to ";
977     C.getValue()->print(OS, true);
978   }
979   OS << "\n";
980 }
981 
982 /// If 'V' maps to a SROA candidate, disable SROA for it.
983 void CallAnalyzer::disableSROA(Value *V) {
984   if (auto *SROAArg = getSROAArgForValueOrNull(V)) {
985     disableSROAForArg(SROAArg);
986   }
987 }
988 
989 void CallAnalyzer::disableLoadElimination() {
990   if (EnableLoadElimination) {
991     onDisableLoadElimination();
992     EnableLoadElimination = false;
993   }
994 }
995 
996 /// Accumulate a constant GEP offset into an APInt if possible.
997 ///
998 /// Returns false if unable to compute the offset for any reason. Respects any
999 /// simplified values known during the analysis of this callsite.
1000 bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
1001   unsigned IntPtrWidth = DL.getIndexTypeSizeInBits(GEP.getType());
1002   assert(IntPtrWidth == Offset.getBitWidth());
1003 
1004   for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
1005        GTI != GTE; ++GTI) {
1006     ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
1007     if (!OpC)
1008       if (Constant *SimpleOp = SimplifiedValues.lookup(GTI.getOperand()))
1009         OpC = dyn_cast<ConstantInt>(SimpleOp);
1010     if (!OpC)
1011       return false;
1012     if (OpC->isZero())
1013       continue;
1014 
1015     // Handle a struct index, which adds its field offset to the pointer.
1016     if (StructType *STy = GTI.getStructTypeOrNull()) {
1017       unsigned ElementIdx = OpC->getZExtValue();
1018       const StructLayout *SL = DL.getStructLayout(STy);
1019       Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
1020       continue;
1021     }
1022 
1023     APInt TypeSize(IntPtrWidth, DL.getTypeAllocSize(GTI.getIndexedType()));
1024     Offset += OpC->getValue().sextOrTrunc(IntPtrWidth) * TypeSize;
1025   }
1026   return true;
1027 }
1028 
1029 /// Use TTI to check whether a GEP is free.
1030 ///
1031 /// Respects any simplified values known during the analysis of this callsite.
1032 bool CallAnalyzer::isGEPFree(GetElementPtrInst &GEP) {
1033   SmallVector<Value *, 4> Operands;
1034   Operands.push_back(GEP.getOperand(0));
1035   for (const Use &Op : GEP.indices())
1036     if (Constant *SimpleOp = SimplifiedValues.lookup(Op))
1037       Operands.push_back(SimpleOp);
1038     else
1039       Operands.push_back(Op);
1040   return TargetTransformInfo::TCC_Free ==
1041          TTI.getUserCost(&GEP, Operands,
1042                          TargetTransformInfo::TCK_SizeAndLatency);
1043 }
1044 
1045 bool CallAnalyzer::visitAlloca(AllocaInst &I) {
1046   // Check whether inlining will turn a dynamic alloca into a static
1047   // alloca and handle that case.
1048   if (I.isArrayAllocation()) {
1049     Constant *Size = SimplifiedValues.lookup(I.getArraySize());
1050     if (auto *AllocSize = dyn_cast_or_null<ConstantInt>(Size)) {
1051       // Sometimes a dynamic alloca could be converted into a static alloca
1052       // after this constant prop, and become a huge static alloca on an
1053       // unconditional CFG path. Avoid inlining if this is going to happen above
1054       // a threshold.
1055       // FIXME: If the threshold is removed or lowered too much, we could end up
1056       // being too pessimistic and prevent inlining non-problematic code. This
1057       // could result in unintended perf regressions. A better overall strategy
1058       // is needed to track stack usage during inlining.
1059       Type *Ty = I.getAllocatedType();
1060       AllocatedSize = SaturatingMultiplyAdd(
1061           AllocSize->getLimitedValue(), DL.getTypeAllocSize(Ty).getKnownMinSize(),
1062           AllocatedSize);
1063       if (AllocatedSize > InlineConstants::MaxSimplifiedDynamicAllocaToInline) {
1064         HasDynamicAlloca = true;
1065         return false;
1066       }
1067       return Base::visitAlloca(I);
1068     }
1069   }
1070 
1071   // Accumulate the allocated size.
1072   if (I.isStaticAlloca()) {
1073     Type *Ty = I.getAllocatedType();
1074     AllocatedSize =
1075         SaturatingAdd(DL.getTypeAllocSize(Ty).getKnownMinSize(), AllocatedSize);
1076   }
1077 
1078   // We will happily inline static alloca instructions.
1079   if (I.isStaticAlloca())
1080     return Base::visitAlloca(I);
1081 
1082   // FIXME: This is overly conservative. Dynamic allocas are inefficient for
1083   // a variety of reasons, and so we would like to not inline them into
1084   // functions which don't currently have a dynamic alloca. This simply
1085   // disables inlining altogether in the presence of a dynamic alloca.
1086   HasDynamicAlloca = true;
1087   return false;
1088 }
1089 
1090 bool CallAnalyzer::visitPHI(PHINode &I) {
1091   // FIXME: We need to propagate SROA *disabling* through phi nodes, even
1092   // though we don't want to propagate it's bonuses. The idea is to disable
1093   // SROA if it *might* be used in an inappropriate manner.
1094 
1095   // Phi nodes are always zero-cost.
1096   // FIXME: Pointer sizes may differ between different address spaces, so do we
1097   // need to use correct address space in the call to getPointerSizeInBits here?
1098   // Or could we skip the getPointerSizeInBits call completely? As far as I can
1099   // see the ZeroOffset is used as a dummy value, so we can probably use any
1100   // bit width for the ZeroOffset?
1101   APInt ZeroOffset = APInt::getNullValue(DL.getPointerSizeInBits(0));
1102   bool CheckSROA = I.getType()->isPointerTy();
1103 
1104   // Track the constant or pointer with constant offset we've seen so far.
1105   Constant *FirstC = nullptr;
1106   std::pair<Value *, APInt> FirstBaseAndOffset = {nullptr, ZeroOffset};
1107   Value *FirstV = nullptr;
1108 
1109   for (unsigned i = 0, e = I.getNumIncomingValues(); i != e; ++i) {
1110     BasicBlock *Pred = I.getIncomingBlock(i);
1111     // If the incoming block is dead, skip the incoming block.
1112     if (DeadBlocks.count(Pred))
1113       continue;
1114     // If the parent block of phi is not the known successor of the incoming
1115     // block, skip the incoming block.
1116     BasicBlock *KnownSuccessor = KnownSuccessors[Pred];
1117     if (KnownSuccessor && KnownSuccessor != I.getParent())
1118       continue;
1119 
1120     Value *V = I.getIncomingValue(i);
1121     // If the incoming value is this phi itself, skip the incoming value.
1122     if (&I == V)
1123       continue;
1124 
1125     Constant *C = dyn_cast<Constant>(V);
1126     if (!C)
1127       C = SimplifiedValues.lookup(V);
1128 
1129     std::pair<Value *, APInt> BaseAndOffset = {nullptr, ZeroOffset};
1130     if (!C && CheckSROA)
1131       BaseAndOffset = ConstantOffsetPtrs.lookup(V);
1132 
1133     if (!C && !BaseAndOffset.first)
1134       // The incoming value is neither a constant nor a pointer with constant
1135       // offset, exit early.
1136       return true;
1137 
1138     if (FirstC) {
1139       if (FirstC == C)
1140         // If we've seen a constant incoming value before and it is the same
1141         // constant we see this time, continue checking the next incoming value.
1142         continue;
1143       // Otherwise early exit because we either see a different constant or saw
1144       // a constant before but we have a pointer with constant offset this time.
1145       return true;
1146     }
1147 
1148     if (FirstV) {
1149       // The same logic as above, but check pointer with constant offset here.
1150       if (FirstBaseAndOffset == BaseAndOffset)
1151         continue;
1152       return true;
1153     }
1154 
1155     if (C) {
1156       // This is the 1st time we've seen a constant, record it.
1157       FirstC = C;
1158       continue;
1159     }
1160 
1161     // The remaining case is that this is the 1st time we've seen a pointer with
1162     // constant offset, record it.
1163     FirstV = V;
1164     FirstBaseAndOffset = BaseAndOffset;
1165   }
1166 
1167   // Check if we can map phi to a constant.
1168   if (FirstC) {
1169     SimplifiedValues[&I] = FirstC;
1170     return true;
1171   }
1172 
1173   // Check if we can map phi to a pointer with constant offset.
1174   if (FirstBaseAndOffset.first) {
1175     ConstantOffsetPtrs[&I] = FirstBaseAndOffset;
1176 
1177     if (auto *SROAArg = getSROAArgForValueOrNull(FirstV))
1178       SROAArgValues[&I] = SROAArg;
1179   }
1180 
1181   return true;
1182 }
1183 
1184 /// Check we can fold GEPs of constant-offset call site argument pointers.
1185 /// This requires target data and inbounds GEPs.
1186 ///
1187 /// \return true if the specified GEP can be folded.
1188 bool CallAnalyzer::canFoldInboundsGEP(GetElementPtrInst &I) {
1189   // Check if we have a base + offset for the pointer.
1190   std::pair<Value *, APInt> BaseAndOffset =
1191       ConstantOffsetPtrs.lookup(I.getPointerOperand());
1192   if (!BaseAndOffset.first)
1193     return false;
1194 
1195   // Check if the offset of this GEP is constant, and if so accumulate it
1196   // into Offset.
1197   if (!accumulateGEPOffset(cast<GEPOperator>(I), BaseAndOffset.second))
1198     return false;
1199 
1200   // Add the result as a new mapping to Base + Offset.
1201   ConstantOffsetPtrs[&I] = BaseAndOffset;
1202 
1203   return true;
1204 }
1205 
1206 bool CallAnalyzer::visitGetElementPtr(GetElementPtrInst &I) {
1207   auto *SROAArg = getSROAArgForValueOrNull(I.getPointerOperand());
1208 
1209   // Lambda to check whether a GEP's indices are all constant.
1210   auto IsGEPOffsetConstant = [&](GetElementPtrInst &GEP) {
1211     for (const Use &Op : GEP.indices())
1212       if (!isa<Constant>(Op) && !SimplifiedValues.lookup(Op))
1213         return false;
1214     return true;
1215   };
1216 
1217   if (!DisableGEPConstOperand)
1218     if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1219         SmallVector<Constant *, 2> Indices;
1220         for (unsigned int Index = 1 ; Index < COps.size() ; ++Index)
1221             Indices.push_back(COps[Index]);
1222         return ConstantExpr::getGetElementPtr(I.getSourceElementType(), COps[0],
1223                                               Indices, I.isInBounds());
1224         }))
1225       return true;
1226 
1227   if ((I.isInBounds() && canFoldInboundsGEP(I)) || IsGEPOffsetConstant(I)) {
1228     if (SROAArg)
1229       SROAArgValues[&I] = SROAArg;
1230 
1231     // Constant GEPs are modeled as free.
1232     return true;
1233   }
1234 
1235   // Variable GEPs will require math and will disable SROA.
1236   if (SROAArg)
1237     disableSROAForArg(SROAArg);
1238   return isGEPFree(I);
1239 }
1240 
1241 /// Simplify \p I if its operands are constants and update SimplifiedValues.
1242 /// \p Evaluate is a callable specific to instruction type that evaluates the
1243 /// instruction when all the operands are constants.
1244 template <typename Callable>
1245 bool CallAnalyzer::simplifyInstruction(Instruction &I, Callable Evaluate) {
1246   SmallVector<Constant *, 2> COps;
1247   for (Value *Op : I.operands()) {
1248     Constant *COp = dyn_cast<Constant>(Op);
1249     if (!COp)
1250       COp = SimplifiedValues.lookup(Op);
1251     if (!COp)
1252       return false;
1253     COps.push_back(COp);
1254   }
1255   auto *C = Evaluate(COps);
1256   if (!C)
1257     return false;
1258   SimplifiedValues[&I] = C;
1259   return true;
1260 }
1261 
1262 bool CallAnalyzer::visitBitCast(BitCastInst &I) {
1263   // Propagate constants through bitcasts.
1264   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1265         return ConstantExpr::getBitCast(COps[0], I.getType());
1266       }))
1267     return true;
1268 
1269   // Track base/offsets through casts
1270   std::pair<Value *, APInt> BaseAndOffset =
1271       ConstantOffsetPtrs.lookup(I.getOperand(0));
1272   // Casts don't change the offset, just wrap it up.
1273   if (BaseAndOffset.first)
1274     ConstantOffsetPtrs[&I] = BaseAndOffset;
1275 
1276   // Also look for SROA candidates here.
1277   if (auto *SROAArg = getSROAArgForValueOrNull(I.getOperand(0)))
1278     SROAArgValues[&I] = SROAArg;
1279 
1280   // Bitcasts are always zero cost.
1281   return true;
1282 }
1283 
1284 bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
1285   // Propagate constants through ptrtoint.
1286   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1287         return ConstantExpr::getPtrToInt(COps[0], I.getType());
1288       }))
1289     return true;
1290 
1291   // Track base/offset pairs when converted to a plain integer provided the
1292   // integer is large enough to represent the pointer.
1293   unsigned IntegerSize = I.getType()->getScalarSizeInBits();
1294   unsigned AS = I.getOperand(0)->getType()->getPointerAddressSpace();
1295   if (IntegerSize == DL.getPointerSizeInBits(AS)) {
1296     std::pair<Value *, APInt> BaseAndOffset =
1297         ConstantOffsetPtrs.lookup(I.getOperand(0));
1298     if (BaseAndOffset.first)
1299       ConstantOffsetPtrs[&I] = BaseAndOffset;
1300   }
1301 
1302   // This is really weird. Technically, ptrtoint will disable SROA. However,
1303   // unless that ptrtoint is *used* somewhere in the live basic blocks after
1304   // inlining, it will be nuked, and SROA should proceed. All of the uses which
1305   // would block SROA would also block SROA if applied directly to a pointer,
1306   // and so we can just add the integer in here. The only places where SROA is
1307   // preserved either cannot fire on an integer, or won't in-and-of themselves
1308   // disable SROA (ext) w/o some later use that we would see and disable.
1309   if (auto *SROAArg = getSROAArgForValueOrNull(I.getOperand(0)))
1310     SROAArgValues[&I] = SROAArg;
1311 
1312   return TargetTransformInfo::TCC_Free ==
1313          TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
1314 }
1315 
1316 bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
1317   // Propagate constants through ptrtoint.
1318   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1319         return ConstantExpr::getIntToPtr(COps[0], I.getType());
1320       }))
1321     return true;
1322 
1323   // Track base/offset pairs when round-tripped through a pointer without
1324   // modifications provided the integer is not too large.
1325   Value *Op = I.getOperand(0);
1326   unsigned IntegerSize = Op->getType()->getScalarSizeInBits();
1327   if (IntegerSize <= DL.getPointerTypeSizeInBits(I.getType())) {
1328     std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op);
1329     if (BaseAndOffset.first)
1330       ConstantOffsetPtrs[&I] = BaseAndOffset;
1331   }
1332 
1333   // "Propagate" SROA here in the same manner as we do for ptrtoint above.
1334   if (auto *SROAArg = getSROAArgForValueOrNull(Op))
1335     SROAArgValues[&I] = SROAArg;
1336 
1337   return TargetTransformInfo::TCC_Free ==
1338          TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
1339 }
1340 
1341 bool CallAnalyzer::visitCastInst(CastInst &I) {
1342   // Propagate constants through casts.
1343   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1344         return ConstantExpr::getCast(I.getOpcode(), COps[0], I.getType());
1345       }))
1346     return true;
1347 
1348   // Disable SROA in the face of arbitrary casts we don't explicitly list
1349   // elsewhere.
1350   disableSROA(I.getOperand(0));
1351 
1352   // If this is a floating-point cast, and the target says this operation
1353   // is expensive, this may eventually become a library call. Treat the cost
1354   // as such.
1355   switch (I.getOpcode()) {
1356   case Instruction::FPTrunc:
1357   case Instruction::FPExt:
1358   case Instruction::UIToFP:
1359   case Instruction::SIToFP:
1360   case Instruction::FPToUI:
1361   case Instruction::FPToSI:
1362     if (TTI.getFPOpCost(I.getType()) == TargetTransformInfo::TCC_Expensive)
1363       onCallPenalty();
1364     break;
1365   default:
1366     break;
1367   }
1368 
1369   return TargetTransformInfo::TCC_Free ==
1370          TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
1371 }
1372 
1373 bool CallAnalyzer::visitUnaryInstruction(UnaryInstruction &I) {
1374   Value *Operand = I.getOperand(0);
1375   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1376         return ConstantFoldInstOperands(&I, COps[0], DL);
1377       }))
1378     return true;
1379 
1380   // Disable any SROA on the argument to arbitrary unary instructions.
1381   disableSROA(Operand);
1382 
1383   return false;
1384 }
1385 
1386 bool CallAnalyzer::paramHasAttr(Argument *A, Attribute::AttrKind Attr) {
1387   return CandidateCall.paramHasAttr(A->getArgNo(), Attr);
1388 }
1389 
1390 bool CallAnalyzer::isKnownNonNullInCallee(Value *V) {
1391   // Does the *call site* have the NonNull attribute set on an argument?  We
1392   // use the attribute on the call site to memoize any analysis done in the
1393   // caller. This will also trip if the callee function has a non-null
1394   // parameter attribute, but that's a less interesting case because hopefully
1395   // the callee would already have been simplified based on that.
1396   if (Argument *A = dyn_cast<Argument>(V))
1397     if (paramHasAttr(A, Attribute::NonNull))
1398       return true;
1399 
1400   // Is this an alloca in the caller?  This is distinct from the attribute case
1401   // above because attributes aren't updated within the inliner itself and we
1402   // always want to catch the alloca derived case.
1403   if (isAllocaDerivedArg(V))
1404     // We can actually predict the result of comparisons between an
1405     // alloca-derived value and null. Note that this fires regardless of
1406     // SROA firing.
1407     return true;
1408 
1409   return false;
1410 }
1411 
1412 bool CallAnalyzer::allowSizeGrowth(CallBase &Call) {
1413   // If the normal destination of the invoke or the parent block of the call
1414   // site is unreachable-terminated, there is little point in inlining this
1415   // unless there is literally zero cost.
1416   // FIXME: Note that it is possible that an unreachable-terminated block has a
1417   // hot entry. For example, in below scenario inlining hot_call_X() may be
1418   // beneficial :
1419   // main() {
1420   //   hot_call_1();
1421   //   ...
1422   //   hot_call_N()
1423   //   exit(0);
1424   // }
1425   // For now, we are not handling this corner case here as it is rare in real
1426   // code. In future, we should elaborate this based on BPI and BFI in more
1427   // general threshold adjusting heuristics in updateThreshold().
1428   if (InvokeInst *II = dyn_cast<InvokeInst>(&Call)) {
1429     if (isa<UnreachableInst>(II->getNormalDest()->getTerminator()))
1430       return false;
1431   } else if (isa<UnreachableInst>(Call.getParent()->getTerminator()))
1432     return false;
1433 
1434   return true;
1435 }
1436 
1437 bool InlineCostCallAnalyzer::isColdCallSite(CallBase &Call,
1438                                             BlockFrequencyInfo *CallerBFI) {
1439   // If global profile summary is available, then callsite's coldness is
1440   // determined based on that.
1441   if (PSI && PSI->hasProfileSummary())
1442     return PSI->isColdCallSite(Call, CallerBFI);
1443 
1444   // Otherwise we need BFI to be available.
1445   if (!CallerBFI)
1446     return false;
1447 
1448   // Determine if the callsite is cold relative to caller's entry. We could
1449   // potentially cache the computation of scaled entry frequency, but the added
1450   // complexity is not worth it unless this scaling shows up high in the
1451   // profiles.
1452   const BranchProbability ColdProb(ColdCallSiteRelFreq, 100);
1453   auto CallSiteBB = Call.getParent();
1454   auto CallSiteFreq = CallerBFI->getBlockFreq(CallSiteBB);
1455   auto CallerEntryFreq =
1456       CallerBFI->getBlockFreq(&(Call.getCaller()->getEntryBlock()));
1457   return CallSiteFreq < CallerEntryFreq * ColdProb;
1458 }
1459 
1460 Optional<int>
1461 InlineCostCallAnalyzer::getHotCallSiteThreshold(CallBase &Call,
1462                                                 BlockFrequencyInfo *CallerBFI) {
1463 
1464   // If global profile summary is available, then callsite's hotness is
1465   // determined based on that.
1466   if (PSI && PSI->hasProfileSummary() && PSI->isHotCallSite(Call, CallerBFI))
1467     return Params.HotCallSiteThreshold;
1468 
1469   // Otherwise we need BFI to be available and to have a locally hot callsite
1470   // threshold.
1471   if (!CallerBFI || !Params.LocallyHotCallSiteThreshold)
1472     return None;
1473 
1474   // Determine if the callsite is hot relative to caller's entry. We could
1475   // potentially cache the computation of scaled entry frequency, but the added
1476   // complexity is not worth it unless this scaling shows up high in the
1477   // profiles.
1478   auto CallSiteBB = Call.getParent();
1479   auto CallSiteFreq = CallerBFI->getBlockFreq(CallSiteBB).getFrequency();
1480   auto CallerEntryFreq = CallerBFI->getEntryFreq();
1481   if (CallSiteFreq >= CallerEntryFreq * HotCallSiteRelFreq)
1482     return Params.LocallyHotCallSiteThreshold;
1483 
1484   // Otherwise treat it normally.
1485   return None;
1486 }
1487 
1488 void InlineCostCallAnalyzer::updateThreshold(CallBase &Call, Function &Callee) {
1489   // If no size growth is allowed for this inlining, set Threshold to 0.
1490   if (!allowSizeGrowth(Call)) {
1491     Threshold = 0;
1492     return;
1493   }
1494 
1495   Function *Caller = Call.getCaller();
1496 
1497   // return min(A, B) if B is valid.
1498   auto MinIfValid = [](int A, Optional<int> B) {
1499     return B ? std::min(A, B.getValue()) : A;
1500   };
1501 
1502   // return max(A, B) if B is valid.
1503   auto MaxIfValid = [](int A, Optional<int> B) {
1504     return B ? std::max(A, B.getValue()) : A;
1505   };
1506 
1507   // Various bonus percentages. These are multiplied by Threshold to get the
1508   // bonus values.
1509   // SingleBBBonus: This bonus is applied if the callee has a single reachable
1510   // basic block at the given callsite context. This is speculatively applied
1511   // and withdrawn if more than one basic block is seen.
1512   //
1513   // LstCallToStaticBonus: This large bonus is applied to ensure the inlining
1514   // of the last call to a static function as inlining such functions is
1515   // guaranteed to reduce code size.
1516   //
1517   // These bonus percentages may be set to 0 based on properties of the caller
1518   // and the callsite.
1519   int SingleBBBonusPercent = 50;
1520   int VectorBonusPercent = TTI.getInlinerVectorBonusPercent();
1521   int LastCallToStaticBonus = InlineConstants::LastCallToStaticBonus;
1522 
1523   // Lambda to set all the above bonus and bonus percentages to 0.
1524   auto DisallowAllBonuses = [&]() {
1525     SingleBBBonusPercent = 0;
1526     VectorBonusPercent = 0;
1527     LastCallToStaticBonus = 0;
1528   };
1529 
1530   // Use the OptMinSizeThreshold or OptSizeThreshold knob if they are available
1531   // and reduce the threshold if the caller has the necessary attribute.
1532   if (Caller->hasMinSize()) {
1533     Threshold = MinIfValid(Threshold, Params.OptMinSizeThreshold);
1534     // For minsize, we want to disable the single BB bonus and the vector
1535     // bonuses, but not the last-call-to-static bonus. Inlining the last call to
1536     // a static function will, at the minimum, eliminate the parameter setup and
1537     // call/return instructions.
1538     SingleBBBonusPercent = 0;
1539     VectorBonusPercent = 0;
1540   } else if (Caller->hasOptSize())
1541     Threshold = MinIfValid(Threshold, Params.OptSizeThreshold);
1542 
1543   // Adjust the threshold based on inlinehint attribute and profile based
1544   // hotness information if the caller does not have MinSize attribute.
1545   if (!Caller->hasMinSize()) {
1546     if (Callee.hasFnAttribute(Attribute::InlineHint))
1547       Threshold = MaxIfValid(Threshold, Params.HintThreshold);
1548 
1549     // FIXME: After switching to the new passmanager, simplify the logic below
1550     // by checking only the callsite hotness/coldness as we will reliably
1551     // have local profile information.
1552     //
1553     // Callsite hotness and coldness can be determined if sample profile is
1554     // used (which adds hotness metadata to calls) or if caller's
1555     // BlockFrequencyInfo is available.
1556     BlockFrequencyInfo *CallerBFI = GetBFI ? &(GetBFI(*Caller)) : nullptr;
1557     auto HotCallSiteThreshold = getHotCallSiteThreshold(Call, CallerBFI);
1558     if (!Caller->hasOptSize() && HotCallSiteThreshold) {
1559       LLVM_DEBUG(dbgs() << "Hot callsite.\n");
1560       // FIXME: This should update the threshold only if it exceeds the
1561       // current threshold, but AutoFDO + ThinLTO currently relies on this
1562       // behavior to prevent inlining of hot callsites during ThinLTO
1563       // compile phase.
1564       Threshold = HotCallSiteThreshold.getValue();
1565     } else if (isColdCallSite(Call, CallerBFI)) {
1566       LLVM_DEBUG(dbgs() << "Cold callsite.\n");
1567       // Do not apply bonuses for a cold callsite including the
1568       // LastCallToStatic bonus. While this bonus might result in code size
1569       // reduction, it can cause the size of a non-cold caller to increase
1570       // preventing it from being inlined.
1571       DisallowAllBonuses();
1572       Threshold = MinIfValid(Threshold, Params.ColdCallSiteThreshold);
1573     } else if (PSI) {
1574       // Use callee's global profile information only if we have no way of
1575       // determining this via callsite information.
1576       if (PSI->isFunctionEntryHot(&Callee)) {
1577         LLVM_DEBUG(dbgs() << "Hot callee.\n");
1578         // If callsite hotness can not be determined, we may still know
1579         // that the callee is hot and treat it as a weaker hint for threshold
1580         // increase.
1581         Threshold = MaxIfValid(Threshold, Params.HintThreshold);
1582       } else if (PSI->isFunctionEntryCold(&Callee)) {
1583         LLVM_DEBUG(dbgs() << "Cold callee.\n");
1584         // Do not apply bonuses for a cold callee including the
1585         // LastCallToStatic bonus. While this bonus might result in code size
1586         // reduction, it can cause the size of a non-cold caller to increase
1587         // preventing it from being inlined.
1588         DisallowAllBonuses();
1589         Threshold = MinIfValid(Threshold, Params.ColdThreshold);
1590       }
1591     }
1592   }
1593 
1594   Threshold += TTI.adjustInliningThreshold(&Call);
1595 
1596   // Finally, take the target-specific inlining threshold multiplier into
1597   // account.
1598   Threshold *= TTI.getInliningThresholdMultiplier();
1599 
1600   SingleBBBonus = Threshold * SingleBBBonusPercent / 100;
1601   VectorBonus = Threshold * VectorBonusPercent / 100;
1602 
1603   bool OnlyOneCallAndLocalLinkage =
1604       F.hasLocalLinkage() && F.hasOneUse() && &F == Call.getCalledFunction();
1605   // If there is only one call of the function, and it has internal linkage,
1606   // the cost of inlining it drops dramatically. It may seem odd to update
1607   // Cost in updateThreshold, but the bonus depends on the logic in this method.
1608   if (OnlyOneCallAndLocalLinkage)
1609     Cost -= LastCallToStaticBonus;
1610 }
1611 
1612 bool CallAnalyzer::visitCmpInst(CmpInst &I) {
1613   Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1614   // First try to handle simplified comparisons.
1615   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1616         return ConstantExpr::getCompare(I.getPredicate(), COps[0], COps[1]);
1617       }))
1618     return true;
1619 
1620   if (I.getOpcode() == Instruction::FCmp)
1621     return false;
1622 
1623   // Otherwise look for a comparison between constant offset pointers with
1624   // a common base.
1625   Value *LHSBase, *RHSBase;
1626   APInt LHSOffset, RHSOffset;
1627   std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
1628   if (LHSBase) {
1629     std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
1630     if (RHSBase && LHSBase == RHSBase) {
1631       // We have common bases, fold the icmp to a constant based on the
1632       // offsets.
1633       Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
1634       Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
1635       if (Constant *C = ConstantExpr::getICmp(I.getPredicate(), CLHS, CRHS)) {
1636         SimplifiedValues[&I] = C;
1637         ++NumConstantPtrCmps;
1638         return true;
1639       }
1640     }
1641   }
1642 
1643   // If the comparison is an equality comparison with null, we can simplify it
1644   // if we know the value (argument) can't be null
1645   if (I.isEquality() && isa<ConstantPointerNull>(I.getOperand(1)) &&
1646       isKnownNonNullInCallee(I.getOperand(0))) {
1647     bool IsNotEqual = I.getPredicate() == CmpInst::ICMP_NE;
1648     SimplifiedValues[&I] = IsNotEqual ? ConstantInt::getTrue(I.getType())
1649                                       : ConstantInt::getFalse(I.getType());
1650     return true;
1651   }
1652   return handleSROA(I.getOperand(0), isa<ConstantPointerNull>(I.getOperand(1)));
1653 }
1654 
1655 bool CallAnalyzer::visitSub(BinaryOperator &I) {
1656   // Try to handle a special case: we can fold computing the difference of two
1657   // constant-related pointers.
1658   Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1659   Value *LHSBase, *RHSBase;
1660   APInt LHSOffset, RHSOffset;
1661   std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
1662   if (LHSBase) {
1663     std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
1664     if (RHSBase && LHSBase == RHSBase) {
1665       // We have common bases, fold the subtract to a constant based on the
1666       // offsets.
1667       Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
1668       Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
1669       if (Constant *C = ConstantExpr::getSub(CLHS, CRHS)) {
1670         SimplifiedValues[&I] = C;
1671         ++NumConstantPtrDiffs;
1672         return true;
1673       }
1674     }
1675   }
1676 
1677   // Otherwise, fall back to the generic logic for simplifying and handling
1678   // instructions.
1679   return Base::visitSub(I);
1680 }
1681 
1682 bool CallAnalyzer::visitBinaryOperator(BinaryOperator &I) {
1683   Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
1684   Constant *CLHS = dyn_cast<Constant>(LHS);
1685   if (!CLHS)
1686     CLHS = SimplifiedValues.lookup(LHS);
1687   Constant *CRHS = dyn_cast<Constant>(RHS);
1688   if (!CRHS)
1689     CRHS = SimplifiedValues.lookup(RHS);
1690 
1691   Value *SimpleV = nullptr;
1692   if (auto FI = dyn_cast<FPMathOperator>(&I))
1693     SimpleV = SimplifyBinOp(I.getOpcode(), CLHS ? CLHS : LHS, CRHS ? CRHS : RHS,
1694                             FI->getFastMathFlags(), DL);
1695   else
1696     SimpleV =
1697         SimplifyBinOp(I.getOpcode(), CLHS ? CLHS : LHS, CRHS ? CRHS : RHS, DL);
1698 
1699   if (Constant *C = dyn_cast_or_null<Constant>(SimpleV))
1700     SimplifiedValues[&I] = C;
1701 
1702   if (SimpleV)
1703     return true;
1704 
1705   // Disable any SROA on arguments to arbitrary, unsimplified binary operators.
1706   disableSROA(LHS);
1707   disableSROA(RHS);
1708 
1709   // If the instruction is floating point, and the target says this operation
1710   // is expensive, this may eventually become a library call. Treat the cost
1711   // as such. Unless it's fneg which can be implemented with an xor.
1712   using namespace llvm::PatternMatch;
1713   if (I.getType()->isFloatingPointTy() &&
1714       TTI.getFPOpCost(I.getType()) == TargetTransformInfo::TCC_Expensive &&
1715       !match(&I, m_FNeg(m_Value())))
1716     onCallPenalty();
1717 
1718   return false;
1719 }
1720 
1721 bool CallAnalyzer::visitFNeg(UnaryOperator &I) {
1722   Value *Op = I.getOperand(0);
1723   Constant *COp = dyn_cast<Constant>(Op);
1724   if (!COp)
1725     COp = SimplifiedValues.lookup(Op);
1726 
1727   Value *SimpleV = SimplifyFNegInst(
1728       COp ? COp : Op, cast<FPMathOperator>(I).getFastMathFlags(), DL);
1729 
1730   if (Constant *C = dyn_cast_or_null<Constant>(SimpleV))
1731     SimplifiedValues[&I] = C;
1732 
1733   if (SimpleV)
1734     return true;
1735 
1736   // Disable any SROA on arguments to arbitrary, unsimplified fneg.
1737   disableSROA(Op);
1738 
1739   return false;
1740 }
1741 
1742 bool CallAnalyzer::visitLoad(LoadInst &I) {
1743   if (handleSROA(I.getPointerOperand(), I.isSimple()))
1744     return true;
1745 
1746   // If the data is already loaded from this address and hasn't been clobbered
1747   // by any stores or calls, this load is likely to be redundant and can be
1748   // eliminated.
1749   if (EnableLoadElimination &&
1750       !LoadAddrSet.insert(I.getPointerOperand()).second && I.isUnordered()) {
1751     onLoadEliminationOpportunity();
1752     return true;
1753   }
1754 
1755   return false;
1756 }
1757 
1758 bool CallAnalyzer::visitStore(StoreInst &I) {
1759   if (handleSROA(I.getPointerOperand(), I.isSimple()))
1760     return true;
1761 
1762   // The store can potentially clobber loads and prevent repeated loads from
1763   // being eliminated.
1764   // FIXME:
1765   // 1. We can probably keep an initial set of eliminatable loads substracted
1766   // from the cost even when we finally see a store. We just need to disable
1767   // *further* accumulation of elimination savings.
1768   // 2. We should probably at some point thread MemorySSA for the callee into
1769   // this and then use that to actually compute *really* precise savings.
1770   disableLoadElimination();
1771   return false;
1772 }
1773 
1774 bool CallAnalyzer::visitExtractValue(ExtractValueInst &I) {
1775   // Constant folding for extract value is trivial.
1776   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1777         return ConstantExpr::getExtractValue(COps[0], I.getIndices());
1778       }))
1779     return true;
1780 
1781   // SROA can look through these but give them a cost.
1782   return false;
1783 }
1784 
1785 bool CallAnalyzer::visitInsertValue(InsertValueInst &I) {
1786   // Constant folding for insert value is trivial.
1787   if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1788         return ConstantExpr::getInsertValue(/*AggregateOperand*/ COps[0],
1789                                             /*InsertedValueOperand*/ COps[1],
1790                                             I.getIndices());
1791       }))
1792     return true;
1793 
1794   // SROA can look through these but give them a cost.
1795   return false;
1796 }
1797 
1798 /// Try to simplify a call site.
1799 ///
1800 /// Takes a concrete function and callsite and tries to actually simplify it by
1801 /// analyzing the arguments and call itself with instsimplify. Returns true if
1802 /// it has simplified the callsite to some other entity (a constant), making it
1803 /// free.
1804 bool CallAnalyzer::simplifyCallSite(Function *F, CallBase &Call) {
1805   // FIXME: Using the instsimplify logic directly for this is inefficient
1806   // because we have to continually rebuild the argument list even when no
1807   // simplifications can be performed. Until that is fixed with remapping
1808   // inside of instsimplify, directly constant fold calls here.
1809   if (!canConstantFoldCallTo(&Call, F))
1810     return false;
1811 
1812   // Try to re-map the arguments to constants.
1813   SmallVector<Constant *, 4> ConstantArgs;
1814   ConstantArgs.reserve(Call.arg_size());
1815   for (Value *I : Call.args()) {
1816     Constant *C = dyn_cast<Constant>(I);
1817     if (!C)
1818       C = dyn_cast_or_null<Constant>(SimplifiedValues.lookup(I));
1819     if (!C)
1820       return false; // This argument doesn't map to a constant.
1821 
1822     ConstantArgs.push_back(C);
1823   }
1824   if (Constant *C = ConstantFoldCall(&Call, F, ConstantArgs)) {
1825     SimplifiedValues[&Call] = C;
1826     return true;
1827   }
1828 
1829   return false;
1830 }
1831 
1832 bool CallAnalyzer::visitCallBase(CallBase &Call) {
1833   if (Call.hasFnAttr(Attribute::ReturnsTwice) &&
1834       !F.hasFnAttribute(Attribute::ReturnsTwice)) {
1835     // This aborts the entire analysis.
1836     ExposesReturnsTwice = true;
1837     return false;
1838   }
1839   if (isa<CallInst>(Call) && cast<CallInst>(Call).cannotDuplicate())
1840     ContainsNoDuplicateCall = true;
1841 
1842   Value *Callee = Call.getCalledOperand();
1843   Function *F = dyn_cast_or_null<Function>(Callee);
1844   bool IsIndirectCall = !F;
1845   if (IsIndirectCall) {
1846     // Check if this happens to be an indirect function call to a known function
1847     // in this inline context. If not, we've done all we can.
1848     F = dyn_cast_or_null<Function>(SimplifiedValues.lookup(Callee));
1849     if (!F) {
1850       onCallArgumentSetup(Call);
1851 
1852       if (!Call.onlyReadsMemory())
1853         disableLoadElimination();
1854       return Base::visitCallBase(Call);
1855     }
1856   }
1857 
1858   assert(F && "Expected a call to a known function");
1859 
1860   // When we have a concrete function, first try to simplify it directly.
1861   if (simplifyCallSite(F, Call))
1862     return true;
1863 
1864   // Next check if it is an intrinsic we know about.
1865   // FIXME: Lift this into part of the InstVisitor.
1866   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&Call)) {
1867     switch (II->getIntrinsicID()) {
1868     default:
1869       if (!Call.onlyReadsMemory() && !isAssumeLikeIntrinsic(II))
1870         disableLoadElimination();
1871       return Base::visitCallBase(Call);
1872 
1873     case Intrinsic::load_relative:
1874       onLoadRelativeIntrinsic();
1875       return false;
1876 
1877     case Intrinsic::memset:
1878     case Intrinsic::memcpy:
1879     case Intrinsic::memmove:
1880       disableLoadElimination();
1881       // SROA can usually chew through these intrinsics, but they aren't free.
1882       return false;
1883     case Intrinsic::icall_branch_funnel:
1884     case Intrinsic::localescape:
1885       HasUninlineableIntrinsic = true;
1886       return false;
1887     case Intrinsic::vastart:
1888       InitsVargArgs = true;
1889       return false;
1890     }
1891   }
1892 
1893   if (F == Call.getFunction()) {
1894     // This flag will fully abort the analysis, so don't bother with anything
1895     // else.
1896     IsRecursiveCall = true;
1897     return false;
1898   }
1899 
1900   if (TTI.isLoweredToCall(F)) {
1901     onLoweredCall(F, Call, IsIndirectCall);
1902   }
1903 
1904   if (!(Call.onlyReadsMemory() || (IsIndirectCall && F->onlyReadsMemory())))
1905     disableLoadElimination();
1906   return Base::visitCallBase(Call);
1907 }
1908 
1909 bool CallAnalyzer::visitReturnInst(ReturnInst &RI) {
1910   // At least one return instruction will be free after inlining.
1911   bool Free = !HasReturn;
1912   HasReturn = true;
1913   return Free;
1914 }
1915 
1916 bool CallAnalyzer::visitBranchInst(BranchInst &BI) {
1917   // We model unconditional branches as essentially free -- they really
1918   // shouldn't exist at all, but handling them makes the behavior of the
1919   // inliner more regular and predictable. Interestingly, conditional branches
1920   // which will fold away are also free.
1921   return BI.isUnconditional() || isa<ConstantInt>(BI.getCondition()) ||
1922          dyn_cast_or_null<ConstantInt>(
1923              SimplifiedValues.lookup(BI.getCondition()));
1924 }
1925 
1926 bool CallAnalyzer::visitSelectInst(SelectInst &SI) {
1927   bool CheckSROA = SI.getType()->isPointerTy();
1928   Value *TrueVal = SI.getTrueValue();
1929   Value *FalseVal = SI.getFalseValue();
1930 
1931   Constant *TrueC = dyn_cast<Constant>(TrueVal);
1932   if (!TrueC)
1933     TrueC = SimplifiedValues.lookup(TrueVal);
1934   Constant *FalseC = dyn_cast<Constant>(FalseVal);
1935   if (!FalseC)
1936     FalseC = SimplifiedValues.lookup(FalseVal);
1937   Constant *CondC =
1938       dyn_cast_or_null<Constant>(SimplifiedValues.lookup(SI.getCondition()));
1939 
1940   if (!CondC) {
1941     // Select C, X, X => X
1942     if (TrueC == FalseC && TrueC) {
1943       SimplifiedValues[&SI] = TrueC;
1944       return true;
1945     }
1946 
1947     if (!CheckSROA)
1948       return Base::visitSelectInst(SI);
1949 
1950     std::pair<Value *, APInt> TrueBaseAndOffset =
1951         ConstantOffsetPtrs.lookup(TrueVal);
1952     std::pair<Value *, APInt> FalseBaseAndOffset =
1953         ConstantOffsetPtrs.lookup(FalseVal);
1954     if (TrueBaseAndOffset == FalseBaseAndOffset && TrueBaseAndOffset.first) {
1955       ConstantOffsetPtrs[&SI] = TrueBaseAndOffset;
1956 
1957       if (auto *SROAArg = getSROAArgForValueOrNull(TrueVal))
1958         SROAArgValues[&SI] = SROAArg;
1959       return true;
1960     }
1961 
1962     return Base::visitSelectInst(SI);
1963   }
1964 
1965   // Select condition is a constant.
1966   Value *SelectedV = CondC->isAllOnesValue()
1967                          ? TrueVal
1968                          : (CondC->isNullValue()) ? FalseVal : nullptr;
1969   if (!SelectedV) {
1970     // Condition is a vector constant that is not all 1s or all 0s.  If all
1971     // operands are constants, ConstantExpr::getSelect() can handle the cases
1972     // such as select vectors.
1973     if (TrueC && FalseC) {
1974       if (auto *C = ConstantExpr::getSelect(CondC, TrueC, FalseC)) {
1975         SimplifiedValues[&SI] = C;
1976         return true;
1977       }
1978     }
1979     return Base::visitSelectInst(SI);
1980   }
1981 
1982   // Condition is either all 1s or all 0s. SI can be simplified.
1983   if (Constant *SelectedC = dyn_cast<Constant>(SelectedV)) {
1984     SimplifiedValues[&SI] = SelectedC;
1985     return true;
1986   }
1987 
1988   if (!CheckSROA)
1989     return true;
1990 
1991   std::pair<Value *, APInt> BaseAndOffset =
1992       ConstantOffsetPtrs.lookup(SelectedV);
1993   if (BaseAndOffset.first) {
1994     ConstantOffsetPtrs[&SI] = BaseAndOffset;
1995 
1996     if (auto *SROAArg = getSROAArgForValueOrNull(SelectedV))
1997       SROAArgValues[&SI] = SROAArg;
1998   }
1999 
2000   return true;
2001 }
2002 
2003 bool CallAnalyzer::visitSwitchInst(SwitchInst &SI) {
2004   // We model unconditional switches as free, see the comments on handling
2005   // branches.
2006   if (isa<ConstantInt>(SI.getCondition()))
2007     return true;
2008   if (Value *V = SimplifiedValues.lookup(SI.getCondition()))
2009     if (isa<ConstantInt>(V))
2010       return true;
2011 
2012   // Assume the most general case where the switch is lowered into
2013   // either a jump table, bit test, or a balanced binary tree consisting of
2014   // case clusters without merging adjacent clusters with the same
2015   // destination. We do not consider the switches that are lowered with a mix
2016   // of jump table/bit test/binary search tree. The cost of the switch is
2017   // proportional to the size of the tree or the size of jump table range.
2018   //
2019   // NB: We convert large switches which are just used to initialize large phi
2020   // nodes to lookup tables instead in simplify-cfg, so this shouldn't prevent
2021   // inlining those. It will prevent inlining in cases where the optimization
2022   // does not (yet) fire.
2023 
2024   unsigned JumpTableSize = 0;
2025   BlockFrequencyInfo *BFI = GetBFI ? &(GetBFI(F)) : nullptr;
2026   unsigned NumCaseCluster =
2027       TTI.getEstimatedNumberOfCaseClusters(SI, JumpTableSize, PSI, BFI);
2028 
2029   onFinalizeSwitch(JumpTableSize, NumCaseCluster);
2030   return false;
2031 }
2032 
2033 bool CallAnalyzer::visitIndirectBrInst(IndirectBrInst &IBI) {
2034   // We never want to inline functions that contain an indirectbr.  This is
2035   // incorrect because all the blockaddress's (in static global initializers
2036   // for example) would be referring to the original function, and this
2037   // indirect jump would jump from the inlined copy of the function into the
2038   // original function which is extremely undefined behavior.
2039   // FIXME: This logic isn't really right; we can safely inline functions with
2040   // indirectbr's as long as no other function or global references the
2041   // blockaddress of a block within the current function.
2042   HasIndirectBr = true;
2043   return false;
2044 }
2045 
2046 bool CallAnalyzer::visitResumeInst(ResumeInst &RI) {
2047   // FIXME: It's not clear that a single instruction is an accurate model for
2048   // the inline cost of a resume instruction.
2049   return false;
2050 }
2051 
2052 bool CallAnalyzer::visitCleanupReturnInst(CleanupReturnInst &CRI) {
2053   // FIXME: It's not clear that a single instruction is an accurate model for
2054   // the inline cost of a cleanupret instruction.
2055   return false;
2056 }
2057 
2058 bool CallAnalyzer::visitCatchReturnInst(CatchReturnInst &CRI) {
2059   // FIXME: It's not clear that a single instruction is an accurate model for
2060   // the inline cost of a catchret instruction.
2061   return false;
2062 }
2063 
2064 bool CallAnalyzer::visitUnreachableInst(UnreachableInst &I) {
2065   // FIXME: It might be reasonably to discount the cost of instructions leading
2066   // to unreachable as they have the lowest possible impact on both runtime and
2067   // code size.
2068   return true; // No actual code is needed for unreachable.
2069 }
2070 
2071 bool CallAnalyzer::visitInstruction(Instruction &I) {
2072   // Some instructions are free. All of the free intrinsics can also be
2073   // handled by SROA, etc.
2074   if (TargetTransformInfo::TCC_Free ==
2075       TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency))
2076     return true;
2077 
2078   // We found something we don't understand or can't handle. Mark any SROA-able
2079   // values in the operand list as no longer viable.
2080   for (const Use &Op : I.operands())
2081     disableSROA(Op);
2082 
2083   return false;
2084 }
2085 
2086 /// Analyze a basic block for its contribution to the inline cost.
2087 ///
2088 /// This method walks the analyzer over every instruction in the given basic
2089 /// block and accounts for their cost during inlining at this callsite. It
2090 /// aborts early if the threshold has been exceeded or an impossible to inline
2091 /// construct has been detected. It returns false if inlining is no longer
2092 /// viable, and true if inlining remains viable.
2093 InlineResult
2094 CallAnalyzer::analyzeBlock(BasicBlock *BB,
2095                            SmallPtrSetImpl<const Value *> &EphValues) {
2096   for (Instruction &I : *BB) {
2097     // FIXME: Currently, the number of instructions in a function regardless of
2098     // our ability to simplify them during inline to constants or dead code,
2099     // are actually used by the vector bonus heuristic. As long as that's true,
2100     // we have to special case debug intrinsics here to prevent differences in
2101     // inlining due to debug symbols. Eventually, the number of unsimplified
2102     // instructions shouldn't factor into the cost computation, but until then,
2103     // hack around it here.
2104     if (isa<DbgInfoIntrinsic>(I))
2105       continue;
2106 
2107     // Skip pseudo-probes.
2108     if (isa<PseudoProbeInst>(I))
2109       continue;
2110 
2111     // Skip ephemeral values.
2112     if (EphValues.count(&I))
2113       continue;
2114 
2115     ++NumInstructions;
2116     if (isa<ExtractElementInst>(I) || I.getType()->isVectorTy())
2117       ++NumVectorInstructions;
2118 
2119     // If the instruction simplified to a constant, there is no cost to this
2120     // instruction. Visit the instructions using our InstVisitor to account for
2121     // all of the per-instruction logic. The visit tree returns true if we
2122     // consumed the instruction in any way, and false if the instruction's base
2123     // cost should count against inlining.
2124     onInstructionAnalysisStart(&I);
2125 
2126     if (Base::visit(&I))
2127       ++NumInstructionsSimplified;
2128     else
2129       onMissedSimplification();
2130 
2131     onInstructionAnalysisFinish(&I);
2132     using namespace ore;
2133     // If the visit this instruction detected an uninlinable pattern, abort.
2134     InlineResult IR = InlineResult::success();
2135     if (IsRecursiveCall)
2136       IR = InlineResult::failure("recursive");
2137     else if (ExposesReturnsTwice)
2138       IR = InlineResult::failure("exposes returns twice");
2139     else if (HasDynamicAlloca)
2140       IR = InlineResult::failure("dynamic alloca");
2141     else if (HasIndirectBr)
2142       IR = InlineResult::failure("indirect branch");
2143     else if (HasUninlineableIntrinsic)
2144       IR = InlineResult::failure("uninlinable intrinsic");
2145     else if (InitsVargArgs)
2146       IR = InlineResult::failure("varargs");
2147     if (!IR.isSuccess()) {
2148       if (ORE)
2149         ORE->emit([&]() {
2150           return OptimizationRemarkMissed(DEBUG_TYPE, "NeverInline",
2151                                           &CandidateCall)
2152                  << NV("Callee", &F) << " has uninlinable pattern ("
2153                  << NV("InlineResult", IR.getFailureReason())
2154                  << ") and cost is not fully computed";
2155         });
2156       return IR;
2157     }
2158 
2159     // If the caller is a recursive function then we don't want to inline
2160     // functions which allocate a lot of stack space because it would increase
2161     // the caller stack usage dramatically.
2162     if (IsCallerRecursive &&
2163         AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller) {
2164       auto IR =
2165           InlineResult::failure("recursive and allocates too much stack space");
2166       if (ORE)
2167         ORE->emit([&]() {
2168           return OptimizationRemarkMissed(DEBUG_TYPE, "NeverInline",
2169                                           &CandidateCall)
2170                  << NV("Callee", &F) << " is "
2171                  << NV("InlineResult", IR.getFailureReason())
2172                  << ". Cost is not fully computed";
2173         });
2174       return IR;
2175     }
2176 
2177     if (shouldStop())
2178       return InlineResult::failure(
2179           "Call site analysis is not favorable to inlining.");
2180   }
2181 
2182   return InlineResult::success();
2183 }
2184 
2185 /// Compute the base pointer and cumulative constant offsets for V.
2186 ///
2187 /// This strips all constant offsets off of V, leaving it the base pointer, and
2188 /// accumulates the total constant offset applied in the returned constant. It
2189 /// returns 0 if V is not a pointer, and returns the constant '0' if there are
2190 /// no constant offsets applied.
2191 ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
2192   if (!V->getType()->isPointerTy())
2193     return nullptr;
2194 
2195   unsigned AS = V->getType()->getPointerAddressSpace();
2196   unsigned IntPtrWidth = DL.getIndexSizeInBits(AS);
2197   APInt Offset = APInt::getNullValue(IntPtrWidth);
2198 
2199   // Even though we don't look through PHI nodes, we could be called on an
2200   // instruction in an unreachable block, which may be on a cycle.
2201   SmallPtrSet<Value *, 4> Visited;
2202   Visited.insert(V);
2203   do {
2204     if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
2205       if (!GEP->isInBounds() || !accumulateGEPOffset(*GEP, Offset))
2206         return nullptr;
2207       V = GEP->getPointerOperand();
2208     } else if (Operator::getOpcode(V) == Instruction::BitCast) {
2209       V = cast<Operator>(V)->getOperand(0);
2210     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
2211       if (GA->isInterposable())
2212         break;
2213       V = GA->getAliasee();
2214     } else {
2215       break;
2216     }
2217     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
2218   } while (Visited.insert(V).second);
2219 
2220   Type *IdxPtrTy = DL.getIndexType(V->getType());
2221   return cast<ConstantInt>(ConstantInt::get(IdxPtrTy, Offset));
2222 }
2223 
2224 /// Find dead blocks due to deleted CFG edges during inlining.
2225 ///
2226 /// If we know the successor of the current block, \p CurrBB, has to be \p
2227 /// NextBB, the other successors of \p CurrBB are dead if these successors have
2228 /// no live incoming CFG edges.  If one block is found to be dead, we can
2229 /// continue growing the dead block list by checking the successors of the dead
2230 /// blocks to see if all their incoming edges are dead or not.
2231 void CallAnalyzer::findDeadBlocks(BasicBlock *CurrBB, BasicBlock *NextBB) {
2232   auto IsEdgeDead = [&](BasicBlock *Pred, BasicBlock *Succ) {
2233     // A CFG edge is dead if the predecessor is dead or the predecessor has a
2234     // known successor which is not the one under exam.
2235     return (DeadBlocks.count(Pred) ||
2236             (KnownSuccessors[Pred] && KnownSuccessors[Pred] != Succ));
2237   };
2238 
2239   auto IsNewlyDead = [&](BasicBlock *BB) {
2240     // If all the edges to a block are dead, the block is also dead.
2241     return (!DeadBlocks.count(BB) &&
2242             llvm::all_of(predecessors(BB),
2243                          [&](BasicBlock *P) { return IsEdgeDead(P, BB); }));
2244   };
2245 
2246   for (BasicBlock *Succ : successors(CurrBB)) {
2247     if (Succ == NextBB || !IsNewlyDead(Succ))
2248       continue;
2249     SmallVector<BasicBlock *, 4> NewDead;
2250     NewDead.push_back(Succ);
2251     while (!NewDead.empty()) {
2252       BasicBlock *Dead = NewDead.pop_back_val();
2253       if (DeadBlocks.insert(Dead))
2254         // Continue growing the dead block lists.
2255         for (BasicBlock *S : successors(Dead))
2256           if (IsNewlyDead(S))
2257             NewDead.push_back(S);
2258     }
2259   }
2260 }
2261 
2262 /// Analyze a call site for potential inlining.
2263 ///
2264 /// Returns true if inlining this call is viable, and false if it is not
2265 /// viable. It computes the cost and adjusts the threshold based on numerous
2266 /// factors and heuristics. If this method returns false but the computed cost
2267 /// is below the computed threshold, then inlining was forcibly disabled by
2268 /// some artifact of the routine.
2269 InlineResult CallAnalyzer::analyze() {
2270   ++NumCallsAnalyzed;
2271 
2272   auto Result = onAnalysisStart();
2273   if (!Result.isSuccess())
2274     return Result;
2275 
2276   if (F.empty())
2277     return InlineResult::success();
2278 
2279   Function *Caller = CandidateCall.getFunction();
2280   // Check if the caller function is recursive itself.
2281   for (User *U : Caller->users()) {
2282     CallBase *Call = dyn_cast<CallBase>(U);
2283     if (Call && Call->getFunction() == Caller) {
2284       IsCallerRecursive = true;
2285       break;
2286     }
2287   }
2288 
2289   // Populate our simplified values by mapping from function arguments to call
2290   // arguments with known important simplifications.
2291   auto CAI = CandidateCall.arg_begin();
2292   for (Argument &FAI : F.args()) {
2293     assert(CAI != CandidateCall.arg_end());
2294     if (Constant *C = dyn_cast<Constant>(CAI))
2295       SimplifiedValues[&FAI] = C;
2296 
2297     Value *PtrArg = *CAI;
2298     if (ConstantInt *C = stripAndComputeInBoundsConstantOffsets(PtrArg)) {
2299       ConstantOffsetPtrs[&FAI] = std::make_pair(PtrArg, C->getValue());
2300 
2301       // We can SROA any pointer arguments derived from alloca instructions.
2302       if (auto *SROAArg = dyn_cast<AllocaInst>(PtrArg)) {
2303         SROAArgValues[&FAI] = SROAArg;
2304         onInitializeSROAArg(SROAArg);
2305         EnabledSROAAllocas.insert(SROAArg);
2306       }
2307     }
2308     ++CAI;
2309   }
2310   NumConstantArgs = SimplifiedValues.size();
2311   NumConstantOffsetPtrArgs = ConstantOffsetPtrs.size();
2312   NumAllocaArgs = SROAArgValues.size();
2313 
2314   // FIXME: If a caller has multiple calls to a callee, we end up recomputing
2315   // the ephemeral values multiple times (and they're completely determined by
2316   // the callee, so this is purely duplicate work).
2317   SmallPtrSet<const Value *, 32> EphValues;
2318   CodeMetrics::collectEphemeralValues(&F, &GetAssumptionCache(F), EphValues);
2319 
2320   // The worklist of live basic blocks in the callee *after* inlining. We avoid
2321   // adding basic blocks of the callee which can be proven to be dead for this
2322   // particular call site in order to get more accurate cost estimates. This
2323   // requires a somewhat heavyweight iteration pattern: we need to walk the
2324   // basic blocks in a breadth-first order as we insert live successors. To
2325   // accomplish this, prioritizing for small iterations because we exit after
2326   // crossing our threshold, we use a small-size optimized SetVector.
2327   typedef SetVector<BasicBlock *, SmallVector<BasicBlock *, 16>,
2328                     SmallPtrSet<BasicBlock *, 16>>
2329       BBSetVector;
2330   BBSetVector BBWorklist;
2331   BBWorklist.insert(&F.getEntryBlock());
2332 
2333   // Note that we *must not* cache the size, this loop grows the worklist.
2334   for (unsigned Idx = 0; Idx != BBWorklist.size(); ++Idx) {
2335     if (shouldStop())
2336       break;
2337 
2338     BasicBlock *BB = BBWorklist[Idx];
2339     if (BB->empty())
2340       continue;
2341 
2342     onBlockStart(BB);
2343 
2344     // Disallow inlining a blockaddress with uses other than strictly callbr.
2345     // A blockaddress only has defined behavior for an indirect branch in the
2346     // same function, and we do not currently support inlining indirect
2347     // branches.  But, the inliner may not see an indirect branch that ends up
2348     // being dead code at a particular call site. If the blockaddress escapes
2349     // the function, e.g., via a global variable, inlining may lead to an
2350     // invalid cross-function reference.
2351     // FIXME: pr/39560: continue relaxing this overt restriction.
2352     if (BB->hasAddressTaken())
2353       for (User *U : BlockAddress::get(&*BB)->users())
2354         if (!isa<CallBrInst>(*U))
2355           return InlineResult::failure("blockaddress used outside of callbr");
2356 
2357     // Analyze the cost of this block. If we blow through the threshold, this
2358     // returns false, and we can bail on out.
2359     InlineResult IR = analyzeBlock(BB, EphValues);
2360     if (!IR.isSuccess())
2361       return IR;
2362 
2363     Instruction *TI = BB->getTerminator();
2364 
2365     // Add in the live successors by first checking whether we have terminator
2366     // that may be simplified based on the values simplified by this call.
2367     if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
2368       if (BI->isConditional()) {
2369         Value *Cond = BI->getCondition();
2370         if (ConstantInt *SimpleCond =
2371                 dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
2372           BasicBlock *NextBB = BI->getSuccessor(SimpleCond->isZero() ? 1 : 0);
2373           BBWorklist.insert(NextBB);
2374           KnownSuccessors[BB] = NextBB;
2375           findDeadBlocks(BB, NextBB);
2376           continue;
2377         }
2378       }
2379     } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
2380       Value *Cond = SI->getCondition();
2381       if (ConstantInt *SimpleCond =
2382               dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
2383         BasicBlock *NextBB = SI->findCaseValue(SimpleCond)->getCaseSuccessor();
2384         BBWorklist.insert(NextBB);
2385         KnownSuccessors[BB] = NextBB;
2386         findDeadBlocks(BB, NextBB);
2387         continue;
2388       }
2389     }
2390 
2391     // If we're unable to select a particular successor, just count all of
2392     // them.
2393     for (unsigned TIdx = 0, TSize = TI->getNumSuccessors(); TIdx != TSize;
2394          ++TIdx)
2395       BBWorklist.insert(TI->getSuccessor(TIdx));
2396 
2397     onBlockAnalyzed(BB);
2398   }
2399 
2400   bool OnlyOneCallAndLocalLinkage = F.hasLocalLinkage() && F.hasOneUse() &&
2401                                     &F == CandidateCall.getCalledFunction();
2402   // If this is a noduplicate call, we can still inline as long as
2403   // inlining this would cause the removal of the caller (so the instruction
2404   // is not actually duplicated, just moved).
2405   if (!OnlyOneCallAndLocalLinkage && ContainsNoDuplicateCall)
2406     return InlineResult::failure("noduplicate");
2407 
2408   return finalizeAnalysis();
2409 }
2410 
2411 void InlineCostCallAnalyzer::print() {
2412 #define DEBUG_PRINT_STAT(x) dbgs() << "      " #x ": " << x << "\n"
2413   if (PrintInstructionComments)
2414     F.print(dbgs(), &Writer);
2415   DEBUG_PRINT_STAT(NumConstantArgs);
2416   DEBUG_PRINT_STAT(NumConstantOffsetPtrArgs);
2417   DEBUG_PRINT_STAT(NumAllocaArgs);
2418   DEBUG_PRINT_STAT(NumConstantPtrCmps);
2419   DEBUG_PRINT_STAT(NumConstantPtrDiffs);
2420   DEBUG_PRINT_STAT(NumInstructionsSimplified);
2421   DEBUG_PRINT_STAT(NumInstructions);
2422   DEBUG_PRINT_STAT(SROACostSavings);
2423   DEBUG_PRINT_STAT(SROACostSavingsLost);
2424   DEBUG_PRINT_STAT(LoadEliminationCost);
2425   DEBUG_PRINT_STAT(ContainsNoDuplicateCall);
2426   DEBUG_PRINT_STAT(Cost);
2427   DEBUG_PRINT_STAT(Threshold);
2428 #undef DEBUG_PRINT_STAT
2429 }
2430 
2431 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2432 /// Dump stats about this call's analysis.
2433 LLVM_DUMP_METHOD void InlineCostCallAnalyzer::dump() {
2434   print();
2435 }
2436 #endif
2437 
2438 /// Test that there are no attribute conflicts between Caller and Callee
2439 ///        that prevent inlining.
2440 static bool functionsHaveCompatibleAttributes(
2441     Function *Caller, Function *Callee, TargetTransformInfo &TTI,
2442     function_ref<const TargetLibraryInfo &(Function &)> &GetTLI) {
2443   // Note that CalleeTLI must be a copy not a reference. The legacy pass manager
2444   // caches the most recently created TLI in the TargetLibraryInfoWrapperPass
2445   // object, and always returns the same object (which is overwritten on each
2446   // GetTLI call). Therefore we copy the first result.
2447   auto CalleeTLI = GetTLI(*Callee);
2448   return TTI.areInlineCompatible(Caller, Callee) &&
2449          GetTLI(*Caller).areInlineCompatible(CalleeTLI,
2450                                              InlineCallerSupersetNoBuiltin) &&
2451          AttributeFuncs::areInlineCompatible(*Caller, *Callee);
2452 }
2453 
2454 int llvm::getCallsiteCost(CallBase &Call, const DataLayout &DL) {
2455   int Cost = 0;
2456   for (unsigned I = 0, E = Call.arg_size(); I != E; ++I) {
2457     if (Call.isByValArgument(I)) {
2458       // We approximate the number of loads and stores needed by dividing the
2459       // size of the byval type by the target's pointer size.
2460       PointerType *PTy = cast<PointerType>(Call.getArgOperand(I)->getType());
2461       unsigned TypeSize = DL.getTypeSizeInBits(PTy->getElementType());
2462       unsigned AS = PTy->getAddressSpace();
2463       unsigned PointerSize = DL.getPointerSizeInBits(AS);
2464       // Ceiling division.
2465       unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;
2466 
2467       // If it generates more than 8 stores it is likely to be expanded as an
2468       // inline memcpy so we take that as an upper bound. Otherwise we assume
2469       // one load and one store per word copied.
2470       // FIXME: The maxStoresPerMemcpy setting from the target should be used
2471       // here instead of a magic number of 8, but it's not available via
2472       // DataLayout.
2473       NumStores = std::min(NumStores, 8U);
2474 
2475       Cost += 2 * NumStores * InlineConstants::InstrCost;
2476     } else {
2477       // For non-byval arguments subtract off one instruction per call
2478       // argument.
2479       Cost += InlineConstants::InstrCost;
2480     }
2481   }
2482   // The call instruction also disappears after inlining.
2483   Cost += InlineConstants::InstrCost + InlineConstants::CallPenalty;
2484   return Cost;
2485 }
2486 
2487 InlineCost llvm::getInlineCost(
2488     CallBase &Call, const InlineParams &Params, TargetTransformInfo &CalleeTTI,
2489     function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
2490     function_ref<const TargetLibraryInfo &(Function &)> GetTLI,
2491     function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
2492     ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE) {
2493   return getInlineCost(Call, Call.getCalledFunction(), Params, CalleeTTI,
2494                        GetAssumptionCache, GetTLI, GetBFI, PSI, ORE);
2495 }
2496 
2497 Optional<int> llvm::getInliningCostEstimate(
2498     CallBase &Call, TargetTransformInfo &CalleeTTI,
2499     function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
2500     function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
2501     ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE) {
2502   const InlineParams Params = {/* DefaultThreshold*/ 0,
2503                                /*HintThreshold*/ {},
2504                                /*ColdThreshold*/ {},
2505                                /*OptSizeThreshold*/ {},
2506                                /*OptMinSizeThreshold*/ {},
2507                                /*HotCallSiteThreshold*/ {},
2508                                /*LocallyHotCallSiteThreshold*/ {},
2509                                /*ColdCallSiteThreshold*/ {},
2510                                /*ComputeFullInlineCost*/ true,
2511                                /*EnableDeferral*/ true};
2512 
2513   InlineCostCallAnalyzer CA(*Call.getCalledFunction(), Call, Params, CalleeTTI,
2514                             GetAssumptionCache, GetBFI, PSI, ORE, true,
2515                             /*IgnoreThreshold*/ true);
2516   auto R = CA.analyze();
2517   if (!R.isSuccess())
2518     return None;
2519   return CA.getCost();
2520 }
2521 
2522 Optional<InlineResult> llvm::getAttributeBasedInliningDecision(
2523     CallBase &Call, Function *Callee, TargetTransformInfo &CalleeTTI,
2524     function_ref<const TargetLibraryInfo &(Function &)> GetTLI) {
2525 
2526   // Cannot inline indirect calls.
2527   if (!Callee)
2528     return InlineResult::failure("indirect call");
2529 
2530   // When callee coroutine function is inlined into caller coroutine function
2531   // before coro-split pass,
2532   // coro-early pass can not handle this quiet well.
2533   // So we won't inline the coroutine function if it have not been unsplited
2534   if (Callee->isPresplitCoroutine())
2535     return InlineResult::failure("unsplited coroutine call");
2536 
2537   // Never inline calls with byval arguments that does not have the alloca
2538   // address space. Since byval arguments can be replaced with a copy to an
2539   // alloca, the inlined code would need to be adjusted to handle that the
2540   // argument is in the alloca address space (so it is a little bit complicated
2541   // to solve).
2542   unsigned AllocaAS = Callee->getParent()->getDataLayout().getAllocaAddrSpace();
2543   for (unsigned I = 0, E = Call.arg_size(); I != E; ++I)
2544     if (Call.isByValArgument(I)) {
2545       PointerType *PTy = cast<PointerType>(Call.getArgOperand(I)->getType());
2546       if (PTy->getAddressSpace() != AllocaAS)
2547         return InlineResult::failure("byval arguments without alloca"
2548                                      " address space");
2549     }
2550 
2551   // Calls to functions with always-inline attributes should be inlined
2552   // whenever possible.
2553   if (Call.hasFnAttr(Attribute::AlwaysInline)) {
2554     auto IsViable = isInlineViable(*Callee);
2555     if (IsViable.isSuccess())
2556       return InlineResult::success();
2557     return InlineResult::failure(IsViable.getFailureReason());
2558   }
2559 
2560   // Never inline functions with conflicting attributes (unless callee has
2561   // always-inline attribute).
2562   Function *Caller = Call.getCaller();
2563   if (!functionsHaveCompatibleAttributes(Caller, Callee, CalleeTTI, GetTLI))
2564     return InlineResult::failure("conflicting attributes");
2565 
2566   // Don't inline this call if the caller has the optnone attribute.
2567   if (Caller->hasOptNone())
2568     return InlineResult::failure("optnone attribute");
2569 
2570   // Don't inline a function that treats null pointer as valid into a caller
2571   // that does not have this attribute.
2572   if (!Caller->nullPointerIsDefined() && Callee->nullPointerIsDefined())
2573     return InlineResult::failure("nullptr definitions incompatible");
2574 
2575   // Don't inline functions which can be interposed at link-time.
2576   if (Callee->isInterposable())
2577     return InlineResult::failure("interposable");
2578 
2579   // Don't inline functions marked noinline.
2580   if (Callee->hasFnAttribute(Attribute::NoInline))
2581     return InlineResult::failure("noinline function attribute");
2582 
2583   // Don't inline call sites marked noinline.
2584   if (Call.isNoInline())
2585     return InlineResult::failure("noinline call site attribute");
2586 
2587   // Don't inline functions if one does not have any stack protector attribute
2588   // but the other does.
2589   if (Caller->hasStackProtectorFnAttr() && !Callee->hasStackProtectorFnAttr())
2590     return InlineResult::failure(
2591         "stack protected caller but callee requested no stack protector");
2592   if (Callee->hasStackProtectorFnAttr() && !Caller->hasStackProtectorFnAttr())
2593     return InlineResult::failure(
2594         "stack protected callee but caller requested no stack protector");
2595 
2596   return None;
2597 }
2598 
2599 InlineCost llvm::getInlineCost(
2600     CallBase &Call, Function *Callee, const InlineParams &Params,
2601     TargetTransformInfo &CalleeTTI,
2602     function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
2603     function_ref<const TargetLibraryInfo &(Function &)> GetTLI,
2604     function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
2605     ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE) {
2606 
2607   auto UserDecision =
2608       llvm::getAttributeBasedInliningDecision(Call, Callee, CalleeTTI, GetTLI);
2609 
2610   if (UserDecision.hasValue()) {
2611     if (UserDecision->isSuccess())
2612       return llvm::InlineCost::getAlways("always inline attribute");
2613     return llvm::InlineCost::getNever(UserDecision->getFailureReason());
2614   }
2615 
2616   LLVM_DEBUG(llvm::dbgs() << "      Analyzing call of " << Callee->getName()
2617                           << "... (caller:" << Call.getCaller()->getName()
2618                           << ")\n");
2619 
2620   InlineCostCallAnalyzer CA(*Callee, Call, Params, CalleeTTI,
2621                             GetAssumptionCache, GetBFI, PSI, ORE);
2622   InlineResult ShouldInline = CA.analyze();
2623 
2624   LLVM_DEBUG(CA.dump());
2625 
2626   // Always make cost benefit based decision explicit.
2627   // We use always/never here since threshold is not meaningful,
2628   // as it's not what drives cost-benefit analysis.
2629   if (CA.wasDecidedByCostBenefit()) {
2630     if (ShouldInline.isSuccess())
2631       return InlineCost::getAlways("benefit over cost");
2632     else
2633       return InlineCost::getNever("cost over benefit");
2634   }
2635 
2636   // Check if there was a reason to force inlining or no inlining.
2637   if (!ShouldInline.isSuccess() && CA.getCost() < CA.getThreshold())
2638     return InlineCost::getNever(ShouldInline.getFailureReason());
2639   if (ShouldInline.isSuccess() && CA.getCost() >= CA.getThreshold())
2640     return InlineCost::getAlways("empty function");
2641 
2642   return llvm::InlineCost::get(CA.getCost(), CA.getThreshold());
2643 }
2644 
2645 InlineResult llvm::isInlineViable(Function &F) {
2646   bool ReturnsTwice = F.hasFnAttribute(Attribute::ReturnsTwice);
2647   for (BasicBlock &BB : F) {
2648     // Disallow inlining of functions which contain indirect branches.
2649     if (isa<IndirectBrInst>(BB.getTerminator()))
2650       return InlineResult::failure("contains indirect branches");
2651 
2652     // Disallow inlining of blockaddresses which are used by non-callbr
2653     // instructions.
2654     if (BB.hasAddressTaken())
2655       for (User *U : BlockAddress::get(&BB)->users())
2656         if (!isa<CallBrInst>(*U))
2657           return InlineResult::failure("blockaddress used outside of callbr");
2658 
2659     for (auto &II : BB) {
2660       CallBase *Call = dyn_cast<CallBase>(&II);
2661       if (!Call)
2662         continue;
2663 
2664       // Disallow recursive calls.
2665       Function *Callee = Call->getCalledFunction();
2666       if (&F == Callee)
2667         return InlineResult::failure("recursive call");
2668 
2669       // Disallow calls which expose returns-twice to a function not previously
2670       // attributed as such.
2671       if (!ReturnsTwice && isa<CallInst>(Call) &&
2672           cast<CallInst>(Call)->canReturnTwice())
2673         return InlineResult::failure("exposes returns-twice attribute");
2674 
2675       if (Callee)
2676         switch (Callee->getIntrinsicID()) {
2677         default:
2678           break;
2679         case llvm::Intrinsic::icall_branch_funnel:
2680           // Disallow inlining of @llvm.icall.branch.funnel because current
2681           // backend can't separate call targets from call arguments.
2682           return InlineResult::failure(
2683               "disallowed inlining of @llvm.icall.branch.funnel");
2684         case llvm::Intrinsic::localescape:
2685           // Disallow inlining functions that call @llvm.localescape. Doing this
2686           // correctly would require major changes to the inliner.
2687           return InlineResult::failure(
2688               "disallowed inlining of @llvm.localescape");
2689         case llvm::Intrinsic::vastart:
2690           // Disallow inlining of functions that initialize VarArgs with
2691           // va_start.
2692           return InlineResult::failure(
2693               "contains VarArgs initialized with va_start");
2694         }
2695     }
2696   }
2697 
2698   return InlineResult::success();
2699 }
2700 
2701 // APIs to create InlineParams based on command line flags and/or other
2702 // parameters.
2703 
2704 InlineParams llvm::getInlineParams(int Threshold) {
2705   InlineParams Params;
2706 
2707   // This field is the threshold to use for a callee by default. This is
2708   // derived from one or more of:
2709   //  * optimization or size-optimization levels,
2710   //  * a value passed to createFunctionInliningPass function, or
2711   //  * the -inline-threshold flag.
2712   //  If the -inline-threshold flag is explicitly specified, that is used
2713   //  irrespective of anything else.
2714   if (InlineThreshold.getNumOccurrences() > 0)
2715     Params.DefaultThreshold = InlineThreshold;
2716   else
2717     Params.DefaultThreshold = Threshold;
2718 
2719   // Set the HintThreshold knob from the -inlinehint-threshold.
2720   Params.HintThreshold = HintThreshold;
2721 
2722   // Set the HotCallSiteThreshold knob from the -hot-callsite-threshold.
2723   Params.HotCallSiteThreshold = HotCallSiteThreshold;
2724 
2725   // If the -locally-hot-callsite-threshold is explicitly specified, use it to
2726   // populate LocallyHotCallSiteThreshold. Later, we populate
2727   // Params.LocallyHotCallSiteThreshold from -locally-hot-callsite-threshold if
2728   // we know that optimization level is O3 (in the getInlineParams variant that
2729   // takes the opt and size levels).
2730   // FIXME: Remove this check (and make the assignment unconditional) after
2731   // addressing size regression issues at O2.
2732   if (LocallyHotCallSiteThreshold.getNumOccurrences() > 0)
2733     Params.LocallyHotCallSiteThreshold = LocallyHotCallSiteThreshold;
2734 
2735   // Set the ColdCallSiteThreshold knob from the
2736   // -inline-cold-callsite-threshold.
2737   Params.ColdCallSiteThreshold = ColdCallSiteThreshold;
2738 
2739   // Set the OptMinSizeThreshold and OptSizeThreshold params only if the
2740   // -inlinehint-threshold commandline option is not explicitly given. If that
2741   // option is present, then its value applies even for callees with size and
2742   // minsize attributes.
2743   // If the -inline-threshold is not specified, set the ColdThreshold from the
2744   // -inlinecold-threshold even if it is not explicitly passed. If
2745   // -inline-threshold is specified, then -inlinecold-threshold needs to be
2746   // explicitly specified to set the ColdThreshold knob
2747   if (InlineThreshold.getNumOccurrences() == 0) {
2748     Params.OptMinSizeThreshold = InlineConstants::OptMinSizeThreshold;
2749     Params.OptSizeThreshold = InlineConstants::OptSizeThreshold;
2750     Params.ColdThreshold = ColdThreshold;
2751   } else if (ColdThreshold.getNumOccurrences() > 0) {
2752     Params.ColdThreshold = ColdThreshold;
2753   }
2754   return Params;
2755 }
2756 
2757 InlineParams llvm::getInlineParams() {
2758   return getInlineParams(DefaultThreshold);
2759 }
2760 
2761 // Compute the default threshold for inlining based on the opt level and the
2762 // size opt level.
2763 static int computeThresholdFromOptLevels(unsigned OptLevel,
2764                                          unsigned SizeOptLevel) {
2765   if (OptLevel > 2)
2766     return InlineConstants::OptAggressiveThreshold;
2767   if (SizeOptLevel == 1) // -Os
2768     return InlineConstants::OptSizeThreshold;
2769   if (SizeOptLevel == 2) // -Oz
2770     return InlineConstants::OptMinSizeThreshold;
2771   return DefaultThreshold;
2772 }
2773 
2774 InlineParams llvm::getInlineParams(unsigned OptLevel, unsigned SizeOptLevel) {
2775   auto Params =
2776       getInlineParams(computeThresholdFromOptLevels(OptLevel, SizeOptLevel));
2777   // At O3, use the value of -locally-hot-callsite-threshold option to populate
2778   // Params.LocallyHotCallSiteThreshold. Below O3, this flag has effect only
2779   // when it is specified explicitly.
2780   if (OptLevel > 2)
2781     Params.LocallyHotCallSiteThreshold = LocallyHotCallSiteThreshold;
2782   return Params;
2783 }
2784 
2785 PreservedAnalyses
2786 InlineCostAnnotationPrinterPass::run(Function &F,
2787                                      FunctionAnalysisManager &FAM) {
2788   PrintInstructionComments = true;
2789   std::function<AssumptionCache &(Function &)> GetAssumptionCache = [&](
2790       Function &F) -> AssumptionCache & {
2791     return FAM.getResult<AssumptionAnalysis>(F);
2792   };
2793   Module *M = F.getParent();
2794   ProfileSummaryInfo PSI(*M);
2795   DataLayout DL(M);
2796   TargetTransformInfo TTI(DL);
2797   // FIXME: Redesign the usage of InlineParams to expand the scope of this pass.
2798   // In the current implementation, the type of InlineParams doesn't matter as
2799   // the pass serves only for verification of inliner's decisions.
2800   // We can add a flag which determines InlineParams for this run. Right now,
2801   // the default InlineParams are used.
2802   const InlineParams Params = llvm::getInlineParams();
2803   for (BasicBlock &BB : F) {
2804     for (Instruction &I : BB) {
2805       if (CallInst *CI = dyn_cast<CallInst>(&I)) {
2806         Function *CalledFunction = CI->getCalledFunction();
2807         if (!CalledFunction || CalledFunction->isDeclaration())
2808           continue;
2809         OptimizationRemarkEmitter ORE(CalledFunction);
2810         InlineCostCallAnalyzer ICCA(*CalledFunction, *CI, Params, TTI,
2811                                     GetAssumptionCache, nullptr, &PSI, &ORE);
2812         ICCA.analyze();
2813         OS << "      Analyzing call of " << CalledFunction->getName()
2814            << "... (caller:" << CI->getCaller()->getName() << ")\n";
2815         ICCA.print();
2816       }
2817     }
2818   }
2819   return PreservedAnalyses::all();
2820 }
2821