1 //===- InlineCost.cpp - Cost analysis for inliner -------------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements inline cost analysis.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Analysis/InlineCost.h"
15 #include "llvm/ADT/STLExtras.h"
16 #include "llvm/ADT/SetVector.h"
17 #include "llvm/ADT/SmallPtrSet.h"
18 #include "llvm/ADT/SmallVector.h"
19 #include "llvm/ADT/Statistic.h"
20 #include "llvm/Analysis/AssumptionCache.h"
21 #include "llvm/Analysis/BlockFrequencyInfo.h"
22 #include "llvm/Analysis/CodeMetrics.h"
23 #include "llvm/Analysis/ConstantFolding.h"
24 #include "llvm/Analysis/InstructionSimplify.h"
25 #include "llvm/Analysis/ProfileSummaryInfo.h"
26 #include "llvm/Analysis/TargetTransformInfo.h"
27 #include "llvm/IR/CallSite.h"
28 #include "llvm/IR/CallingConv.h"
29 #include "llvm/IR/DataLayout.h"
30 #include "llvm/IR/GetElementPtrTypeIterator.h"
31 #include "llvm/IR/GlobalAlias.h"
32 #include "llvm/IR/InstVisitor.h"
33 #include "llvm/IR/IntrinsicInst.h"
34 #include "llvm/IR/Operator.h"
35 #include "llvm/Support/Debug.h"
36 #include "llvm/Support/raw_ostream.h"
37 
38 using namespace llvm;
39 
40 #define DEBUG_TYPE "inline-cost"
41 
42 STATISTIC(NumCallsAnalyzed, "Number of call sites analyzed");
43 
44 static cl::opt<int> InlineThreshold(
45     "inline-threshold", cl::Hidden, cl::init(225), cl::ZeroOrMore,
46     cl::desc("Control the amount of inlining to perform (default = 225)"));
47 
48 static cl::opt<int> HintThreshold(
49     "inlinehint-threshold", cl::Hidden, cl::init(325),
50     cl::desc("Threshold for inlining functions with inline hint"));
51 
52 static cl::opt<int>
53     ColdCallSiteThreshold("inline-cold-callsite-threshold", cl::Hidden,
54                           cl::init(45),
55                           cl::desc("Threshold for inlining cold callsites"));
56 
// We introduce this threshold to help performance of instrumentation-based
// PGO before we actually hook up the inliner with analysis passes such as BPI
// and BFI.
60 static cl::opt<int> ColdThreshold(
61     "inlinecold-threshold", cl::Hidden, cl::init(225),
62     cl::desc("Threshold for inlining functions with cold attribute"));
63 
64 static cl::opt<int>
65     HotCallSiteThreshold("hot-callsite-threshold", cl::Hidden, cl::init(3000),
66                          cl::ZeroOrMore,
67                          cl::desc("Threshold for hot callsites "));
68 
69 namespace {
70 
71 class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
72   typedef InstVisitor<CallAnalyzer, bool> Base;
73   friend class InstVisitor<CallAnalyzer, bool>;
74 
75   /// The TargetTransformInfo available for this compilation.
76   const TargetTransformInfo &TTI;
77 
78   /// Getter for the cache of @llvm.assume intrinsics.
79   std::function<AssumptionCache &(Function &)> &GetAssumptionCache;
80 
81   /// Getter for BlockFrequencyInfo
82   Optional<function_ref<BlockFrequencyInfo &(Function &)>> &GetBFI;
83 
84   /// Profile summary information.
85   ProfileSummaryInfo *PSI;
86 
87   /// The called function.
88   Function &F;
89 
90   /// The candidate callsite being analyzed. Please do not use this to do
91   /// analysis in the caller function; we want the inline cost query to be
92   /// easily cacheable. Instead, use the cover function paramHasAttr.
93   CallSite CandidateCS;
94 
95   /// Tunable parameters that control the analysis.
96   const InlineParams &Params;
97 
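  /// The working inlining threshold for this call site, adjusted by
  /// updateThreshold() and by the bonuses applied in analyzeCall().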
98   int Threshold;
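  /// The accumulated estimated cost of inlining this call site.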
99   int Cost;
100 
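  // Flags recording properties of the caller and callee that constrain or
  // abort the analysis (e.g. recursion, dynamic allocas, indirectbr).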
101   bool IsCallerRecursive;
102   bool IsRecursiveCall;
103   bool ExposesReturnsTwice;
104   bool HasDynamicAlloca;
105   bool ContainsNoDuplicateCall;
106   bool HasReturn;
107   bool HasIndirectBr;
108   bool HasFrameEscape;
109 
110   /// Number of bytes allocated statically by the callee.
111   uint64_t AllocatedSize;
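  /// The number of instructions analyzed so far and, of those, the number of
  /// vector instructions.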
112   unsigned NumInstructions, NumVectorInstructions;
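  /// Speculative bonuses applied to the threshold for vector-dense callees;
  /// see analyzeCall().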
113   int FiftyPercentVectorBonus, TenPercentVectorBonus;
114   int VectorBonus;
115 
116   /// While we walk the potentially-inlined instructions, we build up and
117   /// maintain a mapping of simplified values specific to this callsite. The
118   /// idea is to propagate any special information we have about arguments to
119   /// this call through the inlinable section of the function, and account for
120   /// likely simplifications post-inlining. The most important aspect we track
121   /// is CFG altering simplifications -- when we prove a basic block dead, that
122   /// can cause dramatic shifts in the cost of inlining a function.
123   DenseMap<Value *, Constant *> SimplifiedValues;
124 
125   /// Keep track of the values which map back (through function arguments) to
126   /// allocas on the caller stack which could be simplified through SROA.
127   DenseMap<Value *, Value *> SROAArgValues;
128 
129   /// The mapping of caller Alloca values to their accumulated cost savings. If
130   /// we have to disable SROA for one of the allocas, this tells us how much
131   /// cost must be added.
132   DenseMap<Value *, int> SROAArgCosts;
133 
134   /// Keep track of values which map to a pointer base and constant offset.
135   DenseMap<Value *, std::pair<Value *, APInt>> ConstantOffsetPtrs;
136 
137   // Custom simplification helper routines.
138   bool isAllocaDerivedArg(Value *V);
139   bool lookupSROAArgAndCost(Value *V, Value *&Arg,
140                             DenseMap<Value *, int>::iterator &CostIt);
141   void disableSROA(DenseMap<Value *, int>::iterator CostIt);
142   void disableSROA(Value *V);
143   void accumulateSROACost(DenseMap<Value *, int>::iterator CostIt,
144                           int InstructionCost);
145   bool isGEPOffsetConstant(GetElementPtrInst &GEP);
146   bool isGEPFree(GetElementPtrInst &GEP);
147   bool accumulateGEPOffset(GEPOperator &GEP, APInt &Offset);
148   bool simplifyCallSite(Function *F, CallSite CS);
149   ConstantInt *stripAndComputeInBoundsConstantOffsets(Value *&V);
150 
151   /// Return true if the given argument to the function being considered for
152   /// inlining has the given attribute set either at the call site or the
153   /// function declaration.  Primarily used to inspect call site specific
154   /// attributes since these can be more precise than the ones on the callee
155   /// itself.
156   bool paramHasAttr(Argument *A, Attribute::AttrKind Attr);
157 
  /// Return true if the given value is known to be non-null within the callee
  /// if inlined through this particular callsite.
160   bool isKnownNonNullInCallee(Value *V);
161 
162   /// Update Threshold based on callsite properties such as callee
163   /// attributes and callee hotness for PGO builds. The Callee is explicitly
164   /// passed to support analyzing indirect calls whose target is inferred by
165   /// analysis.
166   void updateThreshold(CallSite CS, Function &Callee);
167 
168   /// Return true if size growth is allowed when inlining the callee at CS.
169   bool allowSizeGrowth(CallSite CS);
170 
171   // Custom analysis routines.
172   bool analyzeBlock(BasicBlock *BB, SmallPtrSetImpl<const Value *> &EphValues);
173 
174   // Disable several entry points to the visitor so we don't accidentally use
175   // them by declaring but not defining them here.
176   void visit(Module *);
177   void visit(Module &);
178   void visit(Function *);
179   void visit(Function &);
180   void visit(BasicBlock *);
181   void visit(BasicBlock &);
182 
183   // Provide base case for our instruction visit.
184   bool visitInstruction(Instruction &I);
185 
186   // Our visit overrides.
187   bool visitAlloca(AllocaInst &I);
188   bool visitPHI(PHINode &I);
189   bool visitGetElementPtr(GetElementPtrInst &I);
190   bool visitBitCast(BitCastInst &I);
191   bool visitPtrToInt(PtrToIntInst &I);
192   bool visitIntToPtr(IntToPtrInst &I);
193   bool visitCastInst(CastInst &I);
194   bool visitUnaryInstruction(UnaryInstruction &I);
195   bool visitCmpInst(CmpInst &I);
196   bool visitSub(BinaryOperator &I);
197   bool visitBinaryOperator(BinaryOperator &I);
198   bool visitLoad(LoadInst &I);
199   bool visitStore(StoreInst &I);
200   bool visitExtractValue(ExtractValueInst &I);
201   bool visitInsertValue(InsertValueInst &I);
202   bool visitCallSite(CallSite CS);
203   bool visitReturnInst(ReturnInst &RI);
204   bool visitBranchInst(BranchInst &BI);
205   bool visitSwitchInst(SwitchInst &SI);
206   bool visitIndirectBrInst(IndirectBrInst &IBI);
207   bool visitResumeInst(ResumeInst &RI);
208   bool visitCleanupReturnInst(CleanupReturnInst &RI);
209   bool visitCatchReturnInst(CatchReturnInst &RI);
210   bool visitUnreachableInst(UnreachableInst &I);
211 
212 public:
213   CallAnalyzer(const TargetTransformInfo &TTI,
214                std::function<AssumptionCache &(Function &)> &GetAssumptionCache,
215                Optional<function_ref<BlockFrequencyInfo &(Function &)>> &GetBFI,
216                ProfileSummaryInfo *PSI, Function &Callee, CallSite CSArg,
217                const InlineParams &Params)
218       : TTI(TTI), GetAssumptionCache(GetAssumptionCache), GetBFI(GetBFI),
219         PSI(PSI), F(Callee), CandidateCS(CSArg), Params(Params),
220         Threshold(Params.DefaultThreshold), Cost(0), IsCallerRecursive(false),
221         IsRecursiveCall(false), ExposesReturnsTwice(false),
222         HasDynamicAlloca(false), ContainsNoDuplicateCall(false),
223         HasReturn(false), HasIndirectBr(false), HasFrameEscape(false),
224         AllocatedSize(0), NumInstructions(0), NumVectorInstructions(0),
225         FiftyPercentVectorBonus(0), TenPercentVectorBonus(0), VectorBonus(0),
226         NumConstantArgs(0), NumConstantOffsetPtrArgs(0), NumAllocaArgs(0),
227         NumConstantPtrCmps(0), NumConstantPtrDiffs(0),
228         NumInstructionsSimplified(0), SROACostSavings(0),
229         SROACostSavingsLost(0) {}
230 
231   bool analyzeCall(CallSite CS);
232 
233   int getThreshold() { return Threshold; }
234   int getCost() { return Cost; }
235 
236   // Keep a bunch of stats about the cost savings found so we can print them
237   // out when debugging.
238   unsigned NumConstantArgs;
239   unsigned NumConstantOffsetPtrArgs;
240   unsigned NumAllocaArgs;
241   unsigned NumConstantPtrCmps;
242   unsigned NumConstantPtrDiffs;
243   unsigned NumInstructionsSimplified;
244   unsigned SROACostSavings;
245   unsigned SROACostSavingsLost;
246 
247   void dump();
248 };
249 
250 } // namespace
251 
252 /// \brief Test whether the given value is an Alloca-derived function argument.
253 bool CallAnalyzer::isAllocaDerivedArg(Value *V) {
254   return SROAArgValues.count(V);
255 }
256 
257 /// \brief Lookup the SROA-candidate argument and cost iterator which V maps to.
258 /// Returns false if V does not map to a SROA-candidate.
259 bool CallAnalyzer::lookupSROAArgAndCost(
260     Value *V, Value *&Arg, DenseMap<Value *, int>::iterator &CostIt) {
261   if (SROAArgValues.empty() || SROAArgCosts.empty())
262     return false;
263 
264   DenseMap<Value *, Value *>::iterator ArgIt = SROAArgValues.find(V);
265   if (ArgIt == SROAArgValues.end())
266     return false;
267 
268   Arg = ArgIt->second;
269   CostIt = SROAArgCosts.find(Arg);
270   return CostIt != SROAArgCosts.end();
271 }
272 
273 /// \brief Disable SROA for the candidate marked by this cost iterator.
274 ///
275 /// This marks the candidate as no longer viable for SROA, and adds the cost
276 /// savings associated with it back into the inline cost measurement.
277 void CallAnalyzer::disableSROA(DenseMap<Value *, int>::iterator CostIt) {
278   // If we're no longer able to perform SROA we need to undo its cost savings
279   // and prevent subsequent analysis.
280   Cost += CostIt->second;
281   SROACostSavings -= CostIt->second;
282   SROACostSavingsLost += CostIt->second;
283   SROAArgCosts.erase(CostIt);
284 }
285 
286 /// \brief If 'V' maps to a SROA candidate, disable SROA for it.
287 void CallAnalyzer::disableSROA(Value *V) {
288   Value *SROAArg;
289   DenseMap<Value *, int>::iterator CostIt;
290   if (lookupSROAArgAndCost(V, SROAArg, CostIt))
291     disableSROA(CostIt);
292 }
293 
294 /// \brief Accumulate the given cost for a particular SROA candidate.
295 void CallAnalyzer::accumulateSROACost(DenseMap<Value *, int>::iterator CostIt,
296                                       int InstructionCost) {
297   CostIt->second += InstructionCost;
298   SROACostSavings += InstructionCost;
299 }
300 
301 /// \brief Check whether a GEP's indices are all constant.
302 ///
303 /// Respects any simplified values known during the analysis of this callsite.
304 bool CallAnalyzer::isGEPOffsetConstant(GetElementPtrInst &GEP) {
305   for (User::op_iterator I = GEP.idx_begin(), E = GEP.idx_end(); I != E; ++I)
306     if (!isa<Constant>(*I) && !SimplifiedValues.lookup(*I))
307       return false;
308 
309   return true;
310 }
311 
312 /// \brief Accumulate a constant GEP offset into an APInt if possible.
313 ///
314 /// Returns false if unable to compute the offset for any reason. Respects any
315 /// simplified values known during the analysis of this callsite.
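///
/// For example, a GEP into a pointer to {i32, i32} with indices (1, 1)
/// accumulates 8 bytes (one struct stride) plus 4 bytes (the offset of field
/// 1), for a total offset of 12.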
316 bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
317   const DataLayout &DL = F.getParent()->getDataLayout();
318   unsigned IntPtrWidth = DL.getPointerSizeInBits();
319   assert(IntPtrWidth == Offset.getBitWidth());
320 
321   for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
322        GTI != GTE; ++GTI) {
323     ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
324     if (!OpC)
325       if (Constant *SimpleOp = SimplifiedValues.lookup(GTI.getOperand()))
326         OpC = dyn_cast<ConstantInt>(SimpleOp);
327     if (!OpC)
328       return false;
329     if (OpC->isZero())
330       continue;
331 
332     // Handle a struct index, which adds its field offset to the pointer.
333     if (StructType *STy = GTI.getStructTypeOrNull()) {
334       unsigned ElementIdx = OpC->getZExtValue();
335       const StructLayout *SL = DL.getStructLayout(STy);
336       Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
337       continue;
338     }
339 
340     APInt TypeSize(IntPtrWidth, DL.getTypeAllocSize(GTI.getIndexedType()));
341     Offset += OpC->getValue().sextOrTrunc(IntPtrWidth) * TypeSize;
342   }
343   return true;
344 }
345 
346 /// \brief Use TTI to check whether a GEP is free.
347 ///
348 /// Respects any simplified values known during the analysis of this callsite.
349 bool CallAnalyzer::isGEPFree(GetElementPtrInst &GEP) {
350   SmallVector<Value *, 4> Indices;
351   for (User::op_iterator I = GEP.idx_begin(), E = GEP.idx_end(); I != E; ++I)
352     if (Constant *SimpleOp = SimplifiedValues.lookup(*I))
      Indices.push_back(SimpleOp);
    else
      Indices.push_back(*I);
356   return TargetTransformInfo::TCC_Free ==
357          TTI.getGEPCost(GEP.getSourceElementType(), GEP.getPointerOperand(),
358                         Indices);
359 }
360 
361 bool CallAnalyzer::visitAlloca(AllocaInst &I) {
362   // Check whether inlining will turn a dynamic alloca into a static
363   // alloca and handle that case.
364   if (I.isArrayAllocation()) {
365     Constant *Size = SimplifiedValues.lookup(I.getArraySize());
366     if (auto *AllocSize = dyn_cast_or_null<ConstantInt>(Size)) {
367       const DataLayout &DL = F.getParent()->getDataLayout();
368       Type *Ty = I.getAllocatedType();
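      // Saturating arithmetic keeps AllocatedSize from overflowing on
      // pathologically large allocations.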
369       AllocatedSize = SaturatingMultiplyAdd(
370           AllocSize->getLimitedValue(), DL.getTypeAllocSize(Ty), AllocatedSize);
371       return Base::visitAlloca(I);
372     }
373   }
374 
375   // Accumulate the allocated size.
376   if (I.isStaticAlloca()) {
377     const DataLayout &DL = F.getParent()->getDataLayout();
378     Type *Ty = I.getAllocatedType();
379     AllocatedSize = SaturatingAdd(DL.getTypeAllocSize(Ty), AllocatedSize);
380   }
381 
382   // We will happily inline static alloca instructions.
383   if (I.isStaticAlloca())
384     return Base::visitAlloca(I);
385 
386   // FIXME: This is overly conservative. Dynamic allocas are inefficient for
387   // a variety of reasons, and so we would like to not inline them into
388   // functions which don't currently have a dynamic alloca. This simply
389   // disables inlining altogether in the presence of a dynamic alloca.
390   HasDynamicAlloca = true;
391   return false;
392 }
393 
394 bool CallAnalyzer::visitPHI(PHINode &I) {
395   // FIXME: We should potentially be tracking values through phi nodes,
396   // especially when they collapse to a single value due to deleted CFG edges
397   // during inlining.
398 
399   // FIXME: We need to propagate SROA *disabling* through phi nodes, even
  // though we don't want to propagate its bonuses. The idea is to disable
401   // SROA if it *might* be used in an inappropriate manner.
402 
403   // Phi nodes are always zero-cost.
404   return true;
405 }
406 
407 bool CallAnalyzer::visitGetElementPtr(GetElementPtrInst &I) {
408   Value *SROAArg;
409   DenseMap<Value *, int>::iterator CostIt;
410   bool SROACandidate =
411       lookupSROAArgAndCost(I.getPointerOperand(), SROAArg, CostIt);
412 
413   // Try to fold GEPs of constant-offset call site argument pointers. This
414   // requires target data and inbounds GEPs.
415   if (I.isInBounds()) {
416     // Check if we have a base + offset for the pointer.
417     Value *Ptr = I.getPointerOperand();
418     std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Ptr);
419     if (BaseAndOffset.first) {
420       // Check if the offset of this GEP is constant, and if so accumulate it
421       // into Offset.
422       if (!accumulateGEPOffset(cast<GEPOperator>(I), BaseAndOffset.second)) {
        // Non-constant GEPs aren't folded, and they disable SROA.
424         if (SROACandidate)
425           disableSROA(CostIt);
426         return isGEPFree(I);
427       }
428 
429       // Add the result as a new mapping to Base + Offset.
430       ConstantOffsetPtrs[&I] = BaseAndOffset;
431 
      // Also handle SROA candidates here; we already know that the GEP has
      // all-constant indices.
434       if (SROACandidate)
435         SROAArgValues[&I] = SROAArg;
436 
437       return true;
438     }
439   }
440 
441   if (isGEPOffsetConstant(I)) {
442     if (SROACandidate)
443       SROAArgValues[&I] = SROAArg;
444 
445     // Constant GEPs are modeled as free.
446     return true;
447   }
448 
449   // Variable GEPs will require math and will disable SROA.
450   if (SROACandidate)
451     disableSROA(CostIt);
452   return isGEPFree(I);
453 }
454 
455 bool CallAnalyzer::visitBitCast(BitCastInst &I) {
456   // Propagate constants through bitcasts.
457   Constant *COp = dyn_cast<Constant>(I.getOperand(0));
458   if (!COp)
459     COp = SimplifiedValues.lookup(I.getOperand(0));
460   if (COp)
461     if (Constant *C = ConstantExpr::getBitCast(COp, I.getType())) {
462       SimplifiedValues[&I] = C;
463       return true;
464     }
465 
466   // Track base/offsets through casts
467   std::pair<Value *, APInt> BaseAndOffset =
468       ConstantOffsetPtrs.lookup(I.getOperand(0));
  // Casts don't change the offset, so just propagate the base/offset pair.
470   if (BaseAndOffset.first)
471     ConstantOffsetPtrs[&I] = BaseAndOffset;
472 
473   // Also look for SROA candidates here.
474   Value *SROAArg;
475   DenseMap<Value *, int>::iterator CostIt;
476   if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt))
477     SROAArgValues[&I] = SROAArg;
478 
479   // Bitcasts are always zero cost.
480   return true;
481 }
482 
483 bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
484   // Propagate constants through ptrtoint.
485   Constant *COp = dyn_cast<Constant>(I.getOperand(0));
486   if (!COp)
487     COp = SimplifiedValues.lookup(I.getOperand(0));
488   if (COp)
489     if (Constant *C = ConstantExpr::getPtrToInt(COp, I.getType())) {
490       SimplifiedValues[&I] = C;
491       return true;
492     }
493 
494   // Track base/offset pairs when converted to a plain integer provided the
495   // integer is large enough to represent the pointer.
496   unsigned IntegerSize = I.getType()->getScalarSizeInBits();
497   const DataLayout &DL = F.getParent()->getDataLayout();
498   if (IntegerSize >= DL.getPointerSizeInBits()) {
499     std::pair<Value *, APInt> BaseAndOffset =
500         ConstantOffsetPtrs.lookup(I.getOperand(0));
501     if (BaseAndOffset.first)
502       ConstantOffsetPtrs[&I] = BaseAndOffset;
503   }
504 
505   // This is really weird. Technically, ptrtoint will disable SROA. However,
506   // unless that ptrtoint is *used* somewhere in the live basic blocks after
507   // inlining, it will be nuked, and SROA should proceed. All of the uses which
508   // would block SROA would also block SROA if applied directly to a pointer,
509   // and so we can just add the integer in here. The only places where SROA is
510   // preserved either cannot fire on an integer, or won't in-and-of themselves
511   // disable SROA (ext) w/o some later use that we would see and disable.
512   Value *SROAArg;
513   DenseMap<Value *, int>::iterator CostIt;
514   if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt))
515     SROAArgValues[&I] = SROAArg;
516 
517   return TargetTransformInfo::TCC_Free == TTI.getUserCost(&I);
518 }
519 
520 bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
  // Propagate constants through inttoptr.
522   Constant *COp = dyn_cast<Constant>(I.getOperand(0));
523   if (!COp)
524     COp = SimplifiedValues.lookup(I.getOperand(0));
525   if (COp)
526     if (Constant *C = ConstantExpr::getIntToPtr(COp, I.getType())) {
527       SimplifiedValues[&I] = C;
528       return true;
529     }
530 
531   // Track base/offset pairs when round-tripped through a pointer without
532   // modifications provided the integer is not too large.
533   Value *Op = I.getOperand(0);
534   unsigned IntegerSize = Op->getType()->getScalarSizeInBits();
535   const DataLayout &DL = F.getParent()->getDataLayout();
536   if (IntegerSize <= DL.getPointerSizeInBits()) {
537     std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op);
538     if (BaseAndOffset.first)
539       ConstantOffsetPtrs[&I] = BaseAndOffset;
540   }
541 
542   // "Propagate" SROA here in the same manner as we do for ptrtoint above.
543   Value *SROAArg;
544   DenseMap<Value *, int>::iterator CostIt;
545   if (lookupSROAArgAndCost(Op, SROAArg, CostIt))
546     SROAArgValues[&I] = SROAArg;
547 
548   return TargetTransformInfo::TCC_Free == TTI.getUserCost(&I);
549 }
550 
551 bool CallAnalyzer::visitCastInst(CastInst &I) {
  // Propagate constants through casts.
553   Constant *COp = dyn_cast<Constant>(I.getOperand(0));
554   if (!COp)
555     COp = SimplifiedValues.lookup(I.getOperand(0));
556   if (COp)
557     if (Constant *C = ConstantExpr::getCast(I.getOpcode(), COp, I.getType())) {
558       SimplifiedValues[&I] = C;
559       return true;
560     }
561 
562   // Disable SROA in the face of arbitrary casts we don't whitelist elsewhere.
563   disableSROA(I.getOperand(0));
564 
565   return TargetTransformInfo::TCC_Free == TTI.getUserCost(&I);
566 }
567 
568 bool CallAnalyzer::visitUnaryInstruction(UnaryInstruction &I) {
569   Value *Operand = I.getOperand(0);
570   Constant *COp = dyn_cast<Constant>(Operand);
571   if (!COp)
572     COp = SimplifiedValues.lookup(Operand);
573   if (COp) {
574     const DataLayout &DL = F.getParent()->getDataLayout();
575     if (Constant *C = ConstantFoldInstOperands(&I, COp, DL)) {
576       SimplifiedValues[&I] = C;
577       return true;
578     }
579   }
580 
581   // Disable any SROA on the argument to arbitrary unary operators.
582   disableSROA(Operand);
583 
584   return false;
585 }
586 
587 bool CallAnalyzer::paramHasAttr(Argument *A, Attribute::AttrKind Attr) {
588   unsigned ArgNo = A->getArgNo();
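  // Argument attribute indices in this interface are 1-based (index 0 refers
  // to the return value), hence the +1.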
589   return CandidateCS.paramHasAttr(ArgNo + 1, Attr);
590 }
591 
592 bool CallAnalyzer::isKnownNonNullInCallee(Value *V) {
593   // Does the *call site* have the NonNull attribute set on an argument?  We
594   // use the attribute on the call site to memoize any analysis done in the
595   // caller. This will also trip if the callee function has a non-null
596   // parameter attribute, but that's a less interesting case because hopefully
597   // the callee would already have been simplified based on that.
598   if (Argument *A = dyn_cast<Argument>(V))
599     if (paramHasAttr(A, Attribute::NonNull))
600       return true;
601 
602   // Is this an alloca in the caller?  This is distinct from the attribute case
603   // above because attributes aren't updated within the inliner itself and we
604   // always want to catch the alloca derived case.
605   if (isAllocaDerivedArg(V))
606     // We can actually predict the result of comparisons between an
607     // alloca-derived value and null. Note that this fires regardless of
608     // SROA firing.
609     return true;
610 
611   return false;
612 }
613 
614 bool CallAnalyzer::allowSizeGrowth(CallSite CS) {
615   // If the normal destination of the invoke or the parent block of the call
616   // site is unreachable-terminated, there is little point in inlining this
617   // unless there is literally zero cost.
618   // FIXME: Note that it is possible that an unreachable-terminated block has a
  // hot entry. For example, in the scenario below, inlining hot_call_X() may
  // be beneficial:
621   // main() {
622   //   hot_call_1();
623   //   ...
624   //   hot_call_N()
625   //   exit(0);
626   // }
627   // For now, we are not handling this corner case here as it is rare in real
  // code. In the future, we should elaborate on this based on BPI and BFI in
  // more general threshold-adjusting heuristics in updateThreshold().
630   Instruction *Instr = CS.getInstruction();
631   if (InvokeInst *II = dyn_cast<InvokeInst>(Instr)) {
632     if (isa<UnreachableInst>(II->getNormalDest()->getTerminator()))
633       return false;
634   } else if (isa<UnreachableInst>(Instr->getParent()->getTerminator()))
635     return false;
636 
637   return true;
638 }
639 
640 void CallAnalyzer::updateThreshold(CallSite CS, Function &Callee) {
641   // If no size growth is allowed for this inlining, set Threshold to 0.
642   if (!allowSizeGrowth(CS)) {
643     Threshold = 0;
644     return;
645   }
646 
647   Function *Caller = CS.getCaller();
648 
649   // return min(A, B) if B is valid.
650   auto MinIfValid = [](int A, Optional<int> B) {
651     return B ? std::min(A, B.getValue()) : A;
652   };
653 
654   // return max(A, B) if B is valid.
655   auto MaxIfValid = [](int A, Optional<int> B) {
656     return B ? std::max(A, B.getValue()) : A;
657   };
658 
659   // Use the OptMinSizeThreshold or OptSizeThreshold knob if they are available
660   // and reduce the threshold if the caller has the necessary attribute.
661   if (Caller->optForMinSize())
662     Threshold = MinIfValid(Threshold, Params.OptMinSizeThreshold);
663   else if (Caller->optForSize())
664     Threshold = MinIfValid(Threshold, Params.OptSizeThreshold);
665 
666   // Adjust the threshold based on inlinehint attribute and profile based
667   // hotness information if the caller does not have MinSize attribute.
668   if (!Caller->optForMinSize()) {
669     if (Callee.hasFnAttribute(Attribute::InlineHint))
670       Threshold = MaxIfValid(Threshold, Params.HintThreshold);
671     if (PSI) {
672       BlockFrequencyInfo *CallerBFI = GetBFI ? &((*GetBFI)(*Caller)) : nullptr;
673       if (PSI->isHotCallSite(CS, CallerBFI)) {
674         DEBUG(dbgs() << "Hot callsite.\n");
675         Threshold = MaxIfValid(Threshold, Params.HotCallSiteThreshold);
676       } else if (PSI->isFunctionEntryHot(&Callee)) {
677         DEBUG(dbgs() << "Hot callee.\n");
        // If callsite hotness cannot be determined, we may still know
679         // that the callee is hot and treat it as a weaker hint for threshold
680         // increase.
681         Threshold = MaxIfValid(Threshold, Params.HintThreshold);
682       } else if (PSI->isColdCallSite(CS, CallerBFI)) {
683         DEBUG(dbgs() << "Cold callsite.\n");
684         Threshold = MinIfValid(Threshold, Params.ColdCallSiteThreshold);
685       } else if (PSI->isFunctionEntryCold(&Callee)) {
686         DEBUG(dbgs() << "Cold callee.\n");
687         Threshold = MinIfValid(Threshold, Params.ColdThreshold);
688       }
689     }
690   }
691 
692   // Finally, take the target-specific inlining threshold multiplier into
693   // account.
694   Threshold *= TTI.getInliningThresholdMultiplier();
695 }
696 
697 bool CallAnalyzer::visitCmpInst(CmpInst &I) {
698   Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
699   // First try to handle simplified comparisons.
700   if (!isa<Constant>(LHS))
701     if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
702       LHS = SimpleLHS;
703   if (!isa<Constant>(RHS))
704     if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
705       RHS = SimpleRHS;
706   if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
707     if (Constant *CRHS = dyn_cast<Constant>(RHS))
708       if (Constant *C =
709               ConstantExpr::getCompare(I.getPredicate(), CLHS, CRHS)) {
710         SimplifiedValues[&I] = C;
711         return true;
712       }
713   }
714 
715   if (I.getOpcode() == Instruction::FCmp)
716     return false;
717 
718   // Otherwise look for a comparison between constant offset pointers with
719   // a common base.
720   Value *LHSBase, *RHSBase;
721   APInt LHSOffset, RHSOffset;
722   std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
723   if (LHSBase) {
724     std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
725     if (RHSBase && LHSBase == RHSBase) {
726       // We have common bases, fold the icmp to a constant based on the
727       // offsets.
728       Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
729       Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
730       if (Constant *C = ConstantExpr::getICmp(I.getPredicate(), CLHS, CRHS)) {
731         SimplifiedValues[&I] = C;
732         ++NumConstantPtrCmps;
733         return true;
734       }
735     }
736   }
737 
738   // If the comparison is an equality comparison with null, we can simplify it
  // if we know the value (argument) can't be null.
740   if (I.isEquality() && isa<ConstantPointerNull>(I.getOperand(1)) &&
741       isKnownNonNullInCallee(I.getOperand(0))) {
742     bool IsNotEqual = I.getPredicate() == CmpInst::ICMP_NE;
743     SimplifiedValues[&I] = IsNotEqual ? ConstantInt::getTrue(I.getType())
744                                       : ConstantInt::getFalse(I.getType());
745     return true;
746   }
747   // Finally check for SROA candidates in comparisons.
748   Value *SROAArg;
749   DenseMap<Value *, int>::iterator CostIt;
750   if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
751     if (isa<ConstantPointerNull>(I.getOperand(1))) {
752       accumulateSROACost(CostIt, InlineConstants::InstrCost);
753       return true;
754     }
755 
756     disableSROA(CostIt);
757   }
758 
759   return false;
760 }
761 
762 bool CallAnalyzer::visitSub(BinaryOperator &I) {
763   // Try to handle a special case: we can fold computing the difference of two
764   // constant-related pointers.
765   Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
766   Value *LHSBase, *RHSBase;
767   APInt LHSOffset, RHSOffset;
768   std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
769   if (LHSBase) {
770     std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
771     if (RHSBase && LHSBase == RHSBase) {
772       // We have common bases, fold the subtract to a constant based on the
773       // offsets.
774       Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
775       Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
776       if (Constant *C = ConstantExpr::getSub(CLHS, CRHS)) {
777         SimplifiedValues[&I] = C;
778         ++NumConstantPtrDiffs;
779         return true;
780       }
781     }
782   }
783 
784   // Otherwise, fall back to the generic logic for simplifying and handling
785   // instructions.
786   return Base::visitSub(I);
787 }
788 
789 bool CallAnalyzer::visitBinaryOperator(BinaryOperator &I) {
790   Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
791   const DataLayout &DL = F.getParent()->getDataLayout();
792   if (!isa<Constant>(LHS))
793     if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
794       LHS = SimpleLHS;
795   if (!isa<Constant>(RHS))
796     if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
797       RHS = SimpleRHS;
798   Value *SimpleV = nullptr;
799   if (auto FI = dyn_cast<FPMathOperator>(&I))
800     SimpleV =
801         SimplifyFPBinOp(I.getOpcode(), LHS, RHS, FI->getFastMathFlags(), DL);
802   else
803     SimpleV = SimplifyBinOp(I.getOpcode(), LHS, RHS, DL);
804 
805   if (Constant *C = dyn_cast_or_null<Constant>(SimpleV)) {
806     SimplifiedValues[&I] = C;
807     return true;
808   }
809 
810   // Disable any SROA on arguments to arbitrary, unsimplified binary operators.
811   disableSROA(LHS);
812   disableSROA(RHS);
813 
814   return false;
815 }
816 
817 bool CallAnalyzer::visitLoad(LoadInst &I) {
818   Value *SROAArg;
819   DenseMap<Value *, int>::iterator CostIt;
820   if (lookupSROAArgAndCost(I.getPointerOperand(), SROAArg, CostIt)) {
821     if (I.isSimple()) {
822       accumulateSROACost(CostIt, InlineConstants::InstrCost);
823       return true;
824     }
825 
826     disableSROA(CostIt);
827   }
828 
829   return false;
830 }
831 
832 bool CallAnalyzer::visitStore(StoreInst &I) {
833   Value *SROAArg;
834   DenseMap<Value *, int>::iterator CostIt;
835   if (lookupSROAArgAndCost(I.getPointerOperand(), SROAArg, CostIt)) {
836     if (I.isSimple()) {
837       accumulateSROACost(CostIt, InlineConstants::InstrCost);
838       return true;
839     }
840 
841     disableSROA(CostIt);
842   }
843 
844   return false;
845 }
846 
847 bool CallAnalyzer::visitExtractValue(ExtractValueInst &I) {
848   // Constant folding for extract value is trivial.
849   Constant *C = dyn_cast<Constant>(I.getAggregateOperand());
850   if (!C)
851     C = SimplifiedValues.lookup(I.getAggregateOperand());
852   if (C) {
853     SimplifiedValues[&I] = ConstantExpr::getExtractValue(C, I.getIndices());
854     return true;
855   }
856 
857   // SROA can look through these but give them a cost.
858   return false;
859 }
860 
861 bool CallAnalyzer::visitInsertValue(InsertValueInst &I) {
862   // Constant folding for insert value is trivial.
863   Constant *AggC = dyn_cast<Constant>(I.getAggregateOperand());
864   if (!AggC)
865     AggC = SimplifiedValues.lookup(I.getAggregateOperand());
866   Constant *InsertedC = dyn_cast<Constant>(I.getInsertedValueOperand());
867   if (!InsertedC)
868     InsertedC = SimplifiedValues.lookup(I.getInsertedValueOperand());
869   if (AggC && InsertedC) {
870     SimplifiedValues[&I] =
871         ConstantExpr::getInsertValue(AggC, InsertedC, I.getIndices());
872     return true;
873   }
874 
875   // SROA can look through these but give them a cost.
876   return false;
877 }
878 
879 /// \brief Try to simplify a call site.
880 ///
881 /// Takes a concrete function and callsite and tries to actually simplify it by
882 /// analyzing the arguments and call itself with instsimplify. Returns true if
883 /// it has simplified the callsite to some other entity (a constant), making it
884 /// free.
885 bool CallAnalyzer::simplifyCallSite(Function *F, CallSite CS) {
886   // FIXME: Using the instsimplify logic directly for this is inefficient
887   // because we have to continually rebuild the argument list even when no
888   // simplifications can be performed. Until that is fixed with remapping
889   // inside of instsimplify, directly constant fold calls here.
890   if (!canConstantFoldCallTo(F))
891     return false;
892 
893   // Try to re-map the arguments to constants.
894   SmallVector<Constant *, 4> ConstantArgs;
895   ConstantArgs.reserve(CS.arg_size());
896   for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end(); I != E;
897        ++I) {
898     Constant *C = dyn_cast<Constant>(*I);
899     if (!C)
900       C = dyn_cast_or_null<Constant>(SimplifiedValues.lookup(*I));
901     if (!C)
902       return false; // This argument doesn't map to a constant.
903 
904     ConstantArgs.push_back(C);
905   }
906   if (Constant *C = ConstantFoldCall(F, ConstantArgs)) {
907     SimplifiedValues[CS.getInstruction()] = C;
908     return true;
909   }
910 
911   return false;
912 }
913 
914 bool CallAnalyzer::visitCallSite(CallSite CS) {
915   if (CS.hasFnAttr(Attribute::ReturnsTwice) &&
916       !F.hasFnAttribute(Attribute::ReturnsTwice)) {
917     // This aborts the entire analysis.
918     ExposesReturnsTwice = true;
919     return false;
920   }
921   if (CS.isCall() && cast<CallInst>(CS.getInstruction())->cannotDuplicate())
922     ContainsNoDuplicateCall = true;
923 
924   if (Function *F = CS.getCalledFunction()) {
925     // When we have a concrete function, first try to simplify it directly.
926     if (simplifyCallSite(F, CS))
927       return true;
928 
929     // Next check if it is an intrinsic we know about.
930     // FIXME: Lift this into part of the InstVisitor.
931     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
932       switch (II->getIntrinsicID()) {
933       default:
934         return Base::visitCallSite(CS);
935 
936       case Intrinsic::load_relative:
937         // This is normally lowered to 4 LLVM instructions.
938         Cost += 3 * InlineConstants::InstrCost;
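        // We charge 3 here; the base per-instruction cost applied when we
        // return false accounts for the fourth.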
939         return false;
940 
941       case Intrinsic::memset:
942       case Intrinsic::memcpy:
943       case Intrinsic::memmove:
944         // SROA can usually chew through these intrinsics, but they aren't free.
945         return false;
946       case Intrinsic::localescape:
947         HasFrameEscape = true;
948         return false;
949       }
950     }
951 
952     if (F == CS.getInstruction()->getParent()->getParent()) {
953       // This flag will fully abort the analysis, so don't bother with anything
954       // else.
955       IsRecursiveCall = true;
956       return false;
957     }
958 
959     if (TTI.isLoweredToCall(F)) {
      // We account for an average of one setup instruction per call argument
      // here.
962       Cost += CS.arg_size() * InlineConstants::InstrCost;
963 
964       // Everything other than inline ASM will also have a significant cost
965       // merely from making the call.
966       if (!isa<InlineAsm>(CS.getCalledValue()))
967         Cost += InlineConstants::CallPenalty;
968     }
969 
970     return Base::visitCallSite(CS);
971   }
972 
973   // Otherwise we're in a very special case -- an indirect function call. See
974   // if we can be particularly clever about this.
975   Value *Callee = CS.getCalledValue();
976 
  // First, pay the price of the argument setup. We account for an average of
  // one setup instruction per call argument here.
979   Cost += CS.arg_size() * InlineConstants::InstrCost;
980 
981   // Next, check if this happens to be an indirect function call to a known
982   // function in this inline context. If not, we've done all we can.
983   Function *F = dyn_cast_or_null<Function>(SimplifiedValues.lookup(Callee));
984   if (!F)
985     return Base::visitCallSite(CS);
986 
987   // If we have a constant that we are calling as a function, we can peer
988   // through it and see the function target. This happens not infrequently
989   // during devirtualization and so we want to give it a hefty bonus for
990   // inlining, but cap that bonus in the event that inlining wouldn't pan
991   // out. Pretend to inline the function, with a custom threshold.
992   auto IndirectCallParams = Params;
993   IndirectCallParams.DefaultThreshold = InlineConstants::IndirectCallThreshold;
994   CallAnalyzer CA(TTI, GetAssumptionCache, GetBFI, PSI, *F, CS,
995                   IndirectCallParams);
996   if (CA.analyzeCall(CS)) {
997     // We were able to inline the indirect call! Subtract the cost from the
998     // threshold to get the bonus we want to apply, but don't go below zero.
999     Cost -= std::max(0, CA.getThreshold() - CA.getCost());
1000   }
1001 
1002   return Base::visitCallSite(CS);
1003 }
1004 
1005 bool CallAnalyzer::visitReturnInst(ReturnInst &RI) {
1006   // At least one return instruction will be free after inlining.
1007   bool Free = !HasReturn;
1008   HasReturn = true;
1009   return Free;
1010 }
1011 
1012 bool CallAnalyzer::visitBranchInst(BranchInst &BI) {
1013   // We model unconditional branches as essentially free -- they really
1014   // shouldn't exist at all, but handling them makes the behavior of the
1015   // inliner more regular and predictable. Interestingly, conditional branches
1016   // which will fold away are also free.
1017   return BI.isUnconditional() || isa<ConstantInt>(BI.getCondition()) ||
1018          dyn_cast_or_null<ConstantInt>(
1019              SimplifiedValues.lookup(BI.getCondition()));
1020 }
1021 
1022 bool CallAnalyzer::visitSwitchInst(SwitchInst &SI) {
1023   // We model unconditional switches as free, see the comments on handling
1024   // branches.
1025   if (isa<ConstantInt>(SI.getCondition()))
1026     return true;
1027   if (Value *V = SimplifiedValues.lookup(SI.getCondition()))
1028     if (isa<ConstantInt>(V))
1029       return true;
1030 
1031   // Otherwise, we need to accumulate a cost proportional to the number of
1032   // distinct successor blocks. This fan-out in the CFG cannot be represented
1033   // for free even if we can represent the core switch as a jumptable that
1034   // takes a single instruction.
1035   //
  // NB: In simplify-cfg we convert large switches that are just used to
  // initialize large phi nodes into lookup tables, so this shouldn't prevent
  // inlining those. It will prevent inlining in cases where the optimization
1039   // does not (yet) fire.
1040   SmallPtrSet<BasicBlock *, 8> SuccessorBlocks;
1041   SuccessorBlocks.insert(SI.getDefaultDest());
1042   for (auto I = SI.case_begin(), E = SI.case_end(); I != E; ++I)
1043     SuccessorBlocks.insert(I.getCaseSuccessor());
  // Add a cost proportional to the number of distinct destinations; we model
  // the first as free because of fallthrough.
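  // For example, a switch with a default destination and three other distinct
  // case targets has four successors and is charged 3 * InstrCost here.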
1046   Cost += (SuccessorBlocks.size() - 1) * InlineConstants::InstrCost;
1047   return false;
1048 }
1049 
1050 bool CallAnalyzer::visitIndirectBrInst(IndirectBrInst &IBI) {
  // We never want to inline functions that contain an indirectbr.  This is
  // incorrect because all the blockaddresses (in static global initializers
  // for example) would be referring to the original function, and this
  // indirect jump would jump from the inlined copy of the function into the
  // original function, which is undefined behavior.
1056   // FIXME: This logic isn't really right; we can safely inline functions with
1057   // indirectbr's as long as no other function or global references the
1058   // blockaddress of a block within the current function.
1059   HasIndirectBr = true;
1060   return false;
1061 }
1062 
1063 bool CallAnalyzer::visitResumeInst(ResumeInst &RI) {
1064   // FIXME: It's not clear that a single instruction is an accurate model for
1065   // the inline cost of a resume instruction.
1066   return false;
1067 }
1068 
1069 bool CallAnalyzer::visitCleanupReturnInst(CleanupReturnInst &CRI) {
1070   // FIXME: It's not clear that a single instruction is an accurate model for
1071   // the inline cost of a cleanupret instruction.
1072   return false;
1073 }
1074 
1075 bool CallAnalyzer::visitCatchReturnInst(CatchReturnInst &CRI) {
1076   // FIXME: It's not clear that a single instruction is an accurate model for
1077   // the inline cost of a catchret instruction.
1078   return false;
1079 }
1080 
1081 bool CallAnalyzer::visitUnreachableInst(UnreachableInst &I) {
  // FIXME: It might be reasonable to discount the cost of instructions leading
1083   // to unreachable as they have the lowest possible impact on both runtime and
1084   // code size.
1085   return true; // No actual code is needed for unreachable.
1086 }
1087 
1088 bool CallAnalyzer::visitInstruction(Instruction &I) {
1089   // Some instructions are free. All of the free intrinsics can also be
1090   // handled by SROA, etc.
1091   if (TargetTransformInfo::TCC_Free == TTI.getUserCost(&I))
1092     return true;
1093 
1094   // We found something we don't understand or can't handle. Mark any SROA-able
1095   // values in the operand list as no longer viable.
1096   for (User::op_iterator OI = I.op_begin(), OE = I.op_end(); OI != OE; ++OI)
1097     disableSROA(*OI);
1098 
1099   return false;
1100 }
1101 
1102 /// \brief Analyze a basic block for its contribution to the inline cost.
1103 ///
1104 /// This method walks the analyzer over every instruction in the given basic
1105 /// block and accounts for their cost during inlining at this callsite. It
1106 /// aborts early if the threshold has been exceeded or an impossible to inline
1107 /// construct has been detected. It returns false if inlining is no longer
1108 /// viable, and true if inlining remains viable.
1109 bool CallAnalyzer::analyzeBlock(BasicBlock *BB,
1110                                 SmallPtrSetImpl<const Value *> &EphValues) {
1111   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
    // FIXME: Currently, the number of instructions in a function, regardless of
    // our ability to simplify them to constants or dead code during inlining,
    // is actually used by the vector bonus heuristic. As long as that's true,
1115     // we have to special case debug intrinsics here to prevent differences in
1116     // inlining due to debug symbols. Eventually, the number of unsimplified
1117     // instructions shouldn't factor into the cost computation, but until then,
1118     // hack around it here.
1119     if (isa<DbgInfoIntrinsic>(I))
1120       continue;
1121 
1122     // Skip ephemeral values.
1123     if (EphValues.count(&*I))
1124       continue;
1125 
1126     ++NumInstructions;
1127     if (isa<ExtractElementInst>(I) || I->getType()->isVectorTy())
1128       ++NumVectorInstructions;
1129 
    // If the instruction is floating point and the target says this operation
    // is expensive (or the function has the "use-soft-float" attribute), it
    // may eventually become a library call. Treat the cost as such.
1133     if (I->getType()->isFloatingPointTy()) {
1134       bool hasSoftFloatAttr = false;
1135 
1136       // If the function has the "use-soft-float" attribute, mark it as
1137       // expensive.
1138       if (F.hasFnAttribute("use-soft-float")) {
1139         Attribute Attr = F.getFnAttribute("use-soft-float");
1140         StringRef Val = Attr.getValueAsString();
1141         if (Val == "true")
1142           hasSoftFloatAttr = true;
1143       }
1144 
1145       if (TTI.getFPOpCost(I->getType()) == TargetTransformInfo::TCC_Expensive ||
1146           hasSoftFloatAttr)
1147         Cost += InlineConstants::CallPenalty;
1148     }
1149 
1150     // If the instruction simplified to a constant, there is no cost to this
1151     // instruction. Visit the instructions using our InstVisitor to account for
1152     // all of the per-instruction logic. The visit tree returns true if we
1153     // consumed the instruction in any way, and false if the instruction's base
1154     // cost should count against inlining.
1155     if (Base::visit(&*I))
1156       ++NumInstructionsSimplified;
1157     else
1158       Cost += InlineConstants::InstrCost;
1159 
    // If visiting this instruction detected an uninlinable pattern, abort.
1161     if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca ||
1162         HasIndirectBr || HasFrameEscape)
1163       return false;
1164 
1165     // If the caller is a recursive function then we don't want to inline
1166     // functions which allocate a lot of stack space because it would increase
1167     // the caller stack usage dramatically.
1168     if (IsCallerRecursive &&
1169         AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller)
1170       return false;
1171 
    // Check if we've passed the maximum possible threshold so we don't spin in
1173     // huge basic blocks that will never inline.
1174     if (Cost > Threshold)
1175       return false;
1176   }
1177 
1178   return true;
1179 }
1180 
1181 /// \brief Compute the base pointer and cumulative constant offsets for V.
1182 ///
1183 /// This strips all constant offsets off of V, leaving it the base pointer, and
1184 /// accumulates the total constant offset applied in the returned constant. It
/// returns null if V is not a pointer, and returns the constant '0' if there
/// are no constant offsets applied.
1187 ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
1188   if (!V->getType()->isPointerTy())
1189     return nullptr;
1190 
1191   const DataLayout &DL = F.getParent()->getDataLayout();
1192   unsigned IntPtrWidth = DL.getPointerSizeInBits();
1193   APInt Offset = APInt::getNullValue(IntPtrWidth);
1194 
1195   // Even though we don't look through PHI nodes, we could be called on an
1196   // instruction in an unreachable block, which may be on a cycle.
1197   SmallPtrSet<Value *, 4> Visited;
1198   Visited.insert(V);
1199   do {
1200     if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
1201       if (!GEP->isInBounds() || !accumulateGEPOffset(*GEP, Offset))
1202         return nullptr;
1203       V = GEP->getPointerOperand();
1204     } else if (Operator::getOpcode(V) == Instruction::BitCast) {
1205       V = cast<Operator>(V)->getOperand(0);
1206     } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1207       if (GA->isInterposable())
1208         break;
1209       V = GA->getAliasee();
1210     } else {
1211       break;
1212     }
1213     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
1214   } while (Visited.insert(V).second);
1215 
1216   Type *IntPtrTy = DL.getIntPtrType(V->getContext());
1217   return cast<ConstantInt>(ConstantInt::get(IntPtrTy, Offset));
1218 }
1219 
1220 /// \brief Analyze a call site for potential inlining.
1221 ///
1222 /// Returns true if inlining this call is viable, and false if it is not
1223 /// viable. It computes the cost and adjusts the threshold based on numerous
1224 /// factors and heuristics. If this method returns false but the computed cost
1225 /// is below the computed threshold, then inlining was forcibly disabled by
1226 /// some artifact of the routine.
1227 bool CallAnalyzer::analyzeCall(CallSite CS) {
1228   ++NumCallsAnalyzed;
1229 
1230   // Perform some tweaks to the cost and threshold based on the direct
1231   // callsite information.
1232 
1233   // We want to more aggressively inline vector-dense kernels, so up the
1234   // threshold, and we'll lower it if the % of vector instructions gets too
  // low. Note that these bonuses are somewhat arbitrary and evolved over time
1236   // by accident as much as because they are principled bonuses.
1237   //
1238   // FIXME: It would be nice to remove all such bonuses. At least it would be
1239   // nice to base the bonus values on something more scientific.
1240   assert(NumInstructions == 0);
1241   assert(NumVectorInstructions == 0);
1242 
1243   // Update the threshold based on callsite properties
1244   updateThreshold(CS, F);
1245 
1246   FiftyPercentVectorBonus = 3 * Threshold / 2;
1247   TenPercentVectorBonus = 3 * Threshold / 4;
1248   const DataLayout &DL = F.getParent()->getDataLayout();
1249 
1250   // Track whether the post-inlining function would have more than one basic
1251   // block. A single basic block is often intended for inlining. Balloon the
1252   // threshold by 50% until we pass the single-BB phase.
1253   bool SingleBB = true;
1254   int SingleBBBonus = Threshold / 2;
1255 
1256   // Speculatively apply all possible bonuses to Threshold. If cost exceeds
1257   // this Threshold any time, and cost cannot decrease, we can stop processing
1258   // the rest of the function body.
1259   Threshold += (SingleBBBonus + FiftyPercentVectorBonus);
1260 
1261   // Give out bonuses per argument, as the instructions setting them up will
1262   // be gone after inlining.
1263   for (unsigned I = 0, E = CS.arg_size(); I != E; ++I) {
1264     if (CS.isByValArgument(I)) {
1265       // We approximate the number of loads and stores needed by dividing the
1266       // size of the byval type by the target's pointer size.
1267       PointerType *PTy = cast<PointerType>(CS.getArgument(I)->getType());
1268       unsigned TypeSize = DL.getTypeSizeInBits(PTy->getElementType());
1269       unsigned PointerSize = DL.getPointerSizeInBits();
1270       // Ceiling division.
1271       unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;
1272 
1273       // If it generates more than 8 stores it is likely to be expanded as an
1274       // inline memcpy so we take that as an upper bound. Otherwise we assume
1275       // one load and one store per word copied.
1276       // FIXME: The maxStoresPerMemcpy setting from the target should be used
1277       // here instead of a magic number of 8, but it's not available via
1278       // DataLayout.
1279       NumStores = std::min(NumStores, 8U);
1280 
1281       Cost -= 2 * NumStores * InlineConstants::InstrCost;
1282     } else {
1283       // For non-byval arguments subtract off one instruction per call
1284       // argument.
1285       Cost -= InlineConstants::InstrCost;
1286     }
1287   }
1288   // The call instruction also disappears after inlining.
1289   Cost -= InlineConstants::InstrCost + InlineConstants::CallPenalty;
1290 
1291   // If there is only one call of the function, and it has internal linkage,
1292   // the cost of inlining it drops dramatically.
1293   bool OnlyOneCallAndLocalLinkage =
1294       F.hasLocalLinkage() && F.hasOneUse() && &F == CS.getCalledFunction();
1295   if (OnlyOneCallAndLocalLinkage)
1296     Cost -= InlineConstants::LastCallToStaticBonus;
1297 
1298   // If this function uses the coldcc calling convention, prefer not to inline
1299   // it.
1300   if (F.getCallingConv() == CallingConv::Cold)
1301     Cost += InlineConstants::ColdccPenalty;
1302 
1303   // Check if we're done. This can happen due to bonuses and penalties.
1304   if (Cost > Threshold)
1305     return false;
1306 
1307   if (F.empty())
1308     return true;
1309 
1310   Function *Caller = CS.getInstruction()->getParent()->getParent();
1311   // Check if the caller function is recursive itself.
1312   for (User *U : Caller->users()) {
1313     CallSite Site(U);
1314     if (!Site)
1315       continue;
1316     Instruction *I = Site.getInstruction();
1317     if (I->getParent()->getParent() == Caller) {
1318       IsCallerRecursive = true;
1319       break;
1320     }
1321   }
1322 
1323   // Populate our simplified values by mapping from function arguments to call
1324   // arguments with known important simplifications.
1325   CallSite::arg_iterator CAI = CS.arg_begin();
1326   for (Function::arg_iterator FAI = F.arg_begin(), FAE = F.arg_end();
1327        FAI != FAE; ++FAI, ++CAI) {
1328     assert(CAI != CS.arg_end());
1329     if (Constant *C = dyn_cast<Constant>(CAI))
1330       SimplifiedValues[&*FAI] = C;
1331 
1332     Value *PtrArg = *CAI;
1333     if (ConstantInt *C = stripAndComputeInBoundsConstantOffsets(PtrArg)) {
1334       ConstantOffsetPtrs[&*FAI] = std::make_pair(PtrArg, C->getValue());
1335 
1336       // We can SROA any pointer arguments derived from alloca instructions.
1337       if (isa<AllocaInst>(PtrArg)) {
1338         SROAArgValues[&*FAI] = PtrArg;
1339         SROAArgCosts[PtrArg] = 0;
1340       }
1341     }
1342   }
1343   NumConstantArgs = SimplifiedValues.size();
1344   NumConstantOffsetPtrArgs = ConstantOffsetPtrs.size();
1345   NumAllocaArgs = SROAArgValues.size();
1346 
1347   // FIXME: If a caller has multiple calls to a callee, we end up recomputing
1348   // the ephemeral values multiple times (and they're completely determined by
1349   // the callee, so this is purely duplicate work).
1350   SmallPtrSet<const Value *, 32> EphValues;
1351   CodeMetrics::collectEphemeralValues(&F, &GetAssumptionCache(F), EphValues);
1352 
1353   // The worklist of live basic blocks in the callee *after* inlining. We avoid
1354   // adding basic blocks of the callee which can be proven to be dead for this
1355   // particular call site in order to get more accurate cost estimates. This
1356   // requires a somewhat heavyweight iteration pattern: we need to walk the
1357   // basic blocks in a breadth-first order as we insert live successors. To
1358   // accomplish this, prioritizing for small iterations because we exit after
1359   // crossing our threshold, we use a small-size optimized SetVector.
1360   typedef SetVector<BasicBlock *, SmallVector<BasicBlock *, 16>,
1361                     SmallPtrSet<BasicBlock *, 16>>
1362       BBSetVector;
1363   BBSetVector BBWorklist;
1364   BBWorklist.insert(&F.getEntryBlock());
  // Note that we *must not* cache the size; this loop grows the worklist.
1366   for (unsigned Idx = 0; Idx != BBWorklist.size(); ++Idx) {
1367     // Bail out the moment we cross the threshold. This means we'll under-count
1368     // the cost, but only when undercounting doesn't matter.
1369     if (Cost > Threshold)
1370       break;
1371 
1372     BasicBlock *BB = BBWorklist[Idx];
1373     if (BB->empty())
1374       continue;
1375 
1376     // Disallow inlining a blockaddress. A blockaddress only has defined
1377     // behavior for an indirect branch in the same function, and we do not
1378     // currently support inlining indirect branches. But, the inliner may not
1379     // see an indirect branch that ends up being dead code at a particular call
1380     // site. If the blockaddress escapes the function, e.g., via a global
1381     // variable, inlining may lead to an invalid cross-function reference.
1382     if (BB->hasAddressTaken())
1383       return false;
1384 
1385     // Analyze the cost of this block. If we blow through the threshold, this
    // returns false, and we can bail out.
1387     if (!analyzeBlock(BB, EphValues))
1388       return false;
1389 
1390     TerminatorInst *TI = BB->getTerminator();
1391 
    // Add in the live successors by first checking whether we have a
    // terminator that may be simplified based on the values simplified by this
    // call.
1394     if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
1395       if (BI->isConditional()) {
1396         Value *Cond = BI->getCondition();
1397         if (ConstantInt *SimpleCond =
1398                 dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
1399           BBWorklist.insert(BI->getSuccessor(SimpleCond->isZero() ? 1 : 0));
1400           continue;
1401         }
1402       }
1403     } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
1404       Value *Cond = SI->getCondition();
1405       if (ConstantInt *SimpleCond =
1406               dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
1407         BBWorklist.insert(SI->findCaseValue(SimpleCond).getCaseSuccessor());
1408         continue;
1409       }
1410     }
1411 
1412     // If we're unable to select a particular successor, just count all of
1413     // them.
1414     for (unsigned TIdx = 0, TSize = TI->getNumSuccessors(); TIdx != TSize;
1415          ++TIdx)
1416       BBWorklist.insert(TI->getSuccessor(TIdx));
1417 
    // If we had any successors at this point, then post-inlining is likely to
1419     // have them as well. Note that we assume any basic blocks which existed
1420     // due to branches or switches which folded above will also fold after
1421     // inlining.
1422     if (SingleBB && TI->getNumSuccessors() > 1) {
1423       // Take off the bonus we applied to the threshold.
1424       Threshold -= SingleBBBonus;
1425       SingleBB = false;
1426     }
1427   }
1428 
  // If this is a noduplicate call, we can still inline as long as
  // inlining this would cause the removal of the callee (so the instruction
  // is not actually duplicated, just moved).
1432   if (!OnlyOneCallAndLocalLinkage && ContainsNoDuplicateCall)
1433     return false;
1434 
1435   // We applied the maximum possible vector bonus at the beginning. Now,
1436   // subtract the excess bonus, if any, from the Threshold before
1437   // comparing against Cost.
1438   if (NumVectorInstructions <= NumInstructions / 10)
1439     Threshold -= FiftyPercentVectorBonus;
1440   else if (NumVectorInstructions <= NumInstructions / 2)
1441     Threshold -= (FiftyPercentVectorBonus - TenPercentVectorBonus);
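  // Note that we compare against std::max(1, Threshold) below, so a call site
  // whose net cost is zero or negative can still be inlined even when the
  // remaining threshold is zero or negative.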
1442 
1443   return Cost < std::max(1, Threshold);
1444 }
1445 
1446 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1447 /// \brief Dump stats about this call's analysis.
1448 LLVM_DUMP_METHOD void CallAnalyzer::dump() {
1449 #define DEBUG_PRINT_STAT(x) dbgs() << "      " #x ": " << x << "\n"
1450   DEBUG_PRINT_STAT(NumConstantArgs);
1451   DEBUG_PRINT_STAT(NumConstantOffsetPtrArgs);
1452   DEBUG_PRINT_STAT(NumAllocaArgs);
1453   DEBUG_PRINT_STAT(NumConstantPtrCmps);
1454   DEBUG_PRINT_STAT(NumConstantPtrDiffs);
1455   DEBUG_PRINT_STAT(NumInstructionsSimplified);
1456   DEBUG_PRINT_STAT(NumInstructions);
1457   DEBUG_PRINT_STAT(SROACostSavings);
1458   DEBUG_PRINT_STAT(SROACostSavingsLost);
1459   DEBUG_PRINT_STAT(ContainsNoDuplicateCall);
1460   DEBUG_PRINT_STAT(Cost);
1461   DEBUG_PRINT_STAT(Threshold);
1462 #undef DEBUG_PRINT_STAT
1463 }
1464 #endif
1465 
/// \brief Test that two functions either both have or both lack the given
///        attribute.
1468 template <typename AttrKind>
1469 static bool attributeMatches(Function *F1, Function *F2, AttrKind Attr) {
1470   return F1->getFnAttribute(Attr) == F2->getFnAttribute(Attr);
1471 }
1472 
1473 /// \brief Test that there are no attribute conflicts between Caller and Callee
1474 ///        that prevent inlining.
1475 static bool functionsHaveCompatibleAttributes(Function *Caller,
1476                                               Function *Callee,
1477                                               TargetTransformInfo &TTI) {
1478   return TTI.areInlineCompatible(Caller, Callee) &&
1479          AttributeFuncs::areInlineCompatible(*Caller, *Callee);
1480 }
1481 
1482 InlineCost llvm::getInlineCost(
1483     CallSite CS, const InlineParams &Params, TargetTransformInfo &CalleeTTI,
1484     std::function<AssumptionCache &(Function &)> &GetAssumptionCache,
1485     Optional<function_ref<BlockFrequencyInfo &(Function &)>> GetBFI,
1486     ProfileSummaryInfo *PSI) {
1487   return getInlineCost(CS, CS.getCalledFunction(), Params, CalleeTTI,
1488                        GetAssumptionCache, GetBFI, PSI);
1489 }
1490 
1491 InlineCost llvm::getInlineCost(
1492     CallSite CS, Function *Callee, const InlineParams &Params,
1493     TargetTransformInfo &CalleeTTI,
1494     std::function<AssumptionCache &(Function &)> &GetAssumptionCache,
1495     Optional<function_ref<BlockFrequencyInfo &(Function &)>> GetBFI,
1496     ProfileSummaryInfo *PSI) {
1497 
1498   // Cannot inline indirect calls.
1499   if (!Callee)
1500     return llvm::InlineCost::getNever();
1501 
1502   // Calls to functions with always-inline attributes should be inlined
1503   // whenever possible.
1504   if (CS.hasFnAttr(Attribute::AlwaysInline)) {
1505     if (isInlineViable(*Callee))
1506       return llvm::InlineCost::getAlways();
1507     return llvm::InlineCost::getNever();
1508   }
1509 
1510   // Never inline functions with conflicting attributes (unless callee has
1511   // always-inline attribute).
1512   if (!functionsHaveCompatibleAttributes(CS.getCaller(), Callee, CalleeTTI))
1513     return llvm::InlineCost::getNever();
1514 
1515   // Don't inline this call if the caller has the optnone attribute.
1516   if (CS.getCaller()->hasFnAttribute(Attribute::OptimizeNone))
1517     return llvm::InlineCost::getNever();
1518 
1519   // Don't inline functions which can be interposed at link-time.  Don't inline
1520   // functions marked noinline or call sites marked noinline.
1521   // Note: inlining non-exact non-interposable functions is fine, since we know
1522   // we have *a* correct implementation of the source level function.
1523   if (Callee->isInterposable() || Callee->hasFnAttribute(Attribute::NoInline) ||
1524       CS.isNoInline())
1525     return llvm::InlineCost::getNever();
1526 
1527   DEBUG(llvm::dbgs() << "      Analyzing call of " << Callee->getName()
1528                      << "...\n");
1529 
1530   CallAnalyzer CA(CalleeTTI, GetAssumptionCache, GetBFI, PSI, *Callee, CS,
1531                   Params);
1532   bool ShouldInline = CA.analyzeCall(CS);
1533 
1534   DEBUG(CA.dump());
1535 
1536   // Check if there was a reason to force inlining or no inlining.
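  // (These fire when analyzeCall decided the question for a reason unrelated
  // to the cost/threshold comparison, e.g. one of the structural bars to
  // inlining checked during the analysis, such as a blockaddress-taken block.)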
1537   if (!ShouldInline && CA.getCost() < CA.getThreshold())
1538     return InlineCost::getNever();
1539   if (ShouldInline && CA.getCost() >= CA.getThreshold())
1540     return InlineCost::getAlways();
1541 
1542   return llvm::InlineCost::get(CA.getCost(), CA.getThreshold());
1543 }
1544 
1545 bool llvm::isInlineViable(Function &F) {
1546   bool ReturnsTwice = F.hasFnAttribute(Attribute::ReturnsTwice);
1547   for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
1548     // Disallow inlining of functions which contain indirect branches or
1549     // blockaddresses.
1550     if (isa<IndirectBrInst>(BI->getTerminator()) || BI->hasAddressTaken())
1551       return false;
1552 
1553     for (auto &II : *BI) {
1554       CallSite CS(&II);
1555       if (!CS)
1556         continue;
1557 
1558       // Disallow recursive calls.
1559       if (&F == CS.getCalledFunction())
1560         return false;
1561 
1562       // Disallow calls which expose returns-twice to a function not previously
1563       // attributed as such.
1564       if (!ReturnsTwice && CS.isCall() &&
1565           cast<CallInst>(CS.getInstruction())->canReturnTwice())
1566         return false;
1567 
1568       // Disallow inlining functions that call @llvm.localescape. Doing this
1569       // correctly would require major changes to the inliner.
1570       if (CS.getCalledFunction() &&
1571           CS.getCalledFunction()->getIntrinsicID() ==
1572               llvm::Intrinsic::localescape)
1573         return false;
1574     }
1575   }
1576 
1577   return true;
1578 }
1579 
1580 // APIs to create InlineParams based on command line flags and/or other
1581 // parameters.
1582 
1583 InlineParams llvm::getInlineParams(int Threshold) {
1584   InlineParams Params;
1585 
1586   // This field is the threshold to use for a callee by default. This is
1587   // derived from one or more of:
1588   //  * optimization or size-optimization levels,
1589   //  * a value passed to createFunctionInliningPass function, or
1590   //  * the -inline-threshold flag.
1591   //  If the -inline-threshold flag is explicitly specified, that is used
1592   //  irrespective of anything else.
1593   if (InlineThreshold.getNumOccurrences() > 0)
1594     Params.DefaultThreshold = InlineThreshold;
1595   else
1596     Params.DefaultThreshold = Threshold;
1597 
1598   // Set the HintThreshold knob from the -inlinehint-threshold.
1599   Params.HintThreshold = HintThreshold;
1600 
1601   // Set the HotCallSiteThreshold knob from the -hot-callsite-threshold.
1602   Params.HotCallSiteThreshold = HotCallSiteThreshold;
1603 
  // Set the ColdCallSiteThreshold knob from the
  // -inline-cold-callsite-threshold.
1605   Params.ColdCallSiteThreshold = ColdCallSiteThreshold;
1606 
  // Set the OptMinSizeThreshold and OptSizeThreshold params only if the
  // -inline-threshold commandline option is not explicitly given. If that
  // option is present, then its value applies even for callees with size and
  // minsize attributes.
  // If the -inline-threshold is not specified, set the ColdThreshold from the
  // -inlinecold-threshold even if it is not explicitly passed. If
  // -inline-threshold is specified, then -inlinecold-threshold needs to be
  // explicitly specified to set the ColdThreshold knob.
1616   if (InlineThreshold.getNumOccurrences() == 0) {
1617     Params.OptMinSizeThreshold = InlineConstants::OptMinSizeThreshold;
1618     Params.OptSizeThreshold = InlineConstants::OptSizeThreshold;
1619     Params.ColdThreshold = ColdThreshold;
1620   } else if (ColdThreshold.getNumOccurrences() > 0) {
1621     Params.ColdThreshold = ColdThreshold;
1622   }
1623   return Params;
1624 }
1625 
1626 InlineParams llvm::getInlineParams() {
1627   return getInlineParams(InlineThreshold);
1628 }
1629 
1630 // Compute the default threshold for inlining based on the opt level and the
1631 // size opt level.
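// For example, OptLevel 3 (-O3) takes priority over any size level, while
// OptLevel 2 with SizeOptLevel 1 (-Os) yields OptSizeThreshold and
// SizeOptLevel 2 (-Oz) yields OptMinSizeThreshold.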
1632 static int computeThresholdFromOptLevels(unsigned OptLevel,
1633                                          unsigned SizeOptLevel) {
1634   if (OptLevel > 2)
1635     return InlineConstants::OptAggressiveThreshold;
1636   if (SizeOptLevel == 1) // -Os
1637     return InlineConstants::OptSizeThreshold;
1638   if (SizeOptLevel == 2) // -Oz
1639     return InlineConstants::OptMinSizeThreshold;
1640   return InlineThreshold;
1641 }
1642 
1643 InlineParams llvm::getInlineParams(unsigned OptLevel, unsigned SizeOptLevel) {
1644   return getInlineParams(computeThresholdFromOptLevels(OptLevel, SizeOptLevel));
1645 }
1646