//===- InlineCost.cpp - Cost analysis for inliner -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements inline cost analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/InlineCost.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/AssemblyAnnotationWriter.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "inline-cost"

STATISTIC(NumCallsAnalyzed, "Number of call sites analyzed");

static cl::opt<int>
    DefaultThreshold("inlinedefault-threshold", cl::Hidden, cl::init(225),
                     cl::ZeroOrMore,
                     cl::desc("Default amount of inlining to perform"));

static cl::opt<bool> PrintInstructionComments(
    "print-instruction-comments", cl::Hidden, cl::init(false),
    cl::desc("Prints comments for instructions based on inline cost analysis"));

static cl::opt<int> InlineThreshold(
    "inline-threshold", cl::Hidden, cl::init(225), cl::ZeroOrMore,
    cl::desc("Control the amount of inlining to perform (default = 225)"));

static cl::opt<int> HintThreshold(
    "inlinehint-threshold", cl::Hidden, cl::init(325), cl::ZeroOrMore,
    cl::desc("Threshold for inlining functions with inline hint"));

static cl::opt<int>
    ColdCallSiteThreshold("inline-cold-callsite-threshold", cl::Hidden,
                          cl::init(45), cl::ZeroOrMore,
                          cl::desc("Threshold for inlining cold callsites"));

static cl::opt<bool> InlineEnableCostBenefitAnalysis(
    "inline-enable-cost-benefit-analysis", cl::Hidden, cl::init(false),
    cl::desc("Enable the cost-benefit analysis for the inliner"));

static cl::opt<int> InlineSavingsMultiplier(
    "inline-savings-multiplier", cl::Hidden, cl::init(8), cl::ZeroOrMore,
    cl::desc("Multiplier to multiply cycle savings by during inlining"));

static cl::opt<int>
    InlineSizeAllowance("inline-size-allowance", cl::Hidden, cl::init(100),
                        cl::ZeroOrMore,
                        cl::desc("The maximum size of a callee that gets "
                                 "inlined without sufficient cycle savings"));

// We introduce this threshold to help performance of instrumentation-based
// PGO before we actually hook up the inliner with analysis passes such as
// BPI and BFI.
static cl::opt<int> ColdThreshold(
    "inlinecold-threshold", cl::Hidden, cl::init(45), cl::ZeroOrMore,
    cl::desc("Threshold for inlining functions with cold attribute"));

static cl::opt<int>
    HotCallSiteThreshold("hot-callsite-threshold", cl::Hidden, cl::init(3000),
                         cl::ZeroOrMore,
                         cl::desc("Threshold for hot callsites"));

static cl::opt<int> LocallyHotCallSiteThreshold(
    "locally-hot-callsite-threshold", cl::Hidden, cl::init(525), cl::ZeroOrMore,
    cl::desc("Threshold for locally hot callsites"));

static cl::opt<int> ColdCallSiteRelFreq(
    "cold-callsite-rel-freq", cl::Hidden, cl::init(2), cl::ZeroOrMore,
    cl::desc("Maximum block frequency, expressed as a percentage of caller's "
             "entry frequency, for a callsite to be cold in the absence of "
             "profile information."));

static cl::opt<int> HotCallSiteRelFreq(
    "hot-callsite-rel-freq", cl::Hidden, cl::init(60), cl::ZeroOrMore,
    cl::desc("Minimum block frequency, expressed as a multiple of caller's "
             "entry frequency, for a callsite to be hot in the absence of "
             "profile information."));

static cl::opt<bool> OptComputeFullInlineCost(
    "inline-cost-full", cl::Hidden, cl::init(false), cl::ZeroOrMore,
    cl::desc("Compute the full inline cost of a call site even when the cost "
             "exceeds the threshold."));

static cl::opt<bool> InlineCallerSupersetNoBuiltin(
    "inline-caller-superset-nobuiltin", cl::Hidden, cl::init(true),
    cl::ZeroOrMore,
    cl::desc("Allow inlining when caller has a superset of callee's nobuiltin "
             "attributes."));

static cl::opt<bool> DisableGEPConstOperand(
    "disable-gep-const-evaluation", cl::Hidden, cl::init(false),
    cl::desc("Disables evaluation of GetElementPtr with constant operands"));

namespace {
class InlineCostCallAnalyzer;

// This struct is used to store information about the inline cost of a
// particular instruction.
struct InstructionCostDetail {
  int CostBefore = 0;
  int CostAfter = 0;
  int ThresholdBefore = 0;
  int ThresholdAfter = 0;

  int getThresholdDelta() const { return ThresholdAfter - ThresholdBefore; }

  int getCostDelta() const { return CostAfter - CostBefore; }

  bool hasThresholdChanged() const { return ThresholdAfter != ThresholdBefore; }
};
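// For example (illustrative numbers only): if analyzing an instruction raised
// the cost from 100 to 105 and left the threshold untouched, getCostDelta()
// returns 5, getThresholdDelta() returns 0, and hasThresholdChanged() is
// false.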
class InlineCostAnnotationWriter : public AssemblyAnnotationWriter {
private:
  InlineCostCallAnalyzer *const ICCA;

public:
  InlineCostAnnotationWriter(InlineCostCallAnalyzer *ICCA) : ICCA(ICCA) {}
  virtual void emitInstructionAnnot(const Instruction *I,
                                    formatted_raw_ostream &OS) override;
};

/// Carry out call site analysis, in order to evaluate inlinability.
/// NOTE: the type is currently used as an implementation detail of functions
/// such as llvm::getInlineCost. Note the function_ref constructor parameters -
/// the expectation is that they come from the outer scope, from the wrapper
/// functions. If we want to support constructing CallAnalyzer objects where
/// lambdas are provided inline at construction, or where the object needs to
/// otherwise survive past the scope of the provided functions, we need to
/// revisit the argument types.
class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
  typedef InstVisitor<CallAnalyzer, bool> Base;
  friend class InstVisitor<CallAnalyzer, bool>;

protected:
  virtual ~CallAnalyzer() {}
  /// The TargetTransformInfo available for this compilation.
  const TargetTransformInfo &TTI;

  /// Getter for the cache of @llvm.assume intrinsics.
  function_ref<AssumptionCache &(Function &)> GetAssumptionCache;

  /// Getter for BlockFrequencyInfo.
  function_ref<BlockFrequencyInfo &(Function &)> GetBFI;

  /// Profile summary information.
  ProfileSummaryInfo *PSI;

  /// The called function.
  Function &F;

  // Cache the DataLayout since we use it a lot.
  const DataLayout &DL;

  /// The OptimizationRemarkEmitter available for this compilation.
  OptimizationRemarkEmitter *ORE;

  /// The candidate callsite being analyzed. Please do not use this to do
  /// analysis in the caller function; we want the inline cost query to be
  /// easily cacheable. Instead, use the cover function paramHasAttr.
  CallBase &CandidateCall;

  /// Extension points for handling callsite features.
  /// Called before a basic block is analyzed.
  virtual void onBlockStart(const BasicBlock *BB) {}

  /// Called after a basic block has been analyzed.
  virtual void onBlockAnalyzed(const BasicBlock *BB) {}

  /// Called before an instruction is analyzed.
  virtual void onInstructionAnalysisStart(const Instruction *I) {}

  /// Called after an instruction has been analyzed.
  virtual void onInstructionAnalysisFinish(const Instruction *I) {}

  /// Called at the end of the analysis of the callsite. Return the outcome of
  /// the analysis, i.e. 'InlineResult(true)' if the inlining may happen, or
  /// the reason it can't.
  virtual InlineResult finalizeAnalysis() { return InlineResult::success(); }

  /// Called when we're about to start processing a basic block, and every
  /// time we are done processing an instruction. Return true if there is no
  /// point in continuing the analysis (e.g. we've already determined the call
  /// site is too expensive to inline).
  virtual bool shouldStop() { return false; }

  /// Called before the analysis of the callee body starts (with callsite
  /// contexts propagated). It checks callsite-specific information. Return
  /// the reason the analysis can't continue if that's the case, or 'success'
  /// if it may continue.
  virtual InlineResult onAnalysisStart() { return InlineResult::success(); }

  /// Called if the analysis engine decides SROA cannot be done for the given
  /// alloca.
  virtual void onDisableSROA(AllocaInst *Arg) {}

  /// Called when the analysis engine determines load elimination won't
  /// happen.
  virtual void onDisableLoadElimination() {}

  /// Called to account for a call.
  virtual void onCallPenalty() {}

  /// Called to account for the expectation that the inlining would result in
  /// a load elimination.
  virtual void onLoadEliminationOpportunity() {}

  /// Called to account for the cost of argument setup for the Call in the
  /// callee's body (not the callsite currently under analysis).
  virtual void onCallArgumentSetup(const CallBase &Call) {}

  /// Called to account for a load relative intrinsic.
  virtual void onLoadRelativeIntrinsic() {}

  /// Called to account for a lowered call.
  virtual void onLoweredCall(Function *F, CallBase &Call, bool IsIndirectCall) {
  }

  /// Account for a jump table of the given size. Return false to stop further
  /// processing of the switch instruction.
  virtual bool onJumpTable(unsigned JumpTableSize) { return true; }

  /// Account for a case cluster of the given size. Return false to stop
  /// further processing of the instruction.
  virtual bool onCaseCluster(unsigned NumCaseCluster) { return true; }

  /// Called at the end of processing a switch instruction, with the given
  /// number of case clusters.
  virtual void onFinalizeSwitch(unsigned JumpTableSize,
                                unsigned NumCaseCluster) {}

  /// Called to account for any other instruction not specifically accounted
  /// for.
  virtual void onMissedSimplification() {}

  /// Start accounting potential benefits due to SROA for the given alloca.
  virtual void onInitializeSROAArg(AllocaInst *Arg) {}

  /// Account SROA savings for the AllocaInst value.
  virtual void onAggregateSROAUse(AllocaInst *V) {}

  bool handleSROA(Value *V, bool DoNotDisable) {
    // Check for SROA candidates in comparisons.
    if (auto *SROAArg = getSROAArgForValueOrNull(V)) {
      if (DoNotDisable) {
        onAggregateSROAUse(SROAArg);
        return true;
      }
      disableSROAForArg(SROAArg);
    }
    return false;
  }
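  // Illustrative use (hypothetical operands): a comparison visitor can call
  // handleSROA(I.getOperand(0), isa<Constant>(I.getOperand(1))) so that
  // comparing an alloca-derived value against a constant accrues SROA savings
  // instead of disabling them.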
  bool IsCallerRecursive = false;
  bool IsRecursiveCall = false;
  bool ExposesReturnsTwice = false;
  bool HasDynamicAlloca = false;
  bool ContainsNoDuplicateCall = false;
  bool HasReturn = false;
  bool HasIndirectBr = false;
  bool HasUninlineableIntrinsic = false;
  bool InitsVargArgs = false;

  /// Number of bytes allocated statically by the callee.
  uint64_t AllocatedSize = 0;
  unsigned NumInstructions = 0;
  unsigned NumVectorInstructions = 0;

  /// While we walk the potentially-inlined instructions, we build up and
  /// maintain a mapping of simplified values specific to this callsite. The
  /// idea is to propagate any special information we have about arguments to
  /// this call through the inlinable section of the function, and account for
  /// likely simplifications post-inlining. The most important aspect we track
  /// is CFG-altering simplifications -- when we prove a basic block dead,
  /// that can cause dramatic shifts in the cost of inlining a function.
  DenseMap<Value *, Constant *> SimplifiedValues;

  /// Keep track of the values which map back (through function arguments) to
  /// allocas on the caller stack which could be simplified through SROA.
  DenseMap<Value *, AllocaInst *> SROAArgValues;

  /// Keep track of Allocas for which we believe we may get SROA optimization.
  DenseSet<AllocaInst *> EnabledSROAAllocas;

  /// Keep track of values which map to a pointer base and constant offset.
  DenseMap<Value *, std::pair<Value *, APInt>> ConstantOffsetPtrs;

  /// Keep track of dead blocks due to the constant arguments.
  SetVector<BasicBlock *> DeadBlocks;

  /// The mapping of the blocks to their known unique successors due to the
  /// constant arguments.
  DenseMap<BasicBlock *, BasicBlock *> KnownSuccessors;

  /// Model the elimination of repeated loads that is expected to happen
  /// whenever we simplify away the stores that would otherwise prevent their
  /// elimination.
  bool EnableLoadElimination;
  SmallPtrSet<Value *, 16> LoadAddrSet;

  AllocaInst *getSROAArgForValueOrNull(Value *V) const {
    auto It = SROAArgValues.find(V);
    if (It == SROAArgValues.end() ||
        EnabledSROAAllocas.count(It->second) == 0)
      return nullptr;
    return It->second;
  }

  // Custom simplification helper routines.
  bool isAllocaDerivedArg(Value *V);
  void disableSROAForArg(AllocaInst *SROAArg);
  void disableSROA(Value *V);
  void findDeadBlocks(BasicBlock *CurrBB, BasicBlock *NextBB);
  void disableLoadElimination();
  bool isGEPFree(GetElementPtrInst &GEP);
  bool canFoldInboundsGEP(GetElementPtrInst &I);
  bool accumulateGEPOffset(GEPOperator &GEP, APInt &Offset);
  bool simplifyCallSite(Function *F, CallBase &Call);
  template <typename Callable>
  bool simplifyInstruction(Instruction &I, Callable Evaluate);
  ConstantInt *stripAndComputeInBoundsConstantOffsets(Value *&V);

  /// Return true if the given argument to the function being considered for
  /// inlining has the given attribute set either at the call site or the
  /// function declaration. Primarily used to inspect call site specific
  /// attributes since these can be more precise than the ones on the callee
  /// itself.
  bool paramHasAttr(Argument *A, Attribute::AttrKind Attr);

  /// Return true if the given value is known non null within the callee if
  /// inlined through this particular callsite.
  bool isKnownNonNullInCallee(Value *V);

  /// Return true if size growth is allowed when inlining the callee at
  /// \p Call.
  bool allowSizeGrowth(CallBase &Call);

  // Custom analysis routines.
  InlineResult analyzeBlock(BasicBlock *BB,
                            SmallPtrSetImpl<const Value *> &EphValues);

  // Disable several entry points to the visitor so we don't accidentally use
  // them by declaring but not defining them here.
  void visit(Module *);
  void visit(Module &);
  void visit(Function *);
  void visit(Function &);
  void visit(BasicBlock *);
  void visit(BasicBlock &);

  // Provide base case for our instruction visit.
  bool visitInstruction(Instruction &I);

  // Our visit overrides.
  bool visitAlloca(AllocaInst &I);
  bool visitPHI(PHINode &I);
  bool visitGetElementPtr(GetElementPtrInst &I);
  bool visitBitCast(BitCastInst &I);
  bool visitPtrToInt(PtrToIntInst &I);
  bool visitIntToPtr(IntToPtrInst &I);
  bool visitCastInst(CastInst &I);
  bool visitCmpInst(CmpInst &I);
  bool visitSub(BinaryOperator &I);
  bool visitBinaryOperator(BinaryOperator &I);
  bool visitFNeg(UnaryOperator &I);
  bool visitLoad(LoadInst &I);
  bool visitStore(StoreInst &I);
  bool visitExtractValue(ExtractValueInst &I);
  bool visitInsertValue(InsertValueInst &I);
  bool visitCallBase(CallBase &Call);
  bool visitReturnInst(ReturnInst &RI);
  bool visitBranchInst(BranchInst &BI);
  bool visitSelectInst(SelectInst &SI);
  bool visitSwitchInst(SwitchInst &SI);
  bool visitIndirectBrInst(IndirectBrInst &IBI);
  bool visitResumeInst(ResumeInst &RI);
  bool visitCleanupReturnInst(CleanupReturnInst &RI);
  bool visitCatchReturnInst(CatchReturnInst &RI);
  bool visitUnreachableInst(UnreachableInst &I);

public:
  CallAnalyzer(Function &Callee, CallBase &Call,
               const TargetTransformInfo &TTI,
               function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
               function_ref<BlockFrequencyInfo &(Function &)> GetBFI = nullptr,
               ProfileSummaryInfo *PSI = nullptr,
               OptimizationRemarkEmitter *ORE = nullptr)
      : TTI(TTI), GetAssumptionCache(GetAssumptionCache), GetBFI(GetBFI),
        PSI(PSI), F(Callee), DL(F.getParent()->getDataLayout()), ORE(ORE),
        CandidateCall(Call), EnableLoadElimination(true) {}

  InlineResult analyze();

  Optional<Constant *> getSimplifiedValue(Instruction *I) {
    if (SimplifiedValues.find(I) != SimplifiedValues.end())
      return SimplifiedValues[I];
    return None;
  }

  // Keep a bunch of stats about the cost savings found so we can print them
  // out when debugging.
  unsigned NumConstantArgs = 0;
  unsigned NumConstantOffsetPtrArgs = 0;
  unsigned NumAllocaArgs = 0;
  unsigned NumConstantPtrCmps = 0;
  unsigned NumConstantPtrDiffs = 0;
  unsigned NumInstructionsSimplified = 0;

  void dump();
};

// Considering forming a binary search, we should find the number of nodes
// which is the same as the number of comparisons when lowered. For a given
// number of clusters, n, we can define a recursive function, f(n), to find
// the number of nodes in the tree. The recursion is:
//   f(n) = 1 + f(n/2) + f(n - n/2), when n > 3,
//   and f(n) = n, when n <= 3.
// This will lead to a binary tree where the leaves are reached with either
// f(2) or f(3) when n > 3. So, the number of comparisons from leaves should
// be n, while the number of comparisons from non-leaf nodes should be:
//   2^(log2(n) - 1) - 1
//   = 2^log2(n) * 2^-1 - 1
//   = n / 2 - 1.
// Considering comparisons from leaf and non-leaf nodes, we can estimate the
// number of comparisons in a simple closed form:
//   n + n / 2 - 1 = n * 3 / 2 - 1
int64_t getExpectedNumberOfCompare(int NumCaseCluster) {
  return 3 * static_cast<int64_t>(NumCaseCluster) / 2 - 1;
}
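// For example, with n = 8 clusters the recurrence gives
//   f(8) = 1 + 2 * f(4) = 1 + 2 * (1 + 2 * f(2)) = 1 + 2 * (1 + 2 * 2) = 11,
// which matches the closed form: 8 + 8 / 2 - 1 = 3 * 8 / 2 - 1 = 11.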
/// FIXME: if it is necessary to derive from InlineCostCallAnalyzer, note
/// the FIXME in onLoweredCall, when instantiating an InlineCostCallAnalyzer
class InlineCostCallAnalyzer final : public CallAnalyzer {
  const int CostUpperBound = INT_MAX - InlineConstants::InstrCost - 1;
  const bool ComputeFullInlineCost;
  int LoadEliminationCost = 0;
  /// Bonus to be applied when the percentage of vector instructions in the
  /// callee is high (see more details in updateThreshold).
  int VectorBonus = 0;
  /// Bonus to be applied when the callee has only one reachable basic block.
  int SingleBBBonus = 0;

  /// Tunable parameters that control the analysis.
  const InlineParams &Params;

  // This DenseMap stores the delta change in cost and threshold after
  // accounting for the given instruction. The map is filled only with the
  // flag PrintInstructionComments on.
  DenseMap<const Instruction *, InstructionCostDetail> InstructionCostDetailMap;

  /// Upper bound for the inlining cost. Bonuses are applied to account for
  /// speculative "expected profit" of the inlining decision.
  int Threshold = 0;

  /// Attempt to evaluate indirect calls to boost the inline cost.
  const bool BoostIndirectCalls;

  /// Ignore the threshold when finalizing analysis.
  const bool IgnoreThreshold;

  // True if the cost-benefit-analysis-based inliner is enabled.
  const bool CostBenefitAnalysisEnabled;

  /// Inlining cost measured in abstract units, accounts for all the
  /// instructions expected to be executed for a given function invocation.
  /// Instructions that are statically proven to be dead based on call-site
  /// arguments are not counted here.
  int Cost = 0;

  // The cumulative cost at the beginning of the basic block being analyzed.
  // At the end of analyzing each basic block, "Cost - CostAtBBStart"
  // represents the size of that basic block.
  int CostAtBBStart = 0;

  // The static size of live but cold basic blocks. This is "static" in the
  // sense that it's not weighted by profile counts at all.
  int ColdSize = 0;

  // Whether inlining is decided by cost-benefit analysis.
  bool DecidedByCostBenefit = false;

  bool SingleBB = true;

  unsigned SROACostSavings = 0;
  unsigned SROACostSavingsLost = 0;

  /// The mapping of caller Alloca values to their accumulated cost savings.
  /// If we have to disable SROA for one of the allocas, this tells us how
  /// much cost must be added.
  DenseMap<AllocaInst *, int> SROAArgCosts;

  /// Return true if \p Call is a cold callsite.
  bool isColdCallSite(CallBase &Call, BlockFrequencyInfo *CallerBFI);

  /// Update Threshold based on callsite properties such as callee
  /// attributes and callee hotness for PGO builds. The Callee is explicitly
  /// passed to support analyzing indirect calls whose target is inferred by
  /// analysis.
  void updateThreshold(CallBase &Call, Function &Callee);
  /// Return a higher threshold if \p Call is a hot callsite.
  Optional<int> getHotCallSiteThreshold(CallBase &Call,
                                        BlockFrequencyInfo *CallerBFI);

  /// Handle a capped 'int' increment for Cost.
  void addCost(int64_t Inc, int64_t UpperBound = INT_MAX) {
    assert(UpperBound > 0 && UpperBound <= INT_MAX && "invalid upper bound");
    Cost = (int)std::min(UpperBound, Cost + Inc);
  }
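  // Note that the addition is performed in 64 bits and then clamped, so a
  // large Inc with Cost already near INT_MAX saturates at UpperBound instead
  // of overflowing.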
  void onDisableSROA(AllocaInst *Arg) override {
    auto CostIt = SROAArgCosts.find(Arg);
    if (CostIt == SROAArgCosts.end())
      return;
    addCost(CostIt->second);
    SROACostSavings -= CostIt->second;
    SROACostSavingsLost += CostIt->second;
    SROAArgCosts.erase(CostIt);
  }

  void onDisableLoadElimination() override {
    addCost(LoadEliminationCost);
    LoadEliminationCost = 0;
  }
  void onCallPenalty() override { addCost(InlineConstants::CallPenalty); }
  void onCallArgumentSetup(const CallBase &Call) override {
    // Pay the price of the argument setup. We account for an average of 1
    // instruction per call argument setup here.
    addCost(Call.arg_size() * InlineConstants::InstrCost);
  }
  void onLoadRelativeIntrinsic() override {
    // This is normally lowered to 4 LLVM instructions.
    addCost(3 * InlineConstants::InstrCost);
  }
  void onLoweredCall(Function *F, CallBase &Call,
                     bool IsIndirectCall) override {
    // We account for an average of 1 instruction per call argument setup
    // here.
    addCost(Call.arg_size() * InlineConstants::InstrCost);

    // If we have a constant that we are calling as a function, we can peer
    // through it and see the function target. This happens not infrequently
    // during devirtualization and so we want to give it a hefty bonus for
    // inlining, but cap that bonus in the event that inlining wouldn't pan
    // out. Pretend to inline the function, with a custom threshold.
    if (IsIndirectCall && BoostIndirectCalls) {
      auto IndirectCallParams = Params;
      IndirectCallParams.DefaultThreshold =
          InlineConstants::IndirectCallThreshold;
      /// FIXME: if InlineCostCallAnalyzer is derived from, this may need
      /// to instantiate the derived class.
      InlineCostCallAnalyzer CA(*F, Call, IndirectCallParams, TTI,
                                GetAssumptionCache, GetBFI, PSI, ORE, false);
      if (CA.analyze().isSuccess()) {
        // We were able to inline the indirect call! Subtract the cost from
        // the threshold to get the bonus we want to apply, but don't go
        // below zero.
        Cost -= std::max(0, CA.getThreshold() - CA.getCost());
      }
    } else
      // Otherwise simply add the cost for merely making the call.
      addCost(InlineConstants::CallPenalty);
  }

  void onFinalizeSwitch(unsigned JumpTableSize,
                        unsigned NumCaseCluster) override {
    // If suitable for a jump table, consider the cost for the table size and
    // the branch to the destination.
    // The maximum valid cost is increased (to CostUpperBound) in this
    // function.
    if (JumpTableSize) {
      int64_t JTCost = (int64_t)JumpTableSize * InlineConstants::InstrCost +
                       4 * InlineConstants::InstrCost;

      addCost(JTCost, (int64_t)CostUpperBound);
      return;
    }
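    // For illustration: a 16-entry jump table above is charged
    // (16 + 4) * InstrCost, while e.g. 3 case clusters below cost
    // 3 * 2 * InstrCost (one compare plus one branch per cluster).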
    if (NumCaseCluster <= 3) {
      // Suppose a comparison includes one compare and one conditional
      // branch.
      addCost(NumCaseCluster * 2 * InlineConstants::InstrCost);
      return;
    }

    int64_t ExpectedNumberOfCompare =
        getExpectedNumberOfCompare(NumCaseCluster);
    int64_t SwitchCost =
        ExpectedNumberOfCompare * 2 * InlineConstants::InstrCost;

    addCost(SwitchCost, (int64_t)CostUpperBound);
  }
  void onMissedSimplification() override {
    addCost(InlineConstants::InstrCost);
  }

  void onInitializeSROAArg(AllocaInst *Arg) override {
    assert(Arg != nullptr &&
           "Should not initialize SROA costs for null value.");
    SROAArgCosts[Arg] = 0;
  }

  void onAggregateSROAUse(AllocaInst *SROAArg) override {
    auto CostIt = SROAArgCosts.find(SROAArg);
    assert(CostIt != SROAArgCosts.end() &&
           "expected this argument to have a cost");
    CostIt->second += InlineConstants::InstrCost;
    SROACostSavings += InlineConstants::InstrCost;
  }

  void onBlockStart(const BasicBlock *BB) override { CostAtBBStart = Cost; }

  void onBlockAnalyzed(const BasicBlock *BB) override {
    if (CostBenefitAnalysisEnabled) {
      // Keep track of the static size of live but cold basic blocks. For
      // now, we define a cold basic block to be one that's never executed.
      assert(GetBFI && "GetBFI must be available");
      BlockFrequencyInfo *BFI = &(GetBFI(F));
      assert(BFI && "BFI must be available");
      auto ProfileCount = BFI->getBlockProfileCount(BB);
      assert(ProfileCount.hasValue());
      if (ProfileCount.getValue() == 0)
        ColdSize += Cost - CostAtBBStart;
    }

    auto *TI = BB->getTerminator();
    // If we had any successors at this point, then post-inlining is likely
    // to have them as well. Note that we assume any basic blocks which
    // existed due to branches or switches which folded above will also fold
    // after inlining.
    if (SingleBB && TI->getNumSuccessors() > 1) {
      // Take off the bonus we applied to the threshold.
      Threshold -= SingleBBBonus;
      SingleBB = false;
    }
  }

  void onInstructionAnalysisStart(const Instruction *I) override {
    // This function is called to store the initial cost of inlining before
    // the given instruction was assessed.
    if (!PrintInstructionComments)
      return;
    InstructionCostDetailMap[I].CostBefore = Cost;
    InstructionCostDetailMap[I].ThresholdBefore = Threshold;
  }

  void onInstructionAnalysisFinish(const Instruction *I) override {
    // This function is called to find new values of cost and threshold after
    // the instruction has been assessed.
    if (!PrintInstructionComments)
      return;
    InstructionCostDetailMap[I].CostAfter = Cost;
    InstructionCostDetailMap[I].ThresholdAfter = Threshold;
  }

  bool isCostBenefitAnalysisEnabled() {
    if (!PSI || !PSI->hasProfileSummary())
      return false;

    if (!GetBFI)
      return false;

    if (InlineEnableCostBenefitAnalysis.getNumOccurrences()) {
      // Honor the explicit request from the user.
      if (!InlineEnableCostBenefitAnalysis)
        return false;
    } else {
      // Otherwise, require an instrumentation profile.
      if (!PSI->hasInstrumentationProfile())
        return false;
    }

    auto *Caller = CandidateCall.getParent()->getParent();
    if (!Caller->getEntryCount())
      return false;

    BlockFrequencyInfo *CallerBFI = &(GetBFI(*Caller));
    if (!CallerBFI)
      return false;

    // For now, limit to hot call sites.
    if (!PSI->isHotCallSite(CandidateCall, CallerBFI))
      return false;

    // Make sure we have a nonzero entry count.
    auto EntryCount = F.getEntryCount();
    if (!EntryCount || !EntryCount.getCount())
      return false;

    BlockFrequencyInfo *CalleeBFI = &(GetBFI(F));
    if (!CalleeBFI)
      return false;

    return true;
  }

  // Determine whether we should inline the given call site, taking into
  // account both the size cost and the cycle savings. Return None if we
  // don't have sufficient profiling information to make a determination.
  Optional<bool> costBenefitAnalysis() {
    if (!CostBenefitAnalysisEnabled)
      return None;

    // buildInlinerPipeline in the pass builder sets HotCallSiteThreshold to
    // 0 for the prelink phase of the AutoFDO + ThinLTO build. Honor the
    // logic by falling back to the cost-based metric.
    // TODO: Improve this hacky condition.
    if (Threshold == 0)
      return None;

    assert(GetBFI);
    BlockFrequencyInfo *CalleeBFI = &(GetBFI(F));
    assert(CalleeBFI);

    // The cycle savings expressed as the sum of InlineConstants::InstrCost
    // multiplied by the estimated dynamic count of each instruction we can
    // avoid. Savings come from the call site cost, such as argument setup
    // and the call instruction, as well as the instructions that are folded.
    //
    // We use 128-bit APInt here to avoid potential overflow. This variable
    // should stay well below 10^24 (or 2^80) in practice. This "worst" case
    // assumes that we can avoid or fold a billion instructions, each with a
    // profile count of 10^15 -- roughly the number of cycles for a 24-hour
    // period on a 4GHz machine.
    APInt CycleSavings(128, 0);

    for (auto &BB : F) {
      APInt CurrentSavings(128, 0);
      for (auto &I : BB) {
        if (BranchInst *BI = dyn_cast<BranchInst>(&I)) {
          // Count a conditional branch as savings if it becomes
          // unconditional.
          if (BI->isConditional() &&
              dyn_cast_or_null<ConstantInt>(
                  SimplifiedValues.lookup(BI->getCondition()))) {
            CurrentSavings += InlineConstants::InstrCost;
          }
        } else if (Value *V = dyn_cast<Value>(&I)) {
          // Count an instruction as savings if we can fold it.
          if (SimplifiedValues.count(V)) {
            CurrentSavings += InlineConstants::InstrCost;
          }
        }
      }

      auto ProfileCount = CalleeBFI->getBlockProfileCount(&BB);
      assert(ProfileCount.hasValue());
      CurrentSavings *= ProfileCount.getValue();
      CycleSavings += CurrentSavings;
    }

    // Compute the cycle savings per call.
    auto EntryProfileCount = F.getEntryCount();
    assert(EntryProfileCount.hasValue() && EntryProfileCount.getCount());
    auto EntryCount = EntryProfileCount.getCount();
    CycleSavings += EntryCount / 2;
    CycleSavings = CycleSavings.udiv(EntryCount);

    // Compute the total savings for the call site.
    auto *CallerBB = CandidateCall.getParent();
    BlockFrequencyInfo *CallerBFI = &(GetBFI(*(CallerBB->getParent())));
    CycleSavings += getCallsiteCost(this->CandidateCall, DL);
    CycleSavings *= CallerBFI->getBlockProfileCount(CallerBB).getValue();

    // Remove the cost of the cold basic blocks.
    int Size = Cost - ColdSize;

    // Allow tiny callees to be inlined regardless of whether they meet the
    // savings threshold.
    Size = Size > InlineSizeAllowance ? Size - InlineSizeAllowance : 1;

    // Return true if the savings justify the cost of inlining. Specifically,
    // we evaluate the following inequality:
    //
    //  CycleSavings      PSI->getOrCompHotCountThreshold()
    // -------------- >= -----------------------------------
    //       Size              InlineSavingsMultiplier
    //
    // Note that the left hand side is specific to a call site. The right
    // hand side is a constant for the entire executable.
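    // For illustration (hypothetical numbers): with CycleSavings = 1000,
    // Size = 60, InlineSavingsMultiplier = 8 (the default), and a hot-count
    // threshold of 100, we compare 1000 * 8 = 8000 against 100 * 60 = 6000,
    // so the call site would be inlined.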
    APInt LHS = CycleSavings;
    LHS *= InlineSavingsMultiplier;
    APInt RHS(128, PSI->getOrCompHotCountThreshold());
    RHS *= Size;
    return LHS.uge(RHS);
  }

  InlineResult finalizeAnalysis() override {
    // Loops generally act a lot like calls in that they act like barriers to
    // movement, require a certain amount of setup, etc. So when optimising
    // for size, we penalise any call sites that perform loops. We do this
    // after all other costs here, so will likely only be dealing with
    // relatively small functions (and hence DT and LI will hopefully be
    // cheap).
    auto *Caller = CandidateCall.getFunction();
    if (Caller->hasMinSize()) {
      DominatorTree DT(F);
      LoopInfo LI(DT);
      int NumLoops = 0;
      for (Loop *L : LI) {
        // Ignore loops that will not be executed.
        if (DeadBlocks.count(L->getHeader()))
          continue;
        NumLoops++;
      }
      addCost(NumLoops * InlineConstants::CallPenalty);
    }

    // We applied the maximum possible vector bonus at the beginning. Now,
    // subtract the excess bonus, if any, from the Threshold before
    // comparing against Cost.
    if (NumVectorInstructions <= NumInstructions / 10)
      Threshold -= VectorBonus;
    else if (NumVectorInstructions <= NumInstructions / 2)
      Threshold -= VectorBonus / 2;

    if (auto Result = costBenefitAnalysis()) {
      DecidedByCostBenefit = true;
      if (Result.getValue())
        return InlineResult::success();
      else
        return InlineResult::failure("Cost over threshold.");
    }

    if (IgnoreThreshold || Cost < std::max(1, Threshold))
      return InlineResult::success();
    return InlineResult::failure("Cost over threshold.");
  }
  bool shouldStop() override {
    // Bail out the moment we cross the threshold. This means we'll
    // under-count the cost, but only when undercounting doesn't matter.
    return !IgnoreThreshold && Cost >= Threshold && !ComputeFullInlineCost;
  }

  void onLoadEliminationOpportunity() override {
    LoadEliminationCost += InlineConstants::InstrCost;
  }

  InlineResult onAnalysisStart() override {
    // Perform some tweaks to the cost and threshold based on the direct
    // callsite information.

    // We want to more aggressively inline vector-dense kernels, so up the
    // threshold, and we'll lower it if the % of vector instructions gets too
    // low. Note that these bonuses are somewhat arbitrary and evolved over
    // time by accident as much as because they are principled bonuses.
    //
    // FIXME: It would be nice to remove all such bonuses. At least it would
    // be nice to base the bonus values on something more scientific.
    assert(NumInstructions == 0);
    assert(NumVectorInstructions == 0);

    // Update the threshold based on callsite properties.
    updateThreshold(CandidateCall, F);

    // While Threshold depends on commandline options that can take negative
    // values, we want to enforce the invariant that the computed threshold
    // and bonuses are non-negative.
    assert(Threshold >= 0);
    assert(SingleBBBonus >= 0);
    assert(VectorBonus >= 0);

    // Speculatively apply all possible bonuses to Threshold. If cost exceeds
    // this Threshold at any time, and cost cannot decrease, we can stop
    // processing the rest of the function body.
    Threshold += (SingleBBBonus + VectorBonus);

    // Give out bonuses for the callsite, as the instructions setting them up
    // will be gone after inlining.
    addCost(-getCallsiteCost(this->CandidateCall, DL));

    // If this function uses the coldcc calling convention, prefer not to
    // inline it.
    if (F.getCallingConv() == CallingConv::Cold)
      Cost += InlineConstants::ColdccPenalty;

    // Check if we're done. This can happen due to bonuses and penalties.
    if (Cost >= Threshold && !ComputeFullInlineCost)
      return InlineResult::failure("high cost");

    return InlineResult::success();
  }

public:
  InlineCostCallAnalyzer(
      Function &Callee, CallBase &Call, const InlineParams &Params,
      const TargetTransformInfo &TTI,
      function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
      function_ref<BlockFrequencyInfo &(Function &)> GetBFI = nullptr,
      ProfileSummaryInfo *PSI = nullptr,
      OptimizationRemarkEmitter *ORE = nullptr, bool BoostIndirect = true,
      bool IgnoreThreshold = false)
      : CallAnalyzer(Callee, Call, TTI, GetAssumptionCache, GetBFI, PSI, ORE),
        ComputeFullInlineCost(OptComputeFullInlineCost ||
                              Params.ComputeFullInlineCost || ORE ||
                              isCostBenefitAnalysisEnabled()),
        Params(Params), Threshold(Params.DefaultThreshold),
        BoostIndirectCalls(BoostIndirect), IgnoreThreshold(IgnoreThreshold),
        CostBenefitAnalysisEnabled(isCostBenefitAnalysisEnabled()),
        Writer(this) {}

  /// Annotation Writer for instruction details.
  InlineCostAnnotationWriter Writer;

  void dump();

  // Prints the same analysis as dump(), but its definition is not dependent
  // on the build.
  void print();

  Optional<InstructionCostDetail> getCostDetails(const Instruction *I) {
    if (InstructionCostDetailMap.find(I) != InstructionCostDetailMap.end())
      return InstructionCostDetailMap[I];
    return None;
  }

  virtual ~InlineCostCallAnalyzer() {}
  int getThreshold() { return Threshold; }
  int getCost() { return Cost; }
  bool wasDecidedByCostBenefit() { return DecidedByCostBenefit; }
};

class InlineCostFeaturesAnalyzer final : public CallAnalyzer {
private:
  InlineCostFeatures Cost = {};

  // FIXME: These constants are taken from the heuristic-based cost visitor.
  // These should be removed entirely in a later revision to avoid reliance
  // on heuristics in the ML inliner.
  static constexpr int JTCostMultiplier = 4;
  static constexpr int CaseClusterCostMultiplier = 2;
  static constexpr int SwitchCostMultiplier = 2;

  // FIXME: These are taken from the heuristic-based cost visitor: we should
  // eventually abstract these to the CallAnalyzer to avoid duplication.
  unsigned SROACostSavingOpportunities = 0;
  int VectorBonus = 0;
  int SingleBBBonus = 0;
  int Threshold = 5;

  DenseMap<AllocaInst *, unsigned> SROACosts;

  void increment(InlineCostFeatureIndex Feature, int64_t Delta = 1) {
    Cost[static_cast<size_t>(Feature)] += Delta;
  }

  void set(InlineCostFeatureIndex Feature, int64_t Value) {
    Cost[static_cast<size_t>(Feature)] = Value;
  }

  void onDisableSROA(AllocaInst *Arg) override {
    auto CostIt = SROACosts.find(Arg);
    if (CostIt == SROACosts.end())
      return;

    increment(InlineCostFeatureIndex::SROALosses, CostIt->second);
    SROACostSavingOpportunities -= CostIt->second;
    SROACosts.erase(CostIt);
  }

  void onDisableLoadElimination() override {
    set(InlineCostFeatureIndex::LoadElimination, 1);
  }

  void onCallPenalty() override {
    increment(InlineCostFeatureIndex::CallPenalty,
              InlineConstants::CallPenalty);
  }

  void onCallArgumentSetup(const CallBase &Call) override {
    increment(InlineCostFeatureIndex::CallArgumentSetup,
              Call.arg_size() * InlineConstants::InstrCost);
  }

  void onLoadRelativeIntrinsic() override {
    increment(InlineCostFeatureIndex::LoadRelativeIntrinsic,
              3 * InlineConstants::InstrCost);
  }

  void onLoweredCall(Function *F, CallBase &Call,
                     bool IsIndirectCall) override {
    increment(InlineCostFeatureIndex::LoweredCallArgSetup,
              Call.arg_size() * InlineConstants::InstrCost);

    if (IsIndirectCall) {
      InlineParams IndirectCallParams = {/* DefaultThreshold*/ 0,
                                         /*HintThreshold*/ {},
                                         /*ColdThreshold*/ {},
                                         /*OptSizeThreshold*/ {},
                                         /*OptMinSizeThreshold*/ {},
                                         /*HotCallSiteThreshold*/ {},
                                         /*LocallyHotCallSiteThreshold*/ {},
                                         /*ColdCallSiteThreshold*/ {},
                                         /*ComputeFullInlineCost*/ true,
                                         /*EnableDeferral*/ true};
      IndirectCallParams.DefaultThreshold =
          InlineConstants::IndirectCallThreshold;

      InlineCostCallAnalyzer CA(*F, Call, IndirectCallParams, TTI,
                                GetAssumptionCache, GetBFI, PSI, ORE, false,
                                true);
      if (CA.analyze().isSuccess()) {
        increment(InlineCostFeatureIndex::NestedInlineCostEstimate,
                  CA.getCost());
        increment(InlineCostFeatureIndex::NestedInlines, 1);
      }
    } else {
      onCallPenalty();
    }
  }

  void onFinalizeSwitch(unsigned JumpTableSize,
                        unsigned NumCaseCluster) override {
    if (JumpTableSize) {
      int64_t JTCost =
          static_cast<int64_t>(JumpTableSize) * InlineConstants::InstrCost +
          JTCostMultiplier * InlineConstants::InstrCost;
      increment(InlineCostFeatureIndex::JumpTablePenalty, JTCost);
      return;
    }

    if (NumCaseCluster <= 3) {
      increment(InlineCostFeatureIndex::CaseClusterPenalty,
                NumCaseCluster * CaseClusterCostMultiplier *
                    InlineConstants::InstrCost);
      return;
    }

    int64_t ExpectedNumberOfCompare =
        getExpectedNumberOfCompare(NumCaseCluster);

    int64_t SwitchCost = ExpectedNumberOfCompare * SwitchCostMultiplier *
                         InlineConstants::InstrCost;
    increment(InlineCostFeatureIndex::SwitchPenalty, SwitchCost);
  }

  void onMissedSimplification() override {
    increment(InlineCostFeatureIndex::UnsimplifiedCommonInstructions,
              InlineConstants::InstrCost);
  }

  void onInitializeSROAArg(AllocaInst *Arg) override { SROACosts[Arg] = 0; }
  void onAggregateSROAUse(AllocaInst *Arg) override {
    SROACosts.find(Arg)->second += InlineConstants::InstrCost;
    SROACostSavingOpportunities += InlineConstants::InstrCost;
  }
  void onBlockAnalyzed(const BasicBlock *BB) override {
    if (BB->getTerminator()->getNumSuccessors() > 1)
      set(InlineCostFeatureIndex::IsMultipleBlocks, 1);
    Threshold -= SingleBBBonus;
  }

  InlineResult finalizeAnalysis() override {
    auto *Caller = CandidateCall.getFunction();
    if (Caller->hasMinSize()) {
      DominatorTree DT(F);
      LoopInfo LI(DT);
      for (Loop *L : LI) {
        // Ignore loops that will not be executed.
        if (DeadBlocks.count(L->getHeader()))
          continue;
        increment(InlineCostFeatureIndex::NumLoops,
                  InlineConstants::CallPenalty);
      }
    }
    set(InlineCostFeatureIndex::DeadBlocks, DeadBlocks.size());
    set(InlineCostFeatureIndex::SimplifiedInstructions,
        NumInstructionsSimplified);
    set(InlineCostFeatureIndex::ConstantArgs, NumConstantArgs);
    set(InlineCostFeatureIndex::ConstantOffsetPtrArgs,
        NumConstantOffsetPtrArgs);
    set(InlineCostFeatureIndex::SROASavings, SROACostSavingOpportunities);

    if (NumVectorInstructions <= NumInstructions / 10)
      increment(InlineCostFeatureIndex::Threshold, -1 * VectorBonus);
    else if (NumVectorInstructions <= NumInstructions / 2)
      increment(InlineCostFeatureIndex::Threshold, -1 * (VectorBonus / 2));

    set(InlineCostFeatureIndex::Threshold, Threshold);

    return InlineResult::success();
  }

  bool shouldStop() override { return false; }

  void onLoadEliminationOpportunity() override {
    increment(InlineCostFeatureIndex::LoadElimination, 1);
  }

  InlineResult onAnalysisStart() override {
    increment(InlineCostFeatureIndex::CallSiteCost,
              -1 * getCallsiteCost(this->CandidateCall, DL));

    set(InlineCostFeatureIndex::ColdCcPenalty,
        (F.getCallingConv() == CallingConv::Cold));

    // FIXME: we shouldn't repeat this logic in both the Features and Cost
    // analyzers - instead, we should abstract it to a common method in the
    // CallAnalyzer.
    int SingleBBBonusPercent = 50;
    int VectorBonusPercent = TTI.getInlinerVectorBonusPercent();
    Threshold += TTI.adjustInliningThreshold(&CandidateCall);
    Threshold *= TTI.getInliningThresholdMultiplier();
    SingleBBBonus = Threshold * SingleBBBonusPercent / 100;
    VectorBonus = Threshold * VectorBonusPercent / 100;
    Threshold += (SingleBBBonus + VectorBonus);

    return InlineResult::success();
  }

public:
  InlineCostFeaturesAnalyzer(
      const TargetTransformInfo &TTI,
      function_ref<AssumptionCache &(Function &)> &GetAssumptionCache,
      function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
      ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE,
      Function &Callee, CallBase &Call)
      : CallAnalyzer(Callee, Call, TTI, GetAssumptionCache, GetBFI, PSI) {}

  const InlineCostFeatures &features() const { return Cost; }
};

} // namespace

/// Test whether the given value is an Alloca-derived function argument.
bool CallAnalyzer::isAllocaDerivedArg(Value *V) {
  return SROAArgValues.count(V);
}

void CallAnalyzer::disableSROAForArg(AllocaInst *SROAArg) {
  onDisableSROA(SROAArg);
  EnabledSROAAllocas.erase(SROAArg);
  disableLoadElimination();
}

void InlineCostAnnotationWriter::emitInstructionAnnot(
    const Instruction *I, formatted_raw_ostream &OS) {
  // The cost of inlining the given instruction is always printed.
  // The threshold delta is printed only when it is non-zero, which happens
  // when we decide to give a bonus at a particular instruction.
  Optional<InstructionCostDetail> Record = ICCA->getCostDetails(I);
  if (!Record)
    OS << "; No analysis for the instruction";
  else {
    OS << "; cost before = " << Record->CostBefore
       << ", cost after = " << Record->CostAfter
       << ", threshold before = " << Record->ThresholdBefore
       << ", threshold after = " << Record->ThresholdAfter << ", ";
    OS << "cost delta = " << Record->getCostDelta();
    if (Record->hasThresholdChanged())
      OS << ", threshold delta = " << Record->getThresholdDelta();
  }
  auto C = ICCA->getSimplifiedValue(const_cast<Instruction *>(I));
  if (C) {
    OS << ", simplified to ";
    C.getValue()->print(OS, true);
  }
  OS << "\n";
}

/// If 'V' maps to a SROA candidate, disable SROA for it.
void CallAnalyzer::disableSROA(Value *V) {
  if (auto *SROAArg = getSROAArgForValueOrNull(V)) {
    disableSROAForArg(SROAArg);
  }
}

void CallAnalyzer::disableLoadElimination() {
  if (EnableLoadElimination) {
    onDisableLoadElimination();
    EnableLoadElimination = false;
  }
}

/// Accumulate a constant GEP offset into an APInt if possible.
///
/// Returns false if unable to compute the offset for any reason. Respects
/// any simplified values known during the analysis of this callsite.
bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
  unsigned IntPtrWidth = DL.getIndexTypeSizeInBits(GEP.getType());
  assert(IntPtrWidth == Offset.getBitWidth());

  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
    if (!OpC)
      if (Constant *SimpleOp = SimplifiedValues.lookup(GTI.getOperand()))
        OpC = dyn_cast<ConstantInt>(SimpleOp);
    if (!OpC)
      return false;
    if (OpC->isZero())
      continue;

    // Handle a struct index, which adds its field offset to the pointer.
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      unsigned ElementIdx = OpC->getZExtValue();
      const StructLayout *SL = DL.getStructLayout(STy);
      Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
      continue;
    }

    APInt TypeSize(IntPtrWidth, DL.getTypeAllocSize(GTI.getIndexedType()));
    Offset += OpC->getValue().sextOrTrunc(IntPtrWidth) * TypeSize;
  }
  return true;
}
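// For example (assuming a typical 64-bit data layout), for
//   %p = getelementptr inbounds {i32, i64}, {i32, i64}* %base, i64 0, i32 1
// the struct index contributes the second field's offset of 8 bytes to
// Offset, since the i64 member is aligned to 8 bytes.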
/// Use TTI to check whether a GEP is free.
///
/// Respects any simplified values known during the analysis of this
/// callsite.
bool CallAnalyzer::isGEPFree(GetElementPtrInst &GEP) {
  SmallVector<Value *, 4> Operands;
  Operands.push_back(GEP.getOperand(0));
  for (const Use &Op : GEP.indices())
    if (Constant *SimpleOp = SimplifiedValues.lookup(Op))
      Operands.push_back(SimpleOp);
    else
      Operands.push_back(Op);
  return TTI.getUserCost(&GEP, Operands,
                         TargetTransformInfo::TCK_SizeAndLatency) ==
         TargetTransformInfo::TCC_Free;
}

bool CallAnalyzer::visitAlloca(AllocaInst &I) {
  disableSROA(I.getOperand(0));

  // Check whether inlining will turn a dynamic alloca into a static
  // alloca and handle that case.
  if (I.isArrayAllocation()) {
    Constant *Size = SimplifiedValues.lookup(I.getArraySize());
    if (auto *AllocSize = dyn_cast_or_null<ConstantInt>(Size)) {
      // Sometimes a dynamic alloca could be converted into a static alloca
      // after this constant prop, and become a huge static alloca on an
      // unconditional CFG path. Avoid inlining if this is going to happen
      // above a threshold.
      // FIXME: If the threshold is removed or lowered too much, we could end
      // up being too pessimistic and prevent inlining non-problematic code.
      // This could result in unintended perf regressions. A better overall
      // strategy is needed to track stack usage during inlining.
      Type *Ty = I.getAllocatedType();
      AllocatedSize = SaturatingMultiplyAdd(
          AllocSize->getLimitedValue(),
          DL.getTypeAllocSize(Ty).getKnownMinSize(), AllocatedSize);
      if (AllocatedSize > InlineConstants::MaxSimplifiedDynamicAllocaToInline)
        HasDynamicAlloca = true;
      return false;
    }
  }

  // Accumulate the allocated size.
  if (I.isStaticAlloca()) {
    Type *Ty = I.getAllocatedType();
    AllocatedSize =
        SaturatingAdd(DL.getTypeAllocSize(Ty).getKnownMinSize(),
                      AllocatedSize);
  }

  // FIXME: This is overly conservative. Dynamic allocas are inefficient for
  // a variety of reasons, and so we would like to not inline them into
  // functions which don't currently have a dynamic alloca. This simply
  // disables inlining altogether in the presence of a dynamic alloca.
  if (!I.isStaticAlloca())
    HasDynamicAlloca = true;

  return false;
}
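// For example (assuming a typical data layout), a static `alloca [4 x i32]`
// adds 16 bytes to AllocatedSize, while `alloca i32, i32 %n` with %n
// simplified to the constant 1000 counts 4000 bytes toward the
// MaxSimplifiedDynamicAllocaToInline limit.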
bool CallAnalyzer::visitPHI(PHINode &I) {
  // FIXME: We need to propagate SROA *disabling* through phi nodes, even
  // though we don't want to propagate its bonuses. The idea is to disable
  // SROA if it *might* be used in an inappropriate manner.

  // Phi nodes are always zero-cost.
  // FIXME: Pointer sizes may differ between different address spaces, so do
  // we need to use the correct address space in the call to
  // getPointerSizeInBits here? Or could we skip the getPointerSizeInBits
  // call completely? As far as I can see the ZeroOffset is used as a dummy
  // value, so we can probably use any bit width for the ZeroOffset?
  APInt ZeroOffset = APInt::getNullValue(DL.getPointerSizeInBits(0));
  bool CheckSROA = I.getType()->isPointerTy();

  // Track the constant or pointer with constant offset we've seen so far.
  Constant *FirstC = nullptr;
  std::pair<Value *, APInt> FirstBaseAndOffset = {nullptr, ZeroOffset};
  Value *FirstV = nullptr;

  for (unsigned i = 0, e = I.getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = I.getIncomingBlock(i);
    // If the incoming block is dead, skip the incoming block.
    if (DeadBlocks.count(Pred))
      continue;
    // If the parent block of phi is not the known successor of the incoming
    // block, skip the incoming block.
    BasicBlock *KnownSuccessor = KnownSuccessors[Pred];
    if (KnownSuccessor && KnownSuccessor != I.getParent())
      continue;

    Value *V = I.getIncomingValue(i);
    // If the incoming value is this phi itself, skip the incoming value.
    if (&I == V)
      continue;

    Constant *C = dyn_cast<Constant>(V);
    if (!C)
      C = SimplifiedValues.lookup(V);

    std::pair<Value *, APInt> BaseAndOffset = {nullptr, ZeroOffset};
    if (!C && CheckSROA)
      BaseAndOffset = ConstantOffsetPtrs.lookup(V);

    if (!C && !BaseAndOffset.first)
      // The incoming value is neither a constant nor a pointer with constant
      // offset, exit early.
      return true;

    if (FirstC) {
      if (FirstC == C)
        // If we've seen a constant incoming value before and it is the same
        // constant we see this time, continue checking the next incoming
        // value.
        continue;
      // Otherwise early exit because we either see a different constant or
      // saw a constant before but we have a pointer with constant offset
      // this time.
      return true;
    }

    if (FirstV) {
      // The same logic as above, but check pointers with constant offsets
      // here.
      if (FirstBaseAndOffset == BaseAndOffset)
        continue;
      return true;
    }

    if (C) {
      // This is the first time we've seen a constant, record it.
      FirstC = C;
      continue;
    }

    // The remaining case is that this is the first time we've seen a pointer
    // with constant offset, record it.
    FirstV = V;
    FirstBaseAndOffset = BaseAndOffset;
  }

  // Check if we can map phi to a constant.
  if (FirstC) {
    SimplifiedValues[&I] = FirstC;
    return true;
  }

  // Check if we can map phi to a pointer with constant offset.
  if (FirstBaseAndOffset.first) {
    ConstantOffsetPtrs[&I] = FirstBaseAndOffset;

    if (auto *SROAArg = getSROAArgForValueOrNull(FirstV))
      SROAArgValues[&I] = SROAArg;
  }

  return true;
}

/// Check we can fold GEPs of constant-offset call site argument pointers.
/// This requires target data and inbounds GEPs.
///
/// \return true if the specified GEP can be folded.
bool CallAnalyzer::canFoldInboundsGEP(GetElementPtrInst &I) {
  // Check if we have a base + offset for the pointer.
  std::pair<Value *, APInt> BaseAndOffset =
      ConstantOffsetPtrs.lookup(I.getPointerOperand());
  if (!BaseAndOffset.first)
    return false;

  // Check if the offset of this GEP is constant, and if so accumulate it
  // into Offset.
  if (!accumulateGEPOffset(cast<GEPOperator>(I), BaseAndOffset.second))
    return false;

  // Add the result as a new mapping to Base + Offset.
  ConstantOffsetPtrs[&I] = BaseAndOffset;

  return true;
}

bool CallAnalyzer::visitGetElementPtr(GetElementPtrInst &I) {
  auto *SROAArg = getSROAArgForValueOrNull(I.getPointerOperand());

  // Lambda to check whether a GEP's indices are all constant.
  auto IsGEPOffsetConstant = [&](GetElementPtrInst &GEP) {
    for (const Use &Op : GEP.indices())
      if (!isa<Constant>(Op) && !SimplifiedValues.lookup(Op))
        return false;
    return true;
  };

  if (!DisableGEPConstOperand)
    if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
          SmallVector<Constant *, 2> Indices;
          for (unsigned int Index = 1; Index < COps.size(); ++Index)
            Indices.push_back(COps[Index]);
          return ConstantExpr::getGetElementPtr(
              I.getSourceElementType(), COps[0], Indices, I.isInBounds());
        }))
      return true;

  if ((I.isInBounds() && canFoldInboundsGEP(I)) || IsGEPOffsetConstant(I)) {
    if (SROAArg)
      SROAArgValues[&I] = SROAArg;

    // Constant GEPs are modeled as free.
    return true;
  }

  // Variable GEPs will require math and will disable SROA.
  if (SROAArg)
    disableSROAForArg(SROAArg);
  return isGEPFree(I);
}

/// Simplify \p I if its operands are constants and update SimplifiedValues.
/// \p Evaluate is a callable specific to the instruction type that evaluates
/// the instruction when all the operands are constants.
template <typename Callable>
bool CallAnalyzer::simplifyInstruction(Instruction &I, Callable Evaluate) {
  SmallVector<Constant *, 2> COps;
  for (Value *Op : I.operands()) {
    Constant *COp = dyn_cast<Constant>(Op);
    if (!COp)
      COp = SimplifiedValues.lookup(Op);
    if (!COp)
      return false;
    COps.push_back(COp);
  }
  auto *C = Evaluate(COps);
  if (!C)
    return false;
  SimplifiedValues[&I] = C;
  return true;
}

bool CallAnalyzer::visitBitCast(BitCastInst &I) {
  // Propagate constants through bitcasts.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getBitCast(COps[0], I.getType());
      }))
    return true;

  // Track base/offsets through casts.
  std::pair<Value *, APInt> BaseAndOffset =
      ConstantOffsetPtrs.lookup(I.getOperand(0));
  // Casts don't change the offset, just wrap it up.
  if (BaseAndOffset.first)
    ConstantOffsetPtrs[&I] = BaseAndOffset;

  // Also look for SROA candidates here.
  if (auto *SROAArg = getSROAArgForValueOrNull(I.getOperand(0)))
    SROAArgValues[&I] = SROAArg;

  // Bitcasts are always zero cost.
  return true;
}

bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
  // Propagate constants through ptrtoint.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getPtrToInt(COps[0], I.getType());
      }))
    return true;

  // Track base/offset pairs when converted to a plain integer provided the
  // integer is large enough to represent the pointer.
  unsigned IntegerSize = I.getType()->getScalarSizeInBits();
  unsigned AS = I.getOperand(0)->getType()->getPointerAddressSpace();
  if (IntegerSize == DL.getPointerSizeInBits(AS)) {
    std::pair<Value *, APInt> BaseAndOffset =
        ConstantOffsetPtrs.lookup(I.getOperand(0));
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // This is really weird. Technically, ptrtoint will disable SROA. However,
  // unless that ptrtoint is *used* somewhere in the live basic blocks after
  // inlining, it will be nuked, and SROA should proceed.
All of the uses which
1507 // would block SROA would also block SROA if applied directly to a pointer,
1508 // and so we can just add the integer in here. The only places where SROA is
1509 // preserved either cannot fire on an integer, or won't in-and-of themselves
1510 // disable SROA (ext) without some later use that we would see and disable.
1511 if (auto *SROAArg = getSROAArgForValueOrNull(I.getOperand(0)))
1512 SROAArgValues[&I] = SROAArg;
1513
1514 return TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency) ==
1515 TargetTransformInfo::TCC_Free;
1516 }
1517
1518 bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
1519 // Propagate constants through inttoptr.
1520 if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1521 return ConstantExpr::getIntToPtr(COps[0], I.getType());
1522 }))
1523 return true;
1524
1525 // Track base/offset pairs when round-tripped through a pointer without
1526 // modifications provided the integer is not too large.
1527 Value *Op = I.getOperand(0);
1528 unsigned IntegerSize = Op->getType()->getScalarSizeInBits();
1529 if (IntegerSize <= DL.getPointerTypeSizeInBits(I.getType())) {
1530 std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op);
1531 if (BaseAndOffset.first)
1532 ConstantOffsetPtrs[&I] = BaseAndOffset;
1533 }
1534
1535 // "Propagate" SROA here in the same manner as we do for ptrtoint above.
1536 if (auto *SROAArg = getSROAArgForValueOrNull(Op))
1537 SROAArgValues[&I] = SROAArg;
1538
1539 return TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency) ==
1540 TargetTransformInfo::TCC_Free;
1541 }
1542
1543 bool CallAnalyzer::visitCastInst(CastInst &I) {
1544 // Propagate constants through casts.
1545 if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1546 return ConstantExpr::getCast(I.getOpcode(), COps[0], I.getType());
1547 }))
1548 return true;
1549
1550 // Disable SROA in the face of arbitrary casts we don't explicitly list
1551 // elsewhere.
1552 disableSROA(I.getOperand(0));
1553
1554 // If this is a floating-point cast, and the target says this operation
1555 // is expensive, this may eventually become a library call. Treat the cost
1556 // as such.
1557 switch (I.getOpcode()) {
1558 case Instruction::FPTrunc:
1559 case Instruction::FPExt:
1560 case Instruction::UIToFP:
1561 case Instruction::SIToFP:
1562 case Instruction::FPToUI:
1563 case Instruction::FPToSI:
1564 if (TTI.getFPOpCost(I.getType()) == TargetTransformInfo::TCC_Expensive)
1565 onCallPenalty();
1566 break;
1567 default:
1568 break;
1569 }
1570
1571 return TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency) ==
1572 TargetTransformInfo::TCC_Free;
1573 }
1574
1575 bool CallAnalyzer::paramHasAttr(Argument *A, Attribute::AttrKind Attr) {
1576 return CandidateCall.paramHasAttr(A->getArgNo(), Attr);
1577 }
1578
1579 bool CallAnalyzer::isKnownNonNullInCallee(Value *V) {
1580 // Does the *call site* have the NonNull attribute set on an argument? We
1581 // use the attribute on the call site to memoize any analysis done in the
1582 // caller. This will also trip if the callee function has a non-null
1583 // parameter attribute, but that's a less interesting case because hopefully
1584 // the callee would already have been simplified based on that.
1585 if (Argument *A = dyn_cast<Argument>(V))
1586 if (paramHasAttr(A, Attribute::NonNull))
1587 return true;
1588
1589 // Is this an alloca in the caller?
This is distinct from the attribute case
1590 // above because attributes aren't updated within the inliner itself and we
1591 // always want to catch the alloca-derived case.
1592 if (isAllocaDerivedArg(V))
1593 // We can actually predict the result of comparisons between an
1594 // alloca-derived value and null. Note that this fires regardless of
1595 // SROA firing.
1596 return true;
1597
1598 return false;
1599 }
1600
1601 bool CallAnalyzer::allowSizeGrowth(CallBase &Call) {
1602 // If the normal destination of the invoke or the parent block of the call
1603 // site is unreachable-terminated, there is little point in inlining this
1604 // unless there is literally zero cost.
1605 // FIXME: Note that it is possible that an unreachable-terminated block has a
1606 // hot entry. For example, in the scenario below, inlining hot_call_X() may be
1607 // beneficial:
1608 // main() {
1609 // hot_call_1();
1610 // ...
1611 // hot_call_N();
1612 // exit(0);
1613 // }
1614 // For now, we are not handling this corner case here as it is rare in real
1615 // code. In the future, we should elaborate on this based on BPI and BFI in
1616 // more general threshold-adjusting heuristics in updateThreshold().
1617 if (InvokeInst *II = dyn_cast<InvokeInst>(&Call)) {
1618 if (isa<UnreachableInst>(II->getNormalDest()->getTerminator()))
1619 return false;
1620 } else if (isa<UnreachableInst>(Call.getParent()->getTerminator()))
1621 return false;
1622
1623 return true;
1624 }
1625
1626 bool InlineCostCallAnalyzer::isColdCallSite(CallBase &Call,
1627 BlockFrequencyInfo *CallerBFI) {
1628 // If a global profile summary is available, then the callsite's coldness is
1629 // determined based on that.
1630 if (PSI && PSI->hasProfileSummary())
1631 return PSI->isColdCallSite(Call, CallerBFI);
1632
1633 // Otherwise we need BFI to be available.
1634 if (!CallerBFI)
1635 return false;
1636
1637 // Determine if the callsite is cold relative to the caller's entry. We could
1638 // potentially cache the computation of scaled entry frequency, but the added
1639 // complexity is not worth it unless this scaling shows up high in the
1640 // profiles.
1641 const BranchProbability ColdProb(ColdCallSiteRelFreq, 100);
1642 auto CallSiteBB = Call.getParent();
1643 auto CallSiteFreq = CallerBFI->getBlockFreq(CallSiteBB);
1644 auto CallerEntryFreq =
1645 CallerBFI->getBlockFreq(&(Call.getCaller()->getEntryBlock()));
1646 return CallSiteFreq < CallerEntryFreq * ColdProb;
1647 }
1648
1649 Optional<int>
1650 InlineCostCallAnalyzer::getHotCallSiteThreshold(CallBase &Call,
1651 BlockFrequencyInfo *CallerBFI) {
1652
1653 // If a global profile summary is available, then the callsite's hotness is
1654 // determined based on that.
1655 if (PSI && PSI->hasProfileSummary() && PSI->isHotCallSite(Call, CallerBFI))
1656 return Params.HotCallSiteThreshold;
1657
1658 // Otherwise we need BFI to be available and to have a locally hot callsite
1659 // threshold.
1660 if (!CallerBFI || !Params.LocallyHotCallSiteThreshold)
1661 return None;
1662
1663 // Determine if the callsite is hot relative to the caller's entry. We could
1664 // potentially cache the computation of scaled entry frequency, but the added
1665 // complexity is not worth it unless this scaling shows up high in the
1666 // profiles.
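// Illustrative example: with the default hot-callsite-rel-freq of 60, a
// callsite whose block executes at least 60x as often as the caller's entry
// block (say, inside a hot loop) is treated as locally hot below and is given
// Params.LocallyHotCallSiteThreshold instead of the base threshold.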
1667 auto CallSiteBB = Call.getParent();
1668 auto CallSiteFreq = CallerBFI->getBlockFreq(CallSiteBB).getFrequency();
1669 auto CallerEntryFreq = CallerBFI->getEntryFreq();
1670 if (CallSiteFreq >= CallerEntryFreq * HotCallSiteRelFreq)
1671 return Params.LocallyHotCallSiteThreshold;
1672
1673 // Otherwise treat it normally.
1674 return None;
1675 }
1676
1677 void InlineCostCallAnalyzer::updateThreshold(CallBase &Call, Function &Callee) {
1678 // If no size growth is allowed for this inlining, set Threshold to 0.
1679 if (!allowSizeGrowth(Call)) {
1680 Threshold = 0;
1681 return;
1682 }
1683
1684 Function *Caller = Call.getCaller();
1685
1686 // Return min(A, B) if B is valid.
1687 auto MinIfValid = [](int A, Optional<int> B) {
1688 return B ? std::min(A, B.getValue()) : A;
1689 };
1690
1691 // Return max(A, B) if B is valid.
1692 auto MaxIfValid = [](int A, Optional<int> B) {
1693 return B ? std::max(A, B.getValue()) : A;
1694 };
1695
1696 // Various bonus percentages. These are multiplied by Threshold to get the
1697 // bonus values.
1698 // SingleBBBonus: This bonus is applied if the callee has a single reachable
1699 // basic block at the given callsite context. This is speculatively applied
1700 // and withdrawn if more than one basic block is seen.
1701 //
1702 // LastCallToStaticBonus: This large bonus is applied to ensure the inlining
1703 // of the last call to a static function as inlining such functions is
1704 // guaranteed to reduce code size.
1705 //
1706 // These bonus percentages may be set to 0 based on properties of the caller
1707 // and the callsite.
1708 int SingleBBBonusPercent = 50;
1709 int VectorBonusPercent = TTI.getInlinerVectorBonusPercent();
1710 int LastCallToStaticBonus = InlineConstants::LastCallToStaticBonus;
1711
1712 // Lambda to set all the above bonuses and bonus percentages to 0.
1713 auto DisallowAllBonuses = [&]() {
1714 SingleBBBonusPercent = 0;
1715 VectorBonusPercent = 0;
1716 LastCallToStaticBonus = 0;
1717 };
1718
1719 // Use the OptMinSizeThreshold or OptSizeThreshold knobs if they are available
1720 // and reduce the threshold if the caller has the necessary attribute.
1721 if (Caller->hasMinSize()) {
1722 Threshold = MinIfValid(Threshold, Params.OptMinSizeThreshold);
1723 // For minsize, we want to disable the single BB bonus and the vector
1724 // bonuses, but not the last-call-to-static bonus. Inlining the last call to
1725 // a static function will, at the minimum, eliminate the parameter setup and
1726 // call/return instructions.
1727 SingleBBBonusPercent = 0;
1728 VectorBonusPercent = 0;
1729 } else if (Caller->hasOptSize())
1730 Threshold = MinIfValid(Threshold, Params.OptSizeThreshold);
1731
1732 // Adjust the threshold based on the inlinehint attribute and profile-based
1733 // hotness information if the caller does not have the MinSize attribute.
1734 if (!Caller->hasMinSize()) {
1735 if (Callee.hasFnAttribute(Attribute::InlineHint))
1736 Threshold = MaxIfValid(Threshold, Params.HintThreshold);
1737
1738 // FIXME: After switching to the new pass manager, simplify the logic below
1739 // by checking only the callsite hotness/coldness as we will reliably
1740 // have local profile information.
1741 //
1742 // Callsite hotness and coldness can be determined if a sample profile is
1743 // used (which adds hotness metadata to calls) or if the caller's
1744 // BlockFrequencyInfo is available.
1745 BlockFrequencyInfo *CallerBFI = GetBFI ?
&(GetBFI(*Caller)) : nullptr; 1746 auto HotCallSiteThreshold = getHotCallSiteThreshold(Call, CallerBFI); 1747 if (!Caller->hasOptSize() && HotCallSiteThreshold) { 1748 LLVM_DEBUG(dbgs() << "Hot callsite.\n"); 1749 // FIXME: This should update the threshold only if it exceeds the 1750 // current threshold, but AutoFDO + ThinLTO currently relies on this 1751 // behavior to prevent inlining of hot callsites during ThinLTO 1752 // compile phase. 1753 Threshold = HotCallSiteThreshold.getValue(); 1754 } else if (isColdCallSite(Call, CallerBFI)) { 1755 LLVM_DEBUG(dbgs() << "Cold callsite.\n"); 1756 // Do not apply bonuses for a cold callsite including the 1757 // LastCallToStatic bonus. While this bonus might result in code size 1758 // reduction, it can cause the size of a non-cold caller to increase 1759 // preventing it from being inlined. 1760 DisallowAllBonuses(); 1761 Threshold = MinIfValid(Threshold, Params.ColdCallSiteThreshold); 1762 } else if (PSI) { 1763 // Use callee's global profile information only if we have no way of 1764 // determining this via callsite information. 1765 if (PSI->isFunctionEntryHot(&Callee)) { 1766 LLVM_DEBUG(dbgs() << "Hot callee.\n"); 1767 // If callsite hotness can not be determined, we may still know 1768 // that the callee is hot and treat it as a weaker hint for threshold 1769 // increase. 1770 Threshold = MaxIfValid(Threshold, Params.HintThreshold); 1771 } else if (PSI->isFunctionEntryCold(&Callee)) { 1772 LLVM_DEBUG(dbgs() << "Cold callee.\n"); 1773 // Do not apply bonuses for a cold callee including the 1774 // LastCallToStatic bonus. While this bonus might result in code size 1775 // reduction, it can cause the size of a non-cold caller to increase 1776 // preventing it from being inlined. 1777 DisallowAllBonuses(); 1778 Threshold = MinIfValid(Threshold, Params.ColdThreshold); 1779 } 1780 } 1781 } 1782 1783 Threshold += TTI.adjustInliningThreshold(&Call); 1784 1785 // Finally, take the target-specific inlining threshold multiplier into 1786 // account. 1787 Threshold *= TTI.getInliningThresholdMultiplier(); 1788 1789 SingleBBBonus = Threshold * SingleBBBonusPercent / 100; 1790 VectorBonus = Threshold * VectorBonusPercent / 100; 1791 1792 bool OnlyOneCallAndLocalLinkage = 1793 F.hasLocalLinkage() && F.hasOneUse() && &F == Call.getCalledFunction(); 1794 // If there is only one call of the function, and it has internal linkage, 1795 // the cost of inlining it drops dramatically. It may seem odd to update 1796 // Cost in updateThreshold, but the bonus depends on the logic in this method. 1797 if (OnlyOneCallAndLocalLinkage) 1798 Cost -= LastCallToStaticBonus; 1799 } 1800 1801 bool CallAnalyzer::visitCmpInst(CmpInst &I) { 1802 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1); 1803 // First try to handle simplified comparisons. 1804 if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) { 1805 return ConstantExpr::getCompare(I.getPredicate(), COps[0], COps[1]); 1806 })) 1807 return true; 1808 1809 if (I.getOpcode() == Instruction::FCmp) 1810 return false; 1811 1812 // Otherwise look for a comparison between constant offset pointers with 1813 // a common base. 1814 Value *LHSBase, *RHSBase; 1815 APInt LHSOffset, RHSOffset; 1816 std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS); 1817 if (LHSBase) { 1818 std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS); 1819 if (RHSBase && LHSBase == RHSBase) { 1820 // We have common bases, fold the icmp to a constant based on the 1821 // offsets. 
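// For example (illustrative IR): if %lhs and %rhs are inbounds GEPs off the
// same base %buf with byte offsets 8 and 24 respectively, then
//   %c = icmp ult i32* %lhs, %rhs
// folds to true here, since only the two offsets need to be compared.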
1822 Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset); 1823 Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset); 1824 if (Constant *C = ConstantExpr::getICmp(I.getPredicate(), CLHS, CRHS)) { 1825 SimplifiedValues[&I] = C; 1826 ++NumConstantPtrCmps; 1827 return true; 1828 } 1829 } 1830 } 1831 1832 // If the comparison is an equality comparison with null, we can simplify it 1833 // if we know the value (argument) can't be null 1834 if (I.isEquality() && isa<ConstantPointerNull>(I.getOperand(1)) && 1835 isKnownNonNullInCallee(I.getOperand(0))) { 1836 bool IsNotEqual = I.getPredicate() == CmpInst::ICMP_NE; 1837 SimplifiedValues[&I] = IsNotEqual ? ConstantInt::getTrue(I.getType()) 1838 : ConstantInt::getFalse(I.getType()); 1839 return true; 1840 } 1841 return handleSROA(I.getOperand(0), isa<ConstantPointerNull>(I.getOperand(1))); 1842 } 1843 1844 bool CallAnalyzer::visitSub(BinaryOperator &I) { 1845 // Try to handle a special case: we can fold computing the difference of two 1846 // constant-related pointers. 1847 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1); 1848 Value *LHSBase, *RHSBase; 1849 APInt LHSOffset, RHSOffset; 1850 std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS); 1851 if (LHSBase) { 1852 std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS); 1853 if (RHSBase && LHSBase == RHSBase) { 1854 // We have common bases, fold the subtract to a constant based on the 1855 // offsets. 1856 Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset); 1857 Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset); 1858 if (Constant *C = ConstantExpr::getSub(CLHS, CRHS)) { 1859 SimplifiedValues[&I] = C; 1860 ++NumConstantPtrDiffs; 1861 return true; 1862 } 1863 } 1864 } 1865 1866 // Otherwise, fall back to the generic logic for simplifying and handling 1867 // instructions. 1868 return Base::visitSub(I); 1869 } 1870 1871 bool CallAnalyzer::visitBinaryOperator(BinaryOperator &I) { 1872 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1); 1873 Constant *CLHS = dyn_cast<Constant>(LHS); 1874 if (!CLHS) 1875 CLHS = SimplifiedValues.lookup(LHS); 1876 Constant *CRHS = dyn_cast<Constant>(RHS); 1877 if (!CRHS) 1878 CRHS = SimplifiedValues.lookup(RHS); 1879 1880 Value *SimpleV = nullptr; 1881 if (auto FI = dyn_cast<FPMathOperator>(&I)) 1882 SimpleV = SimplifyBinOp(I.getOpcode(), CLHS ? CLHS : LHS, CRHS ? CRHS : RHS, 1883 FI->getFastMathFlags(), DL); 1884 else 1885 SimpleV = 1886 SimplifyBinOp(I.getOpcode(), CLHS ? CLHS : LHS, CRHS ? CRHS : RHS, DL); 1887 1888 if (Constant *C = dyn_cast_or_null<Constant>(SimpleV)) 1889 SimplifiedValues[&I] = C; 1890 1891 if (SimpleV) 1892 return true; 1893 1894 // Disable any SROA on arguments to arbitrary, unsimplified binary operators. 1895 disableSROA(LHS); 1896 disableSROA(RHS); 1897 1898 // If the instruction is floating point, and the target says this operation 1899 // is expensive, this may eventually become a library call. Treat the cost 1900 // as such. Unless it's fneg which can be implemented with an xor. 1901 using namespace llvm::PatternMatch; 1902 if (I.getType()->isFloatingPointTy() && 1903 TTI.getFPOpCost(I.getType()) == TargetTransformInfo::TCC_Expensive && 1904 !match(&I, m_FNeg(m_Value()))) 1905 onCallPenalty(); 1906 1907 return false; 1908 } 1909 1910 bool CallAnalyzer::visitFNeg(UnaryOperator &I) { 1911 Value *Op = I.getOperand(0); 1912 Constant *COp = dyn_cast<Constant>(Op); 1913 if (!COp) 1914 COp = SimplifiedValues.lookup(Op); 1915 1916 Value *SimpleV = SimplifyFNegInst( 1917 COp ? 
COp : Op, cast<FPMathOperator>(I).getFastMathFlags(), DL);
1918
1919 if (Constant *C = dyn_cast_or_null<Constant>(SimpleV))
1920 SimplifiedValues[&I] = C;
1921
1922 if (SimpleV)
1923 return true;
1924
1925 // Disable any SROA on arguments to arbitrary, unsimplified fneg.
1926 disableSROA(Op);
1927
1928 return false;
1929 }
1930
1931 bool CallAnalyzer::visitLoad(LoadInst &I) {
1932 if (handleSROA(I.getPointerOperand(), I.isSimple()))
1933 return true;
1934
1935 // If the data is already loaded from this address and hasn't been clobbered
1936 // by any stores or calls, this load is likely to be redundant and can be
1937 // eliminated.
1938 if (EnableLoadElimination &&
1939 !LoadAddrSet.insert(I.getPointerOperand()).second && I.isUnordered()) {
1940 onLoadEliminationOpportunity();
1941 return true;
1942 }
1943
1944 return false;
1945 }
1946
1947 bool CallAnalyzer::visitStore(StoreInst &I) {
1948 if (handleSROA(I.getPointerOperand(), I.isSimple()))
1949 return true;
1950
1951 // The store can potentially clobber loads and prevent repeated loads from
1952 // being eliminated.
1953 // FIXME:
1954 // 1. We can probably keep an initial set of eliminatable loads subtracted
1955 // from the cost even when we finally see a store. We just need to disable
1956 // *further* accumulation of elimination savings.
1957 // 2. We should probably at some point thread MemorySSA for the callee into
1958 // this and then use that to actually compute *really* precise savings.
1959 disableLoadElimination();
1960 return false;
1961 }
1962
1963 bool CallAnalyzer::visitExtractValue(ExtractValueInst &I) {
1964 // Constant folding for extract value is trivial.
1965 if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1966 return ConstantExpr::getExtractValue(COps[0], I.getIndices());
1967 }))
1968 return true;
1969
1970 // SROA can't look through these, but they may be free.
1971 return Base::visitExtractValue(I);
1972 }
1973
1974 bool CallAnalyzer::visitInsertValue(InsertValueInst &I) {
1975 // Constant folding for insert value is trivial.
1976 if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
1977 return ConstantExpr::getInsertValue(/*AggregateOperand*/ COps[0],
1978 /*InsertedValueOperand*/ COps[1],
1979 I.getIndices());
1980 }))
1981 return true;
1982
1983 // SROA can't look through these, but they may be free.
1984 return Base::visitInsertValue(I);
1985 }
1986
1987 /// Try to simplify a call site.
1988 ///
1989 /// Takes a concrete function and callsite and tries to actually simplify it by
1990 /// analyzing the arguments and the call itself with instsimplify. Returns true
1991 /// if it has simplified the callsite to some other entity (a constant), making
1992 /// it free.
1993 bool CallAnalyzer::simplifyCallSite(Function *F, CallBase &Call) {
1994 // FIXME: Using the instsimplify logic directly for this is inefficient
1995 // because we have to continually rebuild the argument list even when no
1996 // simplifications can be performed. Until that is fixed with remapping
1997 // inside of instsimplify, directly constant fold calls here.
1998 if (!canConstantFoldCallTo(&Call, F))
1999 return false;
2000
2001 // Try to re-map the arguments to constants.
2002 SmallVector<Constant *, 4> ConstantArgs;
2003 ConstantArgs.reserve(Call.arg_size());
2004 for (Value *I : Call.args()) {
2005 Constant *C = dyn_cast<Constant>(I);
2006 if (!C)
2007 C = dyn_cast_or_null<Constant>(SimplifiedValues.lookup(I));
2008 if (!C)
2009 return false; // This argument doesn't map to a constant.
2010 2011 ConstantArgs.push_back(C); 2012 } 2013 if (Constant *C = ConstantFoldCall(&Call, F, ConstantArgs)) { 2014 SimplifiedValues[&Call] = C; 2015 return true; 2016 } 2017 2018 return false; 2019 } 2020 2021 bool CallAnalyzer::visitCallBase(CallBase &Call) { 2022 if (Call.hasFnAttr(Attribute::ReturnsTwice) && 2023 !F.hasFnAttribute(Attribute::ReturnsTwice)) { 2024 // This aborts the entire analysis. 2025 ExposesReturnsTwice = true; 2026 return false; 2027 } 2028 if (isa<CallInst>(Call) && cast<CallInst>(Call).cannotDuplicate()) 2029 ContainsNoDuplicateCall = true; 2030 2031 Value *Callee = Call.getCalledOperand(); 2032 Function *F = dyn_cast_or_null<Function>(Callee); 2033 bool IsIndirectCall = !F; 2034 if (IsIndirectCall) { 2035 // Check if this happens to be an indirect function call to a known function 2036 // in this inline context. If not, we've done all we can. 2037 F = dyn_cast_or_null<Function>(SimplifiedValues.lookup(Callee)); 2038 if (!F) { 2039 onCallArgumentSetup(Call); 2040 2041 if (!Call.onlyReadsMemory()) 2042 disableLoadElimination(); 2043 return Base::visitCallBase(Call); 2044 } 2045 } 2046 2047 assert(F && "Expected a call to a known function"); 2048 2049 // When we have a concrete function, first try to simplify it directly. 2050 if (simplifyCallSite(F, Call)) 2051 return true; 2052 2053 // Next check if it is an intrinsic we know about. 2054 // FIXME: Lift this into part of the InstVisitor. 2055 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&Call)) { 2056 switch (II->getIntrinsicID()) { 2057 default: 2058 if (!Call.onlyReadsMemory() && !isAssumeLikeIntrinsic(II)) 2059 disableLoadElimination(); 2060 return Base::visitCallBase(Call); 2061 2062 case Intrinsic::load_relative: 2063 onLoadRelativeIntrinsic(); 2064 return false; 2065 2066 case Intrinsic::memset: 2067 case Intrinsic::memcpy: 2068 case Intrinsic::memmove: 2069 disableLoadElimination(); 2070 // SROA can usually chew through these intrinsics, but they aren't free. 2071 return false; 2072 case Intrinsic::icall_branch_funnel: 2073 case Intrinsic::localescape: 2074 HasUninlineableIntrinsic = true; 2075 return false; 2076 case Intrinsic::vastart: 2077 InitsVargArgs = true; 2078 return false; 2079 case Intrinsic::launder_invariant_group: 2080 case Intrinsic::strip_invariant_group: 2081 if (auto *SROAArg = getSROAArgForValueOrNull(II->getOperand(0))) 2082 SROAArgValues[II] = SROAArg; 2083 return true; 2084 } 2085 } 2086 2087 if (F == Call.getFunction()) { 2088 // This flag will fully abort the analysis, so don't bother with anything 2089 // else. 2090 IsRecursiveCall = true; 2091 return false; 2092 } 2093 2094 if (TTI.isLoweredToCall(F)) { 2095 onLoweredCall(F, Call, IsIndirectCall); 2096 } 2097 2098 if (!(Call.onlyReadsMemory() || (IsIndirectCall && F->onlyReadsMemory()))) 2099 disableLoadElimination(); 2100 return Base::visitCallBase(Call); 2101 } 2102 2103 bool CallAnalyzer::visitReturnInst(ReturnInst &RI) { 2104 // At least one return instruction will be free after inlining. 2105 bool Free = !HasReturn; 2106 HasReturn = true; 2107 return Free; 2108 } 2109 2110 bool CallAnalyzer::visitBranchInst(BranchInst &BI) { 2111 // We model unconditional branches as essentially free -- they really 2112 // shouldn't exist at all, but handling them makes the behavior of the 2113 // inliner more regular and predictable. Interestingly, conditional branches 2114 // which will fold away are also free. 
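// For example (illustrative), if a constant call argument let us map %cond
// to the constant true in SimplifiedValues, then
//   br i1 %cond, label %then, label %else
// is modeled as free here, and the untaken successor is later pruned by
// findDeadBlocks() during the block walk in analyze().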
2115 return BI.isUnconditional() || isa<ConstantInt>(BI.getCondition()) || 2116 dyn_cast_or_null<ConstantInt>( 2117 SimplifiedValues.lookup(BI.getCondition())); 2118 } 2119 2120 bool CallAnalyzer::visitSelectInst(SelectInst &SI) { 2121 bool CheckSROA = SI.getType()->isPointerTy(); 2122 Value *TrueVal = SI.getTrueValue(); 2123 Value *FalseVal = SI.getFalseValue(); 2124 2125 Constant *TrueC = dyn_cast<Constant>(TrueVal); 2126 if (!TrueC) 2127 TrueC = SimplifiedValues.lookup(TrueVal); 2128 Constant *FalseC = dyn_cast<Constant>(FalseVal); 2129 if (!FalseC) 2130 FalseC = SimplifiedValues.lookup(FalseVal); 2131 Constant *CondC = 2132 dyn_cast_or_null<Constant>(SimplifiedValues.lookup(SI.getCondition())); 2133 2134 if (!CondC) { 2135 // Select C, X, X => X 2136 if (TrueC == FalseC && TrueC) { 2137 SimplifiedValues[&SI] = TrueC; 2138 return true; 2139 } 2140 2141 if (!CheckSROA) 2142 return Base::visitSelectInst(SI); 2143 2144 std::pair<Value *, APInt> TrueBaseAndOffset = 2145 ConstantOffsetPtrs.lookup(TrueVal); 2146 std::pair<Value *, APInt> FalseBaseAndOffset = 2147 ConstantOffsetPtrs.lookup(FalseVal); 2148 if (TrueBaseAndOffset == FalseBaseAndOffset && TrueBaseAndOffset.first) { 2149 ConstantOffsetPtrs[&SI] = TrueBaseAndOffset; 2150 2151 if (auto *SROAArg = getSROAArgForValueOrNull(TrueVal)) 2152 SROAArgValues[&SI] = SROAArg; 2153 return true; 2154 } 2155 2156 return Base::visitSelectInst(SI); 2157 } 2158 2159 // Select condition is a constant. 2160 Value *SelectedV = CondC->isAllOnesValue() ? TrueVal 2161 : (CondC->isNullValue()) ? FalseVal 2162 : nullptr; 2163 if (!SelectedV) { 2164 // Condition is a vector constant that is not all 1s or all 0s. If all 2165 // operands are constants, ConstantExpr::getSelect() can handle the cases 2166 // such as select vectors. 2167 if (TrueC && FalseC) { 2168 if (auto *C = ConstantExpr::getSelect(CondC, TrueC, FalseC)) { 2169 SimplifiedValues[&SI] = C; 2170 return true; 2171 } 2172 } 2173 return Base::visitSelectInst(SI); 2174 } 2175 2176 // Condition is either all 1s or all 0s. SI can be simplified. 2177 if (Constant *SelectedC = dyn_cast<Constant>(SelectedV)) { 2178 SimplifiedValues[&SI] = SelectedC; 2179 return true; 2180 } 2181 2182 if (!CheckSROA) 2183 return true; 2184 2185 std::pair<Value *, APInt> BaseAndOffset = 2186 ConstantOffsetPtrs.lookup(SelectedV); 2187 if (BaseAndOffset.first) { 2188 ConstantOffsetPtrs[&SI] = BaseAndOffset; 2189 2190 if (auto *SROAArg = getSROAArgForValueOrNull(SelectedV)) 2191 SROAArgValues[&SI] = SROAArg; 2192 } 2193 2194 return true; 2195 } 2196 2197 bool CallAnalyzer::visitSwitchInst(SwitchInst &SI) { 2198 // We model unconditional switches as free, see the comments on handling 2199 // branches. 2200 if (isa<ConstantInt>(SI.getCondition())) 2201 return true; 2202 if (Value *V = SimplifiedValues.lookup(SI.getCondition())) 2203 if (isa<ConstantInt>(V)) 2204 return true; 2205 2206 // Assume the most general case where the switch is lowered into 2207 // either a jump table, bit test, or a balanced binary tree consisting of 2208 // case clusters without merging adjacent clusters with the same 2209 // destination. We do not consider the switches that are lowered with a mix 2210 // of jump table/bit test/binary search tree. The cost of the switch is 2211 // proportional to the size of the tree or the size of jump table range. 2212 // 2213 // NB: We convert large switches which are just used to initialize large phi 2214 // nodes to lookup tables instead in simplifycfg, so this shouldn't prevent 2215 // inlining those. 
It will prevent inlining in cases where the optimization
2216 // does not (yet) fire.
2217
2218 unsigned JumpTableSize = 0;
2219 BlockFrequencyInfo *BFI = GetBFI ? &(GetBFI(F)) : nullptr;
2220 unsigned NumCaseCluster =
2221 TTI.getEstimatedNumberOfCaseClusters(SI, JumpTableSize, PSI, BFI);
2222
2223 onFinalizeSwitch(JumpTableSize, NumCaseCluster);
2224 return false;
2225 }
2226
2227 bool CallAnalyzer::visitIndirectBrInst(IndirectBrInst &IBI) {
2228 // We never want to inline functions that contain an indirectbr. Inlining
2229 // one would be incorrect because all the blockaddresses (in static global
2230 // initializers for example) would be referring to the original function, and
2231 // this indirect jump would jump from the inlined copy of the function into
2232 // the original function, which is extremely undefined behavior.
2233 // FIXME: This logic isn't really right; we can safely inline functions with
2234 // indirectbr's as long as no other function or global references the
2235 // blockaddress of a block within the current function.
2236 HasIndirectBr = true;
2237 return false;
2238 }
2239
2240 bool CallAnalyzer::visitResumeInst(ResumeInst &RI) {
2241 // FIXME: It's not clear that a single instruction is an accurate model for
2242 // the inline cost of a resume instruction.
2243 return false;
2244 }
2245
2246 bool CallAnalyzer::visitCleanupReturnInst(CleanupReturnInst &CRI) {
2247 // FIXME: It's not clear that a single instruction is an accurate model for
2248 // the inline cost of a cleanupret instruction.
2249 return false;
2250 }
2251
2252 bool CallAnalyzer::visitCatchReturnInst(CatchReturnInst &CRI) {
2253 // FIXME: It's not clear that a single instruction is an accurate model for
2254 // the inline cost of a catchret instruction.
2255 return false;
2256 }
2257
2258 bool CallAnalyzer::visitUnreachableInst(UnreachableInst &I) {
2259 // FIXME: It might be reasonable to discount the cost of instructions leading
2260 // to unreachable as they have the lowest possible impact on both runtime and
2261 // code size.
2262 return true; // No actual code is needed for unreachable.
2263 }
2264
2265 bool CallAnalyzer::visitInstruction(Instruction &I) {
2266 // Some instructions are free. All of the free intrinsics can also be
2267 // handled by SROA, etc.
2268 if (TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency) ==
2269 TargetTransformInfo::TCC_Free)
2270 return true;
2271
2272 // We found something we don't understand or can't handle. Mark any SROA-able
2273 // values in the operand list as no longer viable.
2274 for (const Use &Op : I.operands())
2275 disableSROA(Op);
2276
2277 return false;
2278 }
2279
2280 /// Analyze a basic block for its contribution to the inline cost.
2281 ///
2282 /// This method walks the analyzer over every instruction in the given basic
2283 /// block and accounts for their cost during inlining at this callsite. It
2284 /// aborts early if the threshold has been exceeded or an impossible-to-inline
2285 /// construct has been detected. It returns a failure result if inlining is no
2286 /// longer viable, and a success result if inlining remains viable.
2287 InlineResult
2288 CallAnalyzer::analyzeBlock(BasicBlock *BB,
2289 SmallPtrSetImpl<const Value *> &EphValues) {
2290 for (Instruction &I : *BB) {
2291 // FIXME: Currently, the number of instructions in a function, regardless of
2292 // our ability to simplify them to constants or dead code during inlining,
2293 // is actually used by the vector bonus heuristic. As long as that's true,
2294 // we have to special-case debug intrinsics here to prevent differences in
2295 // inlining due to debug symbols. Eventually, the number of unsimplified
2296 // instructions shouldn't factor into the cost computation, but until then,
2297 // hack around it here.
2298 if (isa<DbgInfoIntrinsic>(I))
2299 continue;
2300
2301 // Skip pseudo-probes.
2302 if (isa<PseudoProbeInst>(I))
2303 continue;
2304
2305 // Skip ephemeral values.
2306 if (EphValues.count(&I))
2307 continue;
2308
2309 ++NumInstructions;
2310 if (isa<ExtractElementInst>(I) || I.getType()->isVectorTy())
2311 ++NumVectorInstructions;
2312
2313 // If the instruction simplified to a constant, there is no cost to this
2314 // instruction. Visit the instructions using our InstVisitor to account for
2315 // all of the per-instruction logic. The visit tree returns true if we
2316 // consumed the instruction in any way, and false if the instruction's base
2317 // cost should count against inlining.
2318 onInstructionAnalysisStart(&I);
2319
2320 if (Base::visit(&I))
2321 ++NumInstructionsSimplified;
2322 else
2323 onMissedSimplification();
2324
2325 onInstructionAnalysisFinish(&I);
2326 using namespace ore;
2327 // If visiting this instruction detected an uninlinable pattern, abort.
2328 InlineResult IR = InlineResult::success();
2329 if (IsRecursiveCall)
2330 IR = InlineResult::failure("recursive");
2331 else if (ExposesReturnsTwice)
2332 IR = InlineResult::failure("exposes returns twice");
2333 else if (HasDynamicAlloca)
2334 IR = InlineResult::failure("dynamic alloca");
2335 else if (HasIndirectBr)
2336 IR = InlineResult::failure("indirect branch");
2337 else if (HasUninlineableIntrinsic)
2338 IR = InlineResult::failure("uninlinable intrinsic");
2339 else if (InitsVargArgs)
2340 IR = InlineResult::failure("varargs");
2341 if (!IR.isSuccess()) {
2342 if (ORE)
2343 ORE->emit([&]() {
2344 return OptimizationRemarkMissed(DEBUG_TYPE, "NeverInline",
2345 &CandidateCall)
2346 << NV("Callee", &F) << " has uninlinable pattern ("
2347 << NV("InlineResult", IR.getFailureReason())
2348 << ") and cost is not fully computed";
2349 });
2350 return IR;
2351 }
2352
2353 // If the caller is a recursive function, then we don't want to inline
2354 // functions which allocate a lot of stack space because it would increase
2355 // the caller's stack usage dramatically.
2356 if (IsCallerRecursive &&
2357 AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller) {
2358 auto IR =
2359 InlineResult::failure("recursive and allocates too much stack space");
2360 if (ORE)
2361 ORE->emit([&]() {
2362 return OptimizationRemarkMissed(DEBUG_TYPE, "NeverInline",
2363 &CandidateCall)
2364 << NV("Callee", &F) << " is "
2365 << NV("InlineResult", IR.getFailureReason())
2366 << ". Cost is not fully computed";
2367 });
2368 return IR;
2369 }
2370
2371 if (shouldStop())
2372 return InlineResult::failure(
2373 "Call site analysis is not favorable to inlining.");
2374 }
2375
2376 return InlineResult::success();
2377 }
2378
2379 /// Compute the base pointer and cumulative constant offsets for V.
2380 ///
2381 /// This strips all constant offsets off of V, leaving it the base pointer, and
2382 /// accumulates the total constant offset applied in the returned constant. It
2383 /// returns nullptr if V is not a pointer, and returns the constant '0' if
2384 /// there are no constant offsets applied.
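/// For example (illustrative), for V = getelementptr inbounds i32, i32* %p,
/// i64 3, V is stripped back to %p and the returned constant is 12
/// (3 * 4 bytes).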
2385 ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) { 2386 if (!V->getType()->isPointerTy()) 2387 return nullptr; 2388 2389 unsigned AS = V->getType()->getPointerAddressSpace(); 2390 unsigned IntPtrWidth = DL.getIndexSizeInBits(AS); 2391 APInt Offset = APInt::getNullValue(IntPtrWidth); 2392 2393 // Even though we don't look through PHI nodes, we could be called on an 2394 // instruction in an unreachable block, which may be on a cycle. 2395 SmallPtrSet<Value *, 4> Visited; 2396 Visited.insert(V); 2397 do { 2398 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) { 2399 if (!GEP->isInBounds() || !accumulateGEPOffset(*GEP, Offset)) 2400 return nullptr; 2401 V = GEP->getPointerOperand(); 2402 } else if (Operator::getOpcode(V) == Instruction::BitCast) { 2403 V = cast<Operator>(V)->getOperand(0); 2404 } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) { 2405 if (GA->isInterposable()) 2406 break; 2407 V = GA->getAliasee(); 2408 } else { 2409 break; 2410 } 2411 assert(V->getType()->isPointerTy() && "Unexpected operand type!"); 2412 } while (Visited.insert(V).second); 2413 2414 Type *IdxPtrTy = DL.getIndexType(V->getType()); 2415 return cast<ConstantInt>(ConstantInt::get(IdxPtrTy, Offset)); 2416 } 2417 2418 /// Find dead blocks due to deleted CFG edges during inlining. 2419 /// 2420 /// If we know the successor of the current block, \p CurrBB, has to be \p 2421 /// NextBB, the other successors of \p CurrBB are dead if these successors have 2422 /// no live incoming CFG edges. If one block is found to be dead, we can 2423 /// continue growing the dead block list by checking the successors of the dead 2424 /// blocks to see if all their incoming edges are dead or not. 2425 void CallAnalyzer::findDeadBlocks(BasicBlock *CurrBB, BasicBlock *NextBB) { 2426 auto IsEdgeDead = [&](BasicBlock *Pred, BasicBlock *Succ) { 2427 // A CFG edge is dead if the predecessor is dead or the predecessor has a 2428 // known successor which is not the one under exam. 2429 return (DeadBlocks.count(Pred) || 2430 (KnownSuccessors[Pred] && KnownSuccessors[Pred] != Succ)); 2431 }; 2432 2433 auto IsNewlyDead = [&](BasicBlock *BB) { 2434 // If all the edges to a block are dead, the block is also dead. 2435 return (!DeadBlocks.count(BB) && 2436 llvm::all_of(predecessors(BB), 2437 [&](BasicBlock *P) { return IsEdgeDead(P, BB); })); 2438 }; 2439 2440 for (BasicBlock *Succ : successors(CurrBB)) { 2441 if (Succ == NextBB || !IsNewlyDead(Succ)) 2442 continue; 2443 SmallVector<BasicBlock *, 4> NewDead; 2444 NewDead.push_back(Succ); 2445 while (!NewDead.empty()) { 2446 BasicBlock *Dead = NewDead.pop_back_val(); 2447 if (DeadBlocks.insert(Dead)) 2448 // Continue growing the dead block lists. 2449 for (BasicBlock *S : successors(Dead)) 2450 if (IsNewlyDead(S)) 2451 NewDead.push_back(S); 2452 } 2453 } 2454 } 2455 2456 /// Analyze a call site for potential inlining. 2457 /// 2458 /// Returns true if inlining this call is viable, and false if it is not 2459 /// viable. It computes the cost and adjusts the threshold based on numerous 2460 /// factors and heuristics. If this method returns false but the computed cost 2461 /// is below the computed threshold, then inlining was forcibly disabled by 2462 /// some artifact of the routine. 
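/// At a high level the analysis proceeds in three steps: seed
/// SimplifiedValues, ConstantOffsetPtrs and SROAArgValues from the actual
/// arguments at this call site, walk only the basic blocks that remain live
/// under those simplifications in breadth-first order, and finally let
/// finalizeAnalysis() settle the accumulated cost against the threshold.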
2463 InlineResult CallAnalyzer::analyze() { 2464 ++NumCallsAnalyzed; 2465 2466 auto Result = onAnalysisStart(); 2467 if (!Result.isSuccess()) 2468 return Result; 2469 2470 if (F.empty()) 2471 return InlineResult::success(); 2472 2473 Function *Caller = CandidateCall.getFunction(); 2474 // Check if the caller function is recursive itself. 2475 for (User *U : Caller->users()) { 2476 CallBase *Call = dyn_cast<CallBase>(U); 2477 if (Call && Call->getFunction() == Caller) { 2478 IsCallerRecursive = true; 2479 break; 2480 } 2481 } 2482 2483 // Populate our simplified values by mapping from function arguments to call 2484 // arguments with known important simplifications. 2485 auto CAI = CandidateCall.arg_begin(); 2486 for (Argument &FAI : F.args()) { 2487 assert(CAI != CandidateCall.arg_end()); 2488 if (Constant *C = dyn_cast<Constant>(CAI)) 2489 SimplifiedValues[&FAI] = C; 2490 2491 Value *PtrArg = *CAI; 2492 if (ConstantInt *C = stripAndComputeInBoundsConstantOffsets(PtrArg)) { 2493 ConstantOffsetPtrs[&FAI] = std::make_pair(PtrArg, C->getValue()); 2494 2495 // We can SROA any pointer arguments derived from alloca instructions. 2496 if (auto *SROAArg = dyn_cast<AllocaInst>(PtrArg)) { 2497 SROAArgValues[&FAI] = SROAArg; 2498 onInitializeSROAArg(SROAArg); 2499 EnabledSROAAllocas.insert(SROAArg); 2500 } 2501 } 2502 ++CAI; 2503 } 2504 NumConstantArgs = SimplifiedValues.size(); 2505 NumConstantOffsetPtrArgs = ConstantOffsetPtrs.size(); 2506 NumAllocaArgs = SROAArgValues.size(); 2507 2508 // FIXME: If a caller has multiple calls to a callee, we end up recomputing 2509 // the ephemeral values multiple times (and they're completely determined by 2510 // the callee, so this is purely duplicate work). 2511 SmallPtrSet<const Value *, 32> EphValues; 2512 CodeMetrics::collectEphemeralValues(&F, &GetAssumptionCache(F), EphValues); 2513 2514 // The worklist of live basic blocks in the callee *after* inlining. We avoid 2515 // adding basic blocks of the callee which can be proven to be dead for this 2516 // particular call site in order to get more accurate cost estimates. This 2517 // requires a somewhat heavyweight iteration pattern: we need to walk the 2518 // basic blocks in a breadth-first order as we insert live successors. To 2519 // accomplish this, prioritizing for small iterations because we exit after 2520 // crossing our threshold, we use a small-size optimized SetVector. 2521 typedef SetVector<BasicBlock *, SmallVector<BasicBlock *, 16>, 2522 SmallPtrSet<BasicBlock *, 16>> 2523 BBSetVector; 2524 BBSetVector BBWorklist; 2525 BBWorklist.insert(&F.getEntryBlock()); 2526 2527 // Note that we *must not* cache the size, this loop grows the worklist. 2528 for (unsigned Idx = 0; Idx != BBWorklist.size(); ++Idx) { 2529 if (shouldStop()) 2530 break; 2531 2532 BasicBlock *BB = BBWorklist[Idx]; 2533 if (BB->empty()) 2534 continue; 2535 2536 onBlockStart(BB); 2537 2538 // Disallow inlining a blockaddress with uses other than strictly callbr. 2539 // A blockaddress only has defined behavior for an indirect branch in the 2540 // same function, and we do not currently support inlining indirect 2541 // branches. But, the inliner may not see an indirect branch that ends up 2542 // being dead code at a particular call site. If the blockaddress escapes 2543 // the function, e.g., via a global variable, inlining may lead to an 2544 // invalid cross-function reference. 2545 // FIXME: pr/39560: continue relaxing this overt restriction. 
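// For example (illustrative), if a block's address escapes through a global
// initializer such as
//   @tbl = global i8* blockaddress(@callee, %bb)
// the blockaddress would still refer to %bb in the original @callee after
// inlining, so such callees are rejected below.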
2546 if (BB->hasAddressTaken())
2547 for (User *U : BlockAddress::get(&*BB)->users())
2548 if (!isa<CallBrInst>(*U))
2549 return InlineResult::failure("blockaddress used outside of callbr");
2550
2551 // Analyze the cost of this block. If we blow through the threshold, this
2552 // returns a failure, and we can bail out.
2553 InlineResult IR = analyzeBlock(BB, EphValues);
2554 if (!IR.isSuccess())
2555 return IR;
2556
2557 Instruction *TI = BB->getTerminator();
2558
2559 // Add in the live successors by first checking whether we have a terminator
2560 // that may be simplified based on the values simplified by this call.
2561 if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
2562 if (BI->isConditional()) {
2563 Value *Cond = BI->getCondition();
2564 if (ConstantInt *SimpleCond =
2565 dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
2566 BasicBlock *NextBB = BI->getSuccessor(SimpleCond->isZero() ? 1 : 0);
2567 BBWorklist.insert(NextBB);
2568 KnownSuccessors[BB] = NextBB;
2569 findDeadBlocks(BB, NextBB);
2570 continue;
2571 }
2572 }
2573 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
2574 Value *Cond = SI->getCondition();
2575 if (ConstantInt *SimpleCond =
2576 dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
2577 BasicBlock *NextBB = SI->findCaseValue(SimpleCond)->getCaseSuccessor();
2578 BBWorklist.insert(NextBB);
2579 KnownSuccessors[BB] = NextBB;
2580 findDeadBlocks(BB, NextBB);
2581 continue;
2582 }
2583 }
2584
2585 // If we're unable to select a particular successor, just count all of
2586 // them.
2587 for (unsigned TIdx = 0, TSize = TI->getNumSuccessors(); TIdx != TSize;
2588 ++TIdx)
2589 BBWorklist.insert(TI->getSuccessor(TIdx));
2590
2591 onBlockAnalyzed(BB);
2592 }
2593
2594 bool OnlyOneCallAndLocalLinkage = F.hasLocalLinkage() && F.hasOneUse() &&
2595 &F == CandidateCall.getCalledFunction();
2596 // If this is a noduplicate call, we can still inline as long as
2597 // inlining this would cause the removal of the caller (so the instruction
2598 // is not actually duplicated, just moved).
2599 if (!OnlyOneCallAndLocalLinkage && ContainsNoDuplicateCall)
2600 return InlineResult::failure("noduplicate");
2601
2602 return finalizeAnalysis();
2603 }
2604
2605 void InlineCostCallAnalyzer::print() {
2606 #define DEBUG_PRINT_STAT(x) dbgs() << " " #x ": " << x << "\n"
2607 if (PrintInstructionComments)
2608 F.print(dbgs(), &Writer);
2609 DEBUG_PRINT_STAT(NumConstantArgs);
2610 DEBUG_PRINT_STAT(NumConstantOffsetPtrArgs);
2611 DEBUG_PRINT_STAT(NumAllocaArgs);
2612 DEBUG_PRINT_STAT(NumConstantPtrCmps);
2613 DEBUG_PRINT_STAT(NumConstantPtrDiffs);
2614 DEBUG_PRINT_STAT(NumInstructionsSimplified);
2615 DEBUG_PRINT_STAT(NumInstructions);
2616 DEBUG_PRINT_STAT(SROACostSavings);
2617 DEBUG_PRINT_STAT(SROACostSavingsLost);
2618 DEBUG_PRINT_STAT(LoadEliminationCost);
2619 DEBUG_PRINT_STAT(ContainsNoDuplicateCall);
2620 DEBUG_PRINT_STAT(Cost);
2621 DEBUG_PRINT_STAT(Threshold);
2622 #undef DEBUG_PRINT_STAT
2623 }
2624
2625 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2626 /// Dump stats about this call's analysis.
2627 LLVM_DUMP_METHOD void InlineCostCallAnalyzer::dump() { print(); }
2628 #endif
2629
2630 /// Test that there are no attribute conflicts between Caller and Callee
2631 /// that prevent inlining.
2632 static bool functionsHaveCompatibleAttributes(
2633 Function *Caller, Function *Callee, TargetTransformInfo &TTI,
2634 function_ref<const TargetLibraryInfo &(Function &)> &GetTLI) {
2635 // Note that CalleeTLI must be a copy, not a reference.
The legacy pass manager 2636 // caches the most recently created TLI in the TargetLibraryInfoWrapperPass 2637 // object, and always returns the same object (which is overwritten on each 2638 // GetTLI call). Therefore we copy the first result. 2639 auto CalleeTLI = GetTLI(*Callee); 2640 return TTI.areInlineCompatible(Caller, Callee) && 2641 GetTLI(*Caller).areInlineCompatible(CalleeTLI, 2642 InlineCallerSupersetNoBuiltin) && 2643 AttributeFuncs::areInlineCompatible(*Caller, *Callee); 2644 } 2645 2646 int llvm::getCallsiteCost(CallBase &Call, const DataLayout &DL) { 2647 int Cost = 0; 2648 for (unsigned I = 0, E = Call.arg_size(); I != E; ++I) { 2649 if (Call.isByValArgument(I)) { 2650 // We approximate the number of loads and stores needed by dividing the 2651 // size of the byval type by the target's pointer size. 2652 PointerType *PTy = cast<PointerType>(Call.getArgOperand(I)->getType()); 2653 unsigned TypeSize = DL.getTypeSizeInBits(Call.getParamByValType(I)); 2654 unsigned AS = PTy->getAddressSpace(); 2655 unsigned PointerSize = DL.getPointerSizeInBits(AS); 2656 // Ceiling division. 2657 unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize; 2658 2659 // If it generates more than 8 stores it is likely to be expanded as an 2660 // inline memcpy so we take that as an upper bound. Otherwise we assume 2661 // one load and one store per word copied. 2662 // FIXME: The maxStoresPerMemcpy setting from the target should be used 2663 // here instead of a magic number of 8, but it's not available via 2664 // DataLayout. 2665 NumStores = std::min(NumStores, 8U); 2666 2667 Cost += 2 * NumStores * InlineConstants::InstrCost; 2668 } else { 2669 // For non-byval arguments subtract off one instruction per call 2670 // argument. 2671 Cost += InlineConstants::InstrCost; 2672 } 2673 } 2674 // The call instruction also disappears after inlining. 
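// Illustrative example: a call with two plain arguments and one byval
// argument spanning three pointer-sized words costs
//   2 * InstrCost             (plain argument setup)
//   + 2 * 3 * InstrCost       (loads and stores for the byval copy)
//   + InstrCost + CallPenalty (the call itself, added below).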
2675 Cost += InlineConstants::InstrCost + InlineConstants::CallPenalty;
2676 return Cost;
2677 }
2678
2679 InlineCost llvm::getInlineCost(
2680 CallBase &Call, const InlineParams &Params, TargetTransformInfo &CalleeTTI,
2681 function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
2682 function_ref<const TargetLibraryInfo &(Function &)> GetTLI,
2683 function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
2684 ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE) {
2685 return getInlineCost(Call, Call.getCalledFunction(), Params, CalleeTTI,
2686 GetAssumptionCache, GetTLI, GetBFI, PSI, ORE);
2687 }
2688
2689 Optional<int> llvm::getInliningCostEstimate(
2690 CallBase &Call, TargetTransformInfo &CalleeTTI,
2691 function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
2692 function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
2693 ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE) {
2694 const InlineParams Params = {/* DefaultThreshold*/ 0,
2695 /*HintThreshold*/ {},
2696 /*ColdThreshold*/ {},
2697 /*OptSizeThreshold*/ {},
2698 /*OptMinSizeThreshold*/ {},
2699 /*HotCallSiteThreshold*/ {},
2700 /*LocallyHotCallSiteThreshold*/ {},
2701 /*ColdCallSiteThreshold*/ {},
2702 /*ComputeFullInlineCost*/ true,
2703 /*EnableDeferral*/ true};
2704
2705 InlineCostCallAnalyzer CA(*Call.getCalledFunction(), Call, Params, CalleeTTI,
2706 GetAssumptionCache, GetBFI, PSI, ORE, true,
2707 /*IgnoreThreshold*/ true);
2708 auto R = CA.analyze();
2709 if (!R.isSuccess())
2710 return None;
2711 return CA.getCost();
2712 }
2713
2714 Optional<InlineCostFeatures> llvm::getInliningCostFeatures(
2715 CallBase &Call, TargetTransformInfo &CalleeTTI,
2716 function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
2717 function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
2718 ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE) {
2719 InlineCostFeaturesAnalyzer CFA(CalleeTTI, GetAssumptionCache, GetBFI, PSI,
2720 ORE, *Call.getCalledFunction(), Call);
2721 auto R = CFA.analyze();
2722 if (!R.isSuccess())
2723 return None;
2724 return CFA.features();
2725 }
2726
2727 Optional<InlineResult> llvm::getAttributeBasedInliningDecision(
2728 CallBase &Call, Function *Callee, TargetTransformInfo &CalleeTTI,
2729 function_ref<const TargetLibraryInfo &(Function &)> GetTLI) {
2730
2731 // Cannot inline indirect calls.
2732 if (!Callee)
2733 return InlineResult::failure("indirect call");
2734
2735 // When a callee coroutine function is inlined into a caller coroutine
2736 // function before the coro-split pass, the coro-early pass cannot handle
2737 // it well. So we don't inline a coroutine function that has not yet been
2738 // split.
2739 if (Callee->isPresplitCoroutine())
2740 return InlineResult::failure("unsplit coroutine call");
2741
2742 // Never inline calls with byval arguments that do not have the alloca
2743 // address space. Since byval arguments can be replaced with a copy to an
2744 // alloca, the inlined code would need to be adjusted to handle that the
2745 // argument is in the alloca address space (so it is a little bit complicated
2746 // to solve).
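// For example (illustrative), on a target such as AMDGPU, where allocas live
// in address space 5, a byval argument pointing into address space 0 could
// not simply be replaced by a copy into an alloca, so it is rejected by the
// check below.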
2747 unsigned AllocaAS = Callee->getParent()->getDataLayout().getAllocaAddrSpace(); 2748 for (unsigned I = 0, E = Call.arg_size(); I != E; ++I) 2749 if (Call.isByValArgument(I)) { 2750 PointerType *PTy = cast<PointerType>(Call.getArgOperand(I)->getType()); 2751 if (PTy->getAddressSpace() != AllocaAS) 2752 return InlineResult::failure("byval arguments without alloca" 2753 " address space"); 2754 } 2755 2756 // Calls to functions with always-inline attributes should be inlined 2757 // whenever possible. 2758 if (Call.hasFnAttr(Attribute::AlwaysInline)) { 2759 auto IsViable = isInlineViable(*Callee); 2760 if (IsViable.isSuccess()) 2761 return InlineResult::success(); 2762 return InlineResult::failure(IsViable.getFailureReason()); 2763 } 2764 2765 // Never inline functions with conflicting attributes (unless callee has 2766 // always-inline attribute). 2767 Function *Caller = Call.getCaller(); 2768 if (!functionsHaveCompatibleAttributes(Caller, Callee, CalleeTTI, GetTLI)) 2769 return InlineResult::failure("conflicting attributes"); 2770 2771 // Don't inline this call if the caller has the optnone attribute. 2772 if (Caller->hasOptNone()) 2773 return InlineResult::failure("optnone attribute"); 2774 2775 // Don't inline a function that treats null pointer as valid into a caller 2776 // that does not have this attribute. 2777 if (!Caller->nullPointerIsDefined() && Callee->nullPointerIsDefined()) 2778 return InlineResult::failure("nullptr definitions incompatible"); 2779 2780 // Don't inline functions which can be interposed at link-time. 2781 if (Callee->isInterposable()) 2782 return InlineResult::failure("interposable"); 2783 2784 // Don't inline functions marked noinline. 2785 if (Callee->hasFnAttribute(Attribute::NoInline)) 2786 return InlineResult::failure("noinline function attribute"); 2787 2788 // Don't inline call sites marked noinline. 2789 if (Call.isNoInline()) 2790 return InlineResult::failure("noinline call site attribute"); 2791 2792 // Don't inline functions if one does not have any stack protector attribute 2793 // but the other does. 2794 if (Caller->hasStackProtectorFnAttr() && !Callee->hasStackProtectorFnAttr()) 2795 return InlineResult::failure( 2796 "stack protected caller but callee requested no stack protector"); 2797 if (Callee->hasStackProtectorFnAttr() && !Caller->hasStackProtectorFnAttr()) 2798 return InlineResult::failure( 2799 "stack protected callee but caller requested no stack protector"); 2800 2801 return None; 2802 } 2803 2804 InlineCost llvm::getInlineCost( 2805 CallBase &Call, Function *Callee, const InlineParams &Params, 2806 TargetTransformInfo &CalleeTTI, 2807 function_ref<AssumptionCache &(Function &)> GetAssumptionCache, 2808 function_ref<const TargetLibraryInfo &(Function &)> GetTLI, 2809 function_ref<BlockFrequencyInfo &(Function &)> GetBFI, 2810 ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE) { 2811 2812 auto UserDecision = 2813 llvm::getAttributeBasedInliningDecision(Call, Callee, CalleeTTI, GetTLI); 2814 2815 if (UserDecision.hasValue()) { 2816 if (UserDecision->isSuccess()) 2817 return llvm::InlineCost::getAlways("always inline attribute"); 2818 return llvm::InlineCost::getNever(UserDecision->getFailureReason()); 2819 } 2820 2821 LLVM_DEBUG(llvm::dbgs() << " Analyzing call of " << Callee->getName() 2822 << "... 
(caller:" << Call.getCaller()->getName() 2823 << ")\n"); 2824 2825 InlineCostCallAnalyzer CA(*Callee, Call, Params, CalleeTTI, 2826 GetAssumptionCache, GetBFI, PSI, ORE); 2827 InlineResult ShouldInline = CA.analyze(); 2828 2829 LLVM_DEBUG(CA.dump()); 2830 2831 // Always make cost benefit based decision explicit. 2832 // We use always/never here since threshold is not meaningful, 2833 // as it's not what drives cost-benefit analysis. 2834 if (CA.wasDecidedByCostBenefit()) { 2835 if (ShouldInline.isSuccess()) 2836 return InlineCost::getAlways("benefit over cost"); 2837 else 2838 return InlineCost::getNever("cost over benefit"); 2839 } 2840 2841 // Check if there was a reason to force inlining or no inlining. 2842 if (!ShouldInline.isSuccess() && CA.getCost() < CA.getThreshold()) 2843 return InlineCost::getNever(ShouldInline.getFailureReason()); 2844 if (ShouldInline.isSuccess() && CA.getCost() >= CA.getThreshold()) 2845 return InlineCost::getAlways("empty function"); 2846 2847 return llvm::InlineCost::get(CA.getCost(), CA.getThreshold()); 2848 } 2849 2850 InlineResult llvm::isInlineViable(Function &F) { 2851 bool ReturnsTwice = F.hasFnAttribute(Attribute::ReturnsTwice); 2852 for (BasicBlock &BB : F) { 2853 // Disallow inlining of functions which contain indirect branches. 2854 if (isa<IndirectBrInst>(BB.getTerminator())) 2855 return InlineResult::failure("contains indirect branches"); 2856 2857 // Disallow inlining of blockaddresses which are used by non-callbr 2858 // instructions. 2859 if (BB.hasAddressTaken()) 2860 for (User *U : BlockAddress::get(&BB)->users()) 2861 if (!isa<CallBrInst>(*U)) 2862 return InlineResult::failure("blockaddress used outside of callbr"); 2863 2864 for (auto &II : BB) { 2865 CallBase *Call = dyn_cast<CallBase>(&II); 2866 if (!Call) 2867 continue; 2868 2869 // Disallow recursive calls. 2870 Function *Callee = Call->getCalledFunction(); 2871 if (&F == Callee) 2872 return InlineResult::failure("recursive call"); 2873 2874 // Disallow calls which expose returns-twice to a function not previously 2875 // attributed as such. 2876 if (!ReturnsTwice && isa<CallInst>(Call) && 2877 cast<CallInst>(Call)->canReturnTwice()) 2878 return InlineResult::failure("exposes returns-twice attribute"); 2879 2880 if (Callee) 2881 switch (Callee->getIntrinsicID()) { 2882 default: 2883 break; 2884 case llvm::Intrinsic::icall_branch_funnel: 2885 // Disallow inlining of @llvm.icall.branch.funnel because current 2886 // backend can't separate call targets from call arguments. 2887 return InlineResult::failure( 2888 "disallowed inlining of @llvm.icall.branch.funnel"); 2889 case llvm::Intrinsic::localescape: 2890 // Disallow inlining functions that call @llvm.localescape. Doing this 2891 // correctly would require major changes to the inliner. 2892 return InlineResult::failure( 2893 "disallowed inlining of @llvm.localescape"); 2894 case llvm::Intrinsic::vastart: 2895 // Disallow inlining of functions that initialize VarArgs with 2896 // va_start. 2897 return InlineResult::failure( 2898 "contains VarArgs initialized with va_start"); 2899 } 2900 } 2901 } 2902 2903 return InlineResult::success(); 2904 } 2905 2906 // APIs to create InlineParams based on command line flags and/or other 2907 // parameters. 2908 2909 InlineParams llvm::getInlineParams(int Threshold) { 2910 InlineParams Params; 2911 2912 // This field is the threshold to use for a callee by default. 
This is
2913 // derived from one or more of:
2914 // * optimization or size-optimization levels,
2915 // * a value passed to the createFunctionInliningPass function, or
2916 // * the -inline-threshold flag.
2917 // If the -inline-threshold flag is explicitly specified, that is used
2918 // irrespective of anything else.
2919 if (InlineThreshold.getNumOccurrences() > 0)
2920 Params.DefaultThreshold = InlineThreshold;
2921 else
2922 Params.DefaultThreshold = Threshold;
2923
2924 // Set the HintThreshold knob from the -inlinehint-threshold.
2925 Params.HintThreshold = HintThreshold;
2926
2927 // Set the HotCallSiteThreshold knob from the -hot-callsite-threshold.
2928 Params.HotCallSiteThreshold = HotCallSiteThreshold;
2929
2930 // If the -locally-hot-callsite-threshold is explicitly specified, use it to
2931 // populate LocallyHotCallSiteThreshold. Later, we populate
2932 // Params.LocallyHotCallSiteThreshold from -locally-hot-callsite-threshold if
2933 // we know that the optimization level is O3 (in the getInlineParams variant
2934 // that takes the opt and size levels).
2935 // FIXME: Remove this check (and make the assignment unconditional) after
2936 // addressing size regression issues at O2.
2937 if (LocallyHotCallSiteThreshold.getNumOccurrences() > 0)
2938 Params.LocallyHotCallSiteThreshold = LocallyHotCallSiteThreshold;
2939
2940 // Set the ColdCallSiteThreshold knob from the
2941 // -inline-cold-callsite-threshold.
2942 Params.ColdCallSiteThreshold = ColdCallSiteThreshold;
2943
2944 // Set the OptMinSizeThreshold and OptSizeThreshold params only if the
2945 // -inline-threshold command-line option is not explicitly given. If that
2946 // option is present, then its value applies even for callees with size and
2947 // minsize attributes.
2948 // If the -inline-threshold is not specified, set the ColdThreshold from the
2949 // -inlinecold-threshold even if it is not explicitly passed. If
2950 // -inline-threshold is specified, then -inlinecold-threshold needs to be
2951 // explicitly specified to set the ColdThreshold knob.
2952 if (InlineThreshold.getNumOccurrences() == 0) {
2953 Params.OptMinSizeThreshold = InlineConstants::OptMinSizeThreshold;
2954 Params.OptSizeThreshold = InlineConstants::OptSizeThreshold;
2955 Params.ColdThreshold = ColdThreshold;
2956 } else if (ColdThreshold.getNumOccurrences() > 0) {
2957 Params.ColdThreshold = ColdThreshold;
2958 }
2959 return Params;
2960 }
2961
2962 InlineParams llvm::getInlineParams() {
2963 return getInlineParams(DefaultThreshold);
2964 }
2965
2966 // Compute the default threshold for inlining based on the opt level and the
2967 // size opt level.
2968 static int computeThresholdFromOptLevels(unsigned OptLevel,
2969 unsigned SizeOptLevel) {
2970 if (OptLevel > 2)
2971 return InlineConstants::OptAggressiveThreshold;
2972 if (SizeOptLevel == 1) // -Os
2973 return InlineConstants::OptSizeThreshold;
2974 if (SizeOptLevel == 2) // -Oz
2975 return InlineConstants::OptMinSizeThreshold;
2976 return DefaultThreshold;
2977 }
2978
2979 InlineParams llvm::getInlineParams(unsigned OptLevel, unsigned SizeOptLevel) {
2980 auto Params =
2981 getInlineParams(computeThresholdFromOptLevels(OptLevel, SizeOptLevel));
2982 // At O3, use the value of the -locally-hot-callsite-threshold option to
2983 // populate Params.LocallyHotCallSiteThreshold. Below O3, this flag has an
2984 // effect only when it is specified explicitly.
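// For example, getInlineParams(/*OptLevel=*/3, /*SizeOptLevel=*/0) always
// populates Params.LocallyHotCallSiteThreshold below, whereas at O2 and
// lower it remains unset unless -locally-hot-callsite-threshold was passed
// explicitly.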
2985 if (OptLevel > 2) 2986 Params.LocallyHotCallSiteThreshold = LocallyHotCallSiteThreshold; 2987 return Params; 2988 } 2989 2990 PreservedAnalyses 2991 InlineCostAnnotationPrinterPass::run(Function &F, 2992 FunctionAnalysisManager &FAM) { 2993 PrintInstructionComments = true; 2994 std::function<AssumptionCache &(Function &)> GetAssumptionCache = 2995 [&](Function &F) -> AssumptionCache & { 2996 return FAM.getResult<AssumptionAnalysis>(F); 2997 }; 2998 Module *M = F.getParent(); 2999 ProfileSummaryInfo PSI(*M); 3000 DataLayout DL(M); 3001 TargetTransformInfo TTI(DL); 3002 // FIXME: Redesign the usage of InlineParams to expand the scope of this pass. 3003 // In the current implementation, the type of InlineParams doesn't matter as 3004 // the pass serves only for verification of inliner's decisions. 3005 // We can add a flag which determines InlineParams for this run. Right now, 3006 // the default InlineParams are used. 3007 const InlineParams Params = llvm::getInlineParams(); 3008 for (BasicBlock &BB : F) { 3009 for (Instruction &I : BB) { 3010 if (CallInst *CI = dyn_cast<CallInst>(&I)) { 3011 Function *CalledFunction = CI->getCalledFunction(); 3012 if (!CalledFunction || CalledFunction->isDeclaration()) 3013 continue; 3014 OptimizationRemarkEmitter ORE(CalledFunction); 3015 InlineCostCallAnalyzer ICCA(*CalledFunction, *CI, Params, TTI, 3016 GetAssumptionCache, nullptr, &PSI, &ORE); 3017 ICCA.analyze(); 3018 OS << " Analyzing call of " << CalledFunction->getName() 3019 << "... (caller:" << CI->getCaller()->getName() << ")\n"; 3020 ICCA.print(); 3021 } 3022 } 3023 } 3024 return PreservedAnalyses::all(); 3025 } 3026