//===- InlineCost.cpp - Cost analysis for inliner -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements inline cost analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/InlineCost.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/AssemblyAnnotationWriter.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "inline-cost"

STATISTIC(NumCallsAnalyzed, "Number of call sites analyzed");

static cl::opt<int>
    DefaultThreshold("inlinedefault-threshold", cl::Hidden, cl::init(225),
                     cl::ZeroOrMore,
                     cl::desc("Default amount of inlining to perform"));

static cl::opt<bool> PrintInstructionComments(
    "print-instruction-comments", cl::Hidden, cl::init(false),
    cl::desc("Prints comments for instructions based on inline cost analysis"));

static cl::opt<int> InlineThreshold(
    "inline-threshold", cl::Hidden, cl::init(225), cl::ZeroOrMore,
    cl::desc("Control the amount of inlining to perform (default = 225)"));

static cl::opt<int> HintThreshold(
    "inlinehint-threshold", cl::Hidden, cl::init(325), cl::ZeroOrMore,
    cl::desc("Threshold for inlining functions with inline hint"));

static cl::opt<int>
    ColdCallSiteThreshold("inline-cold-callsite-threshold", cl::Hidden,
                          cl::init(45), cl::ZeroOrMore,
                          cl::desc("Threshold for inlining cold callsites"));

static cl::opt<bool> InlineEnableCostBenefitAnalysis(
    "inline-enable-cost-benefit-analysis", cl::Hidden, cl::init(false),
    cl::desc("Enable the cost-benefit analysis for the inliner"));

static cl::opt<int> InlineSavingsMultiplier(
    "inline-savings-multiplier", cl::Hidden, cl::init(8), cl::ZeroOrMore,
    cl::desc("Multiplier to multiply cycle savings by during inlining"));

static cl::opt<int>
    InlineSizeAllowance("inline-size-allowance", cl::Hidden, cl::init(100),
                        cl::ZeroOrMore,
                        cl::desc("The maximum size of a callee that gets "
                                 "inlined without sufficient cycle savings"));

// We introduce this threshold to help performance of instrumentation based
// PGO before we actually hook up inliner with analysis passes such as BPI and
// BFI.
static cl::opt<int> ColdThreshold(
    "inlinecold-threshold", cl::Hidden, cl::init(45), cl::ZeroOrMore,
    cl::desc("Threshold for inlining functions with cold attribute"));

static cl::opt<int>
    HotCallSiteThreshold("hot-callsite-threshold", cl::Hidden, cl::init(3000),
                         cl::ZeroOrMore,
                         cl::desc("Threshold for hot callsites"));

static cl::opt<int> LocallyHotCallSiteThreshold(
    "locally-hot-callsite-threshold", cl::Hidden, cl::init(525), cl::ZeroOrMore,
    cl::desc("Threshold for locally hot callsites"));

static cl::opt<int> ColdCallSiteRelFreq(
    "cold-callsite-rel-freq", cl::Hidden, cl::init(2), cl::ZeroOrMore,
    cl::desc("Maximum block frequency, expressed as a percentage of caller's "
             "entry frequency, for a callsite to be cold in the absence of "
             "profile information."));

static cl::opt<int> HotCallSiteRelFreq(
    "hot-callsite-rel-freq", cl::Hidden, cl::init(60), cl::ZeroOrMore,
    cl::desc("Minimum block frequency, expressed as a multiple of caller's "
             "entry frequency, for a callsite to be hot in the absence of "
             "profile information."));

static cl::opt<bool> OptComputeFullInlineCost(
    "inline-cost-full", cl::Hidden, cl::init(false), cl::ZeroOrMore,
    cl::desc("Compute the full inline cost of a call site even when the cost "
             "exceeds the threshold."));

static cl::opt<bool> InlineCallerSupersetNoBuiltin(
    "inline-caller-superset-nobuiltin", cl::Hidden, cl::init(true),
    cl::ZeroOrMore,
    cl::desc("Allow inlining when caller has a superset of callee's nobuiltin "
             "attributes."));

static cl::opt<bool> DisableGEPConstOperand(
    "disable-gep-const-evaluation", cl::Hidden, cl::init(false),
    cl::desc("Disables evaluation of GetElementPtr with constant operands"));

namespace {
class InlineCostCallAnalyzer;

// This struct is used to store information about the inline cost of a
// particular instruction.
struct InstructionCostDetail {
  int CostBefore = 0;
  int CostAfter = 0;
  int ThresholdBefore = 0;
  int ThresholdAfter = 0;

  int getThresholdDelta() const { return ThresholdAfter - ThresholdBefore; }

  int getCostDelta() const { return CostAfter - CostBefore; }

  bool hasThresholdChanged() const { return ThresholdAfter != ThresholdBefore; }
};

class InlineCostAnnotationWriter : public AssemblyAnnotationWriter {
private:
  InlineCostCallAnalyzer *const ICCA;

public:
  InlineCostAnnotationWriter(InlineCostCallAnalyzer *ICCA) : ICCA(ICCA) {}
  virtual void emitInstructionAnnot(const Instruction *I,
                                    formatted_raw_ostream &OS) override;
};

/// Carry out call site analysis, in order to evaluate inlinability.
/// NOTE: the type is currently used as implementation detail of functions such
/// as llvm::getInlineCost. Note the function_ref constructor parameters - the
/// expectation is that they come from the outer scope, from the wrapper
/// functions. If we want to support constructing CallAnalyzer objects where
/// lambdas are provided inline at construction, or where the object needs to
/// otherwise survive past the scope of the provided functions, we need to
/// revisit the argument types.
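/// A typical use, sketched from the wrapper functions elsewhere in this file
/// (illustrative, not verbatim):
///   InlineCostCallAnalyzer CA(Callee, Call, Params, TTI, GetAssumptionCache,
///                             GetBFI, PSI, ORE);
///   InlineResult ShouldInline = CA.analyze();
/// where every function_ref argument outlives CA.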
class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
  typedef InstVisitor<CallAnalyzer, bool> Base;
  friend class InstVisitor<CallAnalyzer, bool>;

protected:
  virtual ~CallAnalyzer() {}

  /// The TargetTransformInfo available for this compilation.
  const TargetTransformInfo &TTI;

  /// Getter for the cache of @llvm.assume intrinsics.
  function_ref<AssumptionCache &(Function &)> GetAssumptionCache;

  /// Getter for BlockFrequencyInfo.
  function_ref<BlockFrequencyInfo &(Function &)> GetBFI;

  /// Profile summary information.
  ProfileSummaryInfo *PSI;

  /// The called function.
  Function &F;

  // Cache the DataLayout since we use it a lot.
  const DataLayout &DL;

  /// The OptimizationRemarkEmitter available for this compilation.
  OptimizationRemarkEmitter *ORE;

  /// The candidate callsite being analyzed. Please do not use this to do
  /// analysis in the caller function; we want the inline cost query to be
  /// easily cacheable. Instead, use the cover function paramHasAttr.
  CallBase &CandidateCall;

  /// Extension points for handling callsite features.
  /// Called before a basic block is analyzed.
  virtual void onBlockStart(const BasicBlock *BB) {}

  /// Called after a basic block was analyzed.
  virtual void onBlockAnalyzed(const BasicBlock *BB) {}

  /// Called before an instruction is analyzed.
  virtual void onInstructionAnalysisStart(const Instruction *I) {}

  /// Called after an instruction was analyzed.
  virtual void onInstructionAnalysisFinish(const Instruction *I) {}

  /// Called at the end of the analysis of the callsite. Return the outcome of
  /// the analysis, i.e. 'InlineResult(true)' if the inlining may happen, or
  /// the reason it can't.
  virtual InlineResult finalizeAnalysis() { return InlineResult::success(); }

  /// Called when we're about to start processing a basic block, and every
  /// time we are done processing an instruction. Return true if there is no
  /// point in continuing the analysis (e.g. we've determined already the
  /// call site is too expensive to inline).
  virtual bool shouldStop() { return false; }

  /// Called before the analysis of the callee body starts (with callsite
  /// contexts propagated). It checks callsite-specific information. Return a
  /// reason analysis can't continue if that's the case, or 'true' if it may
  /// continue.
  virtual InlineResult onAnalysisStart() { return InlineResult::success(); }

  /// Called if the analysis engine decides SROA cannot be done for the given
  /// alloca.
  virtual void onDisableSROA(AllocaInst *Arg) {}

  /// Called when the analysis engine determines load elimination won't
  /// happen.
  virtual void onDisableLoadElimination() {}

  /// Called to account for a call.
  virtual void onCallPenalty() {}

  /// Called to account for the expectation that the inlining would result in
  /// a load elimination.
  virtual void onLoadEliminationOpportunity() {}

  /// Called to account for the cost of argument setup for the Call in the
  /// callee's body (not the callsite currently under analysis).
  virtual void onCallArgumentSetup(const CallBase &Call) {}

  /// Called to account for a load relative intrinsic.
  virtual void onLoadRelativeIntrinsic() {}

  /// Called to account for a lowered call.
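  /// For an indirect call whose target the analysis has resolved to a known
  /// function, \p F is the inferred callee and \p IsIndirectCall is true.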
  virtual void onLoweredCall(Function *F, CallBase &Call,
                             bool IsIndirectCall) {}

  /// Account for a jump table of given size. Return false to stop further
  /// processing of the switch instruction.
  virtual bool onJumpTable(unsigned JumpTableSize) { return true; }

  /// Account for a case cluster of given size. Return false to stop further
  /// processing of the instruction.
  virtual bool onCaseCluster(unsigned NumCaseCluster) { return true; }

  /// Called at the end of processing a switch instruction, with the given
  /// number of case clusters.
  virtual void onFinalizeSwitch(unsigned JumpTableSize,
                                unsigned NumCaseCluster) {}

  /// Called to account for any other instruction not specifically accounted
  /// for.
  virtual void onMissedSimplification() {}

  /// Start accounting potential benefits due to SROA for the given alloca.
  virtual void onInitializeSROAArg(AllocaInst *Arg) {}

  /// Account SROA savings for the AllocaInst value.
  virtual void onAggregateSROAUse(AllocaInst *V) {}

  bool handleSROA(Value *V, bool DoNotDisable) {
    // Check for SROA candidates in comparisons.
    if (auto *SROAArg = getSROAArgForValueOrNull(V)) {
      if (DoNotDisable) {
        onAggregateSROAUse(SROAArg);
        return true;
      }
      disableSROAForArg(SROAArg);
    }
    return false;
  }

  bool IsCallerRecursive = false;
  bool IsRecursiveCall = false;
  bool ExposesReturnsTwice = false;
  bool HasDynamicAlloca = false;
  bool ContainsNoDuplicateCall = false;
  bool HasReturn = false;
  bool HasIndirectBr = false;
  bool HasUninlineableIntrinsic = false;
  bool InitsVargArgs = false;

  /// Number of bytes allocated statically by the callee.
  uint64_t AllocatedSize = 0;
  unsigned NumInstructions = 0;
  unsigned NumVectorInstructions = 0;

  /// While we walk the potentially-inlined instructions, we build up and
  /// maintain a mapping of simplified values specific to this callsite. The
  /// idea is to propagate any special information we have about arguments to
  /// this call through the inlinable section of the function, and account for
  /// likely simplifications post-inlining. The most important aspect we track
  /// is CFG altering simplifications -- when we prove a basic block dead,
  /// that can cause dramatic shifts in the cost of inlining a function.
  DenseMap<Value *, Constant *> SimplifiedValues;

  /// Keep track of the values which map back (through function arguments) to
  /// allocas on the caller stack which could be simplified through SROA.
  DenseMap<Value *, AllocaInst *> SROAArgValues;

  /// Keep track of Allocas for which we believe we may get SROA optimization.
  DenseSet<AllocaInst *> EnabledSROAAllocas;

  /// Keep track of values which map to a pointer base and constant offset.
  DenseMap<Value *, std::pair<Value *, APInt>> ConstantOffsetPtrs;

  /// Keep track of dead blocks due to the constant arguments.
  SetVector<BasicBlock *> DeadBlocks;

  /// The mapping of the blocks to their known unique successors due to the
  /// constant arguments.
  DenseMap<BasicBlock *, BasicBlock *> KnownSuccessors;

  /// Model the elimination of repeated loads that is expected to happen
  /// whenever we simplify away the stores that would otherwise cause them to
  /// be loads.
  bool EnableLoadElimination;
  SmallPtrSet<Value *, 16> LoadAddrSet;

  AllocaInst *getSROAArgForValueOrNull(Value *V) const {
    auto It = SROAArgValues.find(V);
    if (It == SROAArgValues.end() || EnabledSROAAllocas.count(It->second) == 0)
      return nullptr;
    return It->second;
  }

  // Custom simplification helper routines.
  bool isAllocaDerivedArg(Value *V);
  void disableSROAForArg(AllocaInst *SROAArg);
  void disableSROA(Value *V);
  void findDeadBlocks(BasicBlock *CurrBB, BasicBlock *NextBB);
  void disableLoadElimination();
  bool isGEPFree(GetElementPtrInst &GEP);
  bool canFoldInboundsGEP(GetElementPtrInst &I);
  bool accumulateGEPOffset(GEPOperator &GEP, APInt &Offset);
  bool simplifyCallSite(Function *F, CallBase &Call);
  template <typename Callable>
  bool simplifyInstruction(Instruction &I, Callable Evaluate);
  ConstantInt *stripAndComputeInBoundsConstantOffsets(Value *&V);

  /// Return true if the given argument to the function being considered for
  /// inlining has the given attribute set either at the call site or the
  /// function declaration. Primarily used to inspect call site specific
  /// attributes since these can be more precise than the ones on the callee
  /// itself.
  bool paramHasAttr(Argument *A, Attribute::AttrKind Attr);

  /// Return true if the given value is known non null within the callee if
  /// inlined through this particular callsite.
  bool isKnownNonNullInCallee(Value *V);

  /// Return true if size growth is allowed when inlining the callee at
  /// \p Call.
  bool allowSizeGrowth(CallBase &Call);

  // Custom analysis routines.
  InlineResult analyzeBlock(BasicBlock *BB,
                            SmallPtrSetImpl<const Value *> &EphValues);

  // Disable several entry points to the visitor so we don't accidentally use
  // them by declaring but not defining them here.
  void visit(Module *);
  void visit(Module &);
  void visit(Function *);
  void visit(Function &);
  void visit(BasicBlock *);
  void visit(BasicBlock &);

  // Provide base case for our instruction visit.
  bool visitInstruction(Instruction &I);

  // Our visit overrides.
  bool visitAlloca(AllocaInst &I);
  bool visitPHI(PHINode &I);
  bool visitGetElementPtr(GetElementPtrInst &I);
  bool visitBitCast(BitCastInst &I);
  bool visitPtrToInt(PtrToIntInst &I);
  bool visitIntToPtr(IntToPtrInst &I);
  bool visitCastInst(CastInst &I);
  bool visitUnaryInstruction(UnaryInstruction &I);
  bool visitCmpInst(CmpInst &I);
  bool visitSub(BinaryOperator &I);
  bool visitBinaryOperator(BinaryOperator &I);
  bool visitFNeg(UnaryOperator &I);
  bool visitLoad(LoadInst &I);
  bool visitStore(StoreInst &I);
  bool visitExtractValue(ExtractValueInst &I);
  bool visitInsertValue(InsertValueInst &I);
  bool visitCallBase(CallBase &Call);
  bool visitReturnInst(ReturnInst &RI);
  bool visitBranchInst(BranchInst &BI);
  bool visitSelectInst(SelectInst &SI);
  bool visitSwitchInst(SwitchInst &SI);
  bool visitIndirectBrInst(IndirectBrInst &IBI);
  bool visitResumeInst(ResumeInst &RI);
  bool visitCleanupReturnInst(CleanupReturnInst &RI);
  bool visitCatchReturnInst(CatchReturnInst &RI);
  bool visitUnreachableInst(UnreachableInst &I);

public:
  CallAnalyzer(
      Function &Callee, CallBase &Call, const TargetTransformInfo &TTI,
      function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
      function_ref<BlockFrequencyInfo &(Function &)> GetBFI = nullptr,
      ProfileSummaryInfo *PSI = nullptr,
      OptimizationRemarkEmitter *ORE = nullptr)
      : TTI(TTI), GetAssumptionCache(GetAssumptionCache), GetBFI(GetBFI),
        PSI(PSI), F(Callee), DL(F.getParent()->getDataLayout()), ORE(ORE),
        CandidateCall(Call), EnableLoadElimination(true) {}

  InlineResult analyze();

  Optional<Constant *> getSimplifiedValue(Instruction *I) {
    if (SimplifiedValues.find(I) != SimplifiedValues.end())
      return SimplifiedValues[I];
    return None;
  }

  // Keep a bunch of stats about the cost savings found so we can print them
  // out when debugging.
  unsigned NumConstantArgs = 0;
  unsigned NumConstantOffsetPtrArgs = 0;
  unsigned NumAllocaArgs = 0;
  unsigned NumConstantPtrCmps = 0;
  unsigned NumConstantPtrDiffs = 0;
  unsigned NumInstructionsSimplified = 0;

  void dump();
};

/// FIXME: if it is necessary to derive from InlineCostCallAnalyzer, note
/// the FIXME in onLoweredCall, when instantiating an InlineCostCallAnalyzer.
class InlineCostCallAnalyzer final : public CallAnalyzer {
  const int CostUpperBound = INT_MAX - InlineConstants::InstrCost - 1;
  const bool ComputeFullInlineCost;
  int LoadEliminationCost = 0;

  /// Bonus to be applied when percentage of vector instructions in callee is
  /// high (see more details in updateThreshold).
  int VectorBonus = 0;

  /// Bonus to be applied when the callee has only one reachable basic block.
  int SingleBBBonus = 0;

  /// Tunable parameters that control the analysis.
  const InlineParams &Params;

  // This DenseMap stores the delta change in cost and threshold after
  // accounting for the given instruction. The map is filled only with the
  // flag PrintInstructionComments on.
  DenseMap<const Instruction *, InstructionCostDetail> InstructionCostDetailMap;

  /// Upper bound for the inlining cost. Bonuses are being applied to account
  /// for speculative "expected profit" of the inlining decision.
  int Threshold = 0;

  /// Attempt to evaluate indirect calls to boost the inline cost.
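  /// When set, onLoweredCall below analyzes a resolved indirect call target
  /// as if inlining it under InlineConstants::IndirectCallThreshold, and any
  /// headroom (threshold minus cost) is credited back as a bonus.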
  const bool BoostIndirectCalls;

  /// Ignore the threshold when finalizing analysis.
  const bool IgnoreThreshold;

  // True if the cost-benefit-analysis-based inliner is enabled.
  const bool CostBenefitAnalysisEnabled;

  /// Inlining cost measured in abstract units, accounts for all the
  /// instructions expected to be executed for a given function invocation.
  /// Instructions that are statically proven to be dead based on call-site
  /// arguments are not counted here.
  int Cost = 0;

  // The cumulative cost at the beginning of the basic block being analyzed.
  // At the end of analyzing each basic block, "Cost - CostAtBBStart"
  // represents the size of that basic block.
  int CostAtBBStart = 0;

  // The static size of live but cold basic blocks. This is "static" in the
  // sense that it's not weighted by profile counts at all.
  int ColdSize = 0;

  // Whether inlining is decided by cost-benefit analysis.
  bool DecidedByCostBenefit = false;

  bool SingleBB = true;

  unsigned SROACostSavings = 0;
  unsigned SROACostSavingsLost = 0;

  /// The mapping of caller Alloca values to their accumulated cost savings.
  /// If we have to disable SROA for one of the allocas, this tells us how
  /// much cost must be added.
  DenseMap<AllocaInst *, int> SROAArgCosts;

  /// Return true if \p Call is a cold callsite.
  bool isColdCallSite(CallBase &Call, BlockFrequencyInfo *CallerBFI);

  /// Update Threshold based on callsite properties such as callee
  /// attributes and callee hotness for PGO builds. The Callee is explicitly
  /// passed to support analyzing indirect calls whose target is inferred by
  /// analysis.
  void updateThreshold(CallBase &Call, Function &Callee);

  /// Return a higher threshold if \p Call is a hot callsite.
  Optional<int> getHotCallSiteThreshold(CallBase &Call,
                                        BlockFrequencyInfo *CallerBFI);

  /// Handle a capped 'int' increment for Cost.
  void addCost(int64_t Inc, int64_t UpperBound = INT_MAX) {
    assert(UpperBound > 0 && UpperBound <= INT_MAX && "invalid upper bound");
    Cost = (int)std::min(UpperBound, Cost + Inc);
  }

  void onDisableSROA(AllocaInst *Arg) override {
    auto CostIt = SROAArgCosts.find(Arg);
    if (CostIt == SROAArgCosts.end())
      return;
    addCost(CostIt->second);
    SROACostSavings -= CostIt->second;
    SROACostSavingsLost += CostIt->second;
    SROAArgCosts.erase(CostIt);
  }

  void onDisableLoadElimination() override {
    addCost(LoadEliminationCost);
    LoadEliminationCost = 0;
  }

  void onCallPenalty() override { addCost(InlineConstants::CallPenalty); }

  void onCallArgumentSetup(const CallBase &Call) override {
    // Pay the price of the argument setup. We account for the average 1
    // instruction per call argument setup here.
    addCost(Call.arg_size() * InlineConstants::InstrCost);
  }

  void onLoadRelativeIntrinsic() override {
    // This is normally lowered to 4 LLVM instructions.
    addCost(3 * InlineConstants::InstrCost);
  }

  void onLoweredCall(Function *F, CallBase &Call,
                     bool IsIndirectCall) override {
    // We account for the average 1 instruction per call argument setup here.
    addCost(Call.arg_size() * InlineConstants::InstrCost);

    // If we have a constant that we are calling as a function, we can peer
    // through it and see the function target. This happens not infrequently
    // during devirtualization and so we want to give it a hefty bonus for
    // inlining, but cap that bonus in the event that inlining wouldn't pan
    // out. Pretend to inline the function, with a custom threshold.
    if (IsIndirectCall && BoostIndirectCalls) {
      auto IndirectCallParams = Params;
      IndirectCallParams.DefaultThreshold =
          InlineConstants::IndirectCallThreshold;
      /// FIXME: if InlineCostCallAnalyzer is derived from, this may need
      /// to instantiate the derived class.
      InlineCostCallAnalyzer CA(*F, Call, IndirectCallParams, TTI,
                                GetAssumptionCache, GetBFI, PSI, ORE, false);
      if (CA.analyze().isSuccess()) {
        // We were able to inline the indirect call! Subtract the cost from
        // the threshold to get the bonus we want to apply, but don't go
        // below zero.
        Cost -= std::max(0, CA.getThreshold() - CA.getCost());
      }
    } else
      // Otherwise simply add the cost for merely making the call.
      addCost(InlineConstants::CallPenalty);
  }

  void onFinalizeSwitch(unsigned JumpTableSize,
                        unsigned NumCaseCluster) override {
    // If suitable for a jump table, consider the cost for the table size and
    // branch to destination.
    // Cost increments in this function are capped at CostUpperBound.
    if (JumpTableSize) {
      int64_t JTCost = (int64_t)JumpTableSize * InlineConstants::InstrCost +
                       4 * InlineConstants::InstrCost;

      addCost(JTCost, (int64_t)CostUpperBound);
      return;
    }

    // Considering forming a binary search, we should find the number of
    // nodes which is the same as the number of comparisons when lowered. For
    // a given number of clusters, n, we can define a recursive function,
    // f(n), to find the number of nodes in the tree. The recursion is:
    //   f(n) = 1 + f(n/2) + f(n - n/2), when n > 3,
    //   f(n) = n, when n <= 3.
    // This will lead to a binary tree where the leaf should be either f(2)
    // or f(3) when n > 3. So, the number of comparisons from leaves should
    // be n, while the number of non-leaf nodes should be:
    //   2^(log2(n) - 1) - 1
    //   = 2^log2(n) * 2^-1 - 1
    //   = n / 2 - 1.
    // Considering comparisons from leaf and non-leaf nodes, we can estimate
    // the number of comparisons in a simple closed form:
    //   n + n / 2 - 1 = n * 3 / 2 - 1
    if (NumCaseCluster <= 3) {
      // Suppose a comparison includes one compare and one conditional
      // branch.
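      // Under that assumption each cluster costs 2 * InstrCost here; e.g.
      // NumCaseCluster == 3 is modeled as 3 compares plus 3 branches. As a
      // check of the closed form above: f(8) = 1 + 2 * f(4)
      // = 1 + 2 * (1 + 2 * f(2)) = 11 = 8 * 3 / 2 - 1.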
      addCost(NumCaseCluster * 2 * InlineConstants::InstrCost);
      return;
    }

    int64_t ExpectedNumberOfCompare = 3 * (int64_t)NumCaseCluster / 2 - 1;
    int64_t SwitchCost =
        ExpectedNumberOfCompare * 2 * InlineConstants::InstrCost;

    addCost(SwitchCost, (int64_t)CostUpperBound);
  }

  void onMissedSimplification() override {
    addCost(InlineConstants::InstrCost);
  }

  void onInitializeSROAArg(AllocaInst *Arg) override {
    assert(Arg != nullptr &&
           "Should not initialize SROA costs for null value.");
    SROAArgCosts[Arg] = 0;
  }

  void onAggregateSROAUse(AllocaInst *SROAArg) override {
    auto CostIt = SROAArgCosts.find(SROAArg);
    assert(CostIt != SROAArgCosts.end() &&
           "expected this argument to have a cost");
    CostIt->second += InlineConstants::InstrCost;
    SROACostSavings += InlineConstants::InstrCost;
  }

  void onBlockStart(const BasicBlock *BB) override { CostAtBBStart = Cost; }

  void onBlockAnalyzed(const BasicBlock *BB) override {
    if (CostBenefitAnalysisEnabled) {
      // Keep track of the static size of live but cold basic blocks. For
      // now, we define a cold basic block to be one that's never executed.
      assert(GetBFI && "GetBFI must be available");
      BlockFrequencyInfo *BFI = &(GetBFI(F));
      assert(BFI && "BFI must be available");
      auto ProfileCount = BFI->getBlockProfileCount(BB);
      assert(ProfileCount.hasValue());
      if (ProfileCount.getValue() == 0)
        ColdSize += Cost - CostAtBBStart;
    }

    auto *TI = BB->getTerminator();
    // If we had any successors at this point, then post-inlining is likely
    // to have them as well. Note that we assume any basic blocks which
    // existed due to branches or switches which folded above will also fold
    // after inlining.
    if (SingleBB && TI->getNumSuccessors() > 1) {
      // Take off the bonus we applied to the threshold.
      Threshold -= SingleBBBonus;
      SingleBB = false;
    }
  }

  void onInstructionAnalysisStart(const Instruction *I) override {
    // This function is called to store the initial cost of inlining before
    // the given instruction was assessed.
    if (!PrintInstructionComments)
      return;
    InstructionCostDetailMap[I].CostBefore = Cost;
    InstructionCostDetailMap[I].ThresholdBefore = Threshold;
  }

  void onInstructionAnalysisFinish(const Instruction *I) override {
    // This function is called to find new values of cost and threshold after
    // the instruction has been assessed.
    if (!PrintInstructionComments)
      return;
    InstructionCostDetailMap[I].CostAfter = Cost;
    InstructionCostDetailMap[I].ThresholdAfter = Threshold;
  }

  bool isCostBenefitAnalysisEnabled() {
    if (!PSI || !PSI->hasProfileSummary())
      return false;

    if (!GetBFI)
      return false;

    if (InlineEnableCostBenefitAnalysis.getNumOccurrences()) {
      // Honor the explicit request from the user.
      if (!InlineEnableCostBenefitAnalysis)
        return false;
    } else {
      // Otherwise, require instrumentation profile.
      if (!PSI->hasInstrumentationProfile())
        return false;
    }

    auto *Caller = CandidateCall.getParent()->getParent();
    if (!Caller->getEntryCount())
      return false;

    BlockFrequencyInfo *CallerBFI = &(GetBFI(*Caller));
    if (!CallerBFI)
      return false;

    // For now, limit to hot call site.
    if (!PSI->isHotCallSite(CandidateCall, CallerBFI))
      return false;

    // Make sure we have a nonzero entry count.
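    // (costBenefitAnalysis below divides the aggregate cycle savings by the
    // entry count to obtain per-call savings, so a zero count is rejected.)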
    auto EntryCount = F.getEntryCount();
    if (!EntryCount || !EntryCount.getCount())
      return false;

    BlockFrequencyInfo *CalleeBFI = &(GetBFI(F));
    if (!CalleeBFI)
      return false;

    return true;
  }

  // Determine whether we should inline the given call site, taking into
  // account both the size cost and the cycle savings. Return None if we
  // don't have sufficient profiling information to decide.
  Optional<bool> costBenefitAnalysis() {
    if (!CostBenefitAnalysisEnabled)
      return None;

    // buildInlinerPipeline in the pass builder sets HotCallSiteThreshold to
    // 0 for the prelink phase of the AutoFDO + ThinLTO build. Honor the
    // logic by falling back to the cost-based metric.
    // TODO: Improve this hacky condition.
    if (Threshold == 0)
      return None;

    assert(GetBFI);
    BlockFrequencyInfo *CalleeBFI = &(GetBFI(F));
    assert(CalleeBFI);

    // The cycle savings expressed as the sum of InlineConstants::InstrCost
    // multiplied by the estimated dynamic count of each instruction we can
    // avoid. Savings come from the call site cost, such as argument setup
    // and the call instruction, as well as the instructions that are folded.
    //
    // We use a 128-bit APInt here to avoid potential overflow. This variable
    // should stay well below 10^24 (or 2^80) in practice. This "worst" case
    // assumes that we can avoid or fold a billion instructions, each with a
    // profile count of 10^15 -- roughly the number of cycles for a 24-hour
    // period on a 4GHz machine.
    APInt CycleSavings(128, 0);

    for (auto &BB : F) {
      APInt CurrentSavings(128, 0);
      for (auto &I : BB) {
        if (BranchInst *BI = dyn_cast<BranchInst>(&I)) {
          // Count a conditional branch as savings if it becomes
          // unconditional.
          if (BI->isConditional() &&
              dyn_cast_or_null<ConstantInt>(
                  SimplifiedValues.lookup(BI->getCondition()))) {
            CurrentSavings += InlineConstants::InstrCost;
          }
        } else if (Value *V = dyn_cast<Value>(&I)) {
          // Count an instruction as savings if we can fold it.
          if (SimplifiedValues.count(V)) {
            CurrentSavings += InlineConstants::InstrCost;
          }
        }
      }

      auto ProfileCount = CalleeBFI->getBlockProfileCount(&BB);
      assert(ProfileCount.hasValue());
      CurrentSavings *= ProfileCount.getValue();
      CycleSavings += CurrentSavings;
    }

    // Compute the cycle savings per call.
    auto EntryProfileCount = F.getEntryCount();
    assert(EntryProfileCount.hasValue() && EntryProfileCount.getCount());
    auto EntryCount = EntryProfileCount.getCount();
    CycleSavings += EntryCount / 2;
    CycleSavings = CycleSavings.udiv(EntryCount);

    // Compute the total savings for the call site.
    auto *CallerBB = CandidateCall.getParent();
    BlockFrequencyInfo *CallerBFI = &(GetBFI(*(CallerBB->getParent())));
    CycleSavings += getCallsiteCost(this->CandidateCall, DL);
    CycleSavings *= CallerBFI->getBlockProfileCount(CallerBB).getValue();

    // Remove the cost of the cold basic blocks.
    int Size = Cost - ColdSize;

    // Allow tiny callees to be inlined regardless of whether they meet the
    // savings threshold.
    Size = Size > InlineSizeAllowance ? Size - InlineSizeAllowance : 1;

    // Return true if the savings justify the cost of inlining.
    // Specifically, we evaluate the following inequality:
    //
    //  CycleSavings      PSI->getOrCompHotCountThreshold()
    // --------------  >=  -----------------------------------
    //       Size              InlineSavingsMultiplier
    //
    // Note that the left hand side is specific to a call site. The right
    // hand side is a constant for the entire executable.
    APInt LHS = CycleSavings;
    LHS *= InlineSavingsMultiplier;
    APInt RHS(128, PSI->getOrCompHotCountThreshold());
    RHS *= Size;
    return LHS.uge(RHS);
  }

  InlineResult finalizeAnalysis() override {
    // Loops generally act a lot like calls in that they act like barriers to
    // movement, require a certain amount of setup, etc. So when optimising
    // for size, we penalise any call sites that perform loops. We do this
    // after all other costs here, so will likely only be dealing with
    // relatively small functions (and hence DT and LI will hopefully be
    // cheap).
    auto *Caller = CandidateCall.getFunction();
    if (Caller->hasMinSize()) {
      DominatorTree DT(F);
      LoopInfo LI(DT);
      int NumLoops = 0;
      for (Loop *L : LI) {
        // Ignore loops that will not be executed.
        if (DeadBlocks.count(L->getHeader()))
          continue;
        NumLoops++;
      }
      addCost(NumLoops * InlineConstants::CallPenalty);
    }

    // We applied the maximum possible vector bonus at the beginning. Now,
    // subtract the excess bonus, if any, from the Threshold before
    // comparing against Cost.
    if (NumVectorInstructions <= NumInstructions / 10)
      Threshold -= VectorBonus;
    else if (NumVectorInstructions <= NumInstructions / 2)
      Threshold -= VectorBonus / 2;

    if (auto Result = costBenefitAnalysis()) {
      DecidedByCostBenefit = true;
      if (Result.getValue())
        return InlineResult::success();
      else
        return InlineResult::failure("Cost over threshold.");
    }

    if (IgnoreThreshold || Cost < std::max(1, Threshold))
      return InlineResult::success();
    return InlineResult::failure("Cost over threshold.");
  }

  bool shouldStop() override {
    // Bail out the moment we cross the threshold. This means we'll
    // under-count the cost, but only when undercounting doesn't matter.
    return !IgnoreThreshold && Cost >= Threshold && !ComputeFullInlineCost;
  }

  void onLoadEliminationOpportunity() override {
    LoadEliminationCost += InlineConstants::InstrCost;
  }

  InlineResult onAnalysisStart() override {
    // Perform some tweaks to the cost and threshold based on the direct
    // callsite information.

    // We want to more aggressively inline vector-dense kernels, so up the
    // threshold, and we'll lower it if the % of vector instructions gets too
    // low. Note that these bonuses are somewhat arbitrary and evolved over
    // time by accident as much as because they are principled bonuses.
    //
    // FIXME: It would be nice to remove all such bonuses. At least it would
    // be nice to base the bonus values on something more scientific.
    assert(NumInstructions == 0);
    assert(NumVectorInstructions == 0);

    // Update the threshold based on callsite properties.
    updateThreshold(CandidateCall, F);

    // While Threshold depends on commandline options that can take negative
    // values, we want to enforce the invariant that the computed threshold
    // and bonuses are non-negative.
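    // (With the default threshold of 225 and SingleBBBonusPercent of 50, for
    // example, updateThreshold leaves SingleBBBonus at 112.)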
    assert(Threshold >= 0);
    assert(SingleBBBonus >= 0);
    assert(VectorBonus >= 0);

    // Speculatively apply all possible bonuses to Threshold. If cost exceeds
    // this Threshold any time, and cost cannot decrease, we can stop
    // processing the rest of the function body.
    Threshold += (SingleBBBonus + VectorBonus);

    // Give out bonuses for the callsite, as the instructions setting them up
    // will be gone after inlining.
    addCost(-getCallsiteCost(this->CandidateCall, DL));

    // If this function uses the coldcc calling convention, prefer not to
    // inline it.
    if (F.getCallingConv() == CallingConv::Cold)
      Cost += InlineConstants::ColdccPenalty;

    // Check if we're done. This can happen due to bonuses and penalties.
    if (Cost >= Threshold && !ComputeFullInlineCost)
      return InlineResult::failure("high cost");

    return InlineResult::success();
  }

public:
  InlineCostCallAnalyzer(
      Function &Callee, CallBase &Call, const InlineParams &Params,
      const TargetTransformInfo &TTI,
      function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
      function_ref<BlockFrequencyInfo &(Function &)> GetBFI = nullptr,
      ProfileSummaryInfo *PSI = nullptr,
      OptimizationRemarkEmitter *ORE = nullptr, bool BoostIndirect = true,
      bool IgnoreThreshold = false)
      : CallAnalyzer(Callee, Call, TTI, GetAssumptionCache, GetBFI, PSI, ORE),
        ComputeFullInlineCost(OptComputeFullInlineCost ||
                              Params.ComputeFullInlineCost || ORE ||
                              isCostBenefitAnalysisEnabled()),
        Params(Params), Threshold(Params.DefaultThreshold),
        BoostIndirectCalls(BoostIndirect), IgnoreThreshold(IgnoreThreshold),
        CostBenefitAnalysisEnabled(isCostBenefitAnalysisEnabled()),
        Writer(this) {}

  /// Annotation Writer for instruction details.
  InlineCostAnnotationWriter Writer;

  void dump();

  // Prints the same analysis as dump(), but its definition is not dependent
  // on the build.
  void print();

  Optional<InstructionCostDetail> getCostDetails(const Instruction *I) {
    if (InstructionCostDetailMap.find(I) != InstructionCostDetailMap.end())
      return InstructionCostDetailMap[I];
    return None;
  }

  virtual ~InlineCostCallAnalyzer() {}
  int getThreshold() { return Threshold; }
  int getCost() { return Cost; }
  bool wasDecidedByCostBenefit() { return DecidedByCostBenefit; }
};
} // namespace

/// Test whether the given value is an Alloca-derived function argument.
bool CallAnalyzer::isAllocaDerivedArg(Value *V) {
  return SROAArgValues.count(V);
}

void CallAnalyzer::disableSROAForArg(AllocaInst *SROAArg) {
  onDisableSROA(SROAArg);
  EnabledSROAAllocas.erase(SROAArg);
  disableLoadElimination();
}

void InlineCostAnnotationWriter::emitInstructionAnnot(
    const Instruction *I, formatted_raw_ostream &OS) {
  // The cost of inlining the given instruction is always printed.
  // The threshold delta is printed only when it is non-zero, which happens
  // when we decided to give a bonus at a particular instruction.
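  // A typical annotation emitted below looks like (values illustrative):
  //   ; cost before = 20, cost after = 25, threshold before = 225,
  //     threshold after = 225, cost delta = 5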
  Optional<InstructionCostDetail> Record = ICCA->getCostDetails(I);
  if (!Record)
    OS << "; No analysis for the instruction";
  else {
    OS << "; cost before = " << Record->CostBefore
       << ", cost after = " << Record->CostAfter
       << ", threshold before = " << Record->ThresholdBefore
       << ", threshold after = " << Record->ThresholdAfter << ", ";
    OS << "cost delta = " << Record->getCostDelta();
    if (Record->hasThresholdChanged())
      OS << ", threshold delta = " << Record->getThresholdDelta();
  }
  auto C = ICCA->getSimplifiedValue(const_cast<Instruction *>(I));
  if (C) {
    OS << ", simplified to ";
    C.getValue()->print(OS, true);
  }
  OS << "\n";
}

/// If 'V' maps to a SROA candidate, disable SROA for it.
void CallAnalyzer::disableSROA(Value *V) {
  if (auto *SROAArg = getSROAArgForValueOrNull(V)) {
    disableSROAForArg(SROAArg);
  }
}

void CallAnalyzer::disableLoadElimination() {
  if (EnableLoadElimination) {
    onDisableLoadElimination();
    EnableLoadElimination = false;
  }
}

/// Accumulate a constant GEP offset into an APInt if possible.
///
/// Returns false if unable to compute the offset for any reason. Respects any
/// simplified values known during the analysis of this callsite.
bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
  unsigned IntPtrWidth = DL.getIndexTypeSizeInBits(GEP.getType());
  assert(IntPtrWidth == Offset.getBitWidth());

  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
    if (!OpC)
      if (Constant *SimpleOp = SimplifiedValues.lookup(GTI.getOperand()))
        OpC = dyn_cast<ConstantInt>(SimpleOp);
    if (!OpC)
      return false;
    if (OpC->isZero())
      continue;

    // Handle a struct index, which adds its field offset to the pointer.
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      unsigned ElementIdx = OpC->getZExtValue();
      const StructLayout *SL = DL.getStructLayout(STy);
      Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
      continue;
    }

    APInt TypeSize(IntPtrWidth, DL.getTypeAllocSize(GTI.getIndexedType()));
    Offset += OpC->getValue().sextOrTrunc(IntPtrWidth) * TypeSize;
  }
  return true;
}

/// Use TTI to check whether a GEP is free.
///
/// Respects any simplified values known during the analysis of this callsite.
bool CallAnalyzer::isGEPFree(GetElementPtrInst &GEP) {
  SmallVector<Value *, 4> Operands;
  Operands.push_back(GEP.getOperand(0));
  for (const Use &Op : GEP.indices())
    if (Constant *SimpleOp = SimplifiedValues.lookup(Op))
      Operands.push_back(SimpleOp);
    else
      Operands.push_back(Op);
  return TTI.getUserCost(&GEP, Operands,
                         TargetTransformInfo::TCK_SizeAndLatency) ==
         TargetTransformInfo::TCC_Free;
}

bool CallAnalyzer::visitAlloca(AllocaInst &I) {
  // Check whether inlining will turn a dynamic alloca into a static
  // alloca and handle that case.
  if (I.isArrayAllocation()) {
    Constant *Size = SimplifiedValues.lookup(I.getArraySize());
    if (auto *AllocSize = dyn_cast_or_null<ConstantInt>(Size)) {
      // Sometimes a dynamic alloca could be converted into a static alloca
      // after this constant prop, and become a huge static alloca on an
      // unconditional CFG path. Avoid inlining if this is going to happen
      // above a threshold.
      // FIXME: If the threshold is removed or lowered too much, we could end
      // up being too pessimistic and prevent inlining non-problematic code.
      // This could result in unintended perf regressions. A better overall
      // strategy is needed to track stack usage during inlining.
      Type *Ty = I.getAllocatedType();
      AllocatedSize = SaturatingMultiplyAdd(
          AllocSize->getLimitedValue(),
          DL.getTypeAllocSize(Ty).getKnownMinSize(), AllocatedSize);
      if (AllocatedSize > InlineConstants::MaxSimplifiedDynamicAllocaToInline) {
        HasDynamicAlloca = true;
        return false;
      }
      return Base::visitAlloca(I);
    }
  }

  // Accumulate the allocated size.
  if (I.isStaticAlloca()) {
    Type *Ty = I.getAllocatedType();
    AllocatedSize =
        SaturatingAdd(DL.getTypeAllocSize(Ty).getKnownMinSize(), AllocatedSize);
  }

  // We will happily inline static alloca instructions.
  if (I.isStaticAlloca())
    return Base::visitAlloca(I);

  // FIXME: This is overly conservative. Dynamic allocas are inefficient for
  // a variety of reasons, and so we would like to not inline them into
  // functions which don't currently have a dynamic alloca. This simply
  // disables inlining altogether in the presence of a dynamic alloca.
  HasDynamicAlloca = true;
  return false;
}

bool CallAnalyzer::visitPHI(PHINode &I) {
  // FIXME: We need to propagate SROA *disabling* through phi nodes, even
  // though we don't want to propagate its bonuses. The idea is to disable
  // SROA if it *might* be used in an inappropriate manner.

  // Phi nodes are always zero-cost.
  // FIXME: Pointer sizes may differ between different address spaces, so do
  // we need to use the correct address space in the call to
  // getPointerSizeInBits here? Or could we skip the getPointerSizeInBits
  // call completely? As far as I can see the ZeroOffset is used as a dummy
  // value, so we can probably use any bit width for the ZeroOffset?
  APInt ZeroOffset = APInt::getNullValue(DL.getPointerSizeInBits(0));
  bool CheckSROA = I.getType()->isPointerTy();

  // Track the constant or pointer with constant offset we've seen so far.
  Constant *FirstC = nullptr;
  std::pair<Value *, APInt> FirstBaseAndOffset = {nullptr, ZeroOffset};
  Value *FirstV = nullptr;

  for (unsigned i = 0, e = I.getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = I.getIncomingBlock(i);
    // If the incoming block is dead, skip the incoming block.
    if (DeadBlocks.count(Pred))
      continue;
    // If the parent block of phi is not the known successor of the incoming
    // block, skip the incoming block.
    BasicBlock *KnownSuccessor = KnownSuccessors[Pred];
    if (KnownSuccessor && KnownSuccessor != I.getParent())
      continue;

    Value *V = I.getIncomingValue(i);
    // If the incoming value is this phi itself, skip the incoming value.
    if (&I == V)
      continue;

    Constant *C = dyn_cast<Constant>(V);
    if (!C)
      C = SimplifiedValues.lookup(V);

    std::pair<Value *, APInt> BaseAndOffset = {nullptr, ZeroOffset};
    if (!C && CheckSROA)
      BaseAndOffset = ConstantOffsetPtrs.lookup(V);

    if (!C && !BaseAndOffset.first)
      // The incoming value is neither a constant nor a pointer with constant
      // offset, exit early.
      return true;

    if (FirstC) {
      if (FirstC == C)
        // If we've seen a constant incoming value before and it is the same
        // constant we see this time, continue checking the next incoming
        // value.
        continue;
      // Otherwise early exit because we either see a different constant or
      // saw a constant before but we have a pointer with constant offset
      // this time.
      return true;
    }

    if (FirstV) {
      // The same logic as above, but check pointer with constant offset
      // here.
      if (FirstBaseAndOffset == BaseAndOffset)
        continue;
      return true;
    }

    if (C) {
      // This is the 1st time we've seen a constant, record it.
      FirstC = C;
      continue;
    }

    // The remaining case is that this is the 1st time we've seen a pointer
    // with constant offset, record it.
    FirstV = V;
    FirstBaseAndOffset = BaseAndOffset;
  }

  // Check if we can map phi to a constant.
  if (FirstC) {
    SimplifiedValues[&I] = FirstC;
    return true;
  }

  // Check if we can map phi to a pointer with constant offset.
  if (FirstBaseAndOffset.first) {
    ConstantOffsetPtrs[&I] = FirstBaseAndOffset;

    if (auto *SROAArg = getSROAArgForValueOrNull(FirstV))
      SROAArgValues[&I] = SROAArg;
  }

  return true;
}

/// Check we can fold GEPs of constant-offset call site argument pointers.
/// This requires target data and inbounds GEPs.
///
/// \return true if the specified GEP can be folded.
bool CallAnalyzer::canFoldInboundsGEP(GetElementPtrInst &I) {
  // Check if we have a base + offset for the pointer.
  std::pair<Value *, APInt> BaseAndOffset =
      ConstantOffsetPtrs.lookup(I.getPointerOperand());
  if (!BaseAndOffset.first)
    return false;

  // Check if the offset of this GEP is constant, and if so accumulate it
  // into Offset.
  if (!accumulateGEPOffset(cast<GEPOperator>(I), BaseAndOffset.second))
    return false;

  // Add the result as a new mapping to Base + Offset.
  ConstantOffsetPtrs[&I] = BaseAndOffset;

  return true;
}

bool CallAnalyzer::visitGetElementPtr(GetElementPtrInst &I) {
  auto *SROAArg = getSROAArgForValueOrNull(I.getPointerOperand());

  // Lambda to check whether a GEP's indices are all constant.
  auto IsGEPOffsetConstant = [&](GetElementPtrInst &GEP) {
    for (const Use &Op : GEP.indices())
      if (!isa<Constant>(Op) && !SimplifiedValues.lookup(Op))
        return false;
    return true;
  };

  if (!DisableGEPConstOperand)
    if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
          SmallVector<Constant *, 2> Indices;
          for (unsigned int Index = 1; Index < COps.size(); ++Index)
            Indices.push_back(COps[Index]);
          return ConstantExpr::getGetElementPtr(
              I.getSourceElementType(), COps[0], Indices, I.isInBounds());
        }))
      return true;

  if ((I.isInBounds() && canFoldInboundsGEP(I)) || IsGEPOffsetConstant(I)) {
    if (SROAArg)
      SROAArgValues[&I] = SROAArg;

    // Constant GEPs are modeled as free.
    return true;
  }

  // Variable GEPs will require math and will disable SROA.
  if (SROAArg)
    disableSROAForArg(SROAArg);
  return isGEPFree(I);
}

/// Simplify \p I if its operands are constants and update SimplifiedValues.
/// \p Evaluate is a callable specific to instruction type that evaluates the
/// instruction when all the operands are constants.
template <typename Callable>
bool CallAnalyzer::simplifyInstruction(Instruction &I, Callable Evaluate) {
  SmallVector<Constant *, 2> COps;
  for (Value *Op : I.operands()) {
    Constant *COp = dyn_cast<Constant>(Op);
    if (!COp)
      COp = SimplifiedValues.lookup(Op);
    if (!COp)
      return false;
    COps.push_back(COp);
  }
  auto *C = Evaluate(COps);
  if (!C)
    return false;
  SimplifiedValues[&I] = C;
  return true;
}

bool CallAnalyzer::visitBitCast(BitCastInst &I) {
  // Propagate constants through bitcasts.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getBitCast(COps[0], I.getType());
      }))
    return true;

  // Track base/offsets through casts.
  std::pair<Value *, APInt> BaseAndOffset =
      ConstantOffsetPtrs.lookup(I.getOperand(0));
  // Casts don't change the offset, just wrap it up.
  if (BaseAndOffset.first)
    ConstantOffsetPtrs[&I] = BaseAndOffset;

  // Also look for SROA candidates here.
  if (auto *SROAArg = getSROAArgForValueOrNull(I.getOperand(0)))
    SROAArgValues[&I] = SROAArg;

  // Bitcasts are always zero cost.
  return true;
}

bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
  // Propagate constants through ptrtoint.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getPtrToInt(COps[0], I.getType());
      }))
    return true;

  // Track base/offset pairs when converted to a plain integer provided the
  // integer is large enough to represent the pointer.
  unsigned IntegerSize = I.getType()->getScalarSizeInBits();
  unsigned AS = I.getOperand(0)->getType()->getPointerAddressSpace();
  if (IntegerSize == DL.getPointerSizeInBits(AS)) {
    std::pair<Value *, APInt> BaseAndOffset =
        ConstantOffsetPtrs.lookup(I.getOperand(0));
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // This is really weird. Technically, ptrtoint will disable SROA. However,
  // unless that ptrtoint is *used* somewhere in the live basic blocks after
  // inlining, it will be nuked, and SROA should proceed. All of the uses
  // which would block SROA would also block SROA if applied directly to a
  // pointer, and so we can just add the integer in here. The only places
  // where SROA is preserved either cannot fire on an integer, or won't
  // in-and-of themselves disable SROA (ext) w/o some later use that we would
  // see and disable.
  if (auto *SROAArg = getSROAArgForValueOrNull(I.getOperand(0)))
    SROAArgValues[&I] = SROAArg;

  return TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency) ==
         TargetTransformInfo::TCC_Free;
}

bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
  // Propagate constants through inttoptr.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getIntToPtr(COps[0], I.getType());
      }))
    return true;

  // Track base/offset pairs when round-tripped through a pointer without
  // modifications provided the integer is not too large.
  Value *Op = I.getOperand(0);
  unsigned IntegerSize = Op->getType()->getScalarSizeInBits();
  if (IntegerSize <= DL.getPointerTypeSizeInBits(I.getType())) {
    std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op);
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // "Propagate" SROA here in the same manner as we do for ptrtoint above.
  if (auto *SROAArg = getSROAArgForValueOrNull(Op))
    SROAArgValues[&I] = SROAArg;

  return TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency) ==
         TargetTransformInfo::TCC_Free;
}

bool CallAnalyzer::visitCastInst(CastInst &I) {
  // Propagate constants through casts.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getCast(I.getOpcode(), COps[0], I.getType());
      }))
    return true;

  // Disable SROA in the face of arbitrary casts we don't explicitly list
  // elsewhere.
  disableSROA(I.getOperand(0));

  // If this is a floating-point cast, and the target says this operation
  // is expensive, this may eventually become a library call. Treat the cost
  // as such.
  switch (I.getOpcode()) {
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
    if (TTI.getFPOpCost(I.getType()) == TargetTransformInfo::TCC_Expensive)
      onCallPenalty();
    break;
  default:
    break;
  }

  return TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency) ==
         TargetTransformInfo::TCC_Free;
}

bool CallAnalyzer::visitUnaryInstruction(UnaryInstruction &I) {
  Value *Operand = I.getOperand(0);
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantFoldInstOperands(&I, COps[0], DL);
      }))
    return true;

  // Disable any SROA on the argument to arbitrary unary instructions.
  disableSROA(Operand);

  return false;
}

bool CallAnalyzer::paramHasAttr(Argument *A, Attribute::AttrKind Attr) {
  return CandidateCall.paramHasAttr(A->getArgNo(), Attr);
}

bool CallAnalyzer::isKnownNonNullInCallee(Value *V) {
  // Does the *call site* have the NonNull attribute set on an argument? We
  // use the attribute on the call site to memoize any analysis done in the
  // caller. This will also trip if the callee function has a non-null
  // parameter attribute, but that's a less interesting case because
  // hopefully the callee would already have been simplified based on that.
  if (Argument *A = dyn_cast<Argument>(V))
    if (paramHasAttr(A, Attribute::NonNull))
      return true;

  // Is this an alloca in the caller? This is distinct from the attribute
  // case above because attributes aren't updated within the inliner itself
  // and we always want to catch the alloca derived case.
  if (isAllocaDerivedArg(V))
    // We can actually predict the result of comparisons between an
    // alloca-derived value and null. Note that this fires regardless of
    // SROA firing.
    return true;

  return false;
}

bool CallAnalyzer::allowSizeGrowth(CallBase &Call) {
  // If the normal destination of the invoke or the parent block of the call
  // site is unreachable-terminated, there is little point in inlining this
  // unless there is literally zero cost.
1413 // FIXME: Note that it is possible that an unreachable-terminated block has a 1414 // hot entry. For example, in below scenario inlining hot_call_X() may be 1415 // beneficial : 1416 // main() { 1417 // hot_call_1(); 1418 // ... 1419 // hot_call_N() 1420 // exit(0); 1421 // } 1422 // For now, we are not handling this corner case here as it is rare in real 1423 // code. In future, we should elaborate this based on BPI and BFI in more 1424 // general threshold adjusting heuristics in updateThreshold(). 1425 if (InvokeInst *II = dyn_cast<InvokeInst>(&Call)) { 1426 if (isa<UnreachableInst>(II->getNormalDest()->getTerminator())) 1427 return false; 1428 } else if (isa<UnreachableInst>(Call.getParent()->getTerminator())) 1429 return false; 1430 1431 return true; 1432 } 1433 1434 bool InlineCostCallAnalyzer::isColdCallSite(CallBase &Call, 1435 BlockFrequencyInfo *CallerBFI) { 1436 // If global profile summary is available, then callsite's coldness is 1437 // determined based on that. 1438 if (PSI && PSI->hasProfileSummary()) 1439 return PSI->isColdCallSite(Call, CallerBFI); 1440 1441 // Otherwise we need BFI to be available. 1442 if (!CallerBFI) 1443 return false; 1444 1445 // Determine if the callsite is cold relative to caller's entry. We could 1446 // potentially cache the computation of scaled entry frequency, but the added 1447 // complexity is not worth it unless this scaling shows up high in the 1448 // profiles. 1449 const BranchProbability ColdProb(ColdCallSiteRelFreq, 100); 1450 auto CallSiteBB = Call.getParent(); 1451 auto CallSiteFreq = CallerBFI->getBlockFreq(CallSiteBB); 1452 auto CallerEntryFreq = 1453 CallerBFI->getBlockFreq(&(Call.getCaller()->getEntryBlock())); 1454 return CallSiteFreq < CallerEntryFreq * ColdProb; 1455 } 1456 1457 Optional<int> 1458 InlineCostCallAnalyzer::getHotCallSiteThreshold(CallBase &Call, 1459 BlockFrequencyInfo *CallerBFI) { 1460 1461 // If global profile summary is available, then callsite's hotness is 1462 // determined based on that. 1463 if (PSI && PSI->hasProfileSummary() && PSI->isHotCallSite(Call, CallerBFI)) 1464 return Params.HotCallSiteThreshold; 1465 1466 // Otherwise we need BFI to be available and to have a locally hot callsite 1467 // threshold. 1468 if (!CallerBFI || !Params.LocallyHotCallSiteThreshold) 1469 return None; 1470 1471 // Determine if the callsite is hot relative to caller's entry. We could 1472 // potentially cache the computation of scaled entry frequency, but the added 1473 // complexity is not worth it unless this scaling shows up high in the 1474 // profiles. 1475 auto CallSiteBB = Call.getParent(); 1476 auto CallSiteFreq = CallerBFI->getBlockFreq(CallSiteBB).getFrequency(); 1477 auto CallerEntryFreq = CallerBFI->getEntryFreq(); 1478 if (CallSiteFreq >= CallerEntryFreq * HotCallSiteRelFreq) 1479 return Params.LocallyHotCallSiteThreshold; 1480 1481 // Otherwise treat it normally. 1482 return None; 1483 } 1484 1485 void InlineCostCallAnalyzer::updateThreshold(CallBase &Call, Function &Callee) { 1486 // If no size growth is allowed for this inlining, set Threshold to 0. 1487 if (!allowSizeGrowth(Call)) { 1488 Threshold = 0; 1489 return; 1490 } 1491 1492 Function *Caller = Call.getCaller(); 1493 1494 // return min(A, B) if B is valid. 1495 auto MinIfValid = [](int A, Optional<int> B) { 1496 return B ? std::min(A, B.getValue()) : A; 1497 }; 1498 1499 // return max(A, B) if B is valid. 1500 auto MaxIfValid = [](int A, Optional<int> B) { 1501 return B ? 

  // Various bonus percentages. These are multiplied by Threshold to get the
  // bonus values.
  // SingleBBBonus: This bonus is applied if the callee has a single reachable
  // basic block at the given callsite context. This is speculatively applied
  // and withdrawn if more than one basic block is seen.
  //
  // LastCallToStaticBonus: This large bonus is applied to ensure the inlining
  // of the last call to a static function as inlining such functions is
  // guaranteed to reduce code size.
  //
  // These bonus percentages may be set to 0 based on properties of the caller
  // and the callsite.
  int SingleBBBonusPercent = 50;
  int VectorBonusPercent = TTI.getInlinerVectorBonusPercent();
  int LastCallToStaticBonus = InlineConstants::LastCallToStaticBonus;

  // Lambda to set all the above bonus and bonus percentages to 0.
  auto DisallowAllBonuses = [&]() {
    SingleBBBonusPercent = 0;
    VectorBonusPercent = 0;
    LastCallToStaticBonus = 0;
  };

  // Use the OptMinSizeThreshold or OptSizeThreshold knob if they are available
  // and reduce the threshold if the caller has the necessary attribute.
  if (Caller->hasMinSize()) {
    Threshold = MinIfValid(Threshold, Params.OptMinSizeThreshold);
    // For minsize, we want to disable the single BB bonus and the vector
    // bonuses, but not the last-call-to-static bonus. Inlining the last call to
    // a static function will, at the minimum, eliminate the parameter setup and
    // call/return instructions.
    SingleBBBonusPercent = 0;
    VectorBonusPercent = 0;
  } else if (Caller->hasOptSize())
    Threshold = MinIfValid(Threshold, Params.OptSizeThreshold);

  // Adjust the threshold based on inlinehint attribute and profile based
  // hotness information if the caller does not have MinSize attribute.
  if (!Caller->hasMinSize()) {
    if (Callee.hasFnAttribute(Attribute::InlineHint))
      Threshold = MaxIfValid(Threshold, Params.HintThreshold);

    // FIXME: After switching to the new passmanager, simplify the logic below
    // by checking only the callsite hotness/coldness as we will reliably
    // have local profile information.
    //
    // Callsite hotness and coldness can be determined if sample profile is
    // used (which adds hotness metadata to calls) or if caller's
    // BlockFrequencyInfo is available.
    BlockFrequencyInfo *CallerBFI = GetBFI ? &(GetBFI(*Caller)) : nullptr;
    auto HotCallSiteThreshold = getHotCallSiteThreshold(Call, CallerBFI);
    if (!Caller->hasOptSize() && HotCallSiteThreshold) {
      LLVM_DEBUG(dbgs() << "Hot callsite.\n");
      // FIXME: This should update the threshold only if it exceeds the
      // current threshold, but AutoFDO + ThinLTO currently relies on this
      // behavior to prevent inlining of hot callsites during ThinLTO
      // compile phase.
      Threshold = HotCallSiteThreshold.getValue();
    } else if (isColdCallSite(Call, CallerBFI)) {
      LLVM_DEBUG(dbgs() << "Cold callsite.\n");
      // Do not apply bonuses for a cold callsite, including the
      // LastCallToStatic bonus. While this bonus might result in code size
      // reduction, it can cause the size of a non-cold caller to increase,
      // preventing it from being inlined.
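      // Illustrative arithmetic (assuming the default flag values declared
      // above): with Threshold = 225 and -inline-cold-callsite-threshold = 45,
      // a cold callsite ends up with min(225, 45) = 45 and no bonuses at all.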
      DisallowAllBonuses();
      Threshold = MinIfValid(Threshold, Params.ColdCallSiteThreshold);
    } else if (PSI) {
      // Use callee's global profile information only if we have no way of
      // determining this via callsite information.
      if (PSI->isFunctionEntryHot(&Callee)) {
        LLVM_DEBUG(dbgs() << "Hot callee.\n");
        // If callsite hotness cannot be determined, we may still know
        // that the callee is hot and treat it as a weaker hint for threshold
        // increase.
        Threshold = MaxIfValid(Threshold, Params.HintThreshold);
      } else if (PSI->isFunctionEntryCold(&Callee)) {
        LLVM_DEBUG(dbgs() << "Cold callee.\n");
        // Do not apply bonuses for a cold callee, including the
        // LastCallToStatic bonus. While this bonus might result in code size
        // reduction, it can cause the size of a non-cold caller to increase,
        // preventing it from being inlined.
        DisallowAllBonuses();
        Threshold = MinIfValid(Threshold, Params.ColdThreshold);
      }
    }
  }

  Threshold += TTI.adjustInliningThreshold(&Call);

  // Finally, take the target-specific inlining threshold multiplier into
  // account.
  Threshold *= TTI.getInliningThresholdMultiplier();

  SingleBBBonus = Threshold * SingleBBBonusPercent / 100;
  VectorBonus = Threshold * VectorBonusPercent / 100;

  bool OnlyOneCallAndLocalLinkage =
      F.hasLocalLinkage() && F.hasOneUse() && &F == Call.getCalledFunction();
  // If there is only one call of the function, and it has internal linkage,
  // the cost of inlining it drops dramatically. It may seem odd to update
  // Cost in updateThreshold, but the bonus depends on the logic in this
  // method.
  if (OnlyOneCallAndLocalLinkage)
    Cost -= LastCallToStaticBonus;
}

bool CallAnalyzer::visitCmpInst(CmpInst &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  // First try to handle simplified comparisons.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getCompare(I.getPredicate(), COps[0], COps[1]);
      }))
    return true;

  if (I.getOpcode() == Instruction::FCmp)
    return false;

  // Otherwise look for a comparison between constant offset pointers with
  // a common base.
  Value *LHSBase, *RHSBase;
  APInt LHSOffset, RHSOffset;
  std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
  if (LHSBase) {
    std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
    if (RHSBase && LHSBase == RHSBase) {
      // We have common bases, fold the icmp to a constant based on the
      // offsets.
      Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
      Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
      if (Constant *C = ConstantExpr::getICmp(I.getPredicate(), CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        ++NumConstantPtrCmps;
        return true;
      }
    }
  }

  // If the comparison is an equality comparison with null, we can simplify it
  // if we know the value (argument) can't be null.
  if (I.isEquality() && isa<ConstantPointerNull>(I.getOperand(1)) &&
      isKnownNonNullInCallee(I.getOperand(0))) {
    bool IsNotEqual = I.getPredicate() == CmpInst::ICMP_NE;
    SimplifiedValues[&I] = IsNotEqual ? ConstantInt::getTrue(I.getType())
                                      : ConstantInt::getFalse(I.getType());
    return true;
  }
  return handleSROA(I.getOperand(0), isa<ConstantPointerNull>(I.getOperand(1)));
}

bool CallAnalyzer::visitSub(BinaryOperator &I) {
  // Try to handle a special case: we can fold computing the difference of two
  // constant-related pointers.
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Value *LHSBase, *RHSBase;
  APInt LHSOffset, RHSOffset;
  std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
  if (LHSBase) {
    std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
    if (RHSBase && LHSBase == RHSBase) {
      // We have common bases, fold the subtract to a constant based on the
      // offsets.
      Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
      Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
      if (Constant *C = ConstantExpr::getSub(CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        ++NumConstantPtrDiffs;
        return true;
      }
    }
  }

  // Otherwise, fall back to the generic logic for simplifying and handling
  // instructions.
  return Base::visitSub(I);
}

bool CallAnalyzer::visitBinaryOperator(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Constant *CLHS = dyn_cast<Constant>(LHS);
  if (!CLHS)
    CLHS = SimplifiedValues.lookup(LHS);
  Constant *CRHS = dyn_cast<Constant>(RHS);
  if (!CRHS)
    CRHS = SimplifiedValues.lookup(RHS);

  Value *SimpleV = nullptr;
  if (auto FI = dyn_cast<FPMathOperator>(&I))
    SimpleV = SimplifyBinOp(I.getOpcode(), CLHS ? CLHS : LHS, CRHS ? CRHS : RHS,
                            FI->getFastMathFlags(), DL);
  else
    SimpleV =
        SimplifyBinOp(I.getOpcode(), CLHS ? CLHS : LHS, CRHS ? CRHS : RHS, DL);

  if (Constant *C = dyn_cast_or_null<Constant>(SimpleV))
    SimplifiedValues[&I] = C;

  if (SimpleV)
    return true;

  // Disable any SROA on arguments to arbitrary, unsimplified binary operators.
  disableSROA(LHS);
  disableSROA(RHS);

  // If the instruction is floating point, and the target says this operation
  // is expensive, this may eventually become a library call. Treat the cost
  // as such. Unless it's fneg, which can be implemented with an xor.
  using namespace llvm::PatternMatch;
  if (I.getType()->isFloatingPointTy() &&
      TTI.getFPOpCost(I.getType()) == TargetTransformInfo::TCC_Expensive &&
      !match(&I, m_FNeg(m_Value())))
    onCallPenalty();

  return false;
}

bool CallAnalyzer::visitFNeg(UnaryOperator &I) {
  Value *Op = I.getOperand(0);
  Constant *COp = dyn_cast<Constant>(Op);
  if (!COp)
    COp = SimplifiedValues.lookup(Op);

  Value *SimpleV = SimplifyFNegInst(
      COp ? COp : Op, cast<FPMathOperator>(I).getFastMathFlags(), DL);

  if (Constant *C = dyn_cast_or_null<Constant>(SimpleV))
    SimplifiedValues[&I] = C;

  if (SimpleV)
    return true;

  // Disable any SROA on arguments to arbitrary, unsimplified fneg.
  disableSROA(Op);

  return false;
}

bool CallAnalyzer::visitLoad(LoadInst &I) {
  if (handleSROA(I.getPointerOperand(), I.isSimple()))
    return true;

  // If the data is already loaded from this address and hasn't been clobbered
  // by any stores or calls, this load is likely to be redundant and can be
  // eliminated.
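  // For example (illustrative IR), the second load below is an elimination
  // opportunity if nothing in between may write to %p:
  //   %a = load i32, i32* %p
  //   %b = load i32, i32* %p   ; redundant, folds to %a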
  if (EnableLoadElimination &&
      !LoadAddrSet.insert(I.getPointerOperand()).second && I.isUnordered()) {
    onLoadEliminationOpportunity();
    return true;
  }

  return false;
}

bool CallAnalyzer::visitStore(StoreInst &I) {
  if (handleSROA(I.getPointerOperand(), I.isSimple()))
    return true;

  // The store can potentially clobber loads and prevent repeated loads from
  // being eliminated.
  // FIXME:
  // 1. We can probably keep an initial set of eliminatable loads subtracted
  // from the cost even when we finally see a store. We just need to disable
  // *further* accumulation of elimination savings.
  // 2. We should probably at some point thread MemorySSA for the callee into
  // this and then use that to actually compute *really* precise savings.
  disableLoadElimination();
  return false;
}

bool CallAnalyzer::visitExtractValue(ExtractValueInst &I) {
  // Constant folding for extract value is trivial.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getExtractValue(COps[0], I.getIndices());
      }))
    return true;

  // SROA can look through these but give them a cost.
  return false;
}

bool CallAnalyzer::visitInsertValue(InsertValueInst &I) {
  // Constant folding for insert value is trivial.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getInsertValue(/*AggregateOperand*/ COps[0],
                                            /*InsertedValueOperand*/ COps[1],
                                            I.getIndices());
      }))
    return true;

  // SROA can look through these but give them a cost.
  return false;
}

/// Try to simplify a call site.
///
/// Takes a concrete function and callsite and tries to actually simplify it by
/// analyzing the arguments and call itself with instsimplify. Returns true if
/// it has simplified the callsite to some other entity (a constant), making it
/// free.
bool CallAnalyzer::simplifyCallSite(Function *F, CallBase &Call) {
  // FIXME: Using the instsimplify logic directly for this is inefficient
  // because we have to continually rebuild the argument list even when no
  // simplifications can be performed. Until that is fixed with remapping
  // inside of instsimplify, directly constant fold calls here.
  if (!canConstantFoldCallTo(&Call, F))
    return false;

  // Try to re-map the arguments to constants.
  SmallVector<Constant *, 4> ConstantArgs;
  ConstantArgs.reserve(Call.arg_size());
  for (Value *I : Call.args()) {
    Constant *C = dyn_cast<Constant>(I);
    if (!C)
      C = dyn_cast_or_null<Constant>(SimplifiedValues.lookup(I));
    if (!C)
      return false; // This argument doesn't map to a constant.

    ConstantArgs.push_back(C);
  }
  if (Constant *C = ConstantFoldCall(&Call, F, ConstantArgs)) {
    SimplifiedValues[&Call] = C;
    return true;
  }

  return false;
}

bool CallAnalyzer::visitCallBase(CallBase &Call) {
  if (Call.hasFnAttr(Attribute::ReturnsTwice) &&
      !F.hasFnAttribute(Attribute::ReturnsTwice)) {
    // This aborts the entire analysis.
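    // A typical example is a callee containing a call to setjmp: moving that
    // call into an unsuspecting caller via inlining is unsafe, so the
    // analysis is aborted.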
    ExposesReturnsTwice = true;
    return false;
  }
  if (isa<CallInst>(Call) && cast<CallInst>(Call).cannotDuplicate())
    ContainsNoDuplicateCall = true;

  Value *Callee = Call.getCalledOperand();
  Function *F = dyn_cast_or_null<Function>(Callee);
  bool IsIndirectCall = !F;
  if (IsIndirectCall) {
    // Check if this happens to be an indirect function call to a known function
    // in this inline context. If not, we've done all we can.
    F = dyn_cast_or_null<Function>(SimplifiedValues.lookup(Callee));
    if (!F) {
      onCallArgumentSetup(Call);

      if (!Call.onlyReadsMemory())
        disableLoadElimination();
      return Base::visitCallBase(Call);
    }
  }

  assert(F && "Expected a call to a known function");

  // When we have a concrete function, first try to simplify it directly.
  if (simplifyCallSite(F, Call))
    return true;

  // Next check if it is an intrinsic we know about.
  // FIXME: Lift this into part of the InstVisitor.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&Call)) {
    switch (II->getIntrinsicID()) {
    default:
      if (!Call.onlyReadsMemory() && !isAssumeLikeIntrinsic(II))
        disableLoadElimination();
      return Base::visitCallBase(Call);

    case Intrinsic::load_relative:
      onLoadRelativeIntrinsic();
      return false;

    case Intrinsic::memset:
    case Intrinsic::memcpy:
    case Intrinsic::memmove:
      disableLoadElimination();
      // SROA can usually chew through these intrinsics, but they aren't free.
      return false;
    case Intrinsic::icall_branch_funnel:
    case Intrinsic::localescape:
      HasUninlineableIntrinsic = true;
      return false;
    case Intrinsic::vastart:
      InitsVargArgs = true;
      return false;
    }
  }

  if (F == Call.getFunction()) {
    // This flag will fully abort the analysis, so don't bother with anything
    // else.
    IsRecursiveCall = true;
    return false;
  }

  if (TTI.isLoweredToCall(F)) {
    onLoweredCall(F, Call, IsIndirectCall);
  }

  if (!(Call.onlyReadsMemory() || (IsIndirectCall && F->onlyReadsMemory())))
    disableLoadElimination();
  return Base::visitCallBase(Call);
}

bool CallAnalyzer::visitReturnInst(ReturnInst &RI) {
  // At least one return instruction will be free after inlining.
  bool Free = !HasReturn;
  HasReturn = true;
  return Free;
}

bool CallAnalyzer::visitBranchInst(BranchInst &BI) {
  // We model unconditional branches as essentially free -- they really
  // shouldn't exist at all, but handling them makes the behavior of the
  // inliner more regular and predictable. Interestingly, conditional branches
  // which will fold away are also free.
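  // For example (illustrative IR), if %cond was simplified to a constant by
  // argument propagation, the conditional branch below costs nothing:
  //   br i1 %cond, label %then, label %else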
  return BI.isUnconditional() || isa<ConstantInt>(BI.getCondition()) ||
         dyn_cast_or_null<ConstantInt>(
             SimplifiedValues.lookup(BI.getCondition()));
}

bool CallAnalyzer::visitSelectInst(SelectInst &SI) {
  bool CheckSROA = SI.getType()->isPointerTy();
  Value *TrueVal = SI.getTrueValue();
  Value *FalseVal = SI.getFalseValue();

  Constant *TrueC = dyn_cast<Constant>(TrueVal);
  if (!TrueC)
    TrueC = SimplifiedValues.lookup(TrueVal);
  Constant *FalseC = dyn_cast<Constant>(FalseVal);
  if (!FalseC)
    FalseC = SimplifiedValues.lookup(FalseVal);
  Constant *CondC =
      dyn_cast_or_null<Constant>(SimplifiedValues.lookup(SI.getCondition()));

  if (!CondC) {
    // Select C, X, X => X
    if (TrueC == FalseC && TrueC) {
      SimplifiedValues[&SI] = TrueC;
      return true;
    }

    if (!CheckSROA)
      return Base::visitSelectInst(SI);

    std::pair<Value *, APInt> TrueBaseAndOffset =
        ConstantOffsetPtrs.lookup(TrueVal);
    std::pair<Value *, APInt> FalseBaseAndOffset =
        ConstantOffsetPtrs.lookup(FalseVal);
    if (TrueBaseAndOffset == FalseBaseAndOffset && TrueBaseAndOffset.first) {
      ConstantOffsetPtrs[&SI] = TrueBaseAndOffset;

      if (auto *SROAArg = getSROAArgForValueOrNull(TrueVal))
        SROAArgValues[&SI] = SROAArg;
      return true;
    }

    return Base::visitSelectInst(SI);
  }

  // Select condition is a constant.
  Value *SelectedV = CondC->isAllOnesValue()
                         ? TrueVal
                         : (CondC->isNullValue()) ? FalseVal : nullptr;
  if (!SelectedV) {
    // Condition is a vector constant that is not all 1s or all 0s. If all
    // operands are constants, ConstantExpr::getSelect() can handle the cases
    // such as select vectors.
    if (TrueC && FalseC) {
      if (auto *C = ConstantExpr::getSelect(CondC, TrueC, FalseC)) {
        SimplifiedValues[&SI] = C;
        return true;
      }
    }
    return Base::visitSelectInst(SI);
  }

  // Condition is either all 1s or all 0s. SI can be simplified.
  if (Constant *SelectedC = dyn_cast<Constant>(SelectedV)) {
    SimplifiedValues[&SI] = SelectedC;
    return true;
  }

  if (!CheckSROA)
    return true;

  std::pair<Value *, APInt> BaseAndOffset =
      ConstantOffsetPtrs.lookup(SelectedV);
  if (BaseAndOffset.first) {
    ConstantOffsetPtrs[&SI] = BaseAndOffset;

    if (auto *SROAArg = getSROAArgForValueOrNull(SelectedV))
      SROAArgValues[&SI] = SROAArg;
  }

  return true;
}

bool CallAnalyzer::visitSwitchInst(SwitchInst &SI) {
  // We model unconditional switches as free, see the comments on handling
  // branches.
  if (isa<ConstantInt>(SI.getCondition()))
    return true;
  if (Value *V = SimplifiedValues.lookup(SI.getCondition()))
    if (isa<ConstantInt>(V))
      return true;

  // Assume the most general case where the switch is lowered into
  // either a jump table, bit test, or a balanced binary tree consisting of
  // case clusters without merging adjacent clusters with the same
  // destination. We do not consider the switches that are lowered with a mix
  // of jump table/bit test/binary search tree. The cost of the switch is
  // proportional to the size of the tree or the size of the jump table range.
  //
  // NB: We convert large switches which are just used to initialize large phi
  // nodes to lookup tables instead in simplify-cfg, so this shouldn't prevent
  // inlining those.
  // It will prevent inlining in cases where the optimization does not (yet)
  // fire.

  unsigned JumpTableSize = 0;
  BlockFrequencyInfo *BFI = GetBFI ? &(GetBFI(F)) : nullptr;
  unsigned NumCaseCluster =
      TTI.getEstimatedNumberOfCaseClusters(SI, JumpTableSize, PSI, BFI);

  onFinalizeSwitch(JumpTableSize, NumCaseCluster);
  return false;
}

bool CallAnalyzer::visitIndirectBrInst(IndirectBrInst &IBI) {
  // We never want to inline functions that contain an indirectbr. This is
  // incorrect because all the blockaddresses (in static global initializers
  // for example) would be referring to the original function, and this
  // indirect jump would jump from the inlined copy of the function into the
  // original function which is extremely undefined behavior.
  // FIXME: This logic isn't really right; we can safely inline functions with
  // indirectbr's as long as no other function or global references the
  // blockaddress of a block within the current function.
  HasIndirectBr = true;
  return false;
}

bool CallAnalyzer::visitResumeInst(ResumeInst &RI) {
  // FIXME: It's not clear that a single instruction is an accurate model for
  // the inline cost of a resume instruction.
  return false;
}

bool CallAnalyzer::visitCleanupReturnInst(CleanupReturnInst &CRI) {
  // FIXME: It's not clear that a single instruction is an accurate model for
  // the inline cost of a cleanupret instruction.
  return false;
}

bool CallAnalyzer::visitCatchReturnInst(CatchReturnInst &CRI) {
  // FIXME: It's not clear that a single instruction is an accurate model for
  // the inline cost of a catchret instruction.
  return false;
}

bool CallAnalyzer::visitUnreachableInst(UnreachableInst &I) {
  // FIXME: It might be reasonable to discount the cost of instructions leading
  // to unreachable as they have the lowest possible impact on both runtime and
  // code size.
  return true; // No actual code is needed for unreachable.
}

bool CallAnalyzer::visitInstruction(Instruction &I) {
  // Some instructions are free. All of the free intrinsics can also be
  // handled by SROA, etc.
  if (TTI.getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency) ==
      TargetTransformInfo::TCC_Free)
    return true;

  // We found something we don't understand or can't handle. Mark any SROA-able
  // values in the operand list as no longer viable.
  for (const Use &Op : I.operands())
    disableSROA(Op);

  return false;
}

/// Analyze a basic block for its contribution to the inline cost.
///
/// This method walks the analyzer over every instruction in the given basic
/// block and accounts for their cost during inlining at this callsite. It
/// aborts early if the threshold has been exceeded or an impossible-to-inline
/// construct has been detected. It returns false if inlining is no longer
/// viable, and true if inlining remains viable.
InlineResult
CallAnalyzer::analyzeBlock(BasicBlock *BB,
                           SmallPtrSetImpl<const Value *> &EphValues) {
  for (Instruction &I : *BB) {
    // FIXME: Currently, the number of instructions in a function, regardless
    // of our ability to simplify them during inlining to constants or dead
    // code, is actually used by the vector bonus heuristic.
    // As long as that's true, we have to special case debug intrinsics here
    // to prevent differences in inlining due to debug symbols. Eventually,
    // the number of unsimplified instructions shouldn't factor into the cost
    // computation, but until then, hack around it here.
    if (isa<DbgInfoIntrinsic>(I))
      continue;

    // Skip pseudo-probes.
    if (isa<PseudoProbeInst>(I))
      continue;

    // Skip ephemeral values.
    if (EphValues.count(&I))
      continue;

    ++NumInstructions;
    if (isa<ExtractElementInst>(I) || I.getType()->isVectorTy())
      ++NumVectorInstructions;

    // If the instruction simplified to a constant, there is no cost to this
    // instruction. Visit the instructions using our InstVisitor to account for
    // all of the per-instruction logic. The visit tree returns true if we
    // consumed the instruction in any way, and false if the instruction's base
    // cost should count against inlining.
    onInstructionAnalysisStart(&I);

    if (Base::visit(&I))
      ++NumInstructionsSimplified;
    else
      onMissedSimplification();

    onInstructionAnalysisFinish(&I);
    using namespace ore;
    // If visiting this instruction detected an uninlinable pattern, abort.
    InlineResult IR = InlineResult::success();
    if (IsRecursiveCall)
      IR = InlineResult::failure("recursive");
    else if (ExposesReturnsTwice)
      IR = InlineResult::failure("exposes returns twice");
    else if (HasDynamicAlloca)
      IR = InlineResult::failure("dynamic alloca");
    else if (HasIndirectBr)
      IR = InlineResult::failure("indirect branch");
    else if (HasUninlineableIntrinsic)
      IR = InlineResult::failure("uninlinable intrinsic");
    else if (InitsVargArgs)
      IR = InlineResult::failure("varargs");
    if (!IR.isSuccess()) {
      if (ORE)
        ORE->emit([&]() {
          return OptimizationRemarkMissed(DEBUG_TYPE, "NeverInline",
                                          &CandidateCall)
                 << NV("Callee", &F) << " has uninlinable pattern ("
                 << NV("InlineResult", IR.getFailureReason())
                 << ") and cost is not fully computed";
        });
      return IR;
    }

    // If the caller is a recursive function then we don't want to inline
    // functions which allocate a lot of stack space because it would increase
    // the caller stack usage dramatically.
    if (IsCallerRecursive &&
        AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller) {
      auto IR =
          InlineResult::failure("recursive and allocates too much stack space");
      if (ORE)
        ORE->emit([&]() {
          return OptimizationRemarkMissed(DEBUG_TYPE, "NeverInline",
                                          &CandidateCall)
                 << NV("Callee", &F) << " is "
                 << NV("InlineResult", IR.getFailureReason())
                 << ". Cost is not fully computed";
        });
      return IR;
    }

    if (shouldStop())
      return InlineResult::failure(
          "Call site analysis is not favorable to inlining.");
  }

  return InlineResult::success();
}

/// Compute the base pointer and cumulative constant offsets for V.
///
/// This strips all constant offsets off of V, leaving it the base pointer, and
/// accumulates the total constant offset applied in the returned constant. It
/// returns 0 if V is not a pointer, and returns the constant '0' if there are
/// no constant offsets applied.
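/// For example (illustrative IR), given
///   %p = getelementptr inbounds [4 x i32], [4 x i32]* %base, i32 0, i32 2
/// this rewrites \p V to %base and returns a constant holding the byte
/// offset 8 (two i32 elements past the start of the array).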
ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
  if (!V->getType()->isPointerTy())
    return nullptr;

  unsigned AS = V->getType()->getPointerAddressSpace();
  unsigned IntPtrWidth = DL.getIndexSizeInBits(AS);
  APInt Offset = APInt::getNullValue(IntPtrWidth);

  // Even though we don't look through PHI nodes, we could be called on an
  // instruction in an unreachable block, which may be on a cycle.
  SmallPtrSet<Value *, 4> Visited;
  Visited.insert(V);
  do {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      if (!GEP->isInBounds() || !accumulateGEPOffset(*GEP, Offset))
        return nullptr;
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast) {
      V = cast<Operator>(V)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
      if (GA->isInterposable())
        break;
      V = GA->getAliasee();
    } else {
      break;
    }
    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
  } while (Visited.insert(V).second);

  Type *IdxPtrTy = DL.getIndexType(V->getType());
  return cast<ConstantInt>(ConstantInt::get(IdxPtrTy, Offset));
}

/// Find dead blocks due to deleted CFG edges during inlining.
///
/// If we know the successor of the current block, \p CurrBB, has to be \p
/// NextBB, the other successors of \p CurrBB are dead if these successors have
/// no live incoming CFG edges. If one block is found to be dead, we can
/// continue growing the dead block list by checking the successors of the dead
/// blocks to see if all their incoming edges are dead or not.
void CallAnalyzer::findDeadBlocks(BasicBlock *CurrBB, BasicBlock *NextBB) {
  auto IsEdgeDead = [&](BasicBlock *Pred, BasicBlock *Succ) {
    // A CFG edge is dead if the predecessor is dead or the predecessor has a
    // known successor which is not the one under examination.
    return (DeadBlocks.count(Pred) ||
            (KnownSuccessors[Pred] && KnownSuccessors[Pred] != Succ));
  };

  auto IsNewlyDead = [&](BasicBlock *BB) {
    // If all the edges to a block are dead, the block is also dead.
    return (!DeadBlocks.count(BB) &&
            llvm::all_of(predecessors(BB),
                         [&](BasicBlock *P) { return IsEdgeDead(P, BB); }));
  };

  for (BasicBlock *Succ : successors(CurrBB)) {
    if (Succ == NextBB || !IsNewlyDead(Succ))
      continue;
    SmallVector<BasicBlock *, 4> NewDead;
    NewDead.push_back(Succ);
    while (!NewDead.empty()) {
      BasicBlock *Dead = NewDead.pop_back_val();
      if (DeadBlocks.insert(Dead))
        // Continue growing the dead block list.
        for (BasicBlock *S : successors(Dead))
          if (IsNewlyDead(S))
            NewDead.push_back(S);
    }
  }
}

/// Analyze a call site for potential inlining.
///
/// Returns true if inlining this call is viable, and false if it is not
/// viable. It computes the cost and adjusts the threshold based on numerous
/// factors and heuristics. If this method returns false but the computed cost
/// is below the computed threshold, then inlining was forcibly disabled by
/// some artifact of the routine.
InlineResult CallAnalyzer::analyze() {
  ++NumCallsAnalyzed;

  auto Result = onAnalysisStart();
  if (!Result.isSuccess())
    return Result;

  if (F.empty())
    return InlineResult::success();

  Function *Caller = CandidateCall.getFunction();
  // Check if the caller function is recursive itself.
  for (User *U : Caller->users()) {
    CallBase *Call = dyn_cast<CallBase>(U);
    if (Call && Call->getFunction() == Caller) {
      IsCallerRecursive = true;
      break;
    }
  }

  // Populate our simplified values by mapping from function arguments to call
  // arguments with known important simplifications.
  auto CAI = CandidateCall.arg_begin();
  for (Argument &FAI : F.args()) {
    assert(CAI != CandidateCall.arg_end());
    if (Constant *C = dyn_cast<Constant>(CAI))
      SimplifiedValues[&FAI] = C;

    Value *PtrArg = *CAI;
    if (ConstantInt *C = stripAndComputeInBoundsConstantOffsets(PtrArg)) {
      ConstantOffsetPtrs[&FAI] = std::make_pair(PtrArg, C->getValue());

      // We can SROA any pointer arguments derived from alloca instructions.
      if (auto *SROAArg = dyn_cast<AllocaInst>(PtrArg)) {
        SROAArgValues[&FAI] = SROAArg;
        onInitializeSROAArg(SROAArg);
        EnabledSROAAllocas.insert(SROAArg);
      }
    }
    ++CAI;
  }
  NumConstantArgs = SimplifiedValues.size();
  NumConstantOffsetPtrArgs = ConstantOffsetPtrs.size();
  NumAllocaArgs = SROAArgValues.size();

  // FIXME: If a caller has multiple calls to a callee, we end up recomputing
  // the ephemeral values multiple times (and they're completely determined by
  // the callee, so this is purely duplicate work).
  SmallPtrSet<const Value *, 32> EphValues;
  CodeMetrics::collectEphemeralValues(&F, &GetAssumptionCache(F), EphValues);

  // The worklist of live basic blocks in the callee *after* inlining. We avoid
  // adding basic blocks of the callee which can be proven to be dead for this
  // particular call site in order to get more accurate cost estimates. This
  // requires a somewhat heavyweight iteration pattern: we need to walk the
  // basic blocks in a breadth-first order as we insert live successors. To
  // accomplish this, and because we prioritize small iterations (we exit after
  // crossing our threshold), we use a small-size optimized SetVector.
  typedef SetVector<BasicBlock *, SmallVector<BasicBlock *, 16>,
                    SmallPtrSet<BasicBlock *, 16>>
      BBSetVector;
  BBSetVector BBWorklist;
  BBWorklist.insert(&F.getEntryBlock());

  // Note that we *must not* cache the size, this loop grows the worklist.
  for (unsigned Idx = 0; Idx != BBWorklist.size(); ++Idx) {
    if (shouldStop())
      break;

    BasicBlock *BB = BBWorklist[Idx];
    if (BB->empty())
      continue;

    onBlockStart(BB);

    // Disallow inlining a blockaddress with uses other than strictly callbr.
    // A blockaddress only has defined behavior for an indirect branch in the
    // same function, and we do not currently support inlining indirect
    // branches. But, the inliner may not see an indirect branch that ends up
    // being dead code at a particular call site. If the blockaddress escapes
    // the function, e.g., via a global variable, inlining may lead to an
    // invalid cross-function reference.
    // FIXME: pr/39560: continue relaxing this overt restriction.
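    // For example (illustrative IR), an escape like the following blocks
    // inlining, because the blockaddress is used outside of a callbr:
    //   @tbl = global i8* blockaddress(@callee, %bb)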
    if (BB->hasAddressTaken())
      for (User *U : BlockAddress::get(&*BB)->users())
        if (!isa<CallBrInst>(*U))
          return InlineResult::failure("blockaddress used outside of callbr");

    // Analyze the cost of this block. If we blow through the threshold, this
    // returns false, and we can bail out.
    InlineResult IR = analyzeBlock(BB, EphValues);
    if (!IR.isSuccess())
      return IR;

    Instruction *TI = BB->getTerminator();

    // Add in the live successors by first checking whether we have a
    // terminator that may be simplified based on the values simplified by
    // this call.
    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (BI->isConditional()) {
        Value *Cond = BI->getCondition();
        if (ConstantInt *SimpleCond =
                dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
          BasicBlock *NextBB = BI->getSuccessor(SimpleCond->isZero() ? 1 : 0);
          BBWorklist.insert(NextBB);
          KnownSuccessors[BB] = NextBB;
          findDeadBlocks(BB, NextBB);
          continue;
        }
      }
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
      Value *Cond = SI->getCondition();
      if (ConstantInt *SimpleCond =
              dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
        BasicBlock *NextBB = SI->findCaseValue(SimpleCond)->getCaseSuccessor();
        BBWorklist.insert(NextBB);
        KnownSuccessors[BB] = NextBB;
        findDeadBlocks(BB, NextBB);
        continue;
      }
    }

    // If we're unable to select a particular successor, just count all of
    // them.
    for (unsigned TIdx = 0, TSize = TI->getNumSuccessors(); TIdx != TSize;
         ++TIdx)
      BBWorklist.insert(TI->getSuccessor(TIdx));

    onBlockAnalyzed(BB);
  }

  bool OnlyOneCallAndLocalLinkage = F.hasLocalLinkage() && F.hasOneUse() &&
                                    &F == CandidateCall.getCalledFunction();
  // If this is a noduplicate call, we can still inline as long as
  // inlining this would cause the removal of the caller (so the instruction
  // is not actually duplicated, just moved).
  if (!OnlyOneCallAndLocalLinkage && ContainsNoDuplicateCall)
    return InlineResult::failure("noduplicate");

  return finalizeAnalysis();
}

void InlineCostCallAnalyzer::print() {
#define DEBUG_PRINT_STAT(x) dbgs() << " " #x ": " << x << "\n"
  if (PrintInstructionComments)
    F.print(dbgs(), &Writer);
  DEBUG_PRINT_STAT(NumConstantArgs);
  DEBUG_PRINT_STAT(NumConstantOffsetPtrArgs);
  DEBUG_PRINT_STAT(NumAllocaArgs);
  DEBUG_PRINT_STAT(NumConstantPtrCmps);
  DEBUG_PRINT_STAT(NumConstantPtrDiffs);
  DEBUG_PRINT_STAT(NumInstructionsSimplified);
  DEBUG_PRINT_STAT(NumInstructions);
  DEBUG_PRINT_STAT(SROACostSavings);
  DEBUG_PRINT_STAT(SROACostSavingsLost);
  DEBUG_PRINT_STAT(LoadEliminationCost);
  DEBUG_PRINT_STAT(ContainsNoDuplicateCall);
  DEBUG_PRINT_STAT(Cost);
  DEBUG_PRINT_STAT(Threshold);
#undef DEBUG_PRINT_STAT
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Dump stats about this call's analysis.
LLVM_DUMP_METHOD void InlineCostCallAnalyzer::dump() {
  print();
}
#endif

/// Test that there are no attribute conflicts between Caller and Callee
/// that prevent inlining.
static bool functionsHaveCompatibleAttributes(
    Function *Caller, Function *Callee, TargetTransformInfo &TTI,
    function_ref<const TargetLibraryInfo &(Function &)> &GetTLI) {
  // Note that CalleeTLI must be a copy, not a reference.
  // The legacy pass manager caches the most recently created TLI in the
  // TargetLibraryInfoWrapperPass object, and always returns the same object
  // (which is overwritten on each GetTLI call). Therefore we copy the first
  // result.
  auto CalleeTLI = GetTLI(*Callee);
  return TTI.areInlineCompatible(Caller, Callee) &&
         GetTLI(*Caller).areInlineCompatible(CalleeTLI,
                                             InlineCallerSupersetNoBuiltin) &&
         AttributeFuncs::areInlineCompatible(*Caller, *Callee);
}

int llvm::getCallsiteCost(CallBase &Call, const DataLayout &DL) {
  int Cost = 0;
  for (unsigned I = 0, E = Call.arg_size(); I != E; ++I) {
    if (Call.isByValArgument(I)) {
      // We approximate the number of loads and stores needed by dividing the
      // size of the byval type by the target's pointer size.
      PointerType *PTy = cast<PointerType>(Call.getArgOperand(I)->getType());
      unsigned TypeSize = DL.getTypeSizeInBits(PTy->getElementType());
      unsigned AS = PTy->getAddressSpace();
      unsigned PointerSize = DL.getPointerSizeInBits(AS);
      // Ceiling division.
      unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;

      // If it generates more than 8 stores, it is likely to be expanded as an
      // inline memcpy so we take that as an upper bound. Otherwise we assume
      // one load and one store per word copied.
      // FIXME: The maxStoresPerMemcpy setting from the target should be used
      // here instead of a magic number of 8, but it's not available via
      // DataLayout.
      NumStores = std::min(NumStores, 8U);

      Cost += 2 * NumStores * InlineConstants::InstrCost;
    } else {
      // For non-byval arguments subtract off one instruction per call
      // argument.
      Cost += InlineConstants::InstrCost;
    }
  }
  // The call instruction also disappears after inlining.
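  // Illustrative arithmetic for the loop above (assuming 64-bit pointers): a
  // byval argument of 64 bytes gives NumStores = ceil(512 / 64) = 8 and
  // contributes 2 * 8 * InstrCost; any other argument contributes InstrCost.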
  Cost += InlineConstants::InstrCost + InlineConstants::CallPenalty;
  return Cost;
}

InlineCost llvm::getInlineCost(
    CallBase &Call, const InlineParams &Params, TargetTransformInfo &CalleeTTI,
    function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
    function_ref<const TargetLibraryInfo &(Function &)> GetTLI,
    function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
    ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE) {
  return getInlineCost(Call, Call.getCalledFunction(), Params, CalleeTTI,
                       GetAssumptionCache, GetTLI, GetBFI, PSI, ORE);
}

Optional<int> llvm::getInliningCostEstimate(
    CallBase &Call, TargetTransformInfo &CalleeTTI,
    function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
    function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
    ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE) {
  const InlineParams Params = {/* DefaultThreshold*/ 0,
                               /*HintThreshold*/ {},
                               /*ColdThreshold*/ {},
                               /*OptSizeThreshold*/ {},
                               /*OptMinSizeThreshold*/ {},
                               /*HotCallSiteThreshold*/ {},
                               /*LocallyHotCallSiteThreshold*/ {},
                               /*ColdCallSiteThreshold*/ {},
                               /*ComputeFullInlineCost*/ true,
                               /*EnableDeferral*/ true};

  InlineCostCallAnalyzer CA(*Call.getCalledFunction(), Call, Params, CalleeTTI,
                            GetAssumptionCache, GetBFI, PSI, ORE, true,
                            /*IgnoreThreshold*/ true);
  auto R = CA.analyze();
  if (!R.isSuccess())
    return None;
  return CA.getCost();
}

Optional<InlineResult> llvm::getAttributeBasedInliningDecision(
    CallBase &Call, Function *Callee, TargetTransformInfo &CalleeTTI,
    function_ref<const TargetLibraryInfo &(Function &)> GetTLI) {

  // Cannot inline indirect calls.
  if (!Callee)
    return InlineResult::failure("indirect call");

  // When a callee coroutine function is inlined into a caller coroutine
  // function before the coro-split pass, the coro-early pass cannot handle
  // this quite well. So we won't inline the coroutine function if it has not
  // been split yet.
  if (Callee->isPresplitCoroutine())
    return InlineResult::failure("unsplit coroutine call");

  // Never inline calls with byval arguments that do not have the alloca
  // address space. Since byval arguments can be replaced with a copy to an
  // alloca, the inlined code would need to be adjusted to handle that the
  // argument is in the alloca address space (so it is a little bit complicated
  // to solve).
  unsigned AllocaAS = Callee->getParent()->getDataLayout().getAllocaAddrSpace();
  for (unsigned I = 0, E = Call.arg_size(); I != E; ++I)
    if (Call.isByValArgument(I)) {
      PointerType *PTy = cast<PointerType>(Call.getArgOperand(I)->getType());
      if (PTy->getAddressSpace() != AllocaAS)
        return InlineResult::failure("byval arguments without alloca"
                                     " address space");
    }

  // Calls to functions with always-inline attributes should be inlined
  // whenever possible.
  if (Call.hasFnAttr(Attribute::AlwaysInline)) {
    auto IsViable = isInlineViable(*Callee);
    if (IsViable.isSuccess())
      return InlineResult::success();
    return InlineResult::failure(IsViable.getFailureReason());
  }

  // Never inline functions with conflicting attributes (unless callee has
  // always-inline attribute).
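  // For example, a callee compiled with a target feature such as +avx2 is
  // generally not inline-compatible with a caller built without it, since the
  // inlined body could execute instructions the caller never advertised.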
  Function *Caller = Call.getCaller();
  if (!functionsHaveCompatibleAttributes(Caller, Callee, CalleeTTI, GetTLI))
    return InlineResult::failure("conflicting attributes");

  // Don't inline this call if the caller has the optnone attribute.
  if (Caller->hasOptNone())
    return InlineResult::failure("optnone attribute");

  // Don't inline a function that treats null pointer as valid into a caller
  // that does not have this attribute.
  if (!Caller->nullPointerIsDefined() && Callee->nullPointerIsDefined())
    return InlineResult::failure("nullptr definitions incompatible");

  // Don't inline functions which can be interposed at link-time.
  if (Callee->isInterposable())
    return InlineResult::failure("interposable");

  // Don't inline functions marked noinline.
  if (Callee->hasFnAttribute(Attribute::NoInline))
    return InlineResult::failure("noinline function attribute");

  // Don't inline call sites marked noinline.
  if (Call.isNoInline())
    return InlineResult::failure("noinline call site attribute");

  // Don't inline functions if one does not have any stack protector attribute
  // but the other does.
  if (Caller->hasStackProtectorFnAttr() && !Callee->hasStackProtectorFnAttr())
    return InlineResult::failure(
        "stack protected caller but callee requested no stack protector");
  if (Callee->hasStackProtectorFnAttr() && !Caller->hasStackProtectorFnAttr())
    return InlineResult::failure(
        "stack protected callee but caller requested no stack protector");

  return None;
}

InlineCost llvm::getInlineCost(
    CallBase &Call, Function *Callee, const InlineParams &Params,
    TargetTransformInfo &CalleeTTI,
    function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
    function_ref<const TargetLibraryInfo &(Function &)> GetTLI,
    function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
    ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE) {

  auto UserDecision =
      llvm::getAttributeBasedInliningDecision(Call, Callee, CalleeTTI, GetTLI);

  if (UserDecision.hasValue()) {
    if (UserDecision->isSuccess())
      return llvm::InlineCost::getAlways("always inline attribute");
    return llvm::InlineCost::getNever(UserDecision->getFailureReason());
  }

  LLVM_DEBUG(llvm::dbgs() << " Analyzing call of " << Callee->getName()
                          << "... (caller:" << Call.getCaller()->getName()
                          << ")\n");

  InlineCostCallAnalyzer CA(*Callee, Call, Params, CalleeTTI,
                            GetAssumptionCache, GetBFI, PSI, ORE);
  InlineResult ShouldInline = CA.analyze();

  LLVM_DEBUG(CA.dump());

  // Always make the cost-benefit-based decision explicit.
  // We use always/never here since the threshold is not meaningful,
  // as it's not what drives the cost-benefit analysis.
  if (CA.wasDecidedByCostBenefit()) {
    if (ShouldInline.isSuccess())
      return InlineCost::getAlways("benefit over cost");
    else
      return InlineCost::getNever("cost over benefit");
  }

  // Check if there was a reason to force inlining or no inlining.
  if (!ShouldInline.isSuccess() && CA.getCost() < CA.getThreshold())
    return InlineCost::getNever(ShouldInline.getFailureReason());
  if (ShouldInline.isSuccess() && CA.getCost() >= CA.getThreshold())
    return InlineCost::getAlways("empty function");

  return llvm::InlineCost::get(CA.getCost(), CA.getThreshold());
}

InlineResult llvm::isInlineViable(Function &F) {
  bool ReturnsTwice = F.hasFnAttribute(Attribute::ReturnsTwice);
  for (BasicBlock &BB : F) {
    // Disallow inlining of functions which contain indirect branches.
    if (isa<IndirectBrInst>(BB.getTerminator()))
      return InlineResult::failure("contains indirect branches");

    // Disallow inlining of blockaddresses which are used by non-callbr
    // instructions.
    if (BB.hasAddressTaken())
      for (User *U : BlockAddress::get(&BB)->users())
        if (!isa<CallBrInst>(*U))
          return InlineResult::failure("blockaddress used outside of callbr");

    for (auto &II : BB) {
      CallBase *Call = dyn_cast<CallBase>(&II);
      if (!Call)
        continue;

      // Disallow recursive calls.
      Function *Callee = Call->getCalledFunction();
      if (&F == Callee)
        return InlineResult::failure("recursive call");

      // Disallow calls which expose returns-twice to a function not previously
      // attributed as such.
      if (!ReturnsTwice && isa<CallInst>(Call) &&
          cast<CallInst>(Call)->canReturnTwice())
        return InlineResult::failure("exposes returns-twice attribute");

      if (Callee)
        switch (Callee->getIntrinsicID()) {
        default:
          break;
        case llvm::Intrinsic::icall_branch_funnel:
          // Disallow inlining of @llvm.icall.branch.funnel because the current
          // backend can't separate call targets from call arguments.
          return InlineResult::failure(
              "disallowed inlining of @llvm.icall.branch.funnel");
        case llvm::Intrinsic::localescape:
          // Disallow inlining functions that call @llvm.localescape. Doing this
          // correctly would require major changes to the inliner.
          return InlineResult::failure(
              "disallowed inlining of @llvm.localescape");
        case llvm::Intrinsic::vastart:
          // Disallow inlining of functions that initialize VarArgs with
          // va_start.
          return InlineResult::failure(
              "contains VarArgs initialized with va_start");
        }
    }
  }

  return InlineResult::success();
}

// APIs to create InlineParams based on command line flags and/or other
// parameters.

InlineParams llvm::getInlineParams(int Threshold) {
  InlineParams Params;

  // This field is the threshold to use for a callee by default. This is
  // derived from one or more of:
  //  * optimization or size-optimization levels,
  //  * a value passed to the createFunctionInliningPass function, or
  //  * the -inline-threshold flag.
  // If the -inline-threshold flag is explicitly specified, that is used
  // irrespective of anything else.
  if (InlineThreshold.getNumOccurrences() > 0)
    Params.DefaultThreshold = InlineThreshold;
  else
    Params.DefaultThreshold = Threshold;

  // Set the HintThreshold knob from the -inlinehint-threshold.
  Params.HintThreshold = HintThreshold;

  // Set the HotCallSiteThreshold knob from the -hot-callsite-threshold.
  Params.HotCallSiteThreshold = HotCallSiteThreshold;

  // If the -locally-hot-callsite-threshold is explicitly specified, use it to
  // populate LocallyHotCallSiteThreshold. Later, we populate
  // Params.LocallyHotCallSiteThreshold from -locally-hot-callsite-threshold if
  // we know that optimization level is O3 (in the getInlineParams variant that
  // takes the opt and size levels).
  // FIXME: Remove this check (and make the assignment unconditional) after
  // addressing size regression issues at O2.
  if (LocallyHotCallSiteThreshold.getNumOccurrences() > 0)
    Params.LocallyHotCallSiteThreshold = LocallyHotCallSiteThreshold;

  // Set the ColdCallSiteThreshold knob from the
  // -inline-cold-callsite-threshold.
  Params.ColdCallSiteThreshold = ColdCallSiteThreshold;

  // Set the OptMinSizeThreshold and OptSizeThreshold params only if the
  // -inline-threshold commandline option is not explicitly given. If that
  // option is present, then its value applies even for callees with size and
  // minsize attributes.
  // If the -inline-threshold is not specified, set the ColdThreshold from the
  // -inlinecold-threshold even if it is not explicitly passed. If
  // -inline-threshold is specified, then -inlinecold-threshold needs to be
  // explicitly specified to set the ColdThreshold knob.
  if (InlineThreshold.getNumOccurrences() == 0) {
    Params.OptMinSizeThreshold = InlineConstants::OptMinSizeThreshold;
    Params.OptSizeThreshold = InlineConstants::OptSizeThreshold;
    Params.ColdThreshold = ColdThreshold;
  } else if (ColdThreshold.getNumOccurrences() > 0) {
    Params.ColdThreshold = ColdThreshold;
  }
  return Params;
}

InlineParams llvm::getInlineParams() {
  return getInlineParams(DefaultThreshold);
}

// Compute the default threshold for inlining based on the opt level and the
// size opt level.
static int computeThresholdFromOptLevels(unsigned OptLevel,
                                         unsigned SizeOptLevel) {
  if (OptLevel > 2)
    return InlineConstants::OptAggressiveThreshold;
  if (SizeOptLevel == 1) // -Os
    return InlineConstants::OptSizeThreshold;
  if (SizeOptLevel == 2) // -Oz
    return InlineConstants::OptMinSizeThreshold;
  return DefaultThreshold;
}

InlineParams llvm::getInlineParams(unsigned OptLevel, unsigned SizeOptLevel) {
  auto Params =
      getInlineParams(computeThresholdFromOptLevels(OptLevel, SizeOptLevel));
  // At O3, use the value of -locally-hot-callsite-threshold option to populate
  // Params.LocallyHotCallSiteThreshold. Below O3, this flag has effect only
  // when it is specified explicitly.
  if (OptLevel > 2)
    Params.LocallyHotCallSiteThreshold = LocallyHotCallSiteThreshold;
  return Params;
}

PreservedAnalyses
InlineCostAnnotationPrinterPass::run(Function &F,
                                     FunctionAnalysisManager &FAM) {
  PrintInstructionComments = true;
  std::function<AssumptionCache &(Function &)> GetAssumptionCache =
      [&](Function &F) -> AssumptionCache & {
    return FAM.getResult<AssumptionAnalysis>(F);
  };
  Module *M = F.getParent();
  ProfileSummaryInfo PSI(*M);
  DataLayout DL(M);
  TargetTransformInfo TTI(DL);
  // FIXME: Redesign the usage of InlineParams to expand the scope of this pass.
  // In the current implementation, the type of InlineParams doesn't matter as
  // the pass serves only for verification of the inliner's decisions.
  // We can add a flag which determines InlineParams for this run. Right now,
  // the default InlineParams are used.
  const InlineParams Params = llvm::getInlineParams();
  for (BasicBlock &BB : F) {
    for (Instruction &I : BB) {
      if (CallInst *CI = dyn_cast<CallInst>(&I)) {
        Function *CalledFunction = CI->getCalledFunction();
        if (!CalledFunction || CalledFunction->isDeclaration())
          continue;
        OptimizationRemarkEmitter ORE(CalledFunction);
        InlineCostCallAnalyzer ICCA(*CalledFunction, *CI, Params, TTI,
                                    GetAssumptionCache, nullptr, &PSI, &ORE);
        ICCA.analyze();
        OS << " Analyzing call of " << CalledFunction->getName()
           << "... (caller:" << CI->getCaller()->getName() << ")\n";
        ICCA.print();
      }
    }
  }
  return PreservedAnalyses::all();
}
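
// A possible way to exercise the annotation printer above (assuming the pass
// is registered as "print<inline-cost>" in PassRegistry.def):
//   opt -passes='print<inline-cost>' -disable-output input.ll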