//===- InlineCost.cpp - Cost analysis for inliner -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inline cost analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/InlineCost.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "inline-cost"

STATISTIC(NumCallsAnalyzed, "Number of call sites analyzed");

static cl::opt<int> InlineThreshold(
    "inline-threshold", cl::Hidden, cl::init(225), cl::ZeroOrMore,
    cl::desc("Control the amount of inlining to perform (default = 225)"));

static cl::opt<int> HintThreshold(
    "inlinehint-threshold", cl::Hidden, cl::init(325),
    cl::desc("Threshold for inlining functions with inline hint"));

static cl::opt<int>
    ColdCallSiteThreshold("inline-cold-callsite-threshold", cl::Hidden,
                          cl::init(45),
                          cl::desc("Threshold for inlining cold callsites"));

// We introduce this threshold to help performance of instrumentation based
// PGO before we actually hook up inliner with analysis passes such as BPI and
// BFI.
static cl::opt<int> ColdThreshold(
    "inlinecold-threshold", cl::Hidden, cl::init(45),
    cl::desc("Threshold for inlining functions with cold attribute"));

static cl::opt<int>
    HotCallSiteThreshold("hot-callsite-threshold", cl::Hidden, cl::init(3000),
                         cl::ZeroOrMore,
                         cl::desc("Threshold for hot callsites"));

namespace {

class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
  typedef InstVisitor<CallAnalyzer, bool> Base;
  friend class InstVisitor<CallAnalyzer, bool>;

  /// The TargetTransformInfo available for this compilation.
  const TargetTransformInfo &TTI;

  /// Getter for the cache of @llvm.assume intrinsics.
  std::function<AssumptionCache &(Function &)> &GetAssumptionCache;

  /// Getter for BlockFrequencyInfo.
  Optional<function_ref<BlockFrequencyInfo &(Function &)>> &GetBFI;

  /// Profile summary information.
  ProfileSummaryInfo *PSI;

  /// The called function.
  Function &F;

  // Cache the DataLayout since we use it a lot.
  const DataLayout &DL;

  /// The candidate callsite being analyzed. Please do not use this to do
  /// analysis in the caller function; we want the inline cost query to be
  /// easily cacheable. Instead, use the cover function paramHasAttr.
  CallSite CandidateCS;

  /// Tunable parameters that control the analysis.
  const InlineParams &Params;

  int Threshold;
  int Cost;

  bool IsCallerRecursive;
  bool IsRecursiveCall;
  bool ExposesReturnsTwice;
  bool HasDynamicAlloca;
  bool ContainsNoDuplicateCall;
  bool HasReturn;
  bool HasIndirectBr;
  bool HasFrameEscape;

  /// Number of bytes allocated statically by the callee.
  uint64_t AllocatedSize;
  unsigned NumInstructions, NumVectorInstructions;
  int FiftyPercentVectorBonus, TenPercentVectorBonus;
  int VectorBonus;

  /// While we walk the potentially-inlined instructions, we build up and
  /// maintain a mapping of simplified values specific to this callsite. The
  /// idea is to propagate any special information we have about arguments to
  /// this call through the inlinable section of the function, and account for
  /// likely simplifications post-inlining. The most important aspect we track
  /// is CFG altering simplifications -- when we prove a basic block dead, that
  /// can cause dramatic shifts in the cost of inlining a function.
  DenseMap<Value *, Constant *> SimplifiedValues;

  /// Keep track of the values which map back (through function arguments) to
  /// allocas on the caller stack which could be simplified through SROA.
  DenseMap<Value *, Value *> SROAArgValues;

  /// The mapping of caller Alloca values to their accumulated cost savings. If
  /// we have to disable SROA for one of the allocas, this tells us how much
  /// cost must be added.
  DenseMap<Value *, int> SROAArgCosts;

  /// Keep track of values which map to a pointer base and constant offset.
  DenseMap<Value *, std::pair<Value *, APInt>> ConstantOffsetPtrs;

  // Custom simplification helper routines.
  bool isAllocaDerivedArg(Value *V);
  bool lookupSROAArgAndCost(Value *V, Value *&Arg,
                            DenseMap<Value *, int>::iterator &CostIt);
  void disableSROA(DenseMap<Value *, int>::iterator CostIt);
  void disableSROA(Value *V);
  void accumulateSROACost(DenseMap<Value *, int>::iterator CostIt,
                          int InstructionCost);
  bool isGEPFree(GetElementPtrInst &GEP);
  bool accumulateGEPOffset(GEPOperator &GEP, APInt &Offset);
  bool simplifyCallSite(Function *F, CallSite CS);
  template <typename Callable>
  bool simplifyInstruction(Instruction &I, Callable Evaluate);
  ConstantInt *stripAndComputeInBoundsConstantOffsets(Value *&V);

  /// Return true if the given argument to the function being considered for
  /// inlining has the given attribute set either at the call site or the
  /// function declaration. Primarily used to inspect call site specific
  /// attributes since these can be more precise than the ones on the callee
  /// itself.
  bool paramHasAttr(Argument *A, Attribute::AttrKind Attr);

  /// Return true if the given value is known non null within the callee if
  /// inlined through this particular callsite.
  bool isKnownNonNullInCallee(Value *V);

  /// Update Threshold based on callsite properties such as callee
  /// attributes and callee hotness for PGO builds. The Callee is explicitly
  /// passed to support analyzing indirect calls whose target is inferred by
  /// analysis.
  void updateThreshold(CallSite CS, Function &Callee);

  /// Return true if size growth is allowed when inlining the callee at CS.
  bool allowSizeGrowth(CallSite CS);

  // Custom analysis routines.
  bool analyzeBlock(BasicBlock *BB, SmallPtrSetImpl<const Value *> &EphValues);

  // Disable several entry points to the visitor so we don't accidentally use
  // them by declaring but not defining them here.
  void visit(Module *);
  void visit(Module &);
  void visit(Function *);
  void visit(Function &);
  void visit(BasicBlock *);
  void visit(BasicBlock &);

  // Provide base case for our instruction visit.
  bool visitInstruction(Instruction &I);

  // Our visit overrides.
  bool visitAlloca(AllocaInst &I);
  bool visitPHI(PHINode &I);
  bool visitGetElementPtr(GetElementPtrInst &I);
  bool visitBitCast(BitCastInst &I);
  bool visitPtrToInt(PtrToIntInst &I);
  bool visitIntToPtr(IntToPtrInst &I);
  bool visitCastInst(CastInst &I);
  bool visitUnaryInstruction(UnaryInstruction &I);
  bool visitCmpInst(CmpInst &I);
  bool visitSub(BinaryOperator &I);
  bool visitBinaryOperator(BinaryOperator &I);
  bool visitLoad(LoadInst &I);
  bool visitStore(StoreInst &I);
  bool visitExtractValue(ExtractValueInst &I);
  bool visitInsertValue(InsertValueInst &I);
  bool visitCallSite(CallSite CS);
  bool visitReturnInst(ReturnInst &RI);
  bool visitBranchInst(BranchInst &BI);
  bool visitSwitchInst(SwitchInst &SI);
  bool visitIndirectBrInst(IndirectBrInst &IBI);
  bool visitResumeInst(ResumeInst &RI);
  bool visitCleanupReturnInst(CleanupReturnInst &RI);
  bool visitCatchReturnInst(CatchReturnInst &RI);
  bool visitUnreachableInst(UnreachableInst &I);

public:
  CallAnalyzer(const TargetTransformInfo &TTI,
               std::function<AssumptionCache &(Function &)> &GetAssumptionCache,
               Optional<function_ref<BlockFrequencyInfo &(Function &)>> &GetBFI,
               ProfileSummaryInfo *PSI, Function &Callee, CallSite CSArg,
               const InlineParams &Params)
      : TTI(TTI), GetAssumptionCache(GetAssumptionCache), GetBFI(GetBFI),
        PSI(PSI), F(Callee), DL(F.getParent()->getDataLayout()),
        CandidateCS(CSArg), Params(Params), Threshold(Params.DefaultThreshold),
        Cost(0), IsCallerRecursive(false), IsRecursiveCall(false),
        ExposesReturnsTwice(false), HasDynamicAlloca(false),
        ContainsNoDuplicateCall(false), HasReturn(false), HasIndirectBr(false),
        HasFrameEscape(false), AllocatedSize(0), NumInstructions(0),
        NumVectorInstructions(0), FiftyPercentVectorBonus(0),
        TenPercentVectorBonus(0), VectorBonus(0), NumConstantArgs(0),
        NumConstantOffsetPtrArgs(0), NumAllocaArgs(0), NumConstantPtrCmps(0),
        NumConstantPtrDiffs(0), NumInstructionsSimplified(0),
        SROACostSavings(0), SROACostSavingsLost(0) {}

  bool analyzeCall(CallSite CS);

  int getThreshold() { return Threshold; }
  int getCost() { return Cost; }

  // Keep a bunch of stats about the cost savings found so we can print them
  // out when debugging.
  unsigned NumConstantArgs;
  unsigned NumConstantOffsetPtrArgs;
  unsigned NumAllocaArgs;
  unsigned NumConstantPtrCmps;
  unsigned NumConstantPtrDiffs;
  unsigned NumInstructionsSimplified;
  unsigned SROACostSavings;
  unsigned SROACostSavingsLost;

  void dump();
};

} // namespace

/// \brief Test whether the given value is an Alloca-derived function argument.
bool CallAnalyzer::isAllocaDerivedArg(Value *V) {
  return SROAArgValues.count(V);
}
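// Illustrative note (added commentary, not from the original source): the
// routines below form a running ledger for SROA savings. Given a hypothetical
// call foo(&local), where 'local' is an alloca in the caller, every simple
// load or store of the corresponding argument inside foo() credits InstrCost
// to SROACostSavings via accumulateSROACost(). If a later use proves SROA
// impossible (e.g. the pointer escapes), disableSROA() debits all of the
// accumulated savings back into Cost.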
/// \brief Lookup the SROA-candidate argument and cost iterator which V maps to.
/// Returns false if V does not map to a SROA-candidate.
bool CallAnalyzer::lookupSROAArgAndCost(
    Value *V, Value *&Arg, DenseMap<Value *, int>::iterator &CostIt) {
  if (SROAArgValues.empty() || SROAArgCosts.empty())
    return false;

  DenseMap<Value *, Value *>::iterator ArgIt = SROAArgValues.find(V);
  if (ArgIt == SROAArgValues.end())
    return false;

  Arg = ArgIt->second;
  CostIt = SROAArgCosts.find(Arg);
  return CostIt != SROAArgCosts.end();
}

/// \brief Disable SROA for the candidate marked by this cost iterator.
///
/// This marks the candidate as no longer viable for SROA, and adds the cost
/// savings associated with it back into the inline cost measurement.
void CallAnalyzer::disableSROA(DenseMap<Value *, int>::iterator CostIt) {
  // If we're no longer able to perform SROA we need to undo its cost savings
  // and prevent subsequent analysis.
  Cost += CostIt->second;
  SROACostSavings -= CostIt->second;
  SROACostSavingsLost += CostIt->second;
  SROAArgCosts.erase(CostIt);
}

/// \brief If 'V' maps to a SROA candidate, disable SROA for it.
void CallAnalyzer::disableSROA(Value *V) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(V, SROAArg, CostIt))
    disableSROA(CostIt);
}

/// \brief Accumulate the given cost for a particular SROA candidate.
void CallAnalyzer::accumulateSROACost(DenseMap<Value *, int>::iterator CostIt,
                                      int InstructionCost) {
  CostIt->second += InstructionCost;
  SROACostSavings += InstructionCost;
}

/// \brief Accumulate a constant GEP offset into an APInt if possible.
///
/// Returns false if unable to compute the offset for any reason. Respects any
/// simplified values known during the analysis of this callsite.
bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
  unsigned IntPtrWidth = DL.getPointerSizeInBits();
  assert(IntPtrWidth == Offset.getBitWidth());

  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
    if (!OpC)
      if (Constant *SimpleOp = SimplifiedValues.lookup(GTI.getOperand()))
        OpC = dyn_cast<ConstantInt>(SimpleOp);
    if (!OpC)
      return false;
    if (OpC->isZero())
      continue;

    // Handle a struct index, which adds its field offset to the pointer.
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      unsigned ElementIdx = OpC->getZExtValue();
      const StructLayout *SL = DL.getStructLayout(STy);
      Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
      continue;
    }

    APInt TypeSize(IntPtrWidth, DL.getTypeAllocSize(GTI.getIndexedType()));
    Offset += OpC->getValue().sextOrTrunc(IntPtrWidth) * TypeSize;
  }
  return true;
}
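// Worked example (added commentary with hypothetical IR, not from the
// original source): for
//   %f = getelementptr inbounds {i32, i32}, {i32, i32}* %p, i32 0, i32 1
// the leading zero index is skipped, while the struct index adds
// SL->getElementOffset(1) == 4, so accumulateGEPOffset() grows Offset by
// 4 bytes.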
/// \brief Use TTI to check whether a GEP is free.
///
/// Respects any simplified values known during the analysis of this callsite.
bool CallAnalyzer::isGEPFree(GetElementPtrInst &GEP) {
  SmallVector<Value *, 4> Indices;
  for (User::op_iterator I = GEP.idx_begin(), E = GEP.idx_end(); I != E; ++I)
    if (Constant *SimpleOp = SimplifiedValues.lookup(*I))
      Indices.push_back(SimpleOp);
    else
      Indices.push_back(*I);
  return TargetTransformInfo::TCC_Free ==
         TTI.getGEPCost(GEP.getSourceElementType(), GEP.getPointerOperand(),
                        Indices);
}

bool CallAnalyzer::visitAlloca(AllocaInst &I) {
  // Check whether inlining will turn a dynamic alloca into a static
  // alloca and handle that case.
  if (I.isArrayAllocation()) {
    Constant *Size = SimplifiedValues.lookup(I.getArraySize());
    if (auto *AllocSize = dyn_cast_or_null<ConstantInt>(Size)) {
      Type *Ty = I.getAllocatedType();
      AllocatedSize = SaturatingMultiplyAdd(
          AllocSize->getLimitedValue(), DL.getTypeAllocSize(Ty), AllocatedSize);
      return Base::visitAlloca(I);
    }
  }

  // Accumulate the allocated size.
  if (I.isStaticAlloca()) {
    Type *Ty = I.getAllocatedType();
    AllocatedSize = SaturatingAdd(DL.getTypeAllocSize(Ty), AllocatedSize);
  }

  // We will happily inline static alloca instructions.
  if (I.isStaticAlloca())
    return Base::visitAlloca(I);

  // FIXME: This is overly conservative. Dynamic allocas are inefficient for
  // a variety of reasons, and so we would like to not inline them into
  // functions which don't currently have a dynamic alloca. This simply
  // disables inlining altogether in the presence of a dynamic alloca.
  HasDynamicAlloca = true;
  return false;
}

bool CallAnalyzer::visitPHI(PHINode &I) {
  // FIXME: We should potentially be tracking values through phi nodes,
  // especially when they collapse to a single value due to deleted CFG edges
  // during inlining.

  // FIXME: We need to propagate SROA *disabling* through phi nodes, even
  // though we don't want to propagate its bonuses. The idea is to disable
  // SROA if it *might* be used in an inappropriate manner.

  // Phi nodes are always zero-cost.
  return true;
}

bool CallAnalyzer::visitGetElementPtr(GetElementPtrInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  bool SROACandidate =
      lookupSROAArgAndCost(I.getPointerOperand(), SROAArg, CostIt);

  // Try to fold GEPs of constant-offset call site argument pointers. This
  // requires target data and inbounds GEPs.
  if (I.isInBounds()) {
    // Check if we have a base + offset for the pointer.
    Value *Ptr = I.getPointerOperand();
    std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Ptr);
    if (BaseAndOffset.first) {
      // Check if the offset of this GEP is constant, and if so accumulate it
      // into Offset.
      if (!accumulateGEPOffset(cast<GEPOperator>(I), BaseAndOffset.second)) {
        // Non-constant GEPs aren't folded, and disable SROA.
        if (SROACandidate)
          disableSROA(CostIt);
        return isGEPFree(I);
      }

      // Add the result as a new mapping to Base + Offset.
      ConstantOffsetPtrs[&I] = BaseAndOffset;

      // Also handle SROA candidates here, we already know that the GEP is
      // all-constant indexed.
      if (SROACandidate)
        SROAArgValues[&I] = SROAArg;

      return true;
    }
  }
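  // Illustrative example (added commentary, not from the original source):
  // if the pointer operand maps to (%arg, 8) in ConstantOffsetPtrs and this
  // GEP contributes a constant 4 bytes, the block above records (%arg, 12)
  // for the GEP and models the instruction as free.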
  // Lambda to check whether a GEP's indices are all constant.
  auto IsGEPOffsetConstant = [&](GetElementPtrInst &GEP) {
    for (User::op_iterator I = GEP.idx_begin(), E = GEP.idx_end(); I != E; ++I)
      if (!isa<Constant>(*I) && !SimplifiedValues.lookup(*I))
        return false;
    return true;
  };

  if (IsGEPOffsetConstant(I)) {
    if (SROACandidate)
      SROAArgValues[&I] = SROAArg;

    // Constant GEPs are modeled as free.
    return true;
  }

  // Variable GEPs will require math and will disable SROA.
  if (SROACandidate)
    disableSROA(CostIt);
  return isGEPFree(I);
}

/// Simplify \p I if its operands are constants and update SimplifiedValues.
/// \p Evaluate is a callable specific to instruction type that evaluates the
/// instruction when all the operands are constants.
template <typename Callable>
bool CallAnalyzer::simplifyInstruction(Instruction &I, Callable Evaluate) {
  SmallVector<Constant *, 2> COps;
  for (Value *Op : I.operands()) {
    Constant *COp = dyn_cast<Constant>(Op);
    if (!COp)
      COp = SimplifiedValues.lookup(Op);
    if (!COp)
      return false;
    COps.push_back(COp);
  }
  auto *C = Evaluate(COps);
  if (!C)
    return false;
  SimplifiedValues[&I] = C;
  return true;
}

bool CallAnalyzer::visitBitCast(BitCastInst &I) {
  // Propagate constants through bitcasts.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getBitCast(COps[0], I.getType());
      }))
    return true;

  // Track base/offsets through casts.
  std::pair<Value *, APInt> BaseAndOffset =
      ConstantOffsetPtrs.lookup(I.getOperand(0));
  // Casts don't change the offset, just wrap it up.
  if (BaseAndOffset.first)
    ConstantOffsetPtrs[&I] = BaseAndOffset;

  // Also look for SROA candidates here.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  // Bitcasts are always zero cost.
  return true;
}

bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
  // Propagate constants through ptrtoint.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getPtrToInt(COps[0], I.getType());
      }))
    return true;

  // Track base/offset pairs when converted to a plain integer provided the
  // integer is large enough to represent the pointer.
  unsigned IntegerSize = I.getType()->getScalarSizeInBits();
  if (IntegerSize >= DL.getPointerSizeInBits()) {
    std::pair<Value *, APInt> BaseAndOffset =
        ConstantOffsetPtrs.lookup(I.getOperand(0));
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // This is really weird. Technically, ptrtoint will disable SROA. However,
  // unless that ptrtoint is *used* somewhere in the live basic blocks after
  // inlining, it will be nuked, and SROA should proceed. All of the uses which
  // would block SROA would also block SROA if applied directly to a pointer,
  // and so we can just add the integer in here. The only places where SROA is
  // preserved either cannot fire on an integer, or won't in-and-of themselves
  // disable SROA (ext) w/o some later use that we would see and disable.
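  // Illustrative example (added commentary with hypothetical IR, not from
  // the original source): given
  //   %i = ptrtoint i32* %p.sroa to i64
  // SROA stays viable while %i feeds only uses this analysis can fold away;
  // a use that genuinely escapes the value (say, storing %i to memory) is
  // visited later and disables SROA at that point.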
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  return TargetTransformInfo::TCC_Free == TTI.getUserCost(&I);
}

bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
  // Propagate constants through inttoptr.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getIntToPtr(COps[0], I.getType());
      }))
    return true;

  // Track base/offset pairs when round-tripped through a pointer without
  // modifications provided the integer is not too large.
  Value *Op = I.getOperand(0);
  unsigned IntegerSize = Op->getType()->getScalarSizeInBits();
  if (IntegerSize <= DL.getPointerSizeInBits()) {
    std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op);
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // "Propagate" SROA here in the same manner as we do for ptrtoint above.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(Op, SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  return TargetTransformInfo::TCC_Free == TTI.getUserCost(&I);
}

bool CallAnalyzer::visitCastInst(CastInst &I) {
  // Propagate constants through casts.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getCast(I.getOpcode(), COps[0], I.getType());
      }))
    return true;

  // Disable SROA in the face of arbitrary casts we don't whitelist elsewhere.
  disableSROA(I.getOperand(0));

  return TargetTransformInfo::TCC_Free == TTI.getUserCost(&I);
}

bool CallAnalyzer::visitUnaryInstruction(UnaryInstruction &I) {
  Value *Operand = I.getOperand(0);
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantFoldInstOperands(&I, COps[0], DL);
      }))
    return true;

  // Disable any SROA on the argument to arbitrary unary operators.
  disableSROA(Operand);

  return false;
}

bool CallAnalyzer::paramHasAttr(Argument *A, Attribute::AttrKind Attr) {
  return CandidateCS.paramHasAttr(A->getArgNo(), Attr);
}

bool CallAnalyzer::isKnownNonNullInCallee(Value *V) {
  // Does the *call site* have the NonNull attribute set on an argument? We
  // use the attribute on the call site to memoize any analysis done in the
  // caller. This will also trip if the callee function has a non-null
  // parameter attribute, but that's a less interesting case because hopefully
  // the callee would already have been simplified based on that.
  if (Argument *A = dyn_cast<Argument>(V))
    if (paramHasAttr(A, Attribute::NonNull))
      return true;

  // Is this an alloca in the caller? This is distinct from the attribute case
  // above because attributes aren't updated within the inliner itself and we
  // always want to catch the alloca derived case.
  if (isAllocaDerivedArg(V))
    // We can actually predict the result of comparisons between an
    // alloca-derived value and null. Note that this fires regardless of
    // SROA firing.
    return true;

  return false;
}
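// Illustrative example (added commentary with hypothetical IR, not from the
// original source): for a callsite
//   call void @f(i32* nonnull %p)
// isKnownNonNullInCallee() lets visitCmpInst() below fold an
// 'icmp eq %arg, null' inside @f to false, which in turn can let
// analyzeCall() prune an entire error-handling block as dead.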
bool CallAnalyzer::allowSizeGrowth(CallSite CS) {
  // If the normal destination of the invoke or the parent block of the call
  // site is unreachable-terminated, there is little point in inlining this
  // unless there is literally zero cost.
  // FIXME: Note that it is possible that an unreachable-terminated block has a
  // hot entry. For example, in the scenario below, inlining hot_call_X() may
  // be beneficial:
  // main() {
  //   hot_call_1();
  //   ...
  //   hot_call_N()
  //   exit(0);
  // }
  // For now, we are not handling this corner case here as it is rare in real
  // code. In the future, we should elaborate this based on BPI and BFI in more
  // general threshold adjusting heuristics in updateThreshold().
  Instruction *Instr = CS.getInstruction();
  if (InvokeInst *II = dyn_cast<InvokeInst>(Instr)) {
    if (isa<UnreachableInst>(II->getNormalDest()->getTerminator()))
      return false;
  } else if (isa<UnreachableInst>(Instr->getParent()->getTerminator()))
    return false;

  return true;
}

void CallAnalyzer::updateThreshold(CallSite CS, Function &Callee) {
  // If no size growth is allowed for this inlining, set Threshold to 0.
  if (!allowSizeGrowth(CS)) {
    Threshold = 0;
    return;
  }

  Function *Caller = CS.getCaller();

  // Return min(A, B) if B is valid.
  auto MinIfValid = [](int A, Optional<int> B) {
    return B ? std::min(A, B.getValue()) : A;
  };

  // Return max(A, B) if B is valid.
  auto MaxIfValid = [](int A, Optional<int> B) {
    return B ? std::max(A, B.getValue()) : A;
  };

  // Use the OptMinSizeThreshold or OptSizeThreshold knob if they are available
  // and reduce the threshold if the caller has the necessary attribute.
  if (Caller->optForMinSize())
    Threshold = MinIfValid(Threshold, Params.OptMinSizeThreshold);
  else if (Caller->optForSize())
    Threshold = MinIfValid(Threshold, Params.OptSizeThreshold);

  // Adjust the threshold based on inlinehint attribute and profile based
  // hotness information if the caller does not have MinSize attribute.
  if (!Caller->optForMinSize()) {
    if (Callee.hasFnAttribute(Attribute::InlineHint))
      Threshold = MaxIfValid(Threshold, Params.HintThreshold);
    if (PSI) {
      BlockFrequencyInfo *CallerBFI = GetBFI ? &((*GetBFI)(*Caller)) : nullptr;
      // FIXME: After switching to the new passmanager, simplify the logic
      // below by checking only the callsite hotness/coldness. The check for
      // CallerBFI exists only because we do not have BFI available with the
      // old PM.
      //
      // Use callee's hotness information only if we have no way of determining
      // callsite's hotness information. Callsite hotness can be determined if
      // sample profile is used (which adds hotness metadata to calls) or if
      // caller's BlockFrequencyInfo is available.
      if (CallerBFI || PSI->hasSampleProfile()) {
        if (PSI->isHotCallSite(CS, CallerBFI)) {
          DEBUG(dbgs() << "Hot callsite.\n");
          Threshold = Params.HotCallSiteThreshold.getValue();
        } else if (PSI->isColdCallSite(CS, CallerBFI)) {
          DEBUG(dbgs() << "Cold callsite.\n");
          Threshold = MinIfValid(Threshold, Params.ColdCallSiteThreshold);
        }
      } else {
        if (PSI->isFunctionEntryHot(&Callee)) {
          DEBUG(dbgs() << "Hot callee.\n");
          // If callsite hotness cannot be determined, we may still know
          // that the callee is hot and treat it as a weaker hint for threshold
          // increase.
          Threshold = MaxIfValid(Threshold, Params.HintThreshold);
        } else if (PSI->isFunctionEntryCold(&Callee)) {
          DEBUG(dbgs() << "Cold callee.\n");
          Threshold = MinIfValid(Threshold, Params.ColdThreshold);
        }
      }
    }
  }

  // Finally, take the target-specific inlining threshold multiplier into
  // account.
  Threshold *= TTI.getInliningThresholdMultiplier();
}

bool CallAnalyzer::visitCmpInst(CmpInst &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  // First try to handle simplified comparisons.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getCompare(I.getPredicate(), COps[0], COps[1]);
      }))
    return true;

  if (I.getOpcode() == Instruction::FCmp)
    return false;

  // Otherwise look for a comparison between constant offset pointers with
  // a common base.
  Value *LHSBase, *RHSBase;
  APInt LHSOffset, RHSOffset;
  std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
  if (LHSBase) {
    std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
    if (RHSBase && LHSBase == RHSBase) {
      // We have common bases, fold the icmp to a constant based on the
      // offsets.
      Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
      Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
      if (Constant *C = ConstantExpr::getICmp(I.getPredicate(), CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        ++NumConstantPtrCmps;
        return true;
      }
    }
  }

  // If the comparison is an equality comparison with null, we can simplify it
  // if we know the value (argument) can't be null.
  if (I.isEquality() && isa<ConstantPointerNull>(I.getOperand(1)) &&
      isKnownNonNullInCallee(I.getOperand(0))) {
    bool IsNotEqual = I.getPredicate() == CmpInst::ICMP_NE;
    SimplifiedValues[&I] = IsNotEqual ? ConstantInt::getTrue(I.getType())
                                      : ConstantInt::getFalse(I.getType());
    return true;
  }
  // Finally check for SROA candidates in comparisons.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
    if (isa<ConstantPointerNull>(I.getOperand(1))) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}

bool CallAnalyzer::visitSub(BinaryOperator &I) {
  // Try to handle a special case: we can fold computing the difference of two
  // constant-related pointers.
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Value *LHSBase, *RHSBase;
  APInt LHSOffset, RHSOffset;
  std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
  if (LHSBase) {
    std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
    if (RHSBase && LHSBase == RHSBase) {
      // We have common bases, fold the subtract to a constant based on the
      // offsets.
      Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
      Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
      if (Constant *C = ConstantExpr::getSub(CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        ++NumConstantPtrDiffs;
        return true;
      }
    }
  }
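  // Illustrative example (added commentary, not from the original source):
  // if ConstantOffsetPtrs maps LHS to (%base, 16) and RHS to (%base, 4),
  // the block above folds the subtract to the constant 12 at no cost.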
  // Otherwise, fall back to the generic logic for simplifying and handling
  // instructions.
  return Base::visitSub(I);
}

bool CallAnalyzer::visitBinaryOperator(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  auto Evaluate = [&](SmallVectorImpl<Constant *> &COps) {
    Value *SimpleV = nullptr;
    if (auto FI = dyn_cast<FPMathOperator>(&I))
      SimpleV = SimplifyFPBinOp(I.getOpcode(), COps[0], COps[1],
                                FI->getFastMathFlags(), DL);
    else
      SimpleV = SimplifyBinOp(I.getOpcode(), COps[0], COps[1], DL);
    return dyn_cast_or_null<Constant>(SimpleV);
  };

  if (simplifyInstruction(I, Evaluate))
    return true;

  // Disable any SROA on arguments to arbitrary, unsimplified binary operators.
  disableSROA(LHS);
  disableSROA(RHS);

  return false;
}

bool CallAnalyzer::visitLoad(LoadInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getPointerOperand(), SROAArg, CostIt)) {
    if (I.isSimple()) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}

bool CallAnalyzer::visitStore(StoreInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getPointerOperand(), SROAArg, CostIt)) {
    if (I.isSimple()) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}

bool CallAnalyzer::visitExtractValue(ExtractValueInst &I) {
  // Constant folding for extract value is trivial.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getExtractValue(COps[0], I.getIndices());
      }))
    return true;

  // SROA can look through these but give them a cost.
  return false;
}

bool CallAnalyzer::visitInsertValue(InsertValueInst &I) {
  // Constant folding for insert value is trivial.
  if (simplifyInstruction(I, [&](SmallVectorImpl<Constant *> &COps) {
        return ConstantExpr::getInsertValue(/*AggregateOperand*/ COps[0],
                                            /*InsertedValueOperand*/ COps[1],
                                            I.getIndices());
      }))
    return true;

  // SROA can look through these but give them a cost.
  return false;
}

/// \brief Try to simplify a call site.
///
/// Takes a concrete function and callsite and tries to actually simplify it by
/// analyzing the arguments and call itself with instsimplify. Returns true if
/// it has simplified the callsite to some other entity (a constant), making it
/// free.
bool CallAnalyzer::simplifyCallSite(Function *F, CallSite CS) {
  // FIXME: Using the instsimplify logic directly for this is inefficient
  // because we have to continually rebuild the argument list even when no
  // simplifications can be performed. Until that is fixed with remapping
  // inside of instsimplify, directly constant fold calls here.
  if (!canConstantFoldCallTo(CS, F))
    return false;

  // Try to re-map the arguments to constants.
  SmallVector<Constant *, 4> ConstantArgs;
  ConstantArgs.reserve(CS.arg_size());
  for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end(); I != E;
       ++I) {
    Constant *C = dyn_cast<Constant>(*I);
    if (!C)
      C = dyn_cast_or_null<Constant>(SimplifiedValues.lookup(*I));
    if (!C)
      return false; // This argument doesn't map to a constant.
    ConstantArgs.push_back(C);
  }
  if (Constant *C = ConstantFoldCall(CS, F, ConstantArgs)) {
    SimplifiedValues[CS.getInstruction()] = C;
    return true;
  }

  return false;
}

bool CallAnalyzer::visitCallSite(CallSite CS) {
  if (CS.hasFnAttr(Attribute::ReturnsTwice) &&
      !F.hasFnAttribute(Attribute::ReturnsTwice)) {
    // This aborts the entire analysis.
    ExposesReturnsTwice = true;
    return false;
  }
  if (CS.isCall() && cast<CallInst>(CS.getInstruction())->cannotDuplicate())
    ContainsNoDuplicateCall = true;

  if (Function *F = CS.getCalledFunction()) {
    // When we have a concrete function, first try to simplify it directly.
    if (simplifyCallSite(F, CS))
      return true;

    // Next check if it is an intrinsic we know about.
    // FIXME: Lift this into part of the InstVisitor.
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
      switch (II->getIntrinsicID()) {
      default:
        return Base::visitCallSite(CS);

      case Intrinsic::load_relative:
        // This is normally lowered to 4 LLVM instructions.
        Cost += 3 * InlineConstants::InstrCost;
        return false;

      case Intrinsic::memset:
      case Intrinsic::memcpy:
      case Intrinsic::memmove:
        // SROA can usually chew through these intrinsics, but they aren't
        // free.
        return false;
      case Intrinsic::localescape:
        HasFrameEscape = true;
        return false;
      }
    }

    if (F == CS.getInstruction()->getParent()->getParent()) {
      // This flag will fully abort the analysis, so don't bother with anything
      // else.
      IsRecursiveCall = true;
      return false;
    }

    if (TTI.isLoweredToCall(F)) {
      // We account for the average 1 instruction per call argument setup
      // here.
      Cost += CS.arg_size() * InlineConstants::InstrCost;

      // Everything other than inline ASM will also have a significant cost
      // merely from making the call.
      if (!isa<InlineAsm>(CS.getCalledValue()))
        Cost += InlineConstants::CallPenalty;
    }

    return Base::visitCallSite(CS);
  }

  // Otherwise we're in a very special case -- an indirect function call. See
  // if we can be particularly clever about this.
  Value *Callee = CS.getCalledValue();

  // First, pay the price of the argument setup. We account for the average
  // 1 instruction per call argument setup here.
  Cost += CS.arg_size() * InlineConstants::InstrCost;

  // Next, check if this happens to be an indirect function call to a known
  // function in this inline context. If not, we've done all we can.
  Function *F = dyn_cast_or_null<Function>(SimplifiedValues.lookup(Callee));
  if (!F)
    return Base::visitCallSite(CS);

  // If we have a constant that we are calling as a function, we can peer
  // through it and see the function target. This happens not infrequently
  // during devirtualization and so we want to give it a hefty bonus for
  // inlining, but cap that bonus in the event that inlining wouldn't pan
  // out. Pretend to inline the function, with a custom threshold.
  auto IndirectCallParams = Params;
  IndirectCallParams.DefaultThreshold = InlineConstants::IndirectCallThreshold;
  CallAnalyzer CA(TTI, GetAssumptionCache, GetBFI, PSI, *F, CS,
                  IndirectCallParams);
  if (CA.analyzeCall(CS)) {
    // We were able to inline the indirect call! Subtract the cost from the
    // threshold to get the bonus we want to apply, but don't go below zero.
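    // For example (added commentary with illustrative numbers, not from the
    // original source): if the nested analysis reports a threshold of 337 and
    // a cost of 40, this call site receives a bonus of 297, reflecting how
    // profitable the devirtualized body looks.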
    Cost -= std::max(0, CA.getThreshold() - CA.getCost());
  }

  return Base::visitCallSite(CS);
}

bool CallAnalyzer::visitReturnInst(ReturnInst &RI) {
  // At least one return instruction will be free after inlining.
  bool Free = !HasReturn;
  HasReturn = true;
  return Free;
}

bool CallAnalyzer::visitBranchInst(BranchInst &BI) {
  // We model unconditional branches as essentially free -- they really
  // shouldn't exist at all, but handling them makes the behavior of the
  // inliner more regular and predictable. Interestingly, conditional branches
  // which will fold away are also free.
  return BI.isUnconditional() || isa<ConstantInt>(BI.getCondition()) ||
         dyn_cast_or_null<ConstantInt>(
             SimplifiedValues.lookup(BI.getCondition()));
}

bool CallAnalyzer::visitSwitchInst(SwitchInst &SI) {
  // We model unconditional switches as free, see the comments on handling
  // branches.
  if (isa<ConstantInt>(SI.getCondition()))
    return true;
  if (Value *V = SimplifiedValues.lookup(SI.getCondition()))
    if (isa<ConstantInt>(V))
      return true;

  // Assume the most general case where the switch is lowered into either a
  // jump table, bit test, or a balanced binary tree consisting of case
  // clusters without merging adjacent clusters with the same destination. We
  // do not consider the switches that are lowered with a mix of jump
  // table/bit test/binary search tree. The cost of the switch is proportional
  // to the size of the tree or the size of jump table range.
  //
  // NB: We convert large switches which are just used to initialize large phi
  // nodes to lookup tables instead in simplify-cfg, so this shouldn't prevent
  // inlining those. It will prevent inlining in cases where the optimization
  // does not (yet) fire.

  // Exit early for a large switch, assuming one case needs at least one
  // instruction.
  // FIXME: This is not true for a bit test, but ignore such case for now to
  // save compile-time.
  int64_t CostLowerBound =
      std::min((int64_t)INT_MAX,
               (int64_t)SI.getNumCases() * InlineConstants::InstrCost + Cost);

  if (CostLowerBound > Threshold) {
    Cost = CostLowerBound;
    return false;
  }

  unsigned JumpTableSize = 0;
  unsigned NumCaseCluster =
      TTI.getEstimatedNumberOfCaseClusters(SI, JumpTableSize);

  // If suitable for a jump table, consider the cost for the table size and
  // branch to destination.
  if (JumpTableSize) {
    int64_t JTCost = (int64_t)JumpTableSize * InlineConstants::InstrCost +
                     4 * InlineConstants::InstrCost;
    Cost = std::min((int64_t)INT_MAX, JTCost + Cost);
    return false;
  }
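  // Illustrative example (added commentary, not from the original source):
  // a dense switch over the range 0..63 that TTI deems jump-table-suitable
  // is charged 64 * InstrCost for the table entries plus a fixed
  // 4 * InstrCost for reaching the destination through the table.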
  // Considering forming a binary search tree, we should find the number of
  // nodes, which is the same as the number of comparisons when lowered. For a
  // given number of clusters, n, we can define a recursive function, f(n), to
  // find the number of nodes in the tree. The recursion is:
  //   f(n) = 1 + f(n/2) + f(n - n/2), when n > 3,
  //   and f(n) = n, when n <= 3.
  // This will lead to a binary tree where the leaf should be either f(2) or
  // f(3) when n > 3. So, the number of comparisons from leaves should be n,
  // while the number of non-leaf nodes should be:
  //   2^(log2(n) - 1) - 1
  //   = 2^log2(n) * 2^-1 - 1
  //   = n / 2 - 1.
  // Considering comparisons from leaf and non-leaf nodes, we can estimate the
  // number of comparisons in a simple closed form:
  //   n + n / 2 - 1 = n * 3 / 2 - 1
  if (NumCaseCluster <= 3) {
    // Suppose a comparison includes one compare and one conditional branch.
    Cost += NumCaseCluster * 2 * InlineConstants::InstrCost;
    return false;
  }
  int64_t ExpectedNumberOfCompare = 3 * (uint64_t)NumCaseCluster / 2 - 1;
  uint64_t SwitchCost =
      ExpectedNumberOfCompare * 2 * InlineConstants::InstrCost;
  Cost = std::min((uint64_t)INT_MAX, SwitchCost + Cost);
  return false;
}

bool CallAnalyzer::visitIndirectBrInst(IndirectBrInst &IBI) {
  // We never want to inline functions that contain an indirectbr. This is
  // incorrect because all the blockaddresses (in static global initializers
  // for example) would be referring to the original function, and this
  // indirect jump would jump from the inlined copy of the function into the
  // original function which is extremely undefined behavior.
  // FIXME: This logic isn't really right; we can safely inline functions with
  // indirectbr's as long as no other function or global references the
  // blockaddress of a block within the current function.
  HasIndirectBr = true;
  return false;
}

bool CallAnalyzer::visitResumeInst(ResumeInst &RI) {
  // FIXME: It's not clear that a single instruction is an accurate model for
  // the inline cost of a resume instruction.
  return false;
}

bool CallAnalyzer::visitCleanupReturnInst(CleanupReturnInst &CRI) {
  // FIXME: It's not clear that a single instruction is an accurate model for
  // the inline cost of a cleanupret instruction.
  return false;
}

bool CallAnalyzer::visitCatchReturnInst(CatchReturnInst &CRI) {
  // FIXME: It's not clear that a single instruction is an accurate model for
  // the inline cost of a catchret instruction.
  return false;
}

bool CallAnalyzer::visitUnreachableInst(UnreachableInst &I) {
  // FIXME: It might be reasonable to discount the cost of instructions leading
  // to unreachable as they have the lowest possible impact on both runtime and
  // code size.
  return true; // No actual code is needed for unreachable.
}

bool CallAnalyzer::visitInstruction(Instruction &I) {
  // Some instructions are free. All of the free intrinsics can also be
  // handled by SROA, etc.
  if (TargetTransformInfo::TCC_Free == TTI.getUserCost(&I))
    return true;

  // We found something we don't understand or can't handle. Mark any SROA-able
  // values in the operand list as no longer viable.
  for (User::op_iterator OI = I.op_begin(), OE = I.op_end(); OI != OE; ++OI)
    disableSROA(*OI);

  return false;
}
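// Illustrative summary (added commentary, not from the original source):
// every visit* routine above returns true when the instruction is modeled
// as free (simplified, folded, or credited to an SROA candidate) and false
// when analyzeBlock() below should charge InlineConstants::InstrCost for it.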
/// \brief Analyze a basic block for its contribution to the inline cost.
///
/// This method walks the analyzer over every instruction in the given basic
/// block and accounts for their cost during inlining at this callsite. It
/// aborts early if the threshold has been exceeded or an impossible-to-inline
/// construct has been detected. It returns false if inlining is no longer
/// viable, and true if inlining remains viable.
bool CallAnalyzer::analyzeBlock(BasicBlock *BB,
                                SmallPtrSetImpl<const Value *> &EphValues) {
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
    // FIXME: Currently, the number of instructions in a function, regardless
    // of our ability to simplify them during inlining to constants or dead
    // code, is actually used by the vector bonus heuristic. As long as that's
    // true, we have to special case debug intrinsics here to prevent
    // differences in inlining due to debug symbols. Eventually, the number of
    // unsimplified instructions shouldn't factor into the cost computation,
    // but until then, hack around it here.
    if (isa<DbgInfoIntrinsic>(I))
      continue;

    // Skip ephemeral values.
    if (EphValues.count(&*I))
      continue;

    ++NumInstructions;
    if (isa<ExtractElementInst>(I) || I->getType()->isVectorTy())
      ++NumVectorInstructions;

    // If the instruction is floating point, and the target says this operation
    // is expensive or the function has the "use-soft-float" attribute, this
    // may eventually become a library call. Treat the cost as such.
    if (I->getType()->isFloatingPointTy()) {
      // If the function has the "use-soft-float" attribute, mark it as
      // expensive.
      if (TTI.getFPOpCost(I->getType()) ==
              TargetTransformInfo::TCC_Expensive ||
          (F.getFnAttribute("use-soft-float").getValueAsString() == "true"))
        Cost += InlineConstants::CallPenalty;
    }

    // If the instruction simplified to a constant, there is no cost to this
    // instruction. Visit the instructions using our InstVisitor to account for
    // all of the per-instruction logic. The visit tree returns true if we
    // consumed the instruction in any way, and false if the instruction's base
    // cost should count against inlining.
    if (Base::visit(&*I))
      ++NumInstructionsSimplified;
    else
      Cost += InlineConstants::InstrCost;

    // If visiting this instruction detected an uninlinable pattern, abort.
    if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca ||
        HasIndirectBr || HasFrameEscape)
      return false;

    // If the caller is a recursive function then we don't want to inline
    // functions which allocate a lot of stack space because it would increase
    // the caller stack usage dramatically.
    if (IsCallerRecursive &&
        AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller)
      return false;

    // Check if we've passed the maximum possible threshold so we don't spin in
    // huge basic blocks that will never inline.
    if (Cost > Threshold)
      return false;
  }

  return true;
}

/// \brief Compute the base pointer and cumulative constant offsets for V.
///
/// This strips all constant offsets off of V, leaving it the base pointer, and
/// accumulates the total constant offset applied in the returned constant. It
/// returns 0 if V is not a pointer, and returns the constant '0' if there are
/// no constant offsets applied.
ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
  if (!V->getType()->isPointerTy())
    return nullptr;

  unsigned IntPtrWidth = DL.getPointerSizeInBits();
  APInt Offset = APInt::getNullValue(IntPtrWidth);
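  // Illustrative walk (added commentary with hypothetical IR, not from the
  // original source): for
  //   %p = getelementptr inbounds [4 x i32], [4 x i32]* @g, i64 0, i64 2
  //   %q = bitcast i32* %p to i8*
  // the loop below peels the bitcast and then the GEP, leaving V == @g with
  // an accumulated Offset of 8 bytes.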
  // Even though we don't look through PHI nodes, we could be called on an
  // instruction in an unreachable block, which may be on a cycle.
  SmallPtrSet<Value *, 4> Visited;
  Visited.insert(V);
  do {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      if (!GEP->isInBounds() || !accumulateGEPOffset(*GEP, Offset))
        return nullptr;
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast) {
      V = cast<Operator>(V)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
      if (GA->isInterposable())
        break;
      V = GA->getAliasee();
    } else {
      break;
    }
    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
  } while (Visited.insert(V).second);

  Type *IntPtrTy = DL.getIntPtrType(V->getContext());
  return cast<ConstantInt>(ConstantInt::get(IntPtrTy, Offset));
}

/// \brief Analyze a call site for potential inlining.
///
/// Returns true if inlining this call is viable, and false if it is not
/// viable. It computes the cost and adjusts the threshold based on numerous
/// factors and heuristics. If this method returns false but the computed cost
/// is below the computed threshold, then inlining was forcibly disabled by
/// some artifact of the routine.
bool CallAnalyzer::analyzeCall(CallSite CS) {
  ++NumCallsAnalyzed;

  // Perform some tweaks to the cost and threshold based on the direct
  // callsite information.

  // We want to more aggressively inline vector-dense kernels, so up the
  // threshold, and we'll lower it if the % of vector instructions gets too
  // low. Note that these bonuses are somewhat arbitrary and evolved over time
  // by accident as much as because they are principled bonuses.
  //
  // FIXME: It would be nice to remove all such bonuses. At least it would be
  // nice to base the bonus values on something more scientific.
  assert(NumInstructions == 0);
  assert(NumVectorInstructions == 0);

  // Update the threshold based on callsite properties.
  updateThreshold(CS, F);

  FiftyPercentVectorBonus = 3 * Threshold / 2;
  TenPercentVectorBonus = 3 * Threshold / 4;

  // Track whether the post-inlining function would have more than one basic
  // block. A single basic block is often intended for inlining. Balloon the
  // threshold by 50% until we pass the single-BB phase.
  bool SingleBB = true;
  int SingleBBBonus = Threshold / 2;

  // Speculatively apply all possible bonuses to Threshold. If cost exceeds
  // this Threshold any time, and cost cannot decrease, we can stop processing
  // the rest of the function body.
  Threshold += (SingleBBBonus + FiftyPercentVectorBonus);

  // Give out bonuses for the callsite, as the instructions setting them up
  // will be gone after inlining.
  Cost -= getCallsiteCost(CS, DL);

  // If there is only one call of the function, and it has internal linkage,
  // the cost of inlining it drops dramatically.
  bool OnlyOneCallAndLocalLinkage =
      F.hasLocalLinkage() && F.hasOneUse() && &F == CS.getCalledFunction();
  if (OnlyOneCallAndLocalLinkage)
    Cost -= InlineConstants::LastCallToStaticBonus;

  // If this function uses the coldcc calling convention, prefer not to inline
  // it.
  if (F.getCallingConv() == CallingConv::Cold)
    Cost += InlineConstants::ColdccPenalty;
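  // Illustrative example (added commentary, not from the original source):
  // a coldcc callee whose ColdccPenalty outweighs the callsite bonuses can
  // already exceed a small threshold here, so the check below bails before
  // ever visiting the function body.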
  // Check if we're done. This can happen due to bonuses and penalties.
  if (Cost > Threshold)
    return false;

  if (F.empty())
    return true;

  Function *Caller = CS.getInstruction()->getParent()->getParent();
  // Check if the caller function is recursive itself.
  for (User *U : Caller->users()) {
    CallSite Site(U);
    if (!Site)
      continue;
    Instruction *I = Site.getInstruction();
    if (I->getParent()->getParent() == Caller) {
      IsCallerRecursive = true;
      break;
    }
  }

  // Populate our simplified values by mapping from function arguments to call
  // arguments with known important simplifications.
  CallSite::arg_iterator CAI = CS.arg_begin();
  for (Function::arg_iterator FAI = F.arg_begin(), FAE = F.arg_end();
       FAI != FAE; ++FAI, ++CAI) {
    assert(CAI != CS.arg_end());
    if (Constant *C = dyn_cast<Constant>(CAI))
      SimplifiedValues[&*FAI] = C;

    Value *PtrArg = *CAI;
    if (ConstantInt *C = stripAndComputeInBoundsConstantOffsets(PtrArg)) {
      ConstantOffsetPtrs[&*FAI] = std::make_pair(PtrArg, C->getValue());

      // We can SROA any pointer arguments derived from alloca instructions.
      if (isa<AllocaInst>(PtrArg)) {
        SROAArgValues[&*FAI] = PtrArg;
        SROAArgCosts[PtrArg] = 0;
      }
    }
  }
  NumConstantArgs = SimplifiedValues.size();
  NumConstantOffsetPtrArgs = ConstantOffsetPtrs.size();
  NumAllocaArgs = SROAArgValues.size();

  // FIXME: If a caller has multiple calls to a callee, we end up recomputing
  // the ephemeral values multiple times (and they're completely determined by
  // the callee, so this is purely duplicate work).
  SmallPtrSet<const Value *, 32> EphValues;
  CodeMetrics::collectEphemeralValues(&F, &GetAssumptionCache(F), EphValues);

  // The worklist of live basic blocks in the callee *after* inlining. We avoid
  // adding basic blocks of the callee which can be proven to be dead for this
  // particular call site in order to get more accurate cost estimates. This
  // requires a somewhat heavyweight iteration pattern: we need to walk the
  // basic blocks in a breadth-first order as we insert live successors. To
  // accomplish this, prioritizing for small iterations because we exit after
  // crossing our threshold, we use a small-size optimized SetVector.
  typedef SetVector<BasicBlock *, SmallVector<BasicBlock *, 16>,
                    SmallPtrSet<BasicBlock *, 16>>
      BBSetVector;
  BBSetVector BBWorklist;
  BBWorklist.insert(&F.getEntryBlock());
  // Note that we *must not* cache the size, this loop grows the worklist.
  for (unsigned Idx = 0; Idx != BBWorklist.size(); ++Idx) {
    // Bail out the moment we cross the threshold. This means we'll under-count
    // the cost, but only when undercounting doesn't matter.
    if (Cost > Threshold)
      break;

    BasicBlock *BB = BBWorklist[Idx];
    if (BB->empty())
      continue;

    // Disallow inlining a blockaddress. A blockaddress only has defined
    // behavior for an indirect branch in the same function, and we do not
    // currently support inlining indirect branches. But, the inliner may not
    // see an indirect branch that ends up being dead code at a particular call
    // site. If the blockaddress escapes the function, e.g., via a global
    // variable, inlining may lead to an invalid cross-function reference.
    if (BB->hasAddressTaken())
      return false;
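    // Illustrative note (added commentary, not from the original source):
    // because successors are enqueued below only when a branch or switch
    // cannot be folded with the simplified values, a callee body guarded by
    // 'if (flag)' with a constant-false argument never enters the worklist,
    // and its instructions contribute no cost.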
    // Analyze the cost of this block. If we blow through the threshold, this
    // returns false, and we can bail out.
    if (!analyzeBlock(BB, EphValues))
      return false;

    TerminatorInst *TI = BB->getTerminator();

    // Add in the live successors by first checking whether we have terminator
    // that may be simplified based on the values simplified by this call.
    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (BI->isConditional()) {
        Value *Cond = BI->getCondition();
        if (ConstantInt *SimpleCond =
                dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
          BBWorklist.insert(BI->getSuccessor(SimpleCond->isZero() ? 1 : 0));
          continue;
        }
      }
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
      Value *Cond = SI->getCondition();
      if (ConstantInt *SimpleCond =
              dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
        BBWorklist.insert(SI->findCaseValue(SimpleCond)->getCaseSuccessor());
        continue;
      }
    }

    // If we're unable to select a particular successor, just count all of
    // them.
    for (unsigned TIdx = 0, TSize = TI->getNumSuccessors(); TIdx != TSize;
         ++TIdx)
      BBWorklist.insert(TI->getSuccessor(TIdx));

    // If we had any successors at this point, then post-inlining is likely to
    // have them as well. Note that we assume any basic blocks which existed
    // due to branches or switches which folded above will also fold after
    // inlining.
    if (SingleBB && TI->getNumSuccessors() > 1) {
      // Take off the bonus we applied to the threshold.
      Threshold -= SingleBBBonus;
      SingleBB = false;
    }
  }

  // If this is a noduplicate call, we can still inline as long as
  // inlining this would cause the removal of the caller (so the instruction
  // is not actually duplicated, just moved).
  if (!OnlyOneCallAndLocalLinkage && ContainsNoDuplicateCall)
    return false;

  // We applied the maximum possible vector bonus at the beginning. Now,
  // subtract the excess bonus, if any, from the Threshold before
  // comparing against Cost.
  if (NumVectorInstructions <= NumInstructions / 10)
    Threshold -= FiftyPercentVectorBonus;
  else if (NumVectorInstructions <= NumInstructions / 2)
    Threshold -= (FiftyPercentVectorBonus - TenPercentVectorBonus);

  return Cost < std::max(1, Threshold);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// \brief Dump stats about this call's analysis.
LLVM_DUMP_METHOD void CallAnalyzer::dump() {
#define DEBUG_PRINT_STAT(x) dbgs() << " " #x ": " << x << "\n"
  DEBUG_PRINT_STAT(NumConstantArgs);
  DEBUG_PRINT_STAT(NumConstantOffsetPtrArgs);
  DEBUG_PRINT_STAT(NumAllocaArgs);
  DEBUG_PRINT_STAT(NumConstantPtrCmps);
  DEBUG_PRINT_STAT(NumConstantPtrDiffs);
  DEBUG_PRINT_STAT(NumInstructionsSimplified);
  DEBUG_PRINT_STAT(NumInstructions);
  DEBUG_PRINT_STAT(SROACostSavings);
  DEBUG_PRINT_STAT(SROACostSavingsLost);
  DEBUG_PRINT_STAT(ContainsNoDuplicateCall);
  DEBUG_PRINT_STAT(Cost);
  DEBUG_PRINT_STAT(Threshold);
#undef DEBUG_PRINT_STAT
}
#endif
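// Illustrative example (added commentary, not from the original source):
// TTI.areInlineCompatible() typically refuses to inline across mismatched
// target features, e.g. a callee built with "+avx2" into a caller without
// it, since the inlined body could execute instructions the caller's
// subtarget does not support.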
/// \brief Test that there are no attribute conflicts between Caller and Callee
/// that prevent inlining.
static bool functionsHaveCompatibleAttributes(Function *Caller,
                                              Function *Callee,
                                              TargetTransformInfo &TTI) {
  return TTI.areInlineCompatible(Caller, Callee) &&
         AttributeFuncs::areInlineCompatible(*Caller, *Callee);
}

int llvm::getCallsiteCost(CallSite CS, const DataLayout &DL) {
  int Cost = 0;
  for (unsigned I = 0, E = CS.arg_size(); I != E; ++I) {
    if (CS.isByValArgument(I)) {
      // We approximate the number of loads and stores needed by dividing the
      // size of the byval type by the target's pointer size.
      PointerType *PTy = cast<PointerType>(CS.getArgument(I)->getType());
      unsigned TypeSize = DL.getTypeSizeInBits(PTy->getElementType());
      unsigned PointerSize = DL.getPointerSizeInBits();
      // Ceiling division.
      unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;

      // If it generates more than 8 stores it is likely to be expanded as an
      // inline memcpy so we take that as an upper bound. Otherwise we assume
      // one load and one store per word copied.
      // FIXME: The maxStoresPerMemcpy setting from the target should be used
      // here instead of a magic number of 8, but it's not available via
      // DataLayout.
      NumStores = std::min(NumStores, 8U);

      Cost += 2 * NumStores * InlineConstants::InstrCost;
    } else {
      // For non-byval arguments, count one instruction per call argument;
      // like the call itself, the argument setup is expected to disappear
      // after inlining.
      Cost += InlineConstants::InstrCost;
    }
  }
  // The call instruction also disappears after inlining.
  Cost += InlineConstants::InstrCost + InlineConstants::CallPenalty;
  return Cost;
}
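// Worked example for the byval accounting above (hypothetical values): a
// 40-byte struct passed byval on a target with 64-bit pointers gives
// TypeSize = 320 and PointerSize = 64, so NumStores = ceil(320 / 64) = 5 and
// the argument contributes 2 * 5 * InstrCost (five loads plus five stores).
// A 1024-byte struct would compute 128 word copies but is capped at 8, on
// the assumption that the copy lowers to a memcpy call.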
InlineCost llvm::getInlineCost(
    CallSite CS, const InlineParams &Params, TargetTransformInfo &CalleeTTI,
    std::function<AssumptionCache &(Function &)> &GetAssumptionCache,
    Optional<function_ref<BlockFrequencyInfo &(Function &)>> GetBFI,
    ProfileSummaryInfo *PSI) {
  return getInlineCost(CS, CS.getCalledFunction(), Params, CalleeTTI,
                       GetAssumptionCache, GetBFI, PSI);
}

InlineCost llvm::getInlineCost(
    CallSite CS, Function *Callee, const InlineParams &Params,
    TargetTransformInfo &CalleeTTI,
    std::function<AssumptionCache &(Function &)> &GetAssumptionCache,
    Optional<function_ref<BlockFrequencyInfo &(Function &)>> GetBFI,
    ProfileSummaryInfo *PSI) {

  // Cannot inline indirect calls.
  if (!Callee)
    return llvm::InlineCost::getNever();

  // Calls to functions with always-inline attributes should be inlined
  // whenever possible.
  if (CS.hasFnAttr(Attribute::AlwaysInline)) {
    if (isInlineViable(*Callee))
      return llvm::InlineCost::getAlways();
    return llvm::InlineCost::getNever();
  }

  // Never inline functions with conflicting attributes (unless the callee
  // has the always-inline attribute).
  if (!functionsHaveCompatibleAttributes(CS.getCaller(), Callee, CalleeTTI))
    return llvm::InlineCost::getNever();

  // Don't inline this call if the caller has the optnone attribute.
  if (CS.getCaller()->hasFnAttribute(Attribute::OptimizeNone))
    return llvm::InlineCost::getNever();

  // Don't inline functions which can be interposed at link-time. Don't inline
  // functions marked noinline or call sites marked noinline.
  // Note: inlining non-exact non-interposable functions is fine, since we
  // know we have *a* correct implementation of the source-level function.
  if (Callee->isInterposable() || Callee->hasFnAttribute(Attribute::NoInline) ||
      CS.isNoInline())
    return llvm::InlineCost::getNever();

  DEBUG(llvm::dbgs() << " Analyzing call of " << Callee->getName()
                     << "...\n");

  CallAnalyzer CA(CalleeTTI, GetAssumptionCache, GetBFI, PSI, *Callee, CS,
                  Params);
  bool ShouldInline = CA.analyzeCall(CS);

  DEBUG(CA.dump());

  // Check if there was a reason to force inlining or no inlining.
  if (!ShouldInline && CA.getCost() < CA.getThreshold())
    return InlineCost::getNever();
  if (ShouldInline && CA.getCost() >= CA.getThreshold())
    return InlineCost::getAlways();

  return llvm::InlineCost::get(CA.getCost(), CA.getThreshold());
}

bool llvm::isInlineViable(Function &F) {
  bool ReturnsTwice = F.hasFnAttribute(Attribute::ReturnsTwice);
  for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
    // Disallow inlining of functions which contain indirect branches or
    // blockaddresses.
    if (isa<IndirectBrInst>(BI->getTerminator()) || BI->hasAddressTaken())
      return false;

    for (auto &II : *BI) {
      CallSite CS(&II);
      if (!CS)
        continue;

      // Disallow recursive calls.
      if (&F == CS.getCalledFunction())
        return false;

      // Disallow calls which expose returns-twice to a function not previously
      // attributed as such.
      if (!ReturnsTwice && CS.isCall() &&
          cast<CallInst>(CS.getInstruction())->canReturnTwice())
        return false;

      // Disallow inlining functions that call @llvm.localescape. Doing this
      // correctly would require major changes to the inliner.
      if (CS.getCalledFunction() &&
          CS.getCalledFunction()->getIntrinsicID() ==
              llvm::Intrinsic::localescape)
        return false;
    }
  }

  return true;
}

// APIs to create InlineParams based on command line flags and/or other
// parameters.

InlineParams llvm::getInlineParams(int Threshold) {
  InlineParams Params;

  // This field is the threshold to use for a callee by default. This is
  // derived from one or more of:
  //  * optimization or size-optimization levels,
  //  * a value passed to the createFunctionInliningPass function, or
  //  * the -inline-threshold flag.
  // If the -inline-threshold flag is explicitly specified, that is used
  // irrespective of anything else.
  if (InlineThreshold.getNumOccurrences() > 0)
    Params.DefaultThreshold = InlineThreshold;
  else
    Params.DefaultThreshold = Threshold;
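  // Illustrative precedence (hypothetical values): when the process is run
  // with -inline-threshold=500, DefaultThreshold becomes 500 no matter what
  // Threshold argument was passed in; without that flag, a call such as
  // getInlineParams(75) yields DefaultThreshold = 75.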
  // Set the HintThreshold knob from the -inlinehint-threshold.
  Params.HintThreshold = HintThreshold;

  // Set the HotCallSiteThreshold knob from the -hot-callsite-threshold.
  Params.HotCallSiteThreshold = HotCallSiteThreshold;

  // Set the ColdCallSiteThreshold knob from the
  // -inline-cold-callsite-threshold.
  Params.ColdCallSiteThreshold = ColdCallSiteThreshold;

  // Set the OptMinSizeThreshold and OptSizeThreshold params only if the
  // -inline-threshold commandline option is not explicitly given. If that
  // option is present, then its value applies even for callees with size and
  // minsize attributes.
  // If -inline-threshold is not specified, set the ColdThreshold from the
  // -inlinecold-threshold even if it is not explicitly passed. If
  // -inline-threshold is specified, then -inlinecold-threshold needs to be
  // explicitly specified to set the ColdThreshold knob.
  if (InlineThreshold.getNumOccurrences() == 0) {
    Params.OptMinSizeThreshold = InlineConstants::OptMinSizeThreshold;
    Params.OptSizeThreshold = InlineConstants::OptSizeThreshold;
    Params.ColdThreshold = ColdThreshold;
  } else if (ColdThreshold.getNumOccurrences() > 0) {
    Params.ColdThreshold = ColdThreshold;
  }
  return Params;
}

InlineParams llvm::getInlineParams() {
  return getInlineParams(InlineThreshold);
}

// Compute the default threshold for inlining based on the opt level and the
// size opt level.
static int computeThresholdFromOptLevels(unsigned OptLevel,
                                         unsigned SizeOptLevel) {
  if (OptLevel > 2)
    return InlineConstants::OptAggressiveThreshold;
  if (SizeOptLevel == 1) // -Os
    return InlineConstants::OptSizeThreshold;
  if (SizeOptLevel == 2) // -Oz
    return InlineConstants::OptMinSizeThreshold;
  return InlineThreshold;
}

InlineParams llvm::getInlineParams(unsigned OptLevel, unsigned SizeOptLevel) {
  return getInlineParams(computeThresholdFromOptLevels(OptLevel, SizeOptLevel));
}
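// Usage sketch (illustrative): getInlineParams(3, 0) uses
// InlineConstants::OptAggressiveThreshold as the default threshold,
// getInlineParams(2, 1) (-Os) uses InlineConstants::OptSizeThreshold, and
// getInlineParams(2, 2) (-Oz) uses InlineConstants::OptMinSizeThreshold;
// in every case an explicit -inline-threshold on the command line overrides
// the computed default.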