//===- InlineCost.cpp - Cost analysis for inliner -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inline cost analysis.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "inline-cost"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/InstVisitor.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

STATISTIC(NumCallsAnalyzed, "Number of call sites analyzed");

namespace {

class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
  typedef InstVisitor<CallAnalyzer, bool> Base;
  friend class InstVisitor<CallAnalyzer, bool>;

  // DataLayout if available, or null.
  const DataLayout *const TD;

  // The called function.
  Function &F;

  int Threshold;
  int Cost;

  bool IsCallerRecursive;
  bool IsRecursiveCall;
  bool ExposesReturnsTwice;
  bool HasDynamicAlloca;
  bool ContainsNoDuplicateCall;

  /// Number of bytes allocated statically by the callee.
  uint64_t AllocatedSize;
  unsigned NumInstructions, NumVectorInstructions;
  int FiftyPercentVectorBonus, TenPercentVectorBonus;
  int VectorBonus;

  // While we walk the potentially-inlined instructions, we build up and
  // maintain a mapping of simplified values specific to this callsite. The
  // idea is to propagate any special information we have about arguments to
  // this call through the inlinable section of the function, and account for
  // likely simplifications post-inlining. The most important aspect we track
  // is CFG altering simplifications -- when we prove a basic block dead, that
  // can cause dramatic shifts in the cost of inlining a function.
  DenseMap<Value *, Constant *> SimplifiedValues;

  // Keep track of the values which map back (through function arguments) to
  // allocas on the caller stack which could be simplified through SROA.
  DenseMap<Value *, Value *> SROAArgValues;

  // The mapping of caller Alloca values to their accumulated cost savings. If
  // we have to disable SROA for one of the allocas, this tells us how much
  // cost must be added.
  DenseMap<Value *, int> SROAArgCosts;

  // Keep track of values which map to a pointer base and constant offset.
  DenseMap<Value *, std::pair<Value *, APInt> > ConstantOffsetPtrs;
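  // An illustrative sketch (hypothetical IR) of what this map holds: if the
  // caller passes %a (an alloca) into this call and the callee computes
  //   %p = getelementptr inbounds i32* %a, i64 4
  // then ConstantOffsetPtrs records %p -> (%a, 16) on a target with 4-byte
  // i32, letting later pointer comparisons and subtractions fold.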
  // Custom simplification helper routines.
  bool isAllocaDerivedArg(Value *V);
  bool lookupSROAArgAndCost(Value *V, Value *&Arg,
                            DenseMap<Value *, int>::iterator &CostIt);
  void disableSROA(DenseMap<Value *, int>::iterator CostIt);
  void disableSROA(Value *V);
  void accumulateSROACost(DenseMap<Value *, int>::iterator CostIt,
                          int InstructionCost);
  bool handleSROACandidate(bool IsSROAValid,
                           DenseMap<Value *, int>::iterator CostIt,
                           int InstructionCost);
  bool isGEPOffsetConstant(GetElementPtrInst &GEP);
  bool accumulateGEPOffset(GEPOperator &GEP, APInt &Offset);
  bool simplifyCallSite(Function *F, CallSite CS);
  ConstantInt *stripAndComputeInBoundsConstantOffsets(Value *&V);

  // Custom analysis routines.
  bool analyzeBlock(BasicBlock *BB);

  // Disable several entry points to the visitor so we don't accidentally use
  // them by declaring but not defining them here.
  void visit(Module *); void visit(Module &);
  void visit(Function *); void visit(Function &);
  void visit(BasicBlock *); void visit(BasicBlock &);

  // Provide base case for our instruction visit.
  bool visitInstruction(Instruction &I);

  // Our visit overrides.
  bool visitAlloca(AllocaInst &I);
  bool visitPHI(PHINode &I);
  bool visitGetElementPtr(GetElementPtrInst &I);
  bool visitBitCast(BitCastInst &I);
  bool visitPtrToInt(PtrToIntInst &I);
  bool visitIntToPtr(IntToPtrInst &I);
  bool visitCastInst(CastInst &I);
  bool visitUnaryInstruction(UnaryInstruction &I);
  bool visitICmp(ICmpInst &I);
  bool visitSub(BinaryOperator &I);
  bool visitBinaryOperator(BinaryOperator &I);
  bool visitLoad(LoadInst &I);
  bool visitStore(StoreInst &I);
  bool visitExtractValue(ExtractValueInst &I);
  bool visitInsertValue(InsertValueInst &I);
  bool visitCallSite(CallSite CS);

public:
  CallAnalyzer(const DataLayout *TD, Function &Callee, int Threshold)
      : TD(TD), F(Callee), Threshold(Threshold), Cost(0),
        IsCallerRecursive(false), IsRecursiveCall(false),
        ExposesReturnsTwice(false), HasDynamicAlloca(false),
        ContainsNoDuplicateCall(false), AllocatedSize(0), NumInstructions(0),
        NumVectorInstructions(0), FiftyPercentVectorBonus(0),
        TenPercentVectorBonus(0), VectorBonus(0), NumConstantArgs(0),
        NumConstantOffsetPtrArgs(0), NumAllocaArgs(0), NumConstantPtrCmps(0),
        NumConstantPtrDiffs(0), NumInstructionsSimplified(0),
        SROACostSavings(0), SROACostSavingsLost(0) {}

  bool analyzeCall(CallSite CS);

  int getThreshold() { return Threshold; }
  int getCost() { return Cost; }

  // Keep a bunch of stats about the cost savings found so we can print them
  // out when debugging.
  unsigned NumConstantArgs;
  unsigned NumConstantOffsetPtrArgs;
  unsigned NumAllocaArgs;
  unsigned NumConstantPtrCmps;
  unsigned NumConstantPtrDiffs;
  unsigned NumInstructionsSimplified;
  unsigned SROACostSavings;
  unsigned SROACostSavingsLost;

  void dump();
};

} // namespace

/// \brief Test whether the given value is an Alloca-derived function argument.
bool CallAnalyzer::isAllocaDerivedArg(Value *V) {
  return SROAArgValues.count(V);
}

/// \brief Lookup the SROA-candidate argument and cost iterator which V maps to.
/// Returns false if V does not map to an SROA candidate.
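/// For example (an illustrative scenario, not required by the interface): if
/// a caller alloca %a flowed into this call, then the formal parameter bound
/// to %a and any GEP or bitcast derived from it all map back to %a here, and
/// share the single accumulated-savings entry for %a in SROAArgCosts.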
bool CallAnalyzer::lookupSROAArgAndCost(
    Value *V, Value *&Arg, DenseMap<Value *, int>::iterator &CostIt) {
  if (SROAArgValues.empty() || SROAArgCosts.empty())
    return false;

  DenseMap<Value *, Value *>::iterator ArgIt = SROAArgValues.find(V);
  if (ArgIt == SROAArgValues.end())
    return false;

  Arg = ArgIt->second;
  CostIt = SROAArgCosts.find(Arg);
  return CostIt != SROAArgCosts.end();
}

/// \brief Disable SROA for the candidate marked by this cost iterator.
///
/// This marks the candidate as no longer viable for SROA, and adds the cost
/// savings associated with it back into the inline cost measurement.
void CallAnalyzer::disableSROA(DenseMap<Value *, int>::iterator CostIt) {
  // If we're no longer able to perform SROA we need to undo its cost savings
  // and prevent subsequent analysis.
  Cost += CostIt->second;
  SROACostSavings -= CostIt->second;
  SROACostSavingsLost += CostIt->second;
  SROAArgCosts.erase(CostIt);
}

/// \brief If 'V' maps to an SROA candidate, disable SROA for it.
void CallAnalyzer::disableSROA(Value *V) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(V, SROAArg, CostIt))
    disableSROA(CostIt);
}

/// \brief Accumulate the given cost for a particular SROA candidate.
void CallAnalyzer::accumulateSROACost(DenseMap<Value *, int>::iterator CostIt,
                                      int InstructionCost) {
  CostIt->second += InstructionCost;
  SROACostSavings += InstructionCost;
}

/// \brief Helper for the common pattern of handling an SROA candidate.
/// Either accumulates the cost savings if the SROA remains valid, or disables
/// SROA for the candidate.
bool CallAnalyzer::handleSROACandidate(bool IsSROAValid,
                                       DenseMap<Value *, int>::iterator CostIt,
                                       int InstructionCost) {
  if (IsSROAValid) {
    accumulateSROACost(CostIt, InstructionCost);
    return true;
  }

  disableSROA(CostIt);
  return false;
}

/// \brief Check whether a GEP's indices are all constant.
///
/// Respects any simplified values known during the analysis of this callsite.
bool CallAnalyzer::isGEPOffsetConstant(GetElementPtrInst &GEP) {
  for (User::op_iterator I = GEP.idx_begin(), E = GEP.idx_end(); I != E; ++I)
    if (!isa<Constant>(*I) && !SimplifiedValues.lookup(*I))
      return false;

  return true;
}

/// \brief Accumulate a constant GEP offset into an APInt if possible.
///
/// Returns false if unable to compute the offset for any reason. Respects any
/// simplified values known during the analysis of this callsite.
bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
  if (!TD)
    return false;

  unsigned IntPtrWidth = TD->getPointerSizeInBits();
  assert(IntPtrWidth == Offset.getBitWidth());

  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
    if (!OpC)
      if (Constant *SimpleOp = SimplifiedValues.lookup(GTI.getOperand()))
        OpC = dyn_cast<ConstantInt>(SimpleOp);
    if (!OpC)
      return false;
    if (OpC->isZero()) continue;

    // Handle a struct index, which adds its field offset to the pointer.
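    // For instance (an illustrative case), indexing field 1 of {i32, i64}
    // contributes that field's StructLayout offset -- typically 8 with
    // natural alignment -- rather than index * sizeof(element) as for arrays.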
    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
      unsigned ElementIdx = OpC->getZExtValue();
      const StructLayout *SL = TD->getStructLayout(STy);
      Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
      continue;
    }

    APInt TypeSize(IntPtrWidth, TD->getTypeAllocSize(GTI.getIndexedType()));
    Offset += OpC->getValue().sextOrTrunc(IntPtrWidth) * TypeSize;
  }
  return true;
}

bool CallAnalyzer::visitAlloca(AllocaInst &I) {
  // FIXME: Check whether inlining will turn a dynamic alloca into a static
  // alloca, and handle that case.

  // Accumulate the allocated size. Without DataLayout we fall back to the
  // primitive size in bits, which is only a rough approximation of the byte
  // size.
  if (I.isStaticAlloca()) {
    Type *Ty = I.getAllocatedType();
    AllocatedSize += (TD ? TD->getTypeAllocSize(Ty) :
                      Ty->getPrimitiveSizeInBits());
  }

  // We will happily inline static alloca instructions.
  if (I.isStaticAlloca())
    return Base::visitAlloca(I);

  // FIXME: This is overly conservative. Dynamic allocas are inefficient for
  // a variety of reasons, and so we would like to not inline them into
  // functions which don't currently have a dynamic alloca. This simply
  // disables inlining altogether in the presence of a dynamic alloca.
  HasDynamicAlloca = true;
  return false;
}

bool CallAnalyzer::visitPHI(PHINode &I) {
  // FIXME: We should potentially be tracking values through phi nodes,
  // especially when they collapse to a single value due to deleted CFG edges
  // during inlining.

  // FIXME: We need to propagate SROA *disabling* through phi nodes, even
  // though we don't want to propagate its bonuses. The idea is to disable
  // SROA if it *might* be used in an inappropriate manner.

  // Phi nodes are always zero-cost.
  return true;
}

bool CallAnalyzer::visitGetElementPtr(GetElementPtrInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  bool SROACandidate = lookupSROAArgAndCost(I.getPointerOperand(),
                                            SROAArg, CostIt);

  // Try to fold GEPs of constant-offset call site argument pointers. This
  // requires target data and inbounds GEPs.
  if (TD && I.isInBounds()) {
    // Check if we have a base + offset for the pointer.
    Value *Ptr = I.getPointerOperand();
    std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Ptr);
    if (BaseAndOffset.first) {
      // Check if the offset of this GEP is constant, and if so accumulate it
      // into Offset.
      if (!accumulateGEPOffset(cast<GEPOperator>(I), BaseAndOffset.second)) {
        // Non-constant GEPs aren't folded, and disable SROA.
        if (SROACandidate)
          disableSROA(CostIt);
        return false;
      }

      // Add the result as a new mapping to Base + Offset.
      ConstantOffsetPtrs[&I] = BaseAndOffset;

      // Also handle SROA candidates here, we already know that the GEP is
      // all-constant indexed.
      if (SROACandidate)
        SROAArgValues[&I] = SROAArg;

      return true;
    }
  }

  if (isGEPOffsetConstant(I)) {
    if (SROACandidate)
      SROAArgValues[&I] = SROAArg;

    // Constant GEPs are modeled as free.
    return true;
  }

  // Variable GEPs will require math and will disable SROA.
  if (SROACandidate)
    disableSROA(CostIt);
  return false;
}

bool CallAnalyzer::visitBitCast(BitCastInst &I) {
  // Propagate constants through bitcasts.
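  // E.g. (an illustrative fold): if %p was already simplified to a constant,
  // then '%b = bitcast i8* %p to i32*' itself simplifies to the corresponding
  // constant expression and is modeled as free.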
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getBitCast(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offsets through casts.
  std::pair<Value *, APInt> BaseAndOffset
    = ConstantOffsetPtrs.lookup(I.getOperand(0));
  // Casts don't change the offset, just wrap it up.
  if (BaseAndOffset.first)
    ConstantOffsetPtrs[&I] = BaseAndOffset;

  // Also look for SROA candidates here.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  // Bitcasts are always zero cost.
  return true;
}

bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
  // Propagate constants through ptrtoint.
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getPtrToInt(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offset pairs when converted to a plain integer provided the
  // integer is large enough to represent the pointer.
  unsigned IntegerSize = I.getType()->getScalarSizeInBits();
  if (TD && IntegerSize >= TD->getPointerSizeInBits()) {
    std::pair<Value *, APInt> BaseAndOffset
      = ConstantOffsetPtrs.lookup(I.getOperand(0));
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // This is really weird. Technically, ptrtoint will disable SROA. However,
  // unless that ptrtoint is *used* somewhere in the live basic blocks after
  // inlining, it will be dead and deleted, and SROA should proceed. All of
  // the uses which would block SROA would also block SROA if applied directly
  // to a pointer, and so we can just add the integer in here. The only places
  // where SROA is preserved either cannot fire on an integer, or won't
  // in-and-of themselves disable SROA (ext) w/o some later use that we would
  // see and disable.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  return isInstructionFree(&I, TD);
}

bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
  // Propagate constants through inttoptr.
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getIntToPtr(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offset pairs when round-tripped through a pointer without
  // modifications provided the integer is not too large.
  Value *Op = I.getOperand(0);
  unsigned IntegerSize = Op->getType()->getScalarSizeInBits();
  if (TD && IntegerSize <= TD->getPointerSizeInBits()) {
    std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op);
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // "Propagate" SROA here in the same manner as we do for ptrtoint above.
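  // An illustrative round trip this keeps alive:
  //   %i = ptrtoint i32* %p to i64
  //   %q = inttoptr i64 %i to i32*
  // maps %q back to %p's SROA argument and base/offset pair, so SROA and
  // offset tracking survive the trip.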
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(Op, SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  return isInstructionFree(&I, TD);
}

bool CallAnalyzer::visitCastInst(CastInst &I) {
  // Propagate constants through casts.
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getCast(I.getOpcode(), COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Disable SROA in the face of arbitrary casts we don't whitelist elsewhere.
  disableSROA(I.getOperand(0));

  return isInstructionFree(&I, TD);
}

bool CallAnalyzer::visitUnaryInstruction(UnaryInstruction &I) {
  Value *Operand = I.getOperand(0);
  Constant *Ops[1] = { dyn_cast<Constant>(Operand) };
  if (Ops[0] || (Ops[0] = SimplifiedValues.lookup(Operand)))
    if (Constant *C = ConstantFoldInstOperands(I.getOpcode(), I.getType(),
                                               Ops, TD)) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Disable any SROA on the argument to arbitrary unary operators.
  disableSROA(Operand);

  return false;
}

bool CallAnalyzer::visitICmp(ICmpInst &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  // First try to handle simplified comparisons.
  if (!isa<Constant>(LHS))
    if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
      LHS = SimpleLHS;
  if (!isa<Constant>(RHS))
    if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
      RHS = SimpleRHS;
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      if (Constant *C = ConstantExpr::getICmp(I.getPredicate(), CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        return true;
      }

  // Otherwise look for a comparison between constant offset pointers with
  // a common base.
  Value *LHSBase, *RHSBase;
  APInt LHSOffset, RHSOffset;
  llvm::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
  if (LHSBase) {
    llvm::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
    if (RHSBase && LHSBase == RHSBase) {
      // We have common bases, fold the icmp to a constant based on the
      // offsets.
      Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
      Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
      if (Constant *C = ConstantExpr::getICmp(I.getPredicate(), CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        ++NumConstantPtrCmps;
        return true;
      }
    }
  }

  // If the comparison is an equality comparison with null, we can simplify it
  // for any alloca-derived argument.
  if (I.isEquality() && isa<ConstantPointerNull>(I.getOperand(1)))
    if (isAllocaDerivedArg(I.getOperand(0))) {
      // We can actually predict the result of comparisons between an
      // alloca-derived value and null. Note that this fires regardless of
      // SROA firing.
      bool IsNotEqual = I.getPredicate() == CmpInst::ICMP_NE;
      SimplifiedValues[&I] = IsNotEqual ? ConstantInt::getTrue(I.getType())
                                        : ConstantInt::getFalse(I.getType());
      return true;
    }

  // Finally check for SROA candidates in comparisons.
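  // E.g. (illustrative): 'icmp eq i32* %q, null', where %q derives from an
  // SROA-candidate alloca, vanishes if SROA fires, so we bank its cost as a
  // saving here; any other comparison escapes the pointer and disables SROA.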
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
    if (isa<ConstantPointerNull>(I.getOperand(1))) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}

bool CallAnalyzer::visitSub(BinaryOperator &I) {
  // Try to handle a special case: we can fold computing the difference of two
  // constant-related pointers.
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Value *LHSBase, *RHSBase;
  APInt LHSOffset, RHSOffset;
  llvm::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
  if (LHSBase) {
    llvm::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
    if (RHSBase && LHSBase == RHSBase) {
      // We have common bases, fold the subtract to a constant based on the
      // offsets.
      Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
      Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
      if (Constant *C = ConstantExpr::getSub(CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        ++NumConstantPtrDiffs;
        return true;
      }
    }
  }

  // Otherwise, fall back to the generic logic for simplifying and handling
  // instructions.
  return Base::visitSub(I);
}

bool CallAnalyzer::visitBinaryOperator(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  if (!isa<Constant>(LHS))
    if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
      LHS = SimpleLHS;
  if (!isa<Constant>(RHS))
    if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
      RHS = SimpleRHS;
  Value *SimpleV = SimplifyBinOp(I.getOpcode(), LHS, RHS, TD);
  if (Constant *C = dyn_cast_or_null<Constant>(SimpleV)) {
    SimplifiedValues[&I] = C;
    return true;
  }

  // Disable any SROA on arguments to arbitrary, unsimplified binary operators.
  disableSROA(LHS);
  disableSROA(RHS);

  return false;
}

bool CallAnalyzer::visitLoad(LoadInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getPointerOperand(), SROAArg, CostIt)) {
    if (I.isSimple()) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}

bool CallAnalyzer::visitStore(StoreInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  // Note that for a store the address is operand 1, not operand 0, so we use
  // getPointerOperand() to look up the SROA candidate.
  if (lookupSROAArgAndCost(I.getPointerOperand(), SROAArg, CostIt)) {
    if (I.isSimple()) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}

bool CallAnalyzer::visitExtractValue(ExtractValueInst &I) {
  // Constant folding for extract value is trivial.
  Constant *C = dyn_cast<Constant>(I.getAggregateOperand());
  if (!C)
    C = SimplifiedValues.lookup(I.getAggregateOperand());
  if (C) {
    SimplifiedValues[&I] = ConstantExpr::getExtractValue(C, I.getIndices());
    return true;
  }

  // SROA can look through these but give them a cost.
  return false;
}

bool CallAnalyzer::visitInsertValue(InsertValueInst &I) {
  // Constant folding for insert value is trivial.
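  // E.g. (illustrative): 'insertvalue {i32, i32} %agg, i32 7, 0' folds once
  // both %agg and the inserted value map to constants in SimplifiedValues.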
  Constant *AggC = dyn_cast<Constant>(I.getAggregateOperand());
  if (!AggC)
    AggC = SimplifiedValues.lookup(I.getAggregateOperand());
  Constant *InsertedC = dyn_cast<Constant>(I.getInsertedValueOperand());
  if (!InsertedC)
    InsertedC = SimplifiedValues.lookup(I.getInsertedValueOperand());
  if (AggC && InsertedC) {
    SimplifiedValues[&I] = ConstantExpr::getInsertValue(AggC, InsertedC,
                                                        I.getIndices());
    return true;
  }

  // SROA can look through these but give them a cost.
  return false;
}

/// \brief Try to simplify a call site.
///
/// Takes a concrete function and callsite, and tries to actually simplify the
/// call itself by analyzing its arguments with instsimplify. Returns true if
/// it has simplified the callsite to some other entity (a constant), making
/// it free.
bool CallAnalyzer::simplifyCallSite(Function *F, CallSite CS) {
  // FIXME: Using the instsimplify logic directly for this is inefficient
  // because we have to continually rebuild the argument list even when no
  // simplifications can be performed. Until that is fixed with remapping
  // inside of instsimplify, directly constant fold calls here.
  if (!canConstantFoldCallTo(F))
    return false;

  // Try to re-map the arguments to constants.
  SmallVector<Constant *, 4> ConstantArgs;
  ConstantArgs.reserve(CS.arg_size());
  for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
       I != E; ++I) {
    Constant *C = dyn_cast<Constant>(*I);
    if (!C)
      C = dyn_cast_or_null<Constant>(SimplifiedValues.lookup(*I));
    if (!C)
      return false; // This argument doesn't map to a constant.

    ConstantArgs.push_back(C);
  }
  if (Constant *C = ConstantFoldCall(F, ConstantArgs)) {
    SimplifiedValues[CS.getInstruction()] = C;
    return true;
  }

  return false;
}

bool CallAnalyzer::visitCallSite(CallSite CS) {
  if (CS.isCall() && cast<CallInst>(CS.getInstruction())->canReturnTwice() &&
      !F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                      Attribute::ReturnsTwice)) {
    // This aborts the entire analysis.
    ExposesReturnsTwice = true;
    return false;
  }
  if (CS.isCall() &&
      cast<CallInst>(CS.getInstruction())->hasFnAttr(Attribute::NoDuplicate))
    ContainsNoDuplicateCall = true;

  if (Function *F = CS.getCalledFunction()) {
    // When we have a concrete function, first try to simplify it directly.
    if (simplifyCallSite(F, CS))
      return true;

    // Next check if it is an intrinsic we know about.
    // FIXME: Lift this into part of the InstVisitor.
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
      switch (II->getIntrinsicID()) {
      default:
        return Base::visitCallSite(CS);

      case Intrinsic::memset:
      case Intrinsic::memcpy:
      case Intrinsic::memmove:
        // SROA can usually chew through these intrinsics, but they aren't
        // free.
        return false;
      }
    }

    if (F == CS.getInstruction()->getParent()->getParent()) {
      // This flag will fully abort the analysis, so don't bother with
      // anything else.
      IsRecursiveCall = true;
      return false;
    }

    if (!callIsSmall(CS)) {
      // We account for the average 1 instruction per call argument setup
      // here.
      Cost += CS.arg_size() * InlineConstants::InstrCost;

      // Everything other than inline ASM will also have a significant cost
      // merely from making the call.
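      // (Illustrative intuition: the CallPenalty below models the fixed
      // overhead of a real call -- argument registers, spills, and the call
      // and return themselves. Inline asm is excluded because it is expanded
      // in place rather than lowered to a call.)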
      if (!isa<InlineAsm>(CS.getCalledValue()))
        Cost += InlineConstants::CallPenalty;
    }

    return Base::visitCallSite(CS);
  }

  // Otherwise we're in a very special case -- an indirect function call. See
  // if we can be particularly clever about this.
  Value *Callee = CS.getCalledValue();

  // First, pay the price of the argument setup. We account for the average
  // 1 instruction per call argument setup here.
  Cost += CS.arg_size() * InlineConstants::InstrCost;

  // Next, check if this happens to be an indirect function call to a known
  // function in this inline context. If not, we've done all we can.
  Function *F = dyn_cast_or_null<Function>(SimplifiedValues.lookup(Callee));
  if (!F)
    return Base::visitCallSite(CS);

  // If we have a constant that we are calling as a function, we can peer
  // through it and see the function target. This happens not infrequently
  // during devirtualization and so we want to give it a hefty bonus for
  // inlining, but cap that bonus in the event that inlining wouldn't pan
  // out. Pretend to inline the function, with a custom threshold.
  CallAnalyzer CA(TD, *F, InlineConstants::IndirectCallThreshold);
  if (CA.analyzeCall(CS)) {
    // We were able to inline the indirect call! Subtract the cost from the
    // bonus we want to apply, but don't go below zero.
    Cost -= std::max(0, InlineConstants::IndirectCallThreshold - CA.getCost());
  }

  return Base::visitCallSite(CS);
}

bool CallAnalyzer::visitInstruction(Instruction &I) {
  // Some instructions are free. All of the free intrinsics can also be
  // handled by SROA, etc.
  if (isInstructionFree(&I, TD))
    return true;

  // We found something we don't understand or can't handle. Mark any SROA-able
  // values in the operand list as no longer viable.
  for (User::op_iterator OI = I.op_begin(), OE = I.op_end(); OI != OE; ++OI)
    disableSROA(*OI);

  return false;
}


/// \brief Analyze a basic block for its contribution to the inline cost.
///
/// This method walks the analyzer over every instruction in the given basic
/// block and accounts for their cost during inlining at this callsite. It
/// aborts early if the threshold has been exceeded or an impossible-to-inline
/// construct has been detected. It returns false if inlining is no longer
/// viable, and true if inlining remains viable.
bool CallAnalyzer::analyzeBlock(BasicBlock *BB) {
  for (BasicBlock::iterator I = BB->begin(), E = llvm::prior(BB->end());
       I != E; ++I) {
    ++NumInstructions;
    if (isa<ExtractElementInst>(I) || I->getType()->isVectorTy())
      ++NumVectorInstructions;

    // If the instruction simplified to a constant, there is no cost to this
    // instruction. Visit the instructions using our InstVisitor to account
    // for all of the per-instruction logic. The visit tree returns true if we
    // consumed the instruction in any way, and false if the instruction's
    // base cost should count against inlining.
    if (Base::visit(I))
      ++NumInstructionsSimplified;
    else
      Cost += InlineConstants::InstrCost;

    // If visiting this instruction detected an uninlinable pattern, abort.
    if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca)
      return false;

    // If the caller is a recursive function then we don't want to inline
    // functions which allocate a lot of stack space because it would increase
    // the caller stack usage dramatically.
    if (IsCallerRecursive &&
        AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller)
      return false;

    if (NumVectorInstructions > NumInstructions/2)
      VectorBonus = FiftyPercentVectorBonus;
    else if (NumVectorInstructions > NumInstructions/10)
      VectorBonus = TenPercentVectorBonus;
    else
      VectorBonus = 0;

    // Check if we've passed the threshold so we don't spin in huge basic
    // blocks that will never inline.
    if (Cost > (Threshold + VectorBonus))
      return false;
  }

  return true;
}

/// \brief Compute the base pointer and cumulative constant offsets for V.
///
/// This strips all constant offsets off of V, leaving it the base pointer,
/// and accumulates the total constant offset applied in the returned
/// constant. It returns null if V is not a pointer, and returns the constant
/// '0' if there are no constant offsets applied.
ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
  if (!TD || !V->getType()->isPointerTy())
    return 0;

  unsigned IntPtrWidth = TD->getPointerSizeInBits();
  APInt Offset = APInt::getNullValue(IntPtrWidth);

  // Even though we don't look through PHI nodes, we could be called on an
  // instruction in an unreachable block, which may be on a cycle.
  SmallPtrSet<Value *, 4> Visited;
  Visited.insert(V);
  do {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      if (!GEP->isInBounds() || !accumulateGEPOffset(*GEP, Offset))
        return 0;
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast) {
      V = cast<Operator>(V)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
      if (GA->mayBeOverridden())
        break;
      V = GA->getAliasee();
    } else {
      break;
    }
    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
  } while (Visited.insert(V));

  Type *IntPtrTy = TD->getIntPtrType(V->getContext());
  return cast<ConstantInt>(ConstantInt::get(IntPtrTy, Offset));
}

/// \brief Analyze a call site for potential inlining.
///
/// Returns true if inlining this call is viable, and false if it is not
/// viable. It computes the cost and adjusts the threshold based on numerous
/// factors and heuristics. If this method returns false but the computed cost
/// is below the computed threshold, then inlining was forcibly disabled by
/// some artifact of the routine.
bool CallAnalyzer::analyzeCall(CallSite CS) {
  ++NumCallsAnalyzed;

  // Track whether the post-inlining function would have more than one basic
  // block. A single basic block is often intended for inlining. Balloon the
  // threshold by 50% until we pass the single-BB phase.
  bool SingleBB = true;
  int SingleBBBonus = Threshold / 2;
  Threshold += SingleBBBonus;

  // Perform some tweaks to the cost and threshold based on the direct
  // callsite information.

  // We want to more aggressively inline vector-dense kernels, so up the
  // threshold, and we'll lower it if the % of vector instructions gets too
  // low.
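  // Illustrative effect of the bonuses set below: a callee in which more
  // than half of the instructions are vector operations may cost up to twice
  // the adjusted threshold and still be inlined; above one tenth vector
  // instructions, up to 1.5x.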
  assert(NumInstructions == 0);
  assert(NumVectorInstructions == 0);
  FiftyPercentVectorBonus = Threshold;
  TenPercentVectorBonus = Threshold / 2;

  // Give out bonuses per argument, as the instructions setting them up will
  // be gone after inlining.
  for (unsigned I = 0, E = CS.arg_size(); I != E; ++I) {
    if (TD && CS.isByValArgument(I)) {
      // We approximate the number of loads and stores needed by dividing the
      // size of the byval type by the target's pointer size.
      PointerType *PTy = cast<PointerType>(CS.getArgument(I)->getType());
      unsigned TypeSize = TD->getTypeSizeInBits(PTy->getElementType());
      unsigned PointerSize = TD->getPointerSizeInBits();
      // Ceiling division.
      unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;

      // If it generates more than 8 stores it is likely to be expanded as an
      // inline memcpy so we take that as an upper bound. Otherwise we assume
      // one load and one store per word copied.
      // FIXME: The maxStoresPerMemcpy setting from the target should be used
      // here instead of a magic number of 8, but it's not available via
      // DataLayout.
      NumStores = std::min(NumStores, 8U);

      Cost -= 2 * NumStores * InlineConstants::InstrCost;
    } else {
      // For non-byval arguments subtract off one instruction per call
      // argument.
      Cost -= InlineConstants::InstrCost;
    }
  }

  // If there is only one call of the function, and it has internal linkage,
  // the cost of inlining it drops dramatically.
  bool OnlyOneCallAndLocalLinkage = F.hasLocalLinkage() && F.hasOneUse() &&
    &F == CS.getCalledFunction();
  if (OnlyOneCallAndLocalLinkage)
    Cost += InlineConstants::LastCallToStaticBonus;

  // If the instruction after the call, or the first instruction of the
  // invoke's normal destination, is an unreachable instruction, the function
  // is effectively noreturn. As such, there is little point in inlining this
  // unless there is literally zero cost.
  Instruction *Instr = CS.getInstruction();
  if (InvokeInst *II = dyn_cast<InvokeInst>(Instr)) {
    if (isa<UnreachableInst>(II->getNormalDest()->begin()))
      Threshold = 1;
  } else if (isa<UnreachableInst>(++BasicBlock::iterator(Instr)))
    Threshold = 1;

  // If this function uses the coldcc calling convention, prefer not to inline
  // it.
  if (F.getCallingConv() == CallingConv::Cold)
    Cost += InlineConstants::ColdccPenalty;

  // Check if we're done. This can happen due to bonuses and penalties.
  if (Cost > Threshold)
    return false;

  if (F.empty())
    return true;

  Function *Caller = CS.getInstruction()->getParent()->getParent();
  // Check if the caller function is recursive itself.
  for (Value::use_iterator U = Caller->use_begin(), E = Caller->use_end();
       U != E; ++U) {
    CallSite Site(cast<Value>(*U));
    if (!Site)
      continue;
    Instruction *I = Site.getInstruction();
    if (I->getParent()->getParent() == Caller) {
      IsCallerRecursive = true;
      break;
    }
  }

  // Track whether we've seen a return instruction. The first return
  // instruction is free, as at least one will usually disappear in inlining.
  bool HasReturn = false;

  // Populate our simplified values by mapping from function arguments to call
  // arguments with known important simplifications.
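  // E.g. (an illustrative call site): for 'call void @f(i32 42, i8* %a)'
  // where %a is a caller alloca, the i32 parameter maps to the constant 42
  // in SimplifiedValues, and the pointer parameter becomes an SROA candidate
  // rooted at %a with a zero initial savings entry.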
  CallSite::arg_iterator CAI = CS.arg_begin();
  for (Function::arg_iterator FAI = F.arg_begin(), FAE = F.arg_end();
       FAI != FAE; ++FAI, ++CAI) {
    assert(CAI != CS.arg_end());
    if (Constant *C = dyn_cast<Constant>(CAI))
      SimplifiedValues[FAI] = C;

    Value *PtrArg = *CAI;
    if (ConstantInt *C = stripAndComputeInBoundsConstantOffsets(PtrArg)) {
      ConstantOffsetPtrs[FAI] = std::make_pair(PtrArg, C->getValue());

      // We can SROA any pointer arguments derived from alloca instructions.
      if (isa<AllocaInst>(PtrArg)) {
        SROAArgValues[FAI] = PtrArg;
        SROAArgCosts[PtrArg] = 0;
      }
    }
  }
  NumConstantArgs = SimplifiedValues.size();
  NumConstantOffsetPtrArgs = ConstantOffsetPtrs.size();
  NumAllocaArgs = SROAArgValues.size();

  // The worklist of live basic blocks in the callee *after* inlining. We
  // avoid adding basic blocks of the callee which can be proven to be dead
  // for this particular call site in order to get more accurate cost
  // estimates. This requires a somewhat heavyweight iteration pattern: we
  // need to walk the basic blocks in a breadth-first order as we insert live
  // successors. To accomplish this we use a small-size optimized SetVector,
  // favoring small sizes because we typically exit early once the threshold
  // is crossed.
  typedef SetVector<BasicBlock *, SmallVector<BasicBlock *, 16>,
                    SmallPtrSet<BasicBlock *, 16> > BBSetVector;
  BBSetVector BBWorklist;
  BBWorklist.insert(&F.getEntryBlock());
  // Note that we *must not* cache the size, this loop grows the worklist.
  for (unsigned Idx = 0; Idx != BBWorklist.size(); ++Idx) {
    // Bail out the moment we cross the threshold. This means we'll
    // under-count the cost, but only when undercounting doesn't matter.
    if (Cost > (Threshold + VectorBonus))
      break;

    BasicBlock *BB = BBWorklist[Idx];
    if (BB->empty())
      continue;

    // Handle the terminator cost here where we can track returns and other
    // function-wide constructs.
    TerminatorInst *TI = BB->getTerminator();

    // We never want to inline functions that contain an indirectbr. This is
    // incorrect because all the blockaddresses (in static global initializers
    // for example) would be referring to the original function, and this
    // indirect jump would jump from the inlined copy of the function into the
    // original function, which is extremely undefined behavior.
    // FIXME: This logic isn't really right; we can safely inline functions
    // with indirectbr's as long as no other function or global references the
    // blockaddress of a block within the current function. And as a QOI
    // issue, if someone is using a blockaddress without an indirectbr, and
    // that reference somehow ends up in another function or global, we
    // probably don't want to inline this function.
    if (isa<IndirectBrInst>(TI))
      return false;

    if (!HasReturn && isa<ReturnInst>(TI))
      HasReturn = true;
    else
      Cost += InlineConstants::InstrCost;

    // Analyze the cost of this block. If we blow through the threshold, this
    // returns false, and we can bail out.
    if (!analyzeBlock(BB)) {
      if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca)
        return false;

      // If the caller is a recursive function then we don't want to inline
      // functions which allocate a lot of stack space because it would
      // increase the caller stack usage dramatically.
      if (IsCallerRecursive &&
          AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller)
        return false;

      break;
    }

    // Add in the live successors by first checking whether we have a
    // terminator that may be simplified based on the values simplified by
    // this call.
    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (BI->isConditional()) {
        Value *Cond = BI->getCondition();
        if (ConstantInt *SimpleCond
              = dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
          BBWorklist.insert(BI->getSuccessor(SimpleCond->isZero() ? 1 : 0));
          continue;
        }
      }
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
      Value *Cond = SI->getCondition();
      if (ConstantInt *SimpleCond
            = dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
        BBWorklist.insert(SI->findCaseValue(SimpleCond).getCaseSuccessor());
        continue;
      }
    }

    // If we're unable to select a particular successor, just count all of
    // them.
    for (unsigned TIdx = 0, TSize = TI->getNumSuccessors(); TIdx != TSize;
         ++TIdx)
      BBWorklist.insert(TI->getSuccessor(TIdx));

    // If we had any successors at this point, then post-inlining is likely to
    // have them as well. Note that we assume any basic blocks which existed
    // due to branches or switches which folded above will also fold after
    // inlining.
    if (SingleBB && TI->getNumSuccessors() > 1) {
      // Take off the bonus we applied to the threshold.
      Threshold -= SingleBBBonus;
      SingleBB = false;
    }
  }

  // If this is a noduplicate call, we can still inline as long as
  // inlining this would cause the removal of the caller (so the instruction
  // is not actually duplicated, just moved).
  if (!OnlyOneCallAndLocalLinkage && ContainsNoDuplicateCall)
    return false;

  Threshold += VectorBonus;

  return Cost < Threshold;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// \brief Dump stats about this call's analysis.
void CallAnalyzer::dump() {
#define DEBUG_PRINT_STAT(x) llvm::dbgs() << "  " #x ": " << x << "\n"
  DEBUG_PRINT_STAT(NumConstantArgs);
  DEBUG_PRINT_STAT(NumConstantOffsetPtrArgs);
  DEBUG_PRINT_STAT(NumAllocaArgs);
  DEBUG_PRINT_STAT(NumConstantPtrCmps);
  DEBUG_PRINT_STAT(NumConstantPtrDiffs);
  DEBUG_PRINT_STAT(NumInstructionsSimplified);
  DEBUG_PRINT_STAT(SROACostSavings);
  DEBUG_PRINT_STAT(SROACostSavingsLost);
  DEBUG_PRINT_STAT(ContainsNoDuplicateCall);
#undef DEBUG_PRINT_STAT
}
#endif

InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS, int Threshold) {
  return getInlineCost(CS, CS.getCalledFunction(), Threshold);
}

InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS, Function *Callee,
                                             int Threshold) {
  // Cannot inline indirect calls.
  if (!Callee)
    return llvm::InlineCost::getNever();

  // Calls to functions with always-inline attributes should be inlined
  // whenever possible.
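  // E.g. (illustrative C++ source): a function declared with
  // __attribute__((always_inline)) carries this attribute, and is inlined
  // here regardless of cost unless its body is impossible to inline (see
  // isInlineViable below).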
  if (Callee->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                           Attribute::AlwaysInline)) {
    if (isInlineViable(*Callee))
      return llvm::InlineCost::getAlways();
    return llvm::InlineCost::getNever();
  }

  // Don't inline functions which can be redefined at link-time to mean
  // something else. Don't inline functions marked noinline or call sites
  // marked noinline.
  if (Callee->mayBeOverridden() ||
      Callee->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                           Attribute::NoInline) ||
      CS.isNoInline())
    return llvm::InlineCost::getNever();

  DEBUG(llvm::dbgs() << "  Analyzing call of " << Callee->getName()
        << "...\n");

  CallAnalyzer CA(TD, *Callee, Threshold);
  bool ShouldInline = CA.analyzeCall(CS);

  DEBUG(CA.dump());

  // Check if there was a reason to force inlining or no inlining.
  if (!ShouldInline && CA.getCost() < CA.getThreshold())
    return InlineCost::getNever();
  if (ShouldInline && CA.getCost() >= CA.getThreshold())
    return InlineCost::getAlways();

  return llvm::InlineCost::get(CA.getCost(), CA.getThreshold());
}

bool InlineCostAnalyzer::isInlineViable(Function &F) {
  bool ReturnsTwice =
    F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                   Attribute::ReturnsTwice);
  for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
    // Disallow inlining of functions which contain an indirect branch.
    if (isa<IndirectBrInst>(BI->getTerminator()))
      return false;

    for (BasicBlock::iterator II = BI->begin(), IE = BI->end(); II != IE;
         ++II) {
      CallSite CS(II);
      if (!CS)
        continue;

      // Disallow recursive calls.
      if (&F == CS.getCalledFunction())
        return false;

      // Disallow calls which expose returns-twice to a function not
      // previously attributed as such.
      if (!ReturnsTwice && CS.isCall() &&
          cast<CallInst>(CS.getInstruction())->canReturnTwice())
        return false;
    }
  }

  return true;
}