//===- llvm/Analysis/TargetTransformInfo.cpp ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "tti"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/ErrorHandling.h"

using namespace llvm;

// Set up the analysis group to manage the TargetTransformInfo passes.
INITIALIZE_ANALYSIS_GROUP(TargetTransformInfo, "Target Information", NoTTI)
char TargetTransformInfo::ID = 0;

TargetTransformInfo::~TargetTransformInfo() {
}

void TargetTransformInfo::pushTTIStack(Pass *P) {
  TopTTI = this;
  PrevTTI = &P->getAnalysis<TargetTransformInfo>();

  // Walk up the chain and update the top TTI pointer.
  for (TargetTransformInfo *PTTI = PrevTTI; PTTI; PTTI = PTTI->PrevTTI)
    PTTI->TopTTI = this;
}

void TargetTransformInfo::popTTIStack() {
  TopTTI = 0;

  // Walk up the chain and update the top TTI pointer.
  for (TargetTransformInfo *PTTI = PrevTTI; PTTI; PTTI = PTTI->PrevTTI)
    PTTI->TopTTI = PrevTTI;

  PrevTTI = 0;
}

void TargetTransformInfo::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetTransformInfo>();
}

unsigned TargetTransformInfo::getOperationCost(unsigned Opcode, Type *Ty,
                                               Type *OpTy) const {
  return PrevTTI->getOperationCost(Opcode, Ty, OpTy);
}

unsigned TargetTransformInfo::getGEPCost(
    const Value *Ptr, ArrayRef<const Value *> Operands) const {
  return PrevTTI->getGEPCost(Ptr, Operands);
}

unsigned TargetTransformInfo::getCallCost(FunctionType *FTy,
                                          int NumArgs) const {
  return PrevTTI->getCallCost(FTy, NumArgs);
}

unsigned TargetTransformInfo::getCallCost(const Function *F,
                                          int NumArgs) const {
  return PrevTTI->getCallCost(F, NumArgs);
}

unsigned TargetTransformInfo::getCallCost(
    const Function *F, ArrayRef<const Value *> Arguments) const {
  return PrevTTI->getCallCost(F, Arguments);
}

unsigned TargetTransformInfo::getIntrinsicCost(
    Intrinsic::ID IID, Type *RetTy, ArrayRef<Type *> ParamTys) const {
  return PrevTTI->getIntrinsicCost(IID, RetTy, ParamTys);
}

unsigned TargetTransformInfo::getIntrinsicCost(
    Intrinsic::ID IID, Type *RetTy, ArrayRef<const Value *> Arguments) const {
  return PrevTTI->getIntrinsicCost(IID, RetTy, Arguments);
}

unsigned TargetTransformInfo::getUserCost(const User *U) const {
  return PrevTTI->getUserCost(U);
}

bool TargetTransformInfo::isLoweredToCall(const Function *F) const {
  return PrevTTI->isLoweredToCall(F);
}

bool TargetTransformInfo::isLegalAddImmediate(int64_t Imm) const {
  return PrevTTI->isLegalAddImmediate(Imm);
}

bool TargetTransformInfo::isLegalICmpImmediate(int64_t Imm) const {
  return PrevTTI->isLegalICmpImmediate(Imm);
}

bool TargetTransformInfo::isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                                                int64_t BaseOffset,
                                                bool HasBaseReg,
                                                int64_t Scale) const {
  return PrevTTI->isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
                                        Scale);
}

bool TargetTransformInfo::isTruncateFree(Type *Ty1, Type *Ty2) const {
  return PrevTTI->isTruncateFree(Ty1, Ty2);
}

bool TargetTransformInfo::isTypeLegal(Type *Ty) const {
  return PrevTTI->isTypeLegal(Ty);
}

unsigned TargetTransformInfo::getJumpBufAlignment() const {
  return PrevTTI->getJumpBufAlignment();
}

unsigned TargetTransformInfo::getJumpBufSize() const {
  return PrevTTI->getJumpBufSize();
}

bool TargetTransformInfo::shouldBuildLookupTables() const {
  return PrevTTI->shouldBuildLookupTables();
}

TargetTransformInfo::PopcntSupportKind
TargetTransformInfo::getPopcntSupport(unsigned IntTyWidthInBit) const {
  return PrevTTI->getPopcntSupport(IntTyWidthInBit);
}

unsigned TargetTransformInfo::getIntImmCost(const APInt &Imm, Type *Ty) const {
  return PrevTTI->getIntImmCost(Imm, Ty);
}

unsigned TargetTransformInfo::getNumberOfRegisters(bool Vector) const {
  return PrevTTI->getNumberOfRegisters(Vector);
}

unsigned TargetTransformInfo::getRegisterBitWidth(bool Vector) const {
  return PrevTTI->getRegisterBitWidth(Vector);
}

unsigned TargetTransformInfo::getMaximumUnrollFactor() const {
  return PrevTTI->getMaximumUnrollFactor();
}

unsigned TargetTransformInfo::getArithmeticInstrCost(unsigned Opcode,
                                                     Type *Ty,
                                                     OperandValueKind Op1Info,
                                                     OperandValueKind Op2Info) const {
  return PrevTTI->getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info);
}

unsigned TargetTransformInfo::getShuffleCost(ShuffleKind Kind, Type *Tp,
                                             int Index, Type *SubTp) const {
  return PrevTTI->getShuffleCost(Kind, Tp, Index, SubTp);
}

unsigned TargetTransformInfo::getCastInstrCost(unsigned Opcode, Type *Dst,
                                               Type *Src) const {
  return PrevTTI->getCastInstrCost(Opcode, Dst, Src);
}

unsigned TargetTransformInfo::getCFInstrCost(unsigned Opcode) const {
  return PrevTTI->getCFInstrCost(Opcode);
}

unsigned TargetTransformInfo::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                                 Type *CondTy) const {
  return PrevTTI->getCmpSelInstrCost(Opcode, ValTy, CondTy);
}

unsigned TargetTransformInfo::getVectorInstrCost(unsigned Opcode, Type *Val,
                                                 unsigned Index) const {
  return PrevTTI->getVectorInstrCost(Opcode, Val, Index);
}

unsigned TargetTransformInfo::getMemoryOpCost(unsigned Opcode, Type *Src,
                                              unsigned Alignment,
                                              unsigned AddressSpace) const {
  return PrevTTI->getMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
}

unsigned
TargetTransformInfo::getIntrinsicInstrCost(Intrinsic::ID ID,
                                           Type *RetTy,
                                           ArrayRef<Type *> Tys) const {
  return PrevTTI->getIntrinsicInstrCost(ID, RetTy, Tys);
}

unsigned TargetTransformInfo::getNumberOfParts(Type *Tp) const {
  return PrevTTI->getNumberOfParts(Tp);
}

unsigned TargetTransformInfo::getAddressComputationCost(Type *Tp) const {
  return PrevTTI->getAddressComputationCost(Tp);
}

namespace {

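/// \brief Conservative, target-independent implementation of TTI.
///
/// NoTTI is the default of the TargetTransformInfo analysis group. It sits at
/// the bottom of the TTI stack (its PrevTTI is null) and answers every query
/// with a fixed, target-independent guess. Target-specific TTI passes are
/// layered on top of it via pushTTIStack() and fall back to it (through the
/// delegating wrappers above) for any query they do not override.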
struct NoTTI : ImmutablePass, TargetTransformInfo {
  const DataLayout *DL;

  NoTTI() : ImmutablePass(ID), DL(0) {
    initializeNoTTIPass(*PassRegistry::getPassRegistry());
  }

  virtual void initializePass() {
    // Note that this subclass is special, and must *not* call initializeTTI as
    // it does not chain.
    TopTTI = this;
    PrevTTI = 0;
    DL = getAnalysisIfAvailable<DataLayout>();
  }

  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    // Note that this subclass is special, and must *not* call
    // TTI::getAnalysisUsage as it breaks the recursion.
  }

  /// Pass identification.
  static char ID;

  /// Provide necessary pointer adjustments for the two base classes.
  virtual void *getAdjustedAnalysisPointer(const void *ID) {
    if (ID == &TargetTransformInfo::ID)
      return (TargetTransformInfo*)this;
    return this;
  }

  unsigned getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) const {
    switch (Opcode) {
    default:
      // By default, just classify everything as 'basic'.
      return TCC_Basic;

    case Instruction::GetElementPtr:
      llvm_unreachable("Use getGEPCost for GEP operations!");

    case Instruction::BitCast:
      assert(OpTy && "Cast instructions must provide the operand type");
      if (Ty == OpTy || (Ty->isPointerTy() && OpTy->isPointerTy()))
        // Identity and pointer-to-pointer casts are free.
        return TCC_Free;

      // Otherwise, the default basic cost is used.
      return TCC_Basic;

    case Instruction::IntToPtr:
      // An inttoptr cast is free so long as the input is a legal integer type
      // which doesn't contain values outside the range of a pointer.
      if (DL && DL->isLegalInteger(OpTy->getScalarSizeInBits()) &&
          OpTy->getScalarSizeInBits() <= DL->getPointerSizeInBits())
        return TCC_Free;

      // Otherwise it's not a no-op.
      return TCC_Basic;

    case Instruction::PtrToInt:
      // A ptrtoint cast is free so long as the result is large enough to store
      // the pointer, and a legal integer type.
      if (DL && DL->isLegalInteger(Ty->getScalarSizeInBits()) &&
          Ty->getScalarSizeInBits() >= DL->getPointerSizeInBits())
        return TCC_Free;

      // Otherwise it's not a no-op.
      return TCC_Basic;

    case Instruction::Trunc:
      // trunc to a native type is free (assuming the target has compare and
      // shift-right of the same width).
      if (DL && DL->isLegalInteger(DL->getTypeSizeInBits(Ty)))
        return TCC_Free;

      return TCC_Basic;
    }
  }
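
  // Worked example for the hook above (illustrative only): with a typical
  // 64-bit DataLayout, where i64 is a native integer type and pointers are
  // 64 bits wide, a ptrtoint to i64 is classified TCC_Free because i64 is
  // legal and can hold the full pointer value, while a ptrtoint to i32 falls
  // through to TCC_Basic because 32 < 64.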

  unsigned getGEPCost(const Value *Ptr,
                      ArrayRef<const Value *> Operands) const {
    // In the basic model, we just assume that all-constant GEPs will be folded
    // into their uses via addressing modes.
    for (unsigned Idx = 0, Size = Operands.size(); Idx != Size; ++Idx)
      if (!isa<Constant>(Operands[Idx]))
        return TCC_Basic;

    return TCC_Free;
  }

  unsigned getCallCost(FunctionType *FTy, int NumArgs = -1) const {
    assert(FTy && "FunctionType must be provided to this routine.");

    // The target-independent implementation just measures the size of the
    // function by approximating that each argument will take on average one
    // instruction to prepare.

    if (NumArgs < 0)
      // Set the argument number to the number of explicit arguments in the
      // function.
      NumArgs = FTy->getNumParams();

    return TCC_Basic * (NumArgs + 1);
  }

  unsigned getCallCost(const Function *F, int NumArgs = -1) const {
    assert(F && "A concrete function must be provided to this routine.");

    if (NumArgs < 0)
      // Set the argument number to the number of explicit arguments in the
      // function.
      NumArgs = F->arg_size();

    if (Intrinsic::ID IID = (Intrinsic::ID)F->getIntrinsicID()) {
      FunctionType *FTy = F->getFunctionType();
      SmallVector<Type *, 8> ParamTys(FTy->param_begin(), FTy->param_end());
      return TopTTI->getIntrinsicCost(IID, FTy->getReturnType(), ParamTys);
    }

    if (!TopTTI->isLoweredToCall(F))
      return TCC_Basic; // Give a basic cost if it will be lowered directly.

    return TopTTI->getCallCost(F->getFunctionType(), NumArgs);
  }

  unsigned getCallCost(const Function *F,
                       ArrayRef<const Value *> Arguments) const {
    // Simply delegate to generic handling of the call.
    // FIXME: We should use instsimplify or something else to catch calls which
    // will constant fold with these arguments.
    return TopTTI->getCallCost(F, Arguments.size());
  }

  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<Type *> ParamTys) const {
    switch (IID) {
    default:
      // Intrinsics rarely (if ever) have normal argument setup constraints.
      // Model them as having a basic instruction cost.
      // FIXME: This is wrong for libc intrinsics.
      return TCC_Basic;

    case Intrinsic::dbg_declare:
    case Intrinsic::dbg_value:
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::objectsize:
    case Intrinsic::ptr_annotation:
    case Intrinsic::var_annotation:
      // These intrinsics don't actually represent code after lowering.
      return TCC_Free;
    }
  }

  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<const Value *> Arguments) const {
    // Delegate to the generic intrinsic handling code. This mostly provides an
    // opportunity for targets to (for example) special case the cost of
    // certain intrinsics based on constants used as arguments.
    SmallVector<Type *, 8> ParamTys;
    ParamTys.reserve(Arguments.size());
    for (unsigned Idx = 0, Size = Arguments.size(); Idx != Size; ++Idx)
      ParamTys.push_back(Arguments[Idx]->getType());
    return TopTTI->getIntrinsicCost(IID, RetTy, ParamTys);
  }

  unsigned getUserCost(const User *U) const {
    if (isa<PHINode>(U))
      return TCC_Free; // Model all PHI nodes as free.

    if (const GEPOperator *GEP = dyn_cast<GEPOperator>(U))
      // In the basic model we just assume that all-constant GEPs will be
      // folded into their uses via addressing modes.
      return GEP->hasAllConstantIndices() ? TCC_Free : TCC_Basic;

    if (ImmutableCallSite CS = U) {
      const Function *F = CS.getCalledFunction();
      if (!F) {
        // Just use the called value type.
        Type *FTy = CS.getCalledValue()->getType()->getPointerElementType();
        return TopTTI->getCallCost(cast<FunctionType>(FTy), CS.arg_size());
      }

      SmallVector<const Value *, 8> Arguments;
      for (ImmutableCallSite::arg_iterator AI = CS.arg_begin(),
                                           AE = CS.arg_end();
           AI != AE; ++AI)
        Arguments.push_back(*AI);

      return TopTTI->getCallCost(F, Arguments);
    }

    if (const CastInst *CI = dyn_cast<CastInst>(U)) {
      // The result of a cmp instruction is often extended (to be used by other
      // cmp instructions, logical or return instructions). These are usually
      // a no-op on most sane targets.
      if (isa<CmpInst>(CI->getOperand(0)))
        return TCC_Free;
    }

    // Otherwise delegate to the fully generic implementations.
    return getOperationCost(Operator::getOpcode(U), U->getType(),
                            U->getNumOperands() == 1 ?
                                U->getOperand(0)->getType() : 0);
  }
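
  // Illustrative sketch of a hypothetical client (not used by this file): a
  // pass that wants a rough size estimate under this model would typically
  // sum getUserCost over the instructions it cares about, e.g.
  //
  //   unsigned Cost = 0;
  //   for (BasicBlock::const_iterator I = BB->begin(), E = BB->end();
  //        I != E; ++I)
  //     Cost += TTI.getUserCost(&*I);
  //
  // where TTI is the TargetTransformInfo analysis required by that pass.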

  bool isLoweredToCall(const Function *F) const {
    // FIXME: These should almost certainly not be handled here, and instead
    // handled with the help of TLI or the target itself. This was largely
    // ported from existing analysis heuristics here so that such refactorings
    // can take place in the future.

    if (F->isIntrinsic())
      return false;

    if (F->hasLocalLinkage() || !F->hasName())
      return true;

    StringRef Name = F->getName();

    // These will all likely lower to a single selection DAG node.
    if (Name == "copysign" || Name == "copysignf" || Name == "copysignl" ||
        Name == "fabs" || Name == "fabsf" || Name == "fabsl" || Name == "sin" ||
        Name == "sinf" || Name == "sinl" || Name == "cos" || Name == "cosf" ||
        Name == "cosl" || Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl")
      return false;

    // These are all likely to be optimized into something smaller.
    if (Name == "pow" || Name == "powf" || Name == "powl" || Name == "exp2" ||
        Name == "exp2l" || Name == "exp2f" || Name == "floor" ||
        Name == "floorf" || Name == "ceil" || Name == "round" || Name == "ffs" ||
        Name == "ffsl" || Name == "abs" || Name == "labs" || Name == "llabs")
      return false;

    return true;
  }
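
  // The hooks below make no attempt to model any particular target; they
  // return fixed, conservative defaults. Targets that want better answers
  // register their own TTI implementation ahead of NoTTI in the analysis
  // group.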

  bool isLegalAddImmediate(int64_t Imm) const {
    return false;
  }

  bool isLegalICmpImmediate(int64_t Imm) const {
    return false;
  }

  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale) const {
    // Guess that reg+reg addressing is allowed. This heuristic is taken from
    // the implementation of LSR.
    return !BaseGV && BaseOffset == 0 && Scale <= 1;
  }

  bool isTruncateFree(Type *Ty1, Type *Ty2) const {
    return false;
  }

  bool isTypeLegal(Type *Ty) const {
    return false;
  }

  unsigned getJumpBufAlignment() const {
    return 0;
  }

  unsigned getJumpBufSize() const {
    return 0;
  }

  bool shouldBuildLookupTables() const {
    return true;
  }

  PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) const {
    return PSK_Software;
  }

  unsigned getIntImmCost(const APInt &Imm, Type *Ty) const {
    return 1;
  }

  unsigned getNumberOfRegisters(bool Vector) const {
    return 8;
  }

  unsigned getRegisterBitWidth(bool Vector) const {
    return 32;
  }

  unsigned getMaximumUnrollFactor() const {
    return 1;
  }

  unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind,
                                  OperandValueKind) const {
    return 1;
  }

  unsigned getShuffleCost(ShuffleKind Kind, Type *Tp,
                          int Index = 0, Type *SubTp = 0) const {
    return 1;
  }

  unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
                            Type *Src) const {
    return 1;
  }

  unsigned getCFInstrCost(unsigned Opcode) const {
    return 1;
  }

  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                              Type *CondTy = 0) const {
    return 1;
  }

  unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
                              unsigned Index = -1) const {
    return 1;
  }

  unsigned getMemoryOpCost(unsigned Opcode, Type *Src,
                           unsigned Alignment,
                           unsigned AddressSpace) const {
    return 1;
  }

  unsigned getIntrinsicInstrCost(Intrinsic::ID ID,
                                 Type *RetTy,
                                 ArrayRef<Type*> Tys) const {
    return 1;
  }

  unsigned getNumberOfParts(Type *Tp) const {
    return 0;
  }

  unsigned getAddressComputationCost(Type *Tp) const {
    return 0;
  }
};

} // end anonymous namespace

INITIALIZE_AG_PASS(NoTTI, TargetTransformInfo, "notti",
                   "No target information", true, true, true)
char NoTTI::ID = 0;

ImmutablePass *llvm::createNoTargetTransformInfoPass() {
  return new NoTTI();
}