//===- BasicTargetTransformInfo.cpp - Basic target-independent TTI impl ---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file provides the implementation of a basic TargetTransformInfo pass
/// predicated on the target abstractions present in the target independent
/// code generator. It uses these (primarily TargetLowering) to model as much
/// of the TTI query interface as possible. It is included by most targets so
/// that they can specialize only a small subset of the query space.
///
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "basictti"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Target/TargetLowering.h"
#include <utility>

using namespace llvm;

namespace {

class BasicTTI : public ImmutablePass, public TargetTransformInfo {
  const TargetLowering *TLI;

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the result needs to be inserted and/or extracted from vectors.
  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;

public:
  BasicTTI() : ImmutablePass(ID), TLI(0) {
    llvm_unreachable("This pass cannot be directly constructed");
  }

  BasicTTI(const TargetLowering *TLI) : ImmutablePass(ID), TLI(TLI) {
    initializeBasicTTIPass(*PassRegistry::getPassRegistry());
  }

  virtual void initializePass() {
    pushTTIStack(this);
  }

  virtual void finalizePass() {
    popTTIStack();
  }

  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    TargetTransformInfo::getAnalysisUsage(AU);
  }

  /// Pass identification.
  static char ID;

  /// Provide necessary pointer adjustments for the two base classes.
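  /// Because BasicTTI inherits from both ImmutablePass and
  /// TargetTransformInfo, a query for the TargetTransformInfo interface must
  /// return a pointer adjusted to that base subobject.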
  virtual void *getAdjustedAnalysisPointer(const void *ID) {
    if (ID == &TargetTransformInfo::ID)
      return (TargetTransformInfo*)this;
    return this;
  }

  /// \name Scalar TTI Implementations
  /// @{

  virtual bool isLegalAddImmediate(int64_t imm) const;
  virtual bool isLegalICmpImmediate(int64_t imm) const;
  virtual bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                                     int64_t BaseOffset, bool HasBaseReg,
                                     int64_t Scale) const;
  virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const;
  virtual bool isTypeLegal(Type *Ty) const;
  virtual unsigned getJumpBufAlignment() const;
  virtual unsigned getJumpBufSize() const;
  virtual bool shouldBuildLookupTables() const;

  /// @}

  /// \name Vector TTI Implementations
  /// @{

  virtual unsigned getNumberOfRegisters(bool Vector) const;
  virtual unsigned getMaximumUnrollFactor() const;
  virtual unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty) const;
  virtual unsigned getShuffleCost(ShuffleKind Kind, Type *Tp,
                                  int Index, Type *SubTp) const;
  virtual unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
                                    Type *Src) const;
  virtual unsigned getCFInstrCost(unsigned Opcode) const;
  virtual unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                      Type *CondTy) const;
  virtual unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
                                      unsigned Index) const;
  virtual unsigned getMemoryOpCost(unsigned Opcode, Type *Src,
                                   unsigned Alignment,
                                   unsigned AddressSpace) const;
  virtual unsigned getIntrinsicInstrCost(Intrinsic::ID, Type *RetTy,
                                         ArrayRef<Type*> Tys) const;
  virtual unsigned getNumberOfParts(Type *Tp) const;

  /// @}
};

}

INITIALIZE_AG_PASS(BasicTTI, TargetTransformInfo, "basictti",
                   "Target independent code generator's TTI", true, true, false)
char BasicTTI::ID = 0;

ImmutablePass *
llvm::createBasicTargetTransformInfoPass(const TargetLowering *TLI) {
  return new BasicTTI(TLI);
}


bool BasicTTI::isLegalAddImmediate(int64_t imm) const {
  return TLI->isLegalAddImmediate(imm);
}

bool BasicTTI::isLegalICmpImmediate(int64_t imm) const {
  return TLI->isLegalICmpImmediate(imm);
}

bool BasicTTI::isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                                     int64_t BaseOffset, bool HasBaseReg,
                                     int64_t Scale) const {
  TargetLowering::AddrMode AM;
  AM.BaseGV = BaseGV;
  AM.BaseOffs = BaseOffset;
  AM.HasBaseReg = HasBaseReg;
  AM.Scale = Scale;
  return TLI->isLegalAddressingMode(AM, Ty);
}

bool BasicTTI::isTruncateFree(Type *Ty1, Type *Ty2) const {
  return TLI->isTruncateFree(Ty1, Ty2);
}

bool BasicTTI::isTypeLegal(Type *Ty) const {
  EVT T = TLI->getValueType(Ty);
  return TLI->isTypeLegal(T);
}

unsigned BasicTTI::getJumpBufAlignment() const {
  return TLI->getJumpBufAlignment();
}

unsigned BasicTTI::getJumpBufSize() const {
  return TLI->getJumpBufSize();
}

bool BasicTTI::shouldBuildLookupTables() const {
  return TLI->supportJumpTables() &&
      (TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
       TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other));
}

//===----------------------------------------------------------------------===//
//
// Calls used by the vectorizers.
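// These hooks provide conservative default cost estimates derived from the
// target's type-legalization and operation-legality information exposed
// through TargetLowering.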
//
//===----------------------------------------------------------------------===//

unsigned BasicTTI::getScalarizationOverhead(Type *Ty, bool Insert,
                                            bool Extract) const {
  assert(Ty->isVectorTy() && "Can only scalarize vectors");
  unsigned Cost = 0;

  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    if (Insert)
      Cost += TopTTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
    if (Extract)
      Cost += TopTTI->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }

  return Cost;
}

unsigned BasicTTI::getNumberOfRegisters(bool Vector) const {
  return 1;
}

unsigned BasicTTI::getMaximumUnrollFactor() const {
  return 1;
}

unsigned BasicTTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty) const {
  // Check if any of the operands are vector operands.
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty);

  if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
    // The operation is legal. Assume it costs 1.
    // If the type is split to multiple registers, assume that there is some
    // overhead to this.
    // TODO: Once we have extract/insert subvector cost we need to use them.
    if (LT.first > 1)
      return LT.first * 2;
    return LT.first * 1;
  }

  if (!TLI->isOperationExpand(ISD, LT.second)) {
    // If the operation is custom lowered then assume
    // that the code is twice as expensive.
    return LT.first * 2;
  }

  // Else, assume that we need to scalarize this op.
  if (Ty->isVectorTy()) {
    unsigned Num = Ty->getVectorNumElements();
    unsigned Cost = TopTTI->getArithmeticInstrCost(Opcode, Ty->getScalarType());
    // Return the cost of multiple scalar invocations plus the cost of
    // inserting and extracting the values.
    return getScalarizationOverhead(Ty, true, true) + Num * Cost;
  }

  // We don't know anything about this scalar instruction.
  return 1;
}

unsigned BasicTTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
                                  Type *SubTp) const {
  return 1;
}

unsigned BasicTTI::getCastInstrCost(unsigned Opcode, Type *Dst,
                                    Type *Src) const {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  std::pair<unsigned, MVT> SrcLT = TLI->getTypeLegalizationCost(Src);
  std::pair<unsigned, MVT> DstLT = TLI->getTypeLegalizationCost(Dst);

  // Handle scalar conversions.
  if (!Src->isVectorTy() && !Dst->isVectorTy()) {

    // Scalar bitcasts are usually free.
    if (Opcode == Instruction::BitCast)
      return 0;

    if (Opcode == Instruction::Trunc &&
        TLI->isTruncateFree(SrcLT.second, DstLT.second))
      return 0;

    if (Opcode == Instruction::ZExt &&
        TLI->isZExtFree(SrcLT.second, DstLT.second))
      return 0;

    // Just check the op cost. If the operation is legal then assume it
    // costs 1.
    if (!TLI->isOperationExpand(ISD, DstLT.second))
      return 1;

    // Assume that illegal scalar instructions are expensive.
    return 4;
  }

  // Check vector-to-vector casts.
  if (Dst->isVectorTy() && Src->isVectorTy()) {

    // If the cast is between same-sized registers, then the check is simple.
    if (SrcLT.first == DstLT.first &&
        SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {

      // Bitcasts between types that are legalized to the same type are free.
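      // A trunc between same-sized legalized registers is likewise treated
      // as a no-op here.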
      if (Opcode == Instruction::BitCast || Opcode == Instruction::Trunc)
        return 0;

      // Assume that Zext is done using AND.
      if (Opcode == Instruction::ZExt)
        return 1;

      // Assume that sext is done using SHL and SRA.
      if (Opcode == Instruction::SExt)
        return 2;

      // Just check the op cost. If the operation is legal then assume it costs
      // 1 and multiply by the type-legalization overhead.
      if (!TLI->isOperationExpand(ISD, DstLT.second))
        return SrcLT.first * 1;
    }

    // If we are converting vectors and the operation is illegal, or
    // if the vectors are legalized to different types, estimate the
    // scalarization costs.
    unsigned Num = Dst->getVectorNumElements();
    unsigned Cost = TopTTI->getCastInstrCost(Opcode, Dst->getScalarType(),
                                             Src->getScalarType());

    // Return the cost of multiple scalar invocations plus the cost of
    // inserting and extracting the values.
    return getScalarizationOverhead(Dst, true, true) + Num * Cost;
  }

  // We already handled vector-to-vector and scalar-to-scalar conversions. This
  // is where we handle bitcast between vectors and scalars. We need to assume
  // that the conversion is scalarized in one way or another.
  if (Opcode == Instruction::BitCast)
    // Illegal bitcasts are done by storing and loading from a stack slot.
    return (Src->isVectorTy()? getScalarizationOverhead(Src, false, true):0) +
           (Dst->isVectorTy()? getScalarizationOverhead(Dst, true, false):0);

  llvm_unreachable("Unhandled cast");
}

unsigned BasicTTI::getCFInstrCost(unsigned Opcode) const {
  // Branches are assumed to be predicted.
  return 0;
}

unsigned BasicTTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                      Type *CondTy) const {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // Selects on vectors are actually vector selects.
  if (ISD == ISD::SELECT) {
    assert(CondTy && "CondTy must exist");
    if (CondTy->isVectorTy())
      ISD = ISD::VSELECT;
  }

  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);

  if (!TLI->isOperationExpand(ISD, LT.second)) {
    // The operation is legal. Assume it costs 1. Multiply
    // by the type-legalization overhead.
    return LT.first * 1;
  }

  // Otherwise, assume that the op is scalarized.
  if (ValTy->isVectorTy()) {
    unsigned Num = ValTy->getVectorNumElements();
    if (CondTy)
      CondTy = CondTy->getScalarType();
    unsigned Cost = TopTTI->getCmpSelInstrCost(Opcode, ValTy->getScalarType(),
                                               CondTy);

    // Return the cost of multiple scalar invocations plus the cost of
    // inserting and extracting the values.
    return getScalarizationOverhead(ValTy, true, false) + Num * Cost;
  }

  // Unknown scalar opcode.
  return 1;
}

unsigned BasicTTI::getVectorInstrCost(unsigned Opcode, Type *Val,
                                      unsigned Index) const {
  return 1;
}

unsigned BasicTTI::getMemoryOpCost(unsigned Opcode, Type *Src,
                                   unsigned Alignment,
                                   unsigned AddressSpace) const {
  assert(!Src->isVoidTy() && "Invalid type");
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Src);

  // Assume that all loads of legal types cost 1.
  return LT.first;
}

unsigned BasicTTI::getIntrinsicInstrCost(Intrinsic::ID, Type *RetTy,
                                         ArrayRef<Type *> Tys) const {
  // Assume that we need to scalarize this intrinsic.
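  // The cost is one scalar call per vector lane plus the overhead of
  // inserting the scalar results into the vector return value and extracting
  // the scalar operands from any vector arguments.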
  unsigned ScalarizationCost = 0;
  unsigned ScalarCalls = 1;
  if (RetTy->isVectorTy()) {
    ScalarizationCost = getScalarizationOverhead(RetTy, true, false);
    ScalarCalls = std::max(ScalarCalls, RetTy->getVectorNumElements());
  }
  for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
    if (Tys[i]->isVectorTy()) {
      ScalarizationCost += getScalarizationOverhead(Tys[i], false, true);
      ScalarCalls = std::max(ScalarCalls, Tys[i]->getVectorNumElements());
    }
  }
  return ScalarCalls + ScalarizationCost;
}

unsigned BasicTTI::getNumberOfParts(Type *Tp) const {
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Tp);
  return LT.first;
}