//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86TargetMachine.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

#define DEBUG_TYPE "x86tti"

// Declare the pass initialization routine locally as target-specific passes
// don't have a target-wide initialization entry point, and so we rely on the
// pass constructor initialization.
namespace llvm {
void initializeX86TTIPass(PassRegistry &);
}

namespace {

class X86TTI final : public ImmutablePass, public TargetTransformInfo {
  const X86Subtarget *ST;
  const X86TargetLowering *TLI;

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the result needs to be inserted and/or extracted from vectors.
  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;

public:
  X86TTI() : ImmutablePass(ID), ST(nullptr), TLI(nullptr) {
    llvm_unreachable("This pass cannot be directly constructed");
  }

  X86TTI(const X86TargetMachine *TM)
      : ImmutablePass(ID), ST(TM->getSubtargetImpl()),
        TLI(TM->getTargetLowering()) {
    initializeX86TTIPass(*PassRegistry::getPassRegistry());
  }

  void initializePass() override {
    pushTTIStack(this);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    TargetTransformInfo::getAnalysisUsage(AU);
  }

  /// Pass identification.
  static char ID;

  /// Provide necessary pointer adjustments for the two base classes.
  void *getAdjustedAnalysisPointer(const void *ID) override {
    if (ID == &TargetTransformInfo::ID)
      return (TargetTransformInfo*)this;
    return this;
  }

  /// \name Scalar TTI Implementations
  /// @{
  PopcntSupportKind getPopcntSupport(unsigned TyWidth) const override;

  /// @}

  /// \name Vector TTI Implementations
  /// @{

  unsigned getNumberOfRegisters(bool Vector) const override;
  unsigned getRegisterBitWidth(bool Vector) const override;
  unsigned getMaximumUnrollFactor() const override;
  unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind,
                                  OperandValueKind) const override;
  unsigned getShuffleCost(ShuffleKind Kind, Type *Tp,
                          int Index, Type *SubTp) const override;
  unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
                            Type *Src) const override;
  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                              Type *CondTy) const override;
  unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
                              unsigned Index) const override;
  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                           unsigned AddressSpace) const override;

  unsigned getAddressComputationCost(Type *PtrTy,
                                     bool IsComplex) const override;

  unsigned getReductionCost(unsigned Opcode, Type *Ty,
                            bool IsPairwiseForm) const override;

  unsigned getIntImmCost(int64_t) const;

  unsigned getIntImmCost(const APInt &Imm, Type *Ty) const override;

  unsigned getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                         Type *Ty) const override;
  unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                         Type *Ty) const override;

  /// @}
};

} // end anonymous namespace

INITIALIZE_AG_PASS(X86TTI, TargetTransformInfo, "x86tti",
                   "X86 Target Transform Info", true, true, false)
char X86TTI::ID = 0;

ImmutablePass *
llvm::createX86TargetTransformInfoPass(const X86TargetMachine *TM) {
  return new X86TTI(TM);
}

//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//

X86TTI::PopcntSupportKind X86TTI::getPopcntSupport(unsigned TyWidth) const {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  // instructions is inefficient. Once the problem is fixed, we should
  // call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? PSK_FastHardware : PSK_Software;
}

unsigned X86TTI::getNumberOfRegisters(bool Vector) const {
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit())
    return 16;
  return 8;
}

unsigned X86TTI::getRegisterBitWidth(bool Vector) const {
  if (Vector) {
    if (ST->hasAVX()) return 256;
    if (ST->hasSSE1()) return 128;
    return 0;
  }

  if (ST->is64Bit())
    return 64;
  return 32;
}

unsigned X86TTI::getMaximumUnrollFactor() const {
  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}
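// A note on the cost tables below: getTypeLegalizationCost returns a pair of
// {number of parts, legalized type}. Roughly speaking, on an SSE2-only target
// an operation on <8 x i32> is legalized by splitting it into two <4 x i32>
// halves, so LT.first is 2 and each per-type table cost below is scaled by
// that split factor.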
unsigned X86TTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                        OperandValueKind Op1Info,
                                        OperandValueKind Op2Info) const {
  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry<MVT::SimpleValueType>
  AVX2UniformConstCostTable[] = {
    { ISD::SDIV, MVT::v16i16,  6 }, // vpmulhw sequence
    { ISD::UDIV, MVT::v16i16,  6 }, // vpmulhuw sequence
    { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
    { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2UniformConstCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2UniformConstCostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> AVX2CostTable[] = {
    // Shifts on v4i64/v8i32 are legal on AVX2, even though we declare them
    // as custom in order to detect cases where the shift amount is a scalar.
    { ISD::SHL,  MVT::v4i32,   1 },
    { ISD::SRL,  MVT::v4i32,   1 },
    { ISD::SRA,  MVT::v4i32,   1 },
    { ISD::SHL,  MVT::v8i32,   1 },
    { ISD::SRL,  MVT::v8i32,   1 },
    { ISD::SRA,  MVT::v8i32,   1 },
    { ISD::SHL,  MVT::v2i64,   1 },
    { ISD::SRL,  MVT::v2i64,   1 },
    { ISD::SHL,  MVT::v4i64,   1 },
    { ISD::SRL,  MVT::v4i64,   1 },

    { ISD::SHL,  MVT::v32i8,   42 },    // cmpeqb sequence.
    { ISD::SHL,  MVT::v16i16,  16*10 }, // Scalarized.

    { ISD::SRL,  MVT::v32i8,   32*10 }, // Scalarized.
    { ISD::SRL,  MVT::v16i16,  8*10 },  // Scalarized.

    { ISD::SRA,  MVT::v32i8,   32*10 }, // Scalarized.
    { ISD::SRA,  MVT::v16i16,  16*10 }, // Scalarized.
    { ISD::SRA,  MVT::v4i64,   4*10 },  // Scalarized.

    // Vectorizing division is a bad idea. See the SSE2 table for more
    // comments.
    { ISD::SDIV, MVT::v32i8,   32*20 },
    { ISD::SDIV, MVT::v16i16,  16*20 },
    { ISD::SDIV, MVT::v8i32,   8*20 },
    { ISD::SDIV, MVT::v4i64,   4*20 },
    { ISD::UDIV, MVT::v32i8,   32*20 },
    { ISD::UDIV, MVT::v16i16,  16*20 },
    { ISD::UDIV, MVT::v8i32,   8*20 },
    { ISD::UDIV, MVT::v4i64,   4*20 },
  };

  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return LT.first;

    int Idx = CostTableLookup(AVX2CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2CostTable[Idx].Cost;
  }
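  // Worked example for the constant-shift trick above: shifting <16 x i16>
  // left by the constant vector <3, 3, ..., 3> is the same as multiplying by
  // <8, 8, ..., 8> (i.e. 2^3), which a single vpmullw handles; hence the cost
  // of just LT.first.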
  static const CostTblEntry<MVT::SimpleValueType>
  SSE2UniformConstCostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // Constant splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i8,  1 }, // psllw.
    { ISD::SHL,  MVT::v8i16,  1 }, // psllw.
    { ISD::SHL,  MVT::v4i32,  1 }, // pslld.
    { ISD::SHL,  MVT::v2i64,  1 }, // psllq.

    { ISD::SRL,  MVT::v16i8,  1 }, // psrlw.
    { ISD::SRL,  MVT::v8i16,  1 }, // psrlw.
    { ISD::SRL,  MVT::v4i32,  1 }, // psrld.
    { ISD::SRL,  MVT::v2i64,  1 }, // psrlq.

    { ISD::SRA,  MVT::v16i8,  4 }, // psrlw, pand, pxor, psubb.
    { ISD::SRA,  MVT::v8i16,  1 }, // psraw.
    { ISD::SRA,  MVT::v4i32,  1 }, // psrad.

    { ISD::SDIV, MVT::v8i16,  6 }, // pmulhw sequence
    { ISD::UDIV, MVT::v8i16,  6 }, // pmulhuw sequence
    { ISD::SDIV, MVT::v4i32, 19 }, // pmuludq sequence
    { ISD::UDIV, MVT::v4i32, 15 }, // pmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2()) {
    // pmuldq sequence.
    if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 15;

    int Idx = CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * SSE2UniformConstCostTable[Idx].Cost;
  }

  if (ISD == ISD::SHL &&
      Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
    EVT VT = LT.second;
    if ((VT == MVT::v8i16 && ST->hasSSE2()) ||
        (VT == MVT::v4i32 && ST->hasSSE41()))
      // A vector shift left by a non-uniform constant can be lowered into a
      // vector multiply (pmullw/pmulld).
      return LT.first;
    if (VT == MVT::v4i32 && ST->hasSSE2())
      // A vector shift left by a non-uniform constant is converted into a
      // vector multiply; the new multiply is eventually lowered into a
      // sequence of shuffles and 2 x pmuludq.
      ISD = ISD::MUL;
  }

  static const CostTblEntry<MVT::SimpleValueType> SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // In some cases where the shift amount is a scalar, we would be able to
    // generate better code. Unfortunately, when this is the case the value
    // (the splat) gets hoisted out of the loop, thereby making it invisible
    // to ISel. The cost model must return worst-case assumptions because it
    // is used for vectorization and we don't want to make vectorized code
    // worse than scalar code.
    { ISD::SHL,  MVT::v16i8,  30 },    // cmpeqb sequence.
    { ISD::SHL,  MVT::v8i16,  8*10 },  // Scalarized.
    { ISD::SHL,  MVT::v4i32,  2*5 },   // We optimized this using mul.
    { ISD::SHL,  MVT::v2i64,  2*10 },  // Scalarized.
    { ISD::SHL,  MVT::v4i64,  4*10 },  // Scalarized.

    { ISD::SRL,  MVT::v16i8,  16*10 }, // Scalarized.
    { ISD::SRL,  MVT::v8i16,  8*10 },  // Scalarized.
    { ISD::SRL,  MVT::v4i32,  4*10 },  // Scalarized.
    { ISD::SRL,  MVT::v2i64,  2*10 },  // Scalarized.

    { ISD::SRA,  MVT::v16i8,  16*10 }, // Scalarized.
    { ISD::SRA,  MVT::v8i16,  8*10 },  // Scalarized.
    { ISD::SRA,  MVT::v4i32,  4*10 },  // Scalarized.
    { ISD::SRA,  MVT::v2i64,  2*10 },  // Scalarized.

    // It is not a good idea to vectorize division. We have to scalarize it
    // and in the process we will often end up having to spill regular
    // registers. The overhead of division is going to dominate most kernels
    // anyway, so try hard to prevent vectorization of division - it is
    // generally a bad idea. Assume somewhat arbitrarily that we have to be
    // able to hide "20 cycles" for each lane.
    { ISD::SDIV, MVT::v16i8,  16*20 },
    { ISD::SDIV, MVT::v8i16,  8*20 },
    { ISD::SDIV, MVT::v4i32,  4*20 },
    { ISD::SDIV, MVT::v2i64,  2*20 },
    { ISD::UDIV, MVT::v16i8,  16*20 },
    { ISD::UDIV, MVT::v8i16,  8*20 },
    { ISD::UDIV, MVT::v4i32,  4*20 },
    { ISD::UDIV, MVT::v2i64,  2*20 },
  };

  if (ST->hasSSE2()) {
    int Idx = CostTableLookup(SSE2CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * SSE2CostTable[Idx].Cost;
  }
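  // Worked example for the division entries above: an SDIV on <4 x i32> is
  // scalarized into four extract/divide/insert sequences, and each lane is
  // charged the assumed 20 cycles of divide latency, giving 4*20 = 80. A
  // legalization split would scale that further via LT.first.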
  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL,  MVT::v16i16,  4 },
    { ISD::MUL,  MVT::v8i32,   4 },
    { ISD::SUB,  MVT::v8i32,   4 },
    { ISD::ADD,  MVT::v8i32,   4 },
    { ISD::SUB,  MVT::v4i64,   4 },
    { ISD::ADD,  MVT::v4i64,   4 },
    // A v4i64 multiply is custom lowered as two split v2i64 vectors that then
    // are lowered as a series of long multiplies(3), shifts(4) and adds(2).
    // Because we believe v4i64 to be a legal type, we must also include the
    // split factor of two in the cost table. Therefore, the cost here is 18
    // instead of 9.
    { ISD::MUL,  MVT::v4i64,  18 },
  };

  // Look for AVX1 lowering tricks.
  if (ST->hasAVX() && !ST->hasAVX2()) {
    EVT VT = LT.second;

    // v16i16 and v8i32 shifts by non-uniform constants are lowered into a
    // sequence of extract + two vector multiplies + insert.
    if (ISD == ISD::SHL && (VT == MVT::v8i32 || VT == MVT::v16i16) &&
        Op2Info == TargetTransformInfo::OK_NonUniformConstantValue)
      ISD = ISD::MUL;

    int Idx = CostTableLookup(AVX1CostTable, ISD, VT);
    if (Idx != -1)
      return LT.first * AVX1CostTable[Idx].Cost;
  }

  // Custom lowering of vectors.
  static const CostTblEntry<MVT::SimpleValueType> CustomLowered[] = {
    // A v2i64/v4i64 multiply is custom lowered as a series of long
    // multiplies(3), shifts(4) and adds(2).
    { ISD::MUL, MVT::v2i64, 9 },
    { ISD::MUL, MVT::v4i64, 9 },
  };
  int Idx = CostTableLookup(CustomLowered, ISD, LT.second);
  if (Idx != -1)
    return LT.first * CustomLowered[Idx].Cost;

  // Special lowering of v4i32 mul on sse2/sse3: Lower v4i32 mul as 2x
  // shuffle, 2x pmuludq, 2x shuffle.
  if (ISD == ISD::MUL && LT.second == MVT::v4i32 && ST->hasSSE2() &&
      !ST->hasSSE41())
    return LT.first * 6;

  // Fallback to the default implementation.
  return TargetTransformInfo::getArithmeticInstrCost(Opcode, Ty, Op1Info,
                                                     Op2Info);
}
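// An alternate shuffle (SK_Alternate) picks alternating elements from its two
// source vectors, e.g. even lanes from the first and odd lanes from the
// second. In LLVM IR, one instance of the pattern is:
//
//   %r = shufflevector <4 x i32> %a, <4 x i32> %b,
//                      <4 x i32> <i32 0, i32 5, i32 2, i32 7>
//
// The table in getShuffleCost below reflects that such masks map onto cheap
// blend/shuffle sequences for most 128-bit types.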
unsigned X86TTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
                                Type *SubTp) const {
  // We only estimate the cost of reverse and alternate shuffles.
  if (Kind != SK_Reverse && Kind != SK_Alternate)
    return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);

  if (Kind == SK_Reverse) {
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Tp);
    unsigned Cost = 1;
    if (LT.second.getSizeInBits() > 128)
      Cost = 3; // Extract + insert + copy.

    // Multiply by the number of parts.
    return Cost * LT.first;
  }

  if (Kind == SK_Alternate) {
    // Alt shuffle cost table for X86. Cost is the number of instructions
    // required to create the shuffled vector.
    static const CostTblEntry<MVT::SimpleValueType> X86AltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},

      {ISD::VECTOR_SHUFFLE, MVT::v2i32, 2},
      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},

      {ISD::VECTOR_SHUFFLE, MVT::v4i16, 8},
      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 8},

      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 49}
    };

    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Tp);

    int Idx = CostTableLookup(X86AltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
    if (Idx == -1)
      return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);
    return LT.first * X86AltShuffleTbl[Idx].Cost;
  }

  return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);
}

unsigned X86TTI::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  std::pair<unsigned, MVT> LTSrc = TLI->getTypeLegalizationCost(Src);
  std::pair<unsigned, MVT> LTDest = TLI->getTypeLegalizationCost(Dst);

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  SSE2ConvTbl[] = {
    // These are somewhat magic numbers justified by looking at the output of
    // Intel's IACA, running some kernels and making sure when we take
    // legalization into account the throughput will be overestimated.
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    // There are faster sequences for float conversions.
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
  };

  if (ST->hasSSE2() && !ST->hasAVX()) {
    int Idx =
        ConvertCostTableLookup(SSE2ConvTbl, ISD, LTDest.second, LTSrc.second);
    if (Idx != -1)
      return LTSrc.first * SSE2ConvTbl[Idx].Cost;
  }
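  // A note on the UINT_TO_FP entries above and below: prior to AVX-512, x86
  // has no vector instruction that converts unsigned integers to floating
  // point directly, so unsigned conversions are emulated with bias/magic-
  // number sequences (or scalarized outright), which is why they tend to
  // cost more than their signed counterparts.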
  EVT SrcTy = TLI->getValueType(Src);
  EVT DstTy = TLI->getValueType(Dst);

  // The function getSimpleVT only handles simple value types.
  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVX2ConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  2 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  2 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  4 },
  };

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVXConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  4 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  4 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  4 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  5 },
    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 4 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  9 },

    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   8 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   8 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  5 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i8,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i16,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  1 },

    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   6 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   5 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  5 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  9 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   7 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  6 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   7 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  6 },
    // The generic code to compute the scalar overhead is currently broken.
    // Workaround this limitation by estimating the scalarization overhead
    // here. We have roughly 10 instructions per scalar element.
    // Multiply that by the vector width.
    // FIXME: remove that when PR19268 is fixed.
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  2*10 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  4*10 },

    { ISD::FP_TO_SINT,  MVT::v8i8,   MVT::v8f32,  7 },
    { ISD::FP_TO_SINT,  MVT::v4i8,   MVT::v4f32,  1 },
    // This node is expanded into scalarized operations but BasicTTI is
    // overly optimistic estimating its cost. It computes 3 per element (one
    // vector-extract, one scalar conversion and one vector-insert). The
    // problem is that the inserts form a read-modify-write chain so latency
    // should be factored in too. Inflating the cost per element by 1.
    { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f32,  8*4 },
    { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f64,  4*4 },
  };

  if (ST->hasAVX2()) {
    int Idx = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
                                     DstTy.getSimpleVT(), SrcTy.getSimpleVT());
    if (Idx != -1)
      return AVX2ConversionTbl[Idx].Cost;
  }

  if (ST->hasAVX()) {
    int Idx = ConvertCostTableLookup(AVXConversionTbl, ISD, DstTy.getSimpleVT(),
                                     SrcTy.getSimpleVT());
    if (Idx != -1)
      return AVXConversionTbl[Idx].Cost;
  }

  return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);
}

unsigned X86TTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                    Type *CondTy) const {
  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTbl[] = {
    { ISD::SETCC, MVT::v2f64, 1 },
    { ISD::SETCC, MVT::v4f32, 1 },
    { ISD::SETCC, MVT::v2i64, 1 },
    { ISD::SETCC, MVT::v4i32, 1 },
    { ISD::SETCC, MVT::v8i16, 1 },
    { ISD::SETCC, MVT::v16i8, 1 },
  };
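  // AVX1 provides 256-bit floating point compares but only 128-bit integer
  // ops, so a 256-bit integer compare is split into two 128-bit pcmp* halves
  // plus an extract and an insert - roughly four instructions, which is what
  // the table below charges.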
  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTbl[] = {
    { ISD::SETCC, MVT::v4f64,  1 },
    { ISD::SETCC, MVT::v8f32,  1 },
    // AVX1 does not support 8-wide integer compare.
    { ISD::SETCC, MVT::v4i64,  4 },
    { ISD::SETCC, MVT::v8i32,  4 },
    { ISD::SETCC, MVT::v16i16, 4 },
    { ISD::SETCC, MVT::v32i8,  4 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX2CostTbl[] = {
    { ISD::SETCC, MVT::v4i64,  1 },
    { ISD::SETCC, MVT::v8i32,  1 },
    { ISD::SETCC, MVT::v16i16, 1 },
    { ISD::SETCC, MVT::v32i8,  1 },
  };

  if (ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX2CostTbl[Idx].Cost;
  }

  if (ST->hasAVX()) {
    int Idx = CostTableLookup(AVX1CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX1CostTbl[Idx].Cost;
  }

  if (ST->hasSSE42()) {
    int Idx = CostTableLookup(SSE42CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * SSE42CostTbl[Idx].Cost;
  }

  return TargetTransformInfo::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}

unsigned X86TTI::getVectorInstrCost(unsigned Opcode, Type *Val,
                                    unsigned Index) const {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Index != -1U) {
    // Legalize the type.
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // Floating point scalars are already located in index #0.
    if (Val->getScalarType()->isFloatingPointTy() && Index == 0)
      return 0;
  }

  return TargetTransformInfo::getVectorInstrCost(Opcode, Val, Index);
}

unsigned X86TTI::getScalarizationOverhead(Type *Ty, bool Insert,
                                          bool Extract) const {
  assert(Ty->isVectorTy() && "Can only scalarize vectors");
  unsigned Cost = 0;

  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    if (Insert)
      Cost += TopTTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
    if (Extract)
      Cost += TopTTI->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }

  return Cost;
}
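// For example, a store of <5 x float> hits the generic non-power-of-two path
// in getMemoryOpCost below: five scalar stores plus the extractelement
// overhead computed by getScalarizationOverhead above.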
unsigned X86TTI::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                 unsigned AddressSpace) const {
  // Handle non-power-of-two vectors such as <3 x float>.
  if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
    unsigned NumElem = VTy->getVectorNumElements();

    // Handle a few common cases:
    // <3 x float>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
      // Cost = 64 bit store + extract + 32 bit store.
      return 3;

    // <3 x double>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
      // Cost = 128 bit store + unpack + 64 bit store.
      return 3;

    // Assume that all other non-power-of-two numbers are scalarized.
    if (!isPowerOf2_32(NumElem)) {
      unsigned Cost = TargetTransformInfo::getMemoryOpCost(Opcode,
                                                           VTy->getScalarType(),
                                                           Alignment,
                                                           AddressSpace);
      unsigned SplitCost = getScalarizationOverhead(Src,
                                                    Opcode == Instruction::Load,
                                                    Opcode == Instruction::Store);
      return NumElem * Cost + SplitCost;
    }
  }

  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  // Each load/store unit costs 1.
  unsigned Cost = LT.first * 1;

  // On Sandybridge 256bit load/stores are double pumped
  // (but not on Haswell).
  if (LT.second.getSizeInBits() > 128 && !ST->hasAVX2())
    Cost *= 2;

  return Cost;
}

unsigned X86TTI::getAddressComputationCost(Type *Ty, bool IsComplex) const {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where
  // the computation can more often be merged into the index mode. The
  // resulting extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  if (Ty->isVectorTy() && IsComplex)
    return NumVectorInstToHideOverhead;

  return TargetTransformInfo::getAddressComputationCost(Ty, IsComplex);
}
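// A horizontal reduction can be emitted in two shapes: the pairwise form adds
// adjacent lanes at each step (shuffle out the odd and even lanes, add, and
// repeat), while the non-pairwise "split" form repeatedly adds the high half
// of the vector to the low half. For <4 x i32> a+b+c+d, either shape needs
// roughly two shuffle+add rounds; the tables below price both.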
unsigned X86TTI::getReductionCost(unsigned Opcode, Type *ValTy,
                                  bool IsPairwise) const {

  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use that as the cost.

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD,  MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD,  MVT::v8i16, 5 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblPairWise[] = {
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::FADD, MVT::v4f64, 5 },
    { ISD::FADD, MVT::v8f32, 7 },
    { ISD::ADD,  MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD,  MVT::v4i64, 5 }, // The data reported by the IACA tool is "4.8".
    { ISD::ADD,  MVT::v8i16, 5 },
    { ISD::ADD,  MVT::v8i32, 5 },
  };

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD,  MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3".
    { ISD::ADD,  MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3".
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v4f32, 3 },
    { ISD::FADD, MVT::v4f64, 3 },
    { ISD::FADD, MVT::v8f32, 4 },
    { ISD::ADD,  MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "2.8".
    { ISD::ADD,  MVT::v4i64, 3 },
    { ISD::ADD,  MVT::v8i16, 4 },
    { ISD::ADD,  MVT::v8i32, 5 },
  };

  if (IsPairwise) {
    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVX1CostTblPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * AVX1CostTblPairWise[Idx].Cost;
    }

    if (ST->hasSSE42()) {
      int Idx = CostTableLookup(SSE42CostTblPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * SSE42CostTblPairWise[Idx].Cost;
    }
  } else {
    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * AVX1CostTblNoPairWise[Idx].Cost;
    }

    if (ST->hasSSE42()) {
      int Idx = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * SSE42CostTblNoPairWise[Idx].Cost;
    }
  }

  return TargetTransformInfo::getReductionCost(Opcode, ValTy, IsPairwise);
}

/// \brief Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
unsigned X86TTI::getIntImmCost(int64_t Val) const {
  if (Val == 0)
    return TCC_Free;

  if (isInt<32>(Val))
    return TCC_Basic;

  return 2 * TCC_Basic;
}

unsigned X86TTI::getIntImmCost(const APInt &Imm, Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Never hoist constants larger than 128 bits, because this might lead to
  // incorrect code generation or assertions in codegen.
  // FIXME: Create a cost model for types larger than i128 once the codegen
  // issues have been fixed.
  if (BitSize > 128)
    return TCC_Free;

  if (Imm == 0)
    return TCC_Free;

  // Sign-extend all constants to a multiple of 64 bits.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  unsigned Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1U, Cost);
}
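// Worked example for the APInt overload above: an i96 immediate is first
// sign-extended to 128 bits and then costed in two 64-bit chunks. Per the
// int64_t helper, a chunk representable as a sign-extended 32-bit value
// costs TCC_Basic, a wider chunk costs 2 * TCC_Basic, and an all-zero chunk
// is free.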
unsigned X86TTI::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                               Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return
  // TCC_Free here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default: return TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TCC_Basic;
    return TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    unsigned NumConstants = (BitSize + 63) / 64;
    unsigned Cost = X86TTI::getIntImmCost(Imm, Ty);
    return (Cost <= NumConstants * TCC_Basic)
               ? static_cast<unsigned>(TCC_Free)
               : Cost;
  }

  return X86TTI::getIntImmCost(Imm, Ty);
}

unsigned X86TTI::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
                               const APInt &Imm, Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return
  // TCC_Free here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TCC_Free;

  switch (IID) {
  default: return TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
      return TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TCC_Free;
    break;
  }
  return X86TTI::getIntImmCost(Imm, Ty);
}