//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86TargetMachine.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

#define DEBUG_TYPE "x86tti"

// Declare the pass initialization routine locally as target-specific passes
// don't have a target-wide initialization entry point, and so we rely on the
// pass constructor initialization.
namespace llvm {
void initializeX86TTIPass(PassRegistry &);
}

namespace {

class X86TTI final : public ImmutablePass, public TargetTransformInfo {
  const X86Subtarget *ST;
  const X86TargetLowering *TLI;

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the result needs to be inserted and/or extracted from vectors.
  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;

public:
  X86TTI() : ImmutablePass(ID), ST(nullptr), TLI(nullptr) {
    llvm_unreachable("This pass cannot be directly constructed");
  }

  X86TTI(const X86TargetMachine *TM)
      : ImmutablePass(ID), ST(TM->getSubtargetImpl()),
        TLI(TM->getSubtargetImpl()->getTargetLowering()) {
    initializeX86TTIPass(*PassRegistry::getPassRegistry());
  }

  void initializePass() override {
    pushTTIStack(this);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    TargetTransformInfo::getAnalysisUsage(AU);
  }

  /// Pass identification.
  static char ID;

  /// Provide necessary pointer adjustments for the two base classes.
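  /// X86TTI inherits from both ImmutablePass and TargetTransformInfo, so a
  /// plain 'this' is ambiguous to the analysis machinery; hand back the
  /// requested base-class sub-object instead.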
  void *getAdjustedAnalysisPointer(const void *ID) override {
    if (ID == &TargetTransformInfo::ID)
      return (TargetTransformInfo*)this;
    return this;
  }

  /// \name Scalar TTI Implementations
  /// @{
  PopcntSupportKind getPopcntSupport(unsigned TyWidth) const override;

  /// @}

  /// \name Vector TTI Implementations
  /// @{

  unsigned getNumberOfRegisters(bool Vector) const override;
  unsigned getRegisterBitWidth(bool Vector) const override;
  unsigned getMaxInterleaveFactor() const override;
  unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind,
                                  OperandValueKind, OperandValueProperties,
                                  OperandValueProperties) const override;
  unsigned getShuffleCost(ShuffleKind Kind, Type *Tp,
                          int Index, Type *SubTp) const override;
  unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
                            Type *Src) const override;
  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                              Type *CondTy) const override;
  unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
                              unsigned Index) const override;
  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                           unsigned AddressSpace) const override;

  unsigned getAddressComputationCost(Type *PtrTy,
                                     bool IsComplex) const override;

  unsigned getReductionCost(unsigned Opcode, Type *Ty,
                            bool IsPairwiseForm) const override;

  unsigned getIntImmCost(int64_t) const;

  unsigned getIntImmCost(const APInt &Imm, Type *Ty) const override;

  unsigned getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                         Type *Ty) const override;
  unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                         Type *Ty) const override;

  /// @}
};

} // end anonymous namespace

INITIALIZE_AG_PASS(X86TTI, TargetTransformInfo, "x86tti",
                   "X86 Target Transform Info", true, true, false)
char X86TTI::ID = 0;

ImmutablePass *
llvm::createX86TargetTransformInfoPass(const X86TargetMachine *TM) {
  return new X86TTI(TM);
}

//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//

X86TTI::PopcntSupportKind X86TTI::getPopcntSupport(unsigned TyWidth) const {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  //   instructions is inefficient. Once the problem is fixed, we should
  //   call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? PSK_FastHardware : PSK_Software;
}

unsigned X86TTI::getNumberOfRegisters(bool Vector) const {
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit()) {
    if (Vector && ST->hasAVX512())
      return 32;
    return 16;
  }
  return 8;
}

unsigned X86TTI::getRegisterBitWidth(bool Vector) const {
  if (Vector) {
    if (ST->hasAVX512()) return 512;
    if (ST->hasAVX()) return 256;
    if (ST->hasSSE1()) return 128;
    return 0;
  }

  if (ST->is64Bit())
    return 64;
  return 32;
}

unsigned X86TTI::getMaxInterleaveFactor() const {
  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
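  // The returned factor is the number of independent operation streams the
  // vectorizer should interleave to keep those ports busy.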
  if (ST->hasAVX())
    return 4;

  return 2;
}

unsigned X86TTI::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, OperandValueKind Op1Info,
    OperandValueKind Op2Info, OperandValueProperties Opd1PropInfo,
    OperandValueProperties Opd2PropInfo) const {
  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  if (ISD == ISD::SDIV &&
      Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    // On X86, vector signed division by a power-of-two constant is
    // normally expanded to the sequence SRA + SRL + ADD + SRA.
    // The OperandValue properties may not be the same as those of the
    // previous operation; conservatively assume OP_None.
    unsigned Cost =
        2 * getArithmeticInstrCost(Instruction::AShr, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::LShr, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Add, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);

    return Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType>
  AVX2UniformConstCostTable[] = {
    { ISD::SDIV, MVT::v16i16,  6 }, // vpmulhw sequence
    { ISD::UDIV, MVT::v16i16,  6 }, // vpmulhuw sequence
    { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
    { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2UniformConstCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2UniformConstCostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> AVX512CostTable[] = {
    { ISD::SHL,  MVT::v16i32, 1 },
    { ISD::SRL,  MVT::v16i32, 1 },
    { ISD::SRA,  MVT::v16i32, 1 },
    { ISD::SHL,  MVT::v8i64,  1 },
    { ISD::SRL,  MVT::v8i64,  1 },
    { ISD::SRA,  MVT::v8i64,  1 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX2CostTable[] = {
    // Shifts on v4i64/v8i32 are legal on AVX2 even though we declare them
    // as custom; the custom lowering only detects the cases where the shift
    // amount is a scalar splat.
    { ISD::SHL,  MVT::v4i32,  1 },
    { ISD::SRL,  MVT::v4i32,  1 },
    { ISD::SRA,  MVT::v4i32,  1 },
    { ISD::SHL,  MVT::v8i32,  1 },
    { ISD::SRL,  MVT::v8i32,  1 },
    { ISD::SRA,  MVT::v8i32,  1 },
    { ISD::SHL,  MVT::v2i64,  1 },
    { ISD::SRL,  MVT::v2i64,  1 },
    { ISD::SHL,  MVT::v4i64,  1 },
    { ISD::SRL,  MVT::v4i64,  1 },

    { ISD::SHL,  MVT::v32i8,  42 },    // cmpeqb sequence.
    { ISD::SHL,  MVT::v16i16, 16*10 }, // Scalarized.

    { ISD::SRL,  MVT::v32i8,  32*10 }, // Scalarized.
    { ISD::SRL,  MVT::v16i16, 8*10 },  // Scalarized.

    { ISD::SRA,  MVT::v32i8,  32*10 }, // Scalarized.
    { ISD::SRA,  MVT::v16i16, 16*10 }, // Scalarized.
    { ISD::SRA,  MVT::v4i64,  4*10 },  // Scalarized.

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV, MVT::v32i8,  32*20 },
    { ISD::SDIV, MVT::v16i16, 16*20 },
    { ISD::SDIV, MVT::v8i32,  8*20 },
    { ISD::SDIV, MVT::v4i64,  4*20 },
    { ISD::UDIV, MVT::v32i8,  32*20 },
    { ISD::UDIV, MVT::v16i16, 16*20 },
    { ISD::UDIV, MVT::v8i32,  8*20 },
    { ISD::UDIV, MVT::v4i64,  4*20 },
  };

  if (ST->hasAVX512()) {
    int Idx = CostTableLookup(AVX512CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX512CostTable[Idx].Cost;
  }

  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return LT.first;

    int Idx = CostTableLookup(AVX2CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2CostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType>
  SSE2UniformConstCostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // Constant splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i8,  1 }, // psllw.
    { ISD::SHL,  MVT::v8i16,  1 }, // psllw.
    { ISD::SHL,  MVT::v4i32,  1 }, // pslld.
    { ISD::SHL,  MVT::v2i64,  1 }, // psllq.

    { ISD::SRL,  MVT::v16i8,  1 }, // psrlw.
    { ISD::SRL,  MVT::v8i16,  1 }, // psrlw.
    { ISD::SRL,  MVT::v4i32,  1 }, // psrld.
    { ISD::SRL,  MVT::v2i64,  1 }, // psrlq.

    { ISD::SRA,  MVT::v16i8,  4 }, // psrlw, pand, pxor, psubb.
    { ISD::SRA,  MVT::v8i16,  1 }, // psraw.
    { ISD::SRA,  MVT::v4i32,  1 }, // psrad.

    { ISD::SDIV, MVT::v8i16,  6 }, // pmulhw sequence
    { ISD::UDIV, MVT::v8i16,  6 }, // pmulhuw sequence
    { ISD::SDIV, MVT::v4i32, 19 }, // pmuludq sequence
    { ISD::UDIV, MVT::v4i32, 15 }, // pmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2()) {
    // pmuldq sequence.
    if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 15;

    int Idx = CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * SSE2UniformConstCostTable[Idx].Cost;
  }

  if (ISD == ISD::SHL &&
      Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
    EVT VT = LT.second;
    if ((VT == MVT::v8i16 && ST->hasSSE2()) ||
        (VT == MVT::v4i32 && ST->hasSSE41()))
      // A vector shift left by a non-uniform constant can be lowered
      // into a vector multiply (pmullw/pmulld).
      return LT.first;
    if (VT == MVT::v4i32 && ST->hasSSE2())
      // A vector shift left by a non-uniform constant is converted
      // into a vector multiply; the new multiply is eventually
      // lowered into a sequence of shuffles and 2 x pmuludq.
      ISD = ISD::MUL;
  }

  static const CostTblEntry<MVT::SimpleValueType> SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // For some cases where the shift amount is a scalar, we would be able
    // to generate better code. Unfortunately, when this is the case the value
    // (the splat) will get hoisted out of the loop, thereby making it invisible
    // to ISel. The cost model must return worst-case assumptions because it is
    // used for vectorization and we don't want to make vectorized code worse
    // than scalar code.
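    // In the "Scalarized." entries below the cost is modeled as
    // (number of lanes) * (roughly 10 instructions per lane).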
    { ISD::SHL,  MVT::v16i8,  30 },    // cmpeqb sequence.
    { ISD::SHL,  MVT::v8i16,  8*10 },  // Scalarized.
    { ISD::SHL,  MVT::v4i32,  2*5 },   // We optimized this using mul.
    { ISD::SHL,  MVT::v2i64,  2*10 },  // Scalarized.
    { ISD::SHL,  MVT::v4i64,  4*10 },  // Scalarized.

    { ISD::SRL,  MVT::v16i8,  16*10 }, // Scalarized.
    { ISD::SRL,  MVT::v8i16,  8*10 },  // Scalarized.
    { ISD::SRL,  MVT::v4i32,  4*10 },  // Scalarized.
    { ISD::SRL,  MVT::v2i64,  2*10 },  // Scalarized.

    { ISD::SRA,  MVT::v16i8,  16*10 }, // Scalarized.
    { ISD::SRA,  MVT::v8i16,  8*10 },  // Scalarized.
    { ISD::SRA,  MVT::v4i32,  4*10 },  // Scalarized.
    { ISD::SRA,  MVT::v2i64,  2*10 },  // Scalarized.

    // It is not a good idea to vectorize division. We have to scalarize it and
    // in the process we will often end up having to spill regular registers.
    // The overhead of division is going to dominate most kernels anyway, so
    // try hard to prevent vectorization of division - it is generally a bad
    // idea. Assume somewhat arbitrarily that we have to be able to hide "20
    // cycles" for each lane.
    { ISD::SDIV, MVT::v16i8,  16*20 },
    { ISD::SDIV, MVT::v8i16,  8*20 },
    { ISD::SDIV, MVT::v4i32,  4*20 },
    { ISD::SDIV, MVT::v2i64,  2*20 },
    { ISD::UDIV, MVT::v16i8,  16*20 },
    { ISD::UDIV, MVT::v8i16,  8*20 },
    { ISD::UDIV, MVT::v4i32,  4*20 },
    { ISD::UDIV, MVT::v2i64,  2*20 },
  };

  if (ST->hasSSE2()) {
    int Idx = CostTableLookup(SSE2CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * SSE2CostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL,  MVT::v16i16, 4 },
    { ISD::MUL,  MVT::v8i32,  4 },
    { ISD::SUB,  MVT::v8i32,  4 },
    { ISD::ADD,  MVT::v8i32,  4 },
    { ISD::SUB,  MVT::v4i64,  4 },
    { ISD::ADD,  MVT::v4i64,  4 },
    // A v4i64 multiply is custom lowered as two split v2i64 vectors that then
    // are lowered as a series of long multiplies(3), shifts(4) and adds(2).
    // Because we believe v4i64 to be a legal type, we must also include the
    // split factor of two in the cost table. Therefore, the cost here is 18
    // instead of 9.
    { ISD::MUL,  MVT::v4i64,  18 },
  };

  // Look for AVX1 lowering tricks.
  if (ST->hasAVX() && !ST->hasAVX2()) {
    EVT VT = LT.second;

    // v16i16 and v8i32 shifts by non-uniform constants are lowered into a
    // sequence of extract + two vector multiply + insert.
    if (ISD == ISD::SHL && (VT == MVT::v8i32 || VT == MVT::v16i16) &&
        Op2Info == TargetTransformInfo::OK_NonUniformConstantValue)
      ISD = ISD::MUL;

    int Idx = CostTableLookup(AVX1CostTable, ISD, VT);
    if (Idx != -1)
      return LT.first * AVX1CostTable[Idx].Cost;
  }

  // Custom lowering of vectors.
  static const CostTblEntry<MVT::SimpleValueType> CustomLowered[] = {
    // A v2i64/v4i64 multiply is custom lowered as a series of long
    // multiplies(3), shifts(4) and adds(2).
    { ISD::MUL, MVT::v2i64, 9 },
    { ISD::MUL, MVT::v4i64, 9 },
  };
  int Idx = CostTableLookup(CustomLowered, ISD, LT.second);
  if (Idx != -1)
    return LT.first * CustomLowered[Idx].Cost;

  // Special lowering of v4i32 mul on sse2, sse3: Lower v4i32 mul as 2x shuffle,
  // 2x pmuludq, 2x shuffle.
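  // That is six instructions in total, so charge 6 for every legalized
  // vector part.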
  if (ISD == ISD::MUL && LT.second == MVT::v4i32 && ST->hasSSE2() &&
      !ST->hasSSE41())
    return LT.first * 6;

  // Fallback to the default implementation.
  return TargetTransformInfo::getArithmeticInstrCost(Opcode, Ty, Op1Info,
                                                     Op2Info);
}

unsigned X86TTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
                                Type *SubTp) const {
  // We only estimate the cost of reverse and alternate shuffles.
  if (Kind != SK_Reverse && Kind != SK_Alternate)
    return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);

  if (Kind == SK_Reverse) {
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Tp);
    unsigned Cost = 1;
    if (LT.second.getSizeInBits() > 128)
      Cost = 3; // Extract + insert + copy.

    // Multiply by the number of parts.
    return Cost * LT.first;
  }

  if (Kind == SK_Alternate) {
    // 64-bit packed float vectors (v2f32) are widened to type v4f32.
    // 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Tp);

    // The backend knows how to generate a single VEX.256 version of
    // instruction VPBLENDW if the target supports AVX2.
    if (ST->hasAVX2() && LT.second == MVT::v16i16)
      return LT.first;

    static const CostTblEntry<MVT::SimpleValueType> AVXAltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v4i64, 1},  // vblendpd
      {ISD::VECTOR_SHUFFLE, MVT::v4f64, 1},  // vblendpd

      {ISD::VECTOR_SHUFFLE, MVT::v8i32, 1},  // vblendps
      {ISD::VECTOR_SHUFFLE, MVT::v8f32, 1},  // vblendps

      // This shuffle is custom lowered into a sequence of:
      //   2x vextractf128, 2x vpblendw, 1x vinsertf128
      {ISD::VECTOR_SHUFFLE, MVT::v16i16, 5},

      // This shuffle is custom lowered into a long sequence of:
      //   2x vextractf128, 4x vpshufb, 2x vpor, 1x vinsertf128
      {ISD::VECTOR_SHUFFLE, MVT::v32i8, 9}
    };

    if (ST->hasAVX()) {
      int Idx =
          CostTableLookup(AVXAltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
      if (Idx != -1)
        return LT.first * AVXAltShuffleTbl[Idx].Cost;
    }

    static const CostTblEntry<MVT::SimpleValueType> SSE41AltShuffleTbl[] = {
      // These are lowered into movsd.
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},

      // Packed float vectors with four elements are lowered into BLENDI dag
      // nodes. A v4i32/v4f32 BLENDI generates a single 'blendps'/'blendpd'.
      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},

      // This shuffle generates a single pshufw.
      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},

      // There is no instruction that matches a v16i8 alternate shuffle.
      // The backend will expand it into the sequence 'pshufb + pshufb + or'.
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3}
    };

    if (ST->hasSSE41()) {
      int Idx =
          CostTableLookup(SSE41AltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
      if (Idx != -1)
        return LT.first * SSE41AltShuffleTbl[Idx].Cost;
    }

    static const CostTblEntry<MVT::SimpleValueType> SSSE3AltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1}, // movsd
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1}, // movsd

      // SSE3 doesn't have 'blendps'. The following shuffles are expanded into
      // the sequence 'shufps + pshufd'.
      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},

      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 3}, // pshufb + pshufb + or
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3}  // pshufb + pshufb + or
    };

    if (ST->hasSSSE3()) {
      int Idx =
          CostTableLookup(SSSE3AltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
      if (Idx != -1)
        return LT.first * SSSE3AltShuffleTbl[Idx].Cost;
    }

    static const CostTblEntry<MVT::SimpleValueType> SSEAltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1}, // movsd
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1}, // movsd

      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2}, // shufps + pshufd
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2}, // shufps + pshufd

      // This is expanded into a long sequence of four extract + four insert.
      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 8}, // 4 x pextrw + 4 pinsrw.

      // 8 x (pinsrw + pextrw + and + movb + movzb + or)
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 48}
    };

    // Fall-back (SSE3 and SSE2).
    int Idx = CostTableLookup(SSEAltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
    if (Idx != -1)
      return LT.first * SSEAltShuffleTbl[Idx].Cost;
    return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);
  }

  return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);
}

unsigned X86TTI::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  std::pair<unsigned, MVT> LTSrc = TLI->getTypeLegalizationCost(Src);
  std::pair<unsigned, MVT> LTDest = TLI->getTypeLegalizationCost(Dst);

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  SSE2ConvTbl[] = {
    // These are somewhat magic numbers justified by looking at the output of
    // Intel's IACA, running some kernels and making sure when we take
    // legalization into account the throughput will be overestimated.
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    // There are faster sequences for float conversions.
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
  };

  if (ST->hasSSE2() && !ST->hasAVX()) {
    int Idx =
        ConvertCostTableLookup(SSE2ConvTbl, ISD, LTDest.second, LTSrc.second);
    if (Idx != -1)
      return LTSrc.first * SSE2ConvTbl[Idx].Cost;
  }

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVX512ConversionTbl[] = {
    { ISD::FP_EXTEND, MVT::v8f64,  MVT::v8f32,  1 },
    { ISD::FP_EXTEND, MVT::v8f64,  MVT::v16f32, 3 },
    { ISD::FP_ROUND,  MVT::v8f32,  MVT::v8f64,  1 },
    { ISD::FP_ROUND,  MVT::v16f32, MVT::v8f64,  3 },

    { ISD::TRUNCATE,  MVT::v16i8,  MVT::v16i32, 1 },
    { ISD::TRUNCATE,  MVT::v16i16, MVT::v16i32, 1 },
    { ISD::TRUNCATE,  MVT::v8i16,  MVT::v8i64,  1 },
    { ISD::TRUNCATE,  MVT::v8i32,  MVT::v8i64,  1 },
    { ISD::TRUNCATE,  MVT::v16i32, MVT::v8i64,  4 },

    // v16i1 -> v16i32 - load + broadcast
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1,  2 },

    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v16i32, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v16i32, 3 },
  };

  if (ST->hasAVX512()) {
    int Idx = ConvertCostTableLookup(AVX512ConversionTbl, ISD, LTDest.second,
                                     LTSrc.second);
    if (Idx != -1)
      return AVX512ConversionTbl[Idx].Cost;
  }

  EVT SrcTy = TLI->getValueType(Src);
  EVT DstTy = TLI->getValueType(Dst);

  // The function getSimpleVT only handles simple value types.
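  // Bail out to the generic implementation if either type is not simple.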
  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVX2ConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  2 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  2 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  4 },

    { ISD::FP_EXTEND,   MVT::v8f64,  MVT::v8f32,  3 },
    { ISD::FP_ROUND,    MVT::v8f32,  MVT::v8f64,  3 },
  };

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVXConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  4 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  4 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  4 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  5 },
    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 4 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  9 },

    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   8 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   8 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  5 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i8,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i16,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  1 },

    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   6 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   5 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  5 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  9 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   7 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  6 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   7 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  6 },
    // The generic code to compute the scalar overhead is currently broken.
    // Workaround this limitation by estimating the scalarization overhead
    // here. We have roughly 10 instructions per scalar element.
    // Multiply that by the vector width.
    // FIXME: remove that when PR19268 is fixed.
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  2*10 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  4*10 },

    { ISD::FP_TO_SINT,  MVT::v8i8,   MVT::v8f32,  7 },
    { ISD::FP_TO_SINT,  MVT::v4i8,   MVT::v4f32,  1 },
    // This node is expanded into scalarized operations but BasicTTI is
    // overly optimistic in estimating its cost. It computes 3 per element
    // (one vector-extract, one scalar conversion and one vector-insert).
    // The problem is that the inserts form a read-modify-write chain, so
    // latency should be factored in too. Inflate the cost per element by 1.
    { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f32,  8*4 },
    { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f64,  4*4 },
  };

  if (ST->hasAVX2()) {
    int Idx = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
                                     DstTy.getSimpleVT(), SrcTy.getSimpleVT());
    if (Idx != -1)
      return AVX2ConversionTbl[Idx].Cost;
  }

  if (ST->hasAVX()) {
    int Idx = ConvertCostTableLookup(AVXConversionTbl, ISD, DstTy.getSimpleVT(),
                                     SrcTy.getSimpleVT());
    if (Idx != -1)
      return AVXConversionTbl[Idx].Cost;
  }

  return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);
}

unsigned X86TTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                    Type *CondTy) const {
  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTbl[] = {
    { ISD::SETCC, MVT::v2f64, 1 },
    { ISD::SETCC, MVT::v4f32, 1 },
    { ISD::SETCC, MVT::v2i64, 1 },
    { ISD::SETCC, MVT::v4i32, 1 },
    { ISD::SETCC, MVT::v8i16, 1 },
    { ISD::SETCC, MVT::v16i8, 1 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTbl[] = {
    { ISD::SETCC, MVT::v4f64, 1 },
    { ISD::SETCC, MVT::v8f32, 1 },
    // AVX1 does not support 8-wide integer compare.
    { ISD::SETCC, MVT::v4i64,  4 },
    { ISD::SETCC, MVT::v8i32,  4 },
    { ISD::SETCC, MVT::v16i16, 4 },
    { ISD::SETCC, MVT::v32i8,  4 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX2CostTbl[] = {
    { ISD::SETCC, MVT::v4i64,  1 },
    { ISD::SETCC, MVT::v8i32,  1 },
    { ISD::SETCC, MVT::v16i16, 1 },
    { ISD::SETCC, MVT::v32i8,  1 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX512CostTbl[] = {
    { ISD::SETCC, MVT::v8i64,  1 },
    { ISD::SETCC, MVT::v16i32, 1 },
    { ISD::SETCC, MVT::v8f64,  1 },
    { ISD::SETCC, MVT::v16f32, 1 },
  };

  if (ST->hasAVX512()) {
    int Idx = CostTableLookup(AVX512CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX512CostTbl[Idx].Cost;
  }

  if (ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX2CostTbl[Idx].Cost;
  }

  if (ST->hasAVX()) {
    int Idx = CostTableLookup(AVX1CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX1CostTbl[Idx].Cost;
  }

  if (ST->hasSSE42()) {
    int Idx = CostTableLookup(SSE42CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * SSE42CostTbl[Idx].Cost;
  }

  return TargetTransformInfo::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}

unsigned X86TTI::getVectorInstrCost(unsigned Opcode, Type *Val,
                                    unsigned Index) const {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Index != -1U) {
    // Legalize the type.
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // Floating point scalars are already located in index #0.
    if (Val->getScalarType()->isFloatingPointTy() && Index == 0)
      return 0;
  }

  return TargetTransformInfo::getVectorInstrCost(Opcode, Val, Index);
}

unsigned X86TTI::getScalarizationOverhead(Type *Ty, bool Insert,
                                          bool Extract) const {
  assert(Ty->isVectorTy() && "Can only scalarize vectors");
  unsigned Cost = 0;

  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    if (Insert)
      Cost += TopTTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
    if (Extract)
      Cost += TopTTI->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }

  return Cost;
}

unsigned X86TTI::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                 unsigned AddressSpace) const {
  // Handle non-power-of-two vectors such as <3 x float>.
  if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
    unsigned NumElem = VTy->getVectorNumElements();

    // Handle a few common cases:
    // <3 x float>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
      // Cost = 64 bit store + extract + 32 bit store.
      return 3;

    // <3 x double>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
      // Cost = 128 bit store + unpack + 64 bit store.
      return 3;

    // Assume that all other non-power-of-two numbers are scalarized.
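    // Charge one scalar memory op per element, plus the cost of inserting
    // the loaded scalars into a vector (for loads) or extracting them out
    // of one (for stores).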
    if (!isPowerOf2_32(NumElem)) {
      unsigned Cost = TargetTransformInfo::getMemoryOpCost(
          Opcode, VTy->getScalarType(), Alignment, AddressSpace);
      unsigned SplitCost =
          getScalarizationOverhead(Src, Opcode == Instruction::Load,
                                   Opcode == Instruction::Store);
      return NumElem * Cost + SplitCost;
    }
  }

  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  // Each load/store unit costs 1.
  unsigned Cost = LT.first * 1;

  // On Sandybridge 256bit load/stores are double pumped
  // (but not on Haswell).
  if (LT.second.getSizeInBits() > 128 && !ST->hasAVX2())
    Cost *= 2;

  return Cost;
}

unsigned X86TTI::getAddressComputationCost(Type *Ty, bool IsComplex) const {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where
  // the computation can more often be merged into the index mode. The
  // resulting extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  if (Ty->isVectorTy() && IsComplex)
    return NumVectorInstToHideOverhead;

  return TargetTransformInfo::getAddressComputationCost(Ty, IsComplex);
}

unsigned X86TTI::getReductionCost(unsigned Opcode, Type *ValTy,
                                  bool IsPairwise) const {
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use it as the cost.

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD,  MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD,  MVT::v8i16, 5 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblPairWise[] = {
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::FADD, MVT::v4f64, 5 },
    { ISD::FADD, MVT::v8f32, 7 },
    { ISD::ADD,  MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD,  MVT::v4i64, 5 }, // The data reported by the IACA tool is "4.8".
    { ISD::ADD,  MVT::v8i16, 5 },
    { ISD::ADD,  MVT::v8i32, 5 },
  };

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD,  MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3".
    { ISD::ADD,  MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3".
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v4f32, 3 },
    { ISD::FADD, MVT::v4f64, 3 },
    { ISD::FADD, MVT::v8f32, 4 },
    { ISD::ADD,  MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "2.8".
    { ISD::ADD,  MVT::v4i64, 3 },
    { ISD::ADD,  MVT::v8i16, 4 },
    { ISD::ADD,  MVT::v8i32, 5 },
  };

  if (IsPairwise) {
    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVX1CostTblPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * AVX1CostTblPairWise[Idx].Cost;
    }

    if (ST->hasSSE42()) {
      int Idx = CostTableLookup(SSE42CostTblPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * SSE42CostTblPairWise[Idx].Cost;
    }
  } else {
    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * AVX1CostTblNoPairWise[Idx].Cost;
    }

    if (ST->hasSSE42()) {
      int Idx = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * SSE42CostTblNoPairWise[Idx].Cost;
    }
  }

  return TargetTransformInfo::getReductionCost(Opcode, ValTy, IsPairwise);
}

/// \brief Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
unsigned X86TTI::getIntImmCost(int64_t Val) const {
  if (Val == 0)
    return TCC_Free;

  if (isInt<32>(Val))
    return TCC_Basic;

  return 2 * TCC_Basic;
}

unsigned X86TTI::getIntImmCost(const APInt &Imm, Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Never hoist constants larger than 128bit, because this might lead to
  // incorrect code generation or assertions in codegen.
  // FIXME: Create a cost model for types larger than i128 once the codegen
  // issues have been fixed.
  if (BitSize > 128)
    return TCC_Free;

  if (Imm == 0)
    return TCC_Free;

  // Sign-extend all constants to a multiple of 64-bit.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  unsigned Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1U, Cost);
}

unsigned X86TTI::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                               Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default: return TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
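    // Returning a non-free cost here makes the hoister treat the base
    // address as worth keeping in a register across uses.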
    if (Idx == 0)
      return 2 * TCC_Basic;
    return TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    unsigned NumConstants = (BitSize + 63) / 64;
    unsigned Cost = X86TTI::getIntImmCost(Imm, Ty);
    return (Cost <= NumConstants * TCC_Basic)
               ? static_cast<unsigned>(TCC_Free)
               : Cost;
  }

  return X86TTI::getIntImmCost(Imm, Ty);
}

unsigned X86TTI::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
                               const APInt &Imm, Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TCC_Free;

  switch (IID) {
  default: return TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
      return TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TCC_Free;
    break;
  }
  return X86TTI::getIntImmCost(Imm, Ty);
}