//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#include "X86TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"

using namespace llvm;

#define DEBUG_TYPE "x86tti"

//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  //   instructions is inefficient. Once the problem is fixed, we should
  //   call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
}

unsigned X86TTIImpl::getNumberOfRegisters(bool Vector) {
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit()) {
    if (Vector && ST->hasAVX512())
      return 32;
    return 16;
  }
  return 8;
}

unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) {
  if (Vector) {
    if (ST->hasAVX512()) return 512;
    if (ST->hasAVX()) return 256;
    if (ST->hasSSE1()) return 128;
    return 0;
  }

  if (ST->is64Bit())
    return 64;

  return 32;
}

unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // If the loop will not be vectorized, don't interleave the loop.
  // Let the regular unroller unroll the loop instead, which saves the
  // overflow-check and memory-check cost.
  if (VF == 1)
    return 1;

  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}

int X86TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  if (ISD == ISD::SDIV &&
      Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    // On X86, vector signed division by a power-of-two constant is
    // normally expanded to the sequence SRA + SRL + ADD + SRA.
    // The OperandValue properties may not be the same as those of the
    // previous operation; conservatively assume OP_None.
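    // For example, a uniform sdiv by a power of two on <4 x i32> is modeled
    // below as 2 * cost(ashr) + cost(lshr) + cost(add) on the legalized type.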
    int Cost = 2 * getArithmeticInstrCost(Instruction::AShr, Ty, Op1Info,
                                          Op2Info, TargetTransformInfo::OP_None,
                                          TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::LShr, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Add, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);

    return Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType>
  AVX2UniformConstCostTable[] = {
    { ISD::SRA,  MVT::v4i64,   4 }, // 2 x psrad + shuffle.

    { ISD::SDIV, MVT::v16i16,  6 }, // vpmulhw sequence
    { ISD::UDIV, MVT::v16i16,  6 }, // vpmulhuw sequence
    { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
    { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2UniformConstCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2UniformConstCostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> AVX512CostTable[] = {
    { ISD::SHL,  MVT::v16i32,  1 },
    { ISD::SRL,  MVT::v16i32,  1 },
    { ISD::SRA,  MVT::v16i32,  1 },
    { ISD::SHL,  MVT::v8i64,   1 },
    { ISD::SRL,  MVT::v8i64,   1 },
    { ISD::SRA,  MVT::v8i64,   1 },
  };

  if (ST->hasAVX512()) {
    int Idx = CostTableLookup(AVX512CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX512CostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> AVX2CostTable[] = {
    // Shifts on v4i64/v8i32 are legal on AVX2 even though we mark them as
    // custom so we can detect the cases where the shift amount is a scalar.
    { ISD::SHL,  MVT::v4i32,  1 },
    { ISD::SRL,  MVT::v4i32,  1 },
    { ISD::SRA,  MVT::v4i32,  1 },
    { ISD::SHL,  MVT::v8i32,  1 },
    { ISD::SRL,  MVT::v8i32,  1 },
    { ISD::SRA,  MVT::v8i32,  1 },
    { ISD::SHL,  MVT::v2i64,  1 },
    { ISD::SRL,  MVT::v2i64,  1 },
    { ISD::SHL,  MVT::v4i64,  1 },
    { ISD::SRL,  MVT::v4i64,  1 },
  };

  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return LT.first;

    int Idx = CostTableLookup(AVX2CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2CostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> XOPCostTable[] = {
    // 128-bit shifts take 1 cycle, but right shifts require negating the
    // shift amount beforehand.
    { ISD::SHL,  MVT::v16i8,  1 },
    { ISD::SRL,  MVT::v16i8,  2 },
    { ISD::SRA,  MVT::v16i8,  2 },
    { ISD::SHL,  MVT::v8i16,  1 },
    { ISD::SRL,  MVT::v8i16,  2 },
    { ISD::SRA,  MVT::v8i16,  2 },
    { ISD::SHL,  MVT::v4i32,  1 },
    { ISD::SRL,  MVT::v4i32,  2 },
    { ISD::SRA,  MVT::v4i32,  2 },
    { ISD::SHL,  MVT::v2i64,  1 },
    { ISD::SRL,  MVT::v2i64,  2 },
    { ISD::SRA,  MVT::v2i64,  2 },
    // 256-bit shifts require splitting if AVX2 didn't catch them above.
    { ISD::SHL,  MVT::v32i8,   2 },
    { ISD::SRL,  MVT::v32i8,   4 },
    { ISD::SRA,  MVT::v32i8,   4 },
    { ISD::SHL,  MVT::v16i16,  2 },
    { ISD::SRL,  MVT::v16i16,  4 },
    { ISD::SRA,  MVT::v16i16,  4 },
    { ISD::SHL,  MVT::v8i32,   2 },
    { ISD::SRL,  MVT::v8i32,   4 },
    { ISD::SRA,  MVT::v8i32,   4 },
    { ISD::SHL,  MVT::v4i64,   2 },
    { ISD::SRL,  MVT::v4i64,   4 },
    { ISD::SRA,  MVT::v4i64,   4 },
  };

  // Look for XOP lowering tricks.
  if (ST->hasXOP()) {
    int Idx = CostTableLookup(XOPCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * XOPCostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> AVX2CustomCostTable[] = {
    { ISD::SHL,  MVT::v32i8,     11 }, // vpblendvb sequence.
    { ISD::SHL,  MVT::v16i16,    10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRL,  MVT::v32i8,     11 }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v16i16,    10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRA,  MVT::v32i8,     24 }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v16i16,    10 }, // extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v2i64,      4 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,      4 }, // srl/xor/sub sequence.

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV, MVT::v32i8,  32*20 },
    { ISD::SDIV, MVT::v16i16, 16*20 },
    { ISD::SDIV, MVT::v8i32,   8*20 },
    { ISD::SDIV, MVT::v4i64,   4*20 },
    { ISD::UDIV, MVT::v32i8,  32*20 },
    { ISD::UDIV, MVT::v16i16, 16*20 },
    { ISD::UDIV, MVT::v8i32,   8*20 },
    { ISD::UDIV, MVT::v4i64,   4*20 },
  };

  // Look for AVX2 lowering tricks for custom cases.
  if (ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2CustomCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2CustomCostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType>
  SSE2UniformConstCostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // Constant splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i8,  1 }, // psllw.
    { ISD::SHL,  MVT::v32i8,  2 }, // psllw.
    { ISD::SHL,  MVT::v8i16,  1 }, // psllw.
    { ISD::SHL,  MVT::v16i16, 2 }, // psllw.
    { ISD::SHL,  MVT::v4i32,  1 }, // pslld.
    { ISD::SHL,  MVT::v8i32,  2 }, // pslld.
    { ISD::SHL,  MVT::v2i64,  1 }, // psllq.
    { ISD::SHL,  MVT::v4i64,  2 }, // psllq.

    { ISD::SRL,  MVT::v16i8,  1 }, // psrlw.
    { ISD::SRL,  MVT::v32i8,  2 }, // psrlw.
    { ISD::SRL,  MVT::v8i16,  1 }, // psrlw.
    { ISD::SRL,  MVT::v16i16, 2 }, // psrlw.
    { ISD::SRL,  MVT::v4i32,  1 }, // psrld.
    { ISD::SRL,  MVT::v8i32,  2 }, // psrld.
    { ISD::SRL,  MVT::v2i64,  1 }, // psrlq.
    { ISD::SRL,  MVT::v4i64,  2 }, // psrlq.

    { ISD::SRA,  MVT::v16i8,  4 }, // psrlw, pand, pxor, psubb.
    { ISD::SRA,  MVT::v32i8,  8 }, // psrlw, pand, pxor, psubb.
    { ISD::SRA,  MVT::v8i16,  1 }, // psraw.
    { ISD::SRA,  MVT::v16i16, 2 }, // psraw.
    { ISD::SRA,  MVT::v4i32,  1 }, // psrad.
    { ISD::SRA,  MVT::v8i32,  2 }, // psrad.
    { ISD::SRA,  MVT::v2i64,  4 }, // 2 x psrad + shuffle.
    { ISD::SRA,  MVT::v4i64,  8 }, // 2 x psrad + shuffle.

    { ISD::SDIV, MVT::v8i16,  6 }, // pmulhw sequence
    { ISD::UDIV, MVT::v8i16,  6 }, // pmulhuw sequence
    { ISD::SDIV, MVT::v4i32, 19 }, // pmuludq sequence
    { ISD::UDIV, MVT::v4i32, 15 }, // pmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2()) {
    // pmuldq sequence.
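    // With SSE4.1, signed v4i32 division by a uniform constant can use pmuldq
    // rather than the pmuludq-based sequence in the table above, so it is
    // modeled as slightly cheaper (15 vs. 19).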
    if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 15;

    int Idx = CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * SSE2UniformConstCostTable[Idx].Cost;
  }

  if (ISD == ISD::SHL &&
      Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
    EVT VT = LT.second;
    // A vector shift left by a non-uniform constant can be lowered into a
    // vector multiply (pmullw/pmulld).
    if ((VT == MVT::v8i16 && ST->hasSSE2()) ||
        (VT == MVT::v4i32 && ST->hasSSE41()))
      return LT.first;

    // v16i16 and v8i32 shifts by non-uniform constants are lowered into a
    // sequence of extract + two vector multiply + insert.
    if ((VT == MVT::v8i32 || VT == MVT::v16i16) &&
        (ST->hasAVX() && !ST->hasAVX2()))
      ISD = ISD::MUL;

    // A vector shift left by a non-uniform constant is converted into a
    // vector multiply; the new multiply is eventually lowered into a
    // sequence of shuffles and 2 x pmuludq.
    if (VT == MVT::v4i32 && ST->hasSSE2())
      ISD = ISD::MUL;
  }

  static const CostTblEntry<MVT::SimpleValueType> SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // For some cases, where the shift amount is a scalar, we would be able
    // to generate better code. Unfortunately, when this is the case the value
    // (the splat) will get hoisted out of the loop, thereby making it invisible
    // to ISel. The cost model must return worst case assumptions because it is
    // used for vectorization and we don't want to make vectorized code worse
    // than scalar code.
    { ISD::SHL,  MVT::v16i8,    26 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v32i8,  2*26 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v8i16,    32 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v16i16, 2*32 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v4i32,   2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v8i32, 2*2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v2i64,     4 }, // splat+shuffle sequence.
    { ISD::SHL,  MVT::v4i64,   2*4 }, // splat+shuffle sequence.

    { ISD::SRL,  MVT::v16i8,    26 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v32i8,  2*26 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v8i16,    32 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v16i16, 2*32 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v4i32,    16 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v8i32,  2*16 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v2i64,     4 }, // splat+shuffle sequence.
    { ISD::SRL,  MVT::v4i64,   2*4 }, // splat+shuffle sequence.

    { ISD::SRA,  MVT::v16i8,    54 }, // unpacked cmpgtb sequence.
    { ISD::SRA,  MVT::v32i8,  2*54 }, // unpacked cmpgtb sequence.
    { ISD::SRA,  MVT::v8i16,    32 }, // cmpgtb sequence.
    { ISD::SRA,  MVT::v16i16, 2*32 }, // cmpgtb sequence.
    { ISD::SRA,  MVT::v4i32,    16 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v8i32,  2*16 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v2i64,    12 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,  2*12 }, // srl/xor/sub sequence.

    // It is not a good idea to vectorize division. We have to scalarize it and
    // in the process we will often end up having to spill regular registers.
    // The overhead of division is going to dominate most kernels anyway, so
    // try hard to prevent vectorization of division - it is
    // generally a bad idea.
    // Assume somewhat arbitrarily that we have to be able to hide "20 cycles"
    // for each lane.
    { ISD::SDIV, MVT::v16i8, 16*20 },
    { ISD::SDIV, MVT::v8i16,  8*20 },
    { ISD::SDIV, MVT::v4i32,  4*20 },
    { ISD::SDIV, MVT::v2i64,  2*20 },
    { ISD::UDIV, MVT::v16i8, 16*20 },
    { ISD::UDIV, MVT::v8i16,  8*20 },
    { ISD::UDIV, MVT::v4i32,  4*20 },
    { ISD::UDIV, MVT::v2i64,  2*20 },
  };

  if (ST->hasSSE2()) {
    int Idx = CostTableLookup(SSE2CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * SSE2CostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL,  MVT::v16i16, 4 },
    { ISD::MUL,  MVT::v8i32,  4 },
    { ISD::SUB,  MVT::v8i32,  4 },
    { ISD::ADD,  MVT::v8i32,  4 },
    { ISD::SUB,  MVT::v4i64,  4 },
    { ISD::ADD,  MVT::v4i64,  4 },
    // A v4i64 multiply is custom lowered as two split v2i64 vectors that are
    // then lowered as a series of long multiplies (3), shifts (4) and adds (2).
    // Because we believe v4i64 to be a legal type, we must also include the
    // split factor of two in the cost table. Therefore, the cost here is 18
    // instead of 9.
    { ISD::MUL,  MVT::v4i64, 18 },
  };

  // Look for AVX1 lowering tricks.
  if (ST->hasAVX() && !ST->hasAVX2()) {
    EVT VT = LT.second;

    int Idx = CostTableLookup(AVX1CostTable, ISD, VT);
    if (Idx != -1)
      return LT.first * AVX1CostTable[Idx].Cost;
  }

  // Custom lowering of vectors.
  static const CostTblEntry<MVT::SimpleValueType> CustomLowered[] = {
    // A v2i64/v4i64 multiply is custom lowered as a series of long
    // multiplies (3), shifts (4) and adds (2).
    { ISD::MUL,  MVT::v2i64, 9 },
    { ISD::MUL,  MVT::v4i64, 9 },
  };
  int Idx = CostTableLookup(CustomLowered, ISD, LT.second);
  if (Idx != -1)
    return LT.first * CustomLowered[Idx].Cost;

  // Special lowering of v4i32 mul on SSE2/SSE3: Lower v4i32 mul as 2x shuffle,
  // 2x pmuludq, 2x shuffle.
  if (ISD == ISD::MUL && LT.second == MVT::v4i32 && ST->hasSSE2() &&
      !ST->hasSSE41())
    return LT.first * 6;

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info);
}

int X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // We only estimate the cost of reverse and alternate shuffles.
  if (Kind != TTI::SK_Reverse && Kind != TTI::SK_Alternate)
    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);

  if (Kind == TTI::SK_Reverse) {
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
    int Cost = 1;
    if (LT.second.getSizeInBits() > 128)
      Cost = 3; // Extract + insert + copy.

    // Multiply by the number of parts.
    return Cost * LT.first;
  }

  if (Kind == TTI::SK_Alternate) {
    // 64-bit packed float vectors (v2f32) are widened to type v4f32.
    // 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

    // The backend knows how to generate a single VEX.256 version of
    // instruction VPBLENDW if the target supports AVX2.
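    // In that case a v16i16 alternate shuffle costs one instruction per
    // legalized part, which is what the early return below models.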
    if (ST->hasAVX2() && LT.second == MVT::v16i16)
      return LT.first;

    static const CostTblEntry<MVT::SimpleValueType> AVXAltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v4i64, 1},  // vblendpd
      {ISD::VECTOR_SHUFFLE, MVT::v4f64, 1},  // vblendpd

      {ISD::VECTOR_SHUFFLE, MVT::v8i32, 1},  // vblendps
      {ISD::VECTOR_SHUFFLE, MVT::v8f32, 1},  // vblendps

      // This shuffle is custom lowered into a sequence of:
      //   2x vextractf128 , 2x vpblendw , 1x vinsertf128
      {ISD::VECTOR_SHUFFLE, MVT::v16i16, 5},

      // This shuffle is custom lowered into a long sequence of:
      //   2x vextractf128 , 4x vpshufb , 2x vpor , 1x vinsertf128
      {ISD::VECTOR_SHUFFLE, MVT::v32i8, 9}
    };

    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVXAltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
      if (Idx != -1)
        return LT.first * AVXAltShuffleTbl[Idx].Cost;
    }

    static const CostTblEntry<MVT::SimpleValueType> SSE41AltShuffleTbl[] = {
      // These are lowered into movsd.
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},

      // Packed float vectors with four elements are lowered into BLENDI dag
      // nodes. A v4i32/v4f32 BLENDI generates a single 'blendps'/'blendpd'.
      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},

      // This shuffle generates a single pshufw.
      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},

      // There is no instruction that matches a v16i8 alternate shuffle.
      // The backend will expand it into the sequence 'pshufb + pshufb + or'.
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3}
    };

    if (ST->hasSSE41()) {
      int Idx = CostTableLookup(SSE41AltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
      if (Idx != -1)
        return LT.first * SSE41AltShuffleTbl[Idx].Cost;
    }

    static const CostTblEntry<MVT::SimpleValueType> SSSE3AltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},  // movsd
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},  // movsd

      // SSE3 doesn't have 'blendps'. The following shuffles are expanded into
      // the sequence 'shufps + pshufd'.
      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},

      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 3},  // pshufb + pshufb + or
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3}   // pshufb + pshufb + or
    };

    if (ST->hasSSSE3()) {
      int Idx = CostTableLookup(SSSE3AltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
      if (Idx != -1)
        return LT.first * SSSE3AltShuffleTbl[Idx].Cost;
    }

    static const CostTblEntry<MVT::SimpleValueType> SSEAltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},  // movsd
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},  // movsd

      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},  // shufps + pshufd
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},  // shufps + pshufd

      // This is expanded into a long sequence of four extract + four insert.
      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 8},  // 4 x pextrw + 4 pinsrw.

      // 8 x (pinsrw + pextrw + and + movb + movzb + or)
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 48}
    };

    // Fall-back (SSE3 and SSE2).
    int Idx = CostTableLookup(SSEAltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
    if (Idx != -1)
      return LT.first * SSEAltShuffleTbl[Idx].Cost;
    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}

int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVX512ConversionTbl[] = {
    { ISD::FP_EXTEND, MVT::v8f64,  MVT::v8f32,  1 },
    { ISD::FP_EXTEND, MVT::v8f64,  MVT::v16f32, 3 },
    { ISD::FP_ROUND,  MVT::v8f32,  MVT::v8f64,  1 },
    { ISD::FP_ROUND,  MVT::v16f32, MVT::v8f64,  3 },

    { ISD::TRUNCATE,  MVT::v16i8,  MVT::v16i32, 1 },
    { ISD::TRUNCATE,  MVT::v16i16, MVT::v16i32, 1 },
    { ISD::TRUNCATE,  MVT::v8i16,  MVT::v8i64,  1 },
    { ISD::TRUNCATE,  MVT::v8i32,  MVT::v8i64,  1 },
    { ISD::TRUNCATE,  MVT::v16i32, MVT::v8i64,  4 },

    // v16i1 -> v16i32 - load + broadcast
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1,  2 },

    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v16i32, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v16i32, 3 },

    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i1,  3 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i8,  2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i1,   4 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i16,  2 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  1 },
  };

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVX2ConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  2 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  2 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  4 },

    { ISD::FP_EXTEND,   MVT::v8f64,  MVT::v8f32,  3 },
    { ISD::FP_ROUND,    MVT::v8f32,  MVT::v8f64,  3 },

    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  8 },
  };

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVXConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  4 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  4 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  4 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  5 },
    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 4 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  9 },

    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   8 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   8 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  5 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i8,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i16,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  1 },

    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   6 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   5 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  5 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  9 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   7 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  6 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   7 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  6 },
    // The generic code to compute the scalar overhead is currently broken.
    // Workaround this limitation by estimating the scalarization overhead
    // here. We have roughly 10 instructions per scalar element.
    // Multiply that by the vector width.
    // FIXME: remove that when PR19268 is fixed.
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  2*10 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  4*10 },

    { ISD::FP_TO_SINT,  MVT::v8i8,   MVT::v8f32,  7 },
    { ISD::FP_TO_SINT,  MVT::v4i8,   MVT::v4f32,  1 },
    // This node is expanded into scalarized operations but BasicTTI is overly
    // optimistic estimating its cost. It computes 3 per element (one
    // vector-extract, one scalar conversion and one vector-insert). The
    // problem is that the inserts form a read-modify-write chain so latency
    // should be factored in too. Inflating the cost per element by 1.
    { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f32,  8*4 },
    { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f64,  4*4 },
  };

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  SSE2ConvTbl[] = {
    // These are somewhat magic numbers justified by looking at the output of
    // Intel's IACA, running some kernels and making sure when we take
    // legalization into account the throughput will be overestimated.
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64,  2*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32,  4*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16,  8*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64,  2*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32,  4*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16,  8*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    // There are faster sequences for float conversions.
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32,  8 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8,  8 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8,  8 },
  };

  std::pair<int, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
  std::pair<int, MVT> LTDest = TLI->getTypeLegalizationCost(DL, Dst);

  if (ST->hasSSE2() && !ST->hasAVX()) {
    int Idx =
        ConvertCostTableLookup(SSE2ConvTbl, ISD, LTDest.second, LTSrc.second);
    if (Idx != -1)
      return LTSrc.first * SSE2ConvTbl[Idx].Cost;
  }

  if (ST->hasAVX512()) {
    int Idx = ConvertCostTableLookup(AVX512ConversionTbl, ISD, LTDest.second,
                                     LTSrc.second);
    if (Idx != -1)
      return AVX512ConversionTbl[Idx].Cost;
  }

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  // The function getSimpleVT only handles simple value types.
  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);

  if (ST->hasAVX2()) {
    int Idx = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
                                     DstTy.getSimpleVT(), SrcTy.getSimpleVT());
    if (Idx != -1)
      return AVX2ConversionTbl[Idx].Cost;
  }

  if (ST->hasAVX()) {
    int Idx = ConvertCostTableLookup(AVXConversionTbl, ISD, DstTy.getSimpleVT(),
                                     SrcTy.getSimpleVT());
    if (Idx != -1)
      return AVXConversionTbl[Idx].Cost;
  }

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}

int X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTbl[] = {
    { ISD::SETCC, MVT::v2f64,  1 },
    { ISD::SETCC, MVT::v4f32,  1 },
    { ISD::SETCC, MVT::v2i64,  1 },
    { ISD::SETCC, MVT::v4i32,  1 },
    { ISD::SETCC, MVT::v8i16,  1 },
    { ISD::SETCC, MVT::v16i8,  1 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTbl[] = {
    { ISD::SETCC, MVT::v4f64,  1 },
    { ISD::SETCC, MVT::v8f32,  1 },
    // AVX1 does not support 8-wide integer compare.
    { ISD::SETCC, MVT::v4i64,  4 },
    { ISD::SETCC, MVT::v8i32,  4 },
    { ISD::SETCC, MVT::v16i16, 4 },
    { ISD::SETCC, MVT::v32i8,  4 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX2CostTbl[] = {
    { ISD::SETCC, MVT::v4i64,  1 },
    { ISD::SETCC, MVT::v8i32,  1 },
    { ISD::SETCC, MVT::v16i16, 1 },
    { ISD::SETCC, MVT::v32i8,  1 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX512CostTbl[] = {
    { ISD::SETCC, MVT::v8i64,  1 },
    { ISD::SETCC, MVT::v16i32, 1 },
    { ISD::SETCC, MVT::v8f64,  1 },
    { ISD::SETCC, MVT::v16f32, 1 },
  };

  if (ST->hasAVX512()) {
    int Idx = CostTableLookup(AVX512CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX512CostTbl[Idx].Cost;
  }

  if (ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX2CostTbl[Idx].Cost;
  }

  if (ST->hasAVX()) {
    int Idx = CostTableLookup(AVX1CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX1CostTbl[Idx].Cost;
  }

  if (ST->hasSSE42()) {
    int Idx = CostTableLookup(SSE42CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * SSE42CostTbl[Idx].Cost;
  }

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}

int X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Index != -1U) {
    // Legalize the type.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // Floating point scalars are already located in index #0.
    if (Val->getScalarType()->isFloatingPointTy() && Index == 0)
      return 0;
  }

  return BaseT::getVectorInstrCost(Opcode, Val, Index);
}

int X86TTIImpl::getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
  assert(Ty->isVectorTy() && "Can only scalarize vectors");
  int Cost = 0;

  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    if (Insert)
      Cost += getVectorInstrCost(Instruction::InsertElement, Ty, i);
    if (Extract)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }

  return Cost;
}

int X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                unsigned AddressSpace) {
  // Handle non-power-of-two vectors such as <3 x float>.
  if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
    unsigned NumElem = VTy->getVectorNumElements();

    // Handle a few common cases:
    // <3 x float>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
      // Cost = 64 bit store + extract + 32 bit store.
      return 3;

    // <3 x double>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
      // Cost = 128 bit store + unpack + 64 bit store.
      return 3;

    // Assume that all other non-power-of-two numbers are scalarized.
    if (!isPowerOf2_32(NumElem)) {
      int Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(), Alignment,
                                        AddressSpace);
      int SplitCost = getScalarizationOverhead(Src, Opcode == Instruction::Load,
                                               Opcode == Instruction::Store);
      return NumElem * Cost + SplitCost;
    }
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  // Each load/store unit costs 1.
  int Cost = LT.first * 1;

  // On Sandybridge 256-bit load/stores are double pumped
  // (but not on Haswell).
  if (LT.second.getSizeInBits() > 128 && !ST->hasAVX2())
    Cost *= 2;

  return Cost;
}

int X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
                                      unsigned Alignment,
                                      unsigned AddressSpace) {
  VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy);
  if (!SrcVTy)
    // For a scalar, take the regular memory-op cost, without a mask.
    return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace);

  unsigned NumElem = SrcVTy->getVectorNumElements();
  VectorType *MaskTy =
      VectorType::get(Type::getInt8Ty(getGlobalContext()), NumElem);
  if ((Opcode == Instruction::Load && !isLegalMaskedLoad(SrcVTy)) ||
      (Opcode == Instruction::Store && !isLegalMaskedStore(SrcVTy)) ||
      !isPowerOf2_32(NumElem)) {
    // Scalarization.
    int MaskSplitCost = getScalarizationOverhead(MaskTy, false, true);
    int ScalarCompareCost = getCmpSelInstrCost(
        Instruction::ICmp, Type::getInt8Ty(getGlobalContext()), nullptr);
    int BranchCost = getCFInstrCost(Instruction::Br);
    int MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);

    int ValueSplitCost = getScalarizationOverhead(
        SrcVTy, Opcode == Instruction::Load, Opcode == Instruction::Store);
    int MemopCost =
        NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                         Alignment, AddressSpace);
    return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
  int Cost = 0;
  if (LT.second != TLI->getValueType(DL, SrcVTy).getSimpleVT() &&
      LT.second.getVectorNumElements() == NumElem)
    // Promotion requires expand/truncate for data and a shuffle for mask.
    Cost += getShuffleCost(TTI::SK_Alternate, SrcVTy, 0, nullptr) +
            getShuffleCost(TTI::SK_Alternate, MaskTy, 0, nullptr);

  else if (LT.second.getVectorNumElements() > NumElem) {
    VectorType *NewMaskTy = VectorType::get(MaskTy->getVectorElementType(),
                                            LT.second.getVectorNumElements());
    // Expanding requires filling the mask with zeroes.
    Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, 0, MaskTy);
  }
  if (!ST->hasAVX512())
    return Cost + LT.first * 4; // Each maskmov costs 4.

  // AVX-512 masked load/store is cheaper.
  return Cost + LT.first;
}

int X86TTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
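  // The constant below is a rough estimate of how many vector instructions it
  // takes to hide that overhead; it is not tuned per subtarget.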
  unsigned NumVectorInstToHideOverhead = 10;

  if (Ty->isVectorTy() && IsComplex)
    return NumVectorInstToHideOverhead;

  return BaseT::getAddressComputationCost(Ty, IsComplex);
}

int X86TTIImpl::getReductionCost(unsigned Opcode, Type *ValTy,
                                 bool IsPairwise) {

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use that as the cost.

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD,  MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD,  MVT::v8i16, 5 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblPairWise[] = {
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::FADD, MVT::v4f64, 5 },
    { ISD::FADD, MVT::v8f32, 7 },
    { ISD::ADD,  MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD,  MVT::v4i64, 5 }, // The data reported by the IACA tool is "4.8".
    { ISD::ADD,  MVT::v8i16, 5 },
    { ISD::ADD,  MVT::v8i32, 5 },
  };

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD,  MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3".
    { ISD::ADD,  MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3".
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v4f32, 3 },
    { ISD::FADD, MVT::v4f64, 3 },
    { ISD::FADD, MVT::v8f32, 4 },
    { ISD::ADD,  MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "2.8".
    { ISD::ADD,  MVT::v4i64, 3 },
    { ISD::ADD,  MVT::v8i16, 4 },
    { ISD::ADD,  MVT::v8i32, 5 },
  };

  if (IsPairwise) {
    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVX1CostTblPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * AVX1CostTblPairWise[Idx].Cost;
    }

    if (ST->hasSSE42()) {
      int Idx = CostTableLookup(SSE42CostTblPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * SSE42CostTblPairWise[Idx].Cost;
    }
  } else {
    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * AVX1CostTblNoPairWise[Idx].Cost;
    }

    if (ST->hasSSE42()) {
      int Idx = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * SSE42CostTblNoPairWise[Idx].Cost;
    }
  }

  return BaseT::getReductionCost(Opcode, ValTy, IsPairwise);
}

/// \brief Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
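/// For example, the all-zero 64-bit chunk of a wider constant costs nothing
/// to materialize on its own.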
int X86TTIImpl::getIntImmCost(int64_t Val) {
  if (Val == 0)
    return TTI::TCC_Free;

  if (isInt<32>(Val))
    return TTI::TCC_Basic;

  return 2 * TTI::TCC_Basic;
}

int X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Never hoist constants larger than 128 bits, because this might lead to
  // incorrect code generation or assertions in codegen.
  // FIXME: Create a cost model for types larger than i128 once the codegen
  // issues have been fixed.
  if (BitSize > 128)
    return TTI::TCC_Free;

  if (Imm == 0)
    return TTI::TCC_Free;

  // Sign-extend all constants to a multiple of 64 bits.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1, Cost);
}

int X86TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::And:
    // We support 64-bit ANDs with immediates with 32-bits of leading zeroes
    // by using a 32-bit operation with implicit zero extension. Detect such
    // immediates here as the normal path expects bit 31 to be sign extended.
    if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
      return TTI::TCC_Free;
    // Fallthrough
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
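  // An immediate shift amount is encoded directly in the instruction, so
  // hoisting it into a register would only add cost.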
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    int NumConstants = (BitSize + 63) / 64;
    int Cost = X86TTIImpl::getIntImmCost(Imm, Ty);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<int>(TTI::TCC_Free)
               : Cost;
  }

  return X86TTIImpl::getIntImmCost(Imm, Ty);
}

int X86TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return X86TTIImpl::getIntImmCost(Imm, Ty);
}

bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy) {
  Type *ScalarTy = DataTy->getScalarType();
  // TODO: Pointers should also be legal, but this requires additional support
  // for composing the intrinsic name.
  // getPrimitiveSizeInBits() returns 0 for PointerType.
  int DataWidth = ScalarTy->getPrimitiveSizeInBits();

  return (DataWidth >= 32 && ST->hasAVX2());
}

bool X86TTIImpl::isLegalMaskedStore(Type *DataType) {
  return isLegalMaskedLoad(DataType);
}

bool X86TTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();

  // Work this as a subsetting of subtarget features.
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // FIXME: This is likely too limiting as it will include subtarget features
  // that we might not care about for inlining, but it is conservatively
  // correct.
  return (CallerBits & CalleeBits) == CalleeBits;
}