//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#include "X86TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"

using namespace llvm;

#define DEBUG_TYPE "x86tti"

//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  //   instructions is inefficient. Once the problem is fixed, we should
  //   call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
}

unsigned X86TTIImpl::getNumberOfRegisters(bool Vector) {
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit()) {
    if (Vector && ST->hasAVX512())
      return 32;
    return 16;
  }
  return 8;
}

unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) {
  if (Vector) {
    if (ST->hasAVX512()) return 512;
    if (ST->hasAVX()) return 256;
    if (ST->hasSSE1()) return 128;
    return 0;
  }

  if (ST->is64Bit())
    return 64;
  return 32;
}

unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // If the loop will not be vectorized, don't interleave the loop.
  // Let the regular unroller handle the loop, which saves the overflow
  // check and memory check cost.
  if (VF == 1)
    return 1;

  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}

int X86TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  if (ISD == ISD::SDIV &&
      Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    // On X86, vector signed division by a power-of-two constant is
    // normally expanded to the sequence SRA + SRL + ADD + SRA.
    // The OperandValue properties may not be the same as those of the
    // previous operation; conservatively assume OP_None.
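    // The cost below is therefore composed from those pieces, e.g. a v4i32
    // sdiv by a uniform power-of-two constant is costed as
    // 2 * cost(ashr) + cost(lshr) + cost(add).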
    int Cost = 2 * getArithmeticInstrCost(Instruction::AShr, Ty, Op1Info,
                                          Op2Info, TargetTransformInfo::OP_None,
                                          TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::LShr, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Add, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);

    return Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType>
  AVX2UniformConstCostTable[] = {
    { ISD::SRA,  MVT::v4i64,   4 }, // 2 x psrad + shuffle.

    { ISD::SDIV, MVT::v16i16,  6 }, // vpmulhw sequence
    { ISD::UDIV, MVT::v16i16,  6 }, // vpmulhuw sequence
    { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
    { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2UniformConstCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2UniformConstCostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> AVX512CostTable[] = {
    { ISD::SHL,  MVT::v16i32,  1 },
    { ISD::SRL,  MVT::v16i32,  1 },
    { ISD::SRA,  MVT::v16i32,  1 },
    { ISD::SHL,  MVT::v8i64,   1 },
    { ISD::SRL,  MVT::v8i64,   1 },
    { ISD::SRA,  MVT::v8i64,   1 },
  };

  if (ST->hasAVX512()) {
    int Idx = CostTableLookup(AVX512CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX512CostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> AVX2CostTable[] = {
    // Shifts on v4i64/v8i32 on AVX2 are legal even though we declare them as
    // custom in order to detect the cases where the shift amount is a scalar.
    { ISD::SHL,  MVT::v4i32,   1 },
    { ISD::SRL,  MVT::v4i32,   1 },
    { ISD::SRA,  MVT::v4i32,   1 },
    { ISD::SHL,  MVT::v8i32,   1 },
    { ISD::SRL,  MVT::v8i32,   1 },
    { ISD::SRA,  MVT::v8i32,   1 },
    { ISD::SHL,  MVT::v2i64,   1 },
    { ISD::SRL,  MVT::v2i64,   1 },
    { ISD::SHL,  MVT::v4i64,   1 },
    { ISD::SRL,  MVT::v4i64,   1 },
  };

  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return LT.first;

    int Idx = CostTableLookup(AVX2CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2CostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> XOPCostTable[] = {
    // 128bit shifts take 1cy, but right shifts require negation beforehand.
    { ISD::SHL,  MVT::v16i8,   1 },
    { ISD::SRL,  MVT::v16i8,   2 },
    { ISD::SRA,  MVT::v16i8,   2 },
    { ISD::SHL,  MVT::v8i16,   1 },
    { ISD::SRL,  MVT::v8i16,   2 },
    { ISD::SRA,  MVT::v8i16,   2 },
    { ISD::SHL,  MVT::v4i32,   1 },
    { ISD::SRL,  MVT::v4i32,   2 },
    { ISD::SRA,  MVT::v4i32,   2 },
    { ISD::SHL,  MVT::v2i64,   1 },
    { ISD::SRL,  MVT::v2i64,   2 },
    { ISD::SRA,  MVT::v2i64,   2 },
    // 256bit shifts require splitting if AVX2 didn't catch them above.
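    // (Each 256-bit cost below is simply twice the corresponding 128-bit cost
    // above, reflecting the split into two 128-bit halves.)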
    { ISD::SHL,  MVT::v32i8,   2 },
    { ISD::SRL,  MVT::v32i8,   4 },
    { ISD::SRA,  MVT::v32i8,   4 },
    { ISD::SHL,  MVT::v16i16,  2 },
    { ISD::SRL,  MVT::v16i16,  4 },
    { ISD::SRA,  MVT::v16i16,  4 },
    { ISD::SHL,  MVT::v8i32,   2 },
    { ISD::SRL,  MVT::v8i32,   4 },
    { ISD::SRA,  MVT::v8i32,   4 },
    { ISD::SHL,  MVT::v4i64,   2 },
    { ISD::SRL,  MVT::v4i64,   4 },
    { ISD::SRA,  MVT::v4i64,   4 },
  };

  // Look for XOP lowering tricks.
  if (ST->hasXOP()) {
    int Idx = CostTableLookup(XOPCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * XOPCostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> AVX2CustomCostTable[] = {
    { ISD::SHL,  MVT::v32i8,     11 }, // vpblendvb sequence.
    { ISD::SHL,  MVT::v16i16,    10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRL,  MVT::v32i8,     11 }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v16i16,    10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRA,  MVT::v32i8,     24 }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v16i16,    10 }, // extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v2i64,      4 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,      4 }, // srl/xor/sub sequence.

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV, MVT::v32i8,  32*20 },
    { ISD::SDIV, MVT::v16i16, 16*20 },
    { ISD::SDIV, MVT::v8i32,   8*20 },
    { ISD::SDIV, MVT::v4i64,   4*20 },
    { ISD::UDIV, MVT::v32i8,  32*20 },
    { ISD::UDIV, MVT::v16i16, 16*20 },
    { ISD::UDIV, MVT::v8i32,   8*20 },
    { ISD::UDIV, MVT::v4i64,   4*20 },
  };

  // Look for AVX2 lowering tricks for custom cases.
  if (ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2CustomCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2CustomCostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType>
  SSE2UniformConstCostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // Constant splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i8,  1 }, // psllw.
    { ISD::SHL,  MVT::v8i16,  1 }, // psllw.
    { ISD::SHL,  MVT::v4i32,  1 }, // pslld.
    { ISD::SHL,  MVT::v2i64,  1 }, // psllq.

    { ISD::SRL,  MVT::v16i8,  1 }, // psrlw.
    { ISD::SRL,  MVT::v8i16,  1 }, // psrlw.
    { ISD::SRL,  MVT::v4i32,  1 }, // psrld.
    { ISD::SRL,  MVT::v2i64,  1 }, // psrlq.

    { ISD::SRA,  MVT::v16i8,  4 }, // psrlw, pand, pxor, psubb.
    { ISD::SRA,  MVT::v8i16,  1 }, // psraw.
    { ISD::SRA,  MVT::v4i32,  1 }, // psrad.
    { ISD::SRA,  MVT::v2i64,  4 }, // 2 x psrad + shuffle.

    { ISD::SDIV, MVT::v8i16,  6 }, // pmulhw sequence
    { ISD::UDIV, MVT::v8i16,  6 }, // pmulhuw sequence
    { ISD::SDIV, MVT::v4i32, 19 }, // pmuludq sequence
    { ISD::UDIV, MVT::v4i32, 15 }, // pmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2()) {
    // pmuldq sequence.
    if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 15;

    int Idx = CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * SSE2UniformConstCostTable[Idx].Cost;
  }

  if (ISD == ISD::SHL &&
      Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
    EVT VT = LT.second;
    if ((VT == MVT::v8i16 && ST->hasSSE2()) ||
        (VT == MVT::v4i32 && ST->hasSSE41()))
      // Vector shift left by a non-uniform constant can be lowered
      // into a vector multiply (pmullw/pmulld).
      return LT.first;
    if (VT == MVT::v4i32 && ST->hasSSE2())
      // A vector shift left by a non-uniform constant is converted
      // into a vector multiply; the new multiply is eventually
      // lowered into a sequence of shuffles and 2 x pmuludq.
      ISD = ISD::MUL;
  }

  static const CostTblEntry<MVT::SimpleValueType> SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // In some cases, where the shift amount is a scalar, we would be able
    // to generate better code. Unfortunately, when this is the case the value
    // (the splat) will get hoisted out of the loop, thereby making it invisible
    // to ISel. The cost model must return worst case assumptions because it is
    // used for vectorization and we don't want to make vectorized code worse
    // than scalar code.
    { ISD::SHL,  MVT::v16i8,    26 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v8i16,    32 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v4i32,   2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v2i64,     4 }, // splat+shuffle sequence.
    { ISD::SHL,  MVT::v4i64,     8 }, // splat+shuffle sequence.

    { ISD::SRL,  MVT::v16i8,    26 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v8i16,    32 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v4i32,    16 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v2i64,     4 }, // splat+shuffle sequence.

    { ISD::SRA,  MVT::v16i8,    54 }, // unpacked cmpgtb sequence.
    { ISD::SRA,  MVT::v8i16,    32 }, // cmpgtb sequence.
    { ISD::SRA,  MVT::v4i32,    16 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v2i64,    12 }, // srl/xor/sub sequence.

    // It is not a good idea to vectorize division. We have to scalarize it and
    // in the process we will often end up having to spill regular
    // registers. The overhead of division is going to dominate most kernels
    // anyways so try hard to prevent vectorization of division - it is
    // generally a bad idea. Assume somewhat arbitrarily that we have to be able
    // to hide "20 cycles" for each lane.
    { ISD::SDIV, MVT::v16i8, 16*20 },
    { ISD::SDIV, MVT::v8i16,  8*20 },
    { ISD::SDIV, MVT::v4i32,  4*20 },
    { ISD::SDIV, MVT::v2i64,  2*20 },
    { ISD::UDIV, MVT::v16i8, 16*20 },
    { ISD::UDIV, MVT::v8i16,  8*20 },
    { ISD::UDIV, MVT::v4i32,  4*20 },
    { ISD::UDIV, MVT::v2i64,  2*20 },
  };

  if (ST->hasSSE2()) {
    int Idx = CostTableLookup(SSE2CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * SSE2CostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL,  MVT::v16i16,  4 },
    { ISD::MUL,  MVT::v8i32,   4 },
    { ISD::SUB,  MVT::v8i32,   4 },
    { ISD::ADD,  MVT::v8i32,   4 },
    { ISD::SUB,  MVT::v4i64,   4 },
    { ISD::ADD,  MVT::v4i64,   4 },
    // A v4i64 multiply is custom lowered as two split v2i64 vectors that then
    // are lowered as a series of long multiplies (3), shifts (4) and adds (2).
    // Because we believe v4i64 to be a legal type, we must also include the
    // split factor of two in the cost table. Therefore, the cost here is 18
    // instead of 9.
    { ISD::MUL,  MVT::v4i64,  18 },
  };

  // Look for AVX1 lowering tricks.
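  // (These entries only apply when AVX2 is unavailable, i.e. when 256-bit
  // integer operations must be split into two 128-bit halves; the AVX2 paths
  // above handle the rest.)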
  if (ST->hasAVX() && !ST->hasAVX2()) {
    EVT VT = LT.second;

    // v16i16 and v8i32 shifts by non-uniform constants are lowered into a
    // sequence of extract + two vector multiply + insert.
    if (ISD == ISD::SHL && (VT == MVT::v8i32 || VT == MVT::v16i16) &&
        Op2Info == TargetTransformInfo::OK_NonUniformConstantValue)
      ISD = ISD::MUL;

    int Idx = CostTableLookup(AVX1CostTable, ISD, VT);
    if (Idx != -1)
      return LT.first * AVX1CostTable[Idx].Cost;
  }

  // Custom lowering of vectors.
  static const CostTblEntry<MVT::SimpleValueType> CustomLowered[] = {
    // A v2i64/v4i64 multiply is custom lowered as a series of long
    // multiplies (3), shifts (4) and adds (2).
    { ISD::MUL, MVT::v2i64, 9 },
    { ISD::MUL, MVT::v4i64, 9 },
  };
  int Idx = CostTableLookup(CustomLowered, ISD, LT.second);
  if (Idx != -1)
    return LT.first * CustomLowered[Idx].Cost;

  // Special lowering of v4i32 mul on SSE2 and SSE3: lower v4i32 mul as
  // 2 x shuffle, 2 x pmuludq, 2 x shuffle.
  if (ISD == ISD::MUL && LT.second == MVT::v4i32 && ST->hasSSE2() &&
      !ST->hasSSE41())
    return LT.first * 6;

  // Fall back to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info);
}

int X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // We only estimate the cost of reverse and alternate shuffles.
  if (Kind != TTI::SK_Reverse && Kind != TTI::SK_Alternate)
    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);

  if (Kind == TTI::SK_Reverse) {
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
    int Cost = 1;
    if (LT.second.getSizeInBits() > 128)
      Cost = 3; // Extract + insert + copy.

    // Multiply by the number of parts.
    return Cost * LT.first;
  }

  if (Kind == TTI::SK_Alternate) {
    // 64-bit packed float vectors (v2f32) are widened to type v4f32.
    // 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

    // The backend knows how to generate a single VEX.256 version of
    // instruction VPBLENDW if the target supports AVX2.
    if (ST->hasAVX2() && LT.second == MVT::v16i16)
      return LT.first;

    static const CostTblEntry<MVT::SimpleValueType> AVXAltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v4i64, 1},  // vblendpd
      {ISD::VECTOR_SHUFFLE, MVT::v4f64, 1},  // vblendpd

      {ISD::VECTOR_SHUFFLE, MVT::v8i32, 1},  // vblendps
      {ISD::VECTOR_SHUFFLE, MVT::v8f32, 1},  // vblendps

      // This shuffle is custom lowered into a sequence of:
      //   2x vextractf128, 2x vpblendw, 1x vinsertf128
      {ISD::VECTOR_SHUFFLE, MVT::v16i16, 5},

      // This shuffle is custom lowered into a long sequence of:
      //   2x vextractf128, 4x vpshufb, 2x vpor, 1x vinsertf128
      {ISD::VECTOR_SHUFFLE, MVT::v32i8, 9}
    };

    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVXAltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
      if (Idx != -1)
        return LT.first * AVXAltShuffleTbl[Idx].Cost;
    }

    static const CostTblEntry<MVT::SimpleValueType> SSE41AltShuffleTbl[] = {
      // These are lowered into movsd.
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},

      // Packed float vectors with four elements are lowered into BLENDI dag
      // nodes. A v4i32/v4f32 BLENDI generates a single 'blendps'/'blendpd'.
      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},

      // This shuffle generates a single pshufw.
      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},

      // There is no instruction that matches a v16i8 alternate shuffle.
      // The backend will expand it into the sequence 'pshufb + pshufb + or'.
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3}
    };

    if (ST->hasSSE41()) {
      int Idx = CostTableLookup(SSE41AltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
      if (Idx != -1)
        return LT.first * SSE41AltShuffleTbl[Idx].Cost;
    }

    static const CostTblEntry<MVT::SimpleValueType> SSSE3AltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},  // movsd
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},  // movsd

      // SSE3 doesn't have 'blendps'. The following shuffles are expanded into
      // the sequence 'shufps + pshufd'.
      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},

      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 3},  // pshufb + pshufb + or
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3}   // pshufb + pshufb + or
    };

    if (ST->hasSSSE3()) {
      int Idx = CostTableLookup(SSSE3AltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
      if (Idx != -1)
        return LT.first * SSSE3AltShuffleTbl[Idx].Cost;
    }

    static const CostTblEntry<MVT::SimpleValueType> SSEAltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},  // movsd
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},  // movsd

      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},  // shufps + pshufd
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},  // shufps + pshufd

      // This is expanded into a long sequence of four extract + four insert.
      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 8},  // 4 x pextrw + 4 pinsrw.

      // 8 x (pinsrw + pextrw + and + movb + movzb + or)
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 48}
    };

    // Fall-back (SSE3 and SSE2).
    int Idx = CostTableLookup(SSEAltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
    if (Idx != -1)
      return LT.first * SSEAltShuffleTbl[Idx].Cost;
    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}

int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVX512ConversionTbl[] = {
    { ISD::FP_EXTEND, MVT::v8f64,  MVT::v8f32,  1 },
    { ISD::FP_EXTEND, MVT::v8f64,  MVT::v16f32, 3 },
    { ISD::FP_ROUND,  MVT::v8f32,  MVT::v8f64,  1 },
    { ISD::FP_ROUND,  MVT::v16f32, MVT::v8f64,  3 },

    { ISD::TRUNCATE,  MVT::v16i8,  MVT::v16i32, 1 },
    { ISD::TRUNCATE,  MVT::v16i16, MVT::v16i32, 1 },
    { ISD::TRUNCATE,  MVT::v8i16,  MVT::v8i64,  1 },
    { ISD::TRUNCATE,  MVT::v8i32,  MVT::v8i64,  1 },
    { ISD::TRUNCATE,  MVT::v16i32, MVT::v8i64,  4 },

    // v16i1 -> v16i32 - load + broadcast
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1,  2 },

    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v16i32, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v16i32, 3 },

    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i1,  3 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i8,  2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i1,   4 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i16,  2 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  1 },
  };

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVX2ConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,  3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,  3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,  3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32, 1 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32, 1 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64, 2 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64, 2 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64, 2 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32, 2 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32, 2 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64, 4 },

    { ISD::FP_EXTEND,   MVT::v8f64,  MVT::v8f32, 3 },
    { ISD::FP_ROUND,    MVT::v8f32,  MVT::v8f64, 3 },

    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32, 8 },
  };

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVXConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,  7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,  4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,  7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16, 4 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16, 4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,  6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,  6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32, 4 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32, 4 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64, 4 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64, 4 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64, 4 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32, 4 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32, 5 },
    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 4 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64, 9 },

    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i1,  8 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i8,  8 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i16, 5 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i1,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i8,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i16, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i1,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i8,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i16, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i32, 1 },

    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i1,  6 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i8,  5 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16, 5 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32, 9 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i1,  7 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i8,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32, 6 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i1,  7 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i8,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i32, 6 },
    // The generic code to compute the scalar overhead is currently broken.
    // Work around this limitation by estimating the scalarization overhead
    // here. We have roughly 10 instructions per scalar element.
    // Multiply that by the vector width.
    // FIXME: remove that when PR19268 is fixed.
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64, 2*10 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64, 4*10 },

    { ISD::FP_TO_SINT,  MVT::v8i8,   MVT::v8f32, 7 },
    { ISD::FP_TO_SINT,  MVT::v4i8,   MVT::v4f32, 1 },
    // This node is expanded into scalarized operations but BasicTTI is overly
    // optimistic estimating its cost. It computes 3 per element (one
    // vector-extract, one scalar conversion and one vector-insert). The
    // problem is that the inserts form a read-modify-write chain so latency
    // should be factored in too. Inflating the cost per element by 1.
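    // That is, 4 per element rather than 3, multiplied by the number of
    // elements in the vector.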
    { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f32, 8*4 },
    { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f64, 4*4 },
  };

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  SSE2ConvTbl[] = {
    // These are somewhat magic numbers justified by looking at the output of
    // Intel's IACA, running some kernels and making sure when we take
    // legalization into account the throughput will be overestimated.
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64,  2*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32,  4*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16,  8*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64,  2*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32,  4*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16,  8*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    // There are faster sequences for float conversions.
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32,  8 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8,  8 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8,  8 },
  };

  std::pair<int, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
  std::pair<int, MVT> LTDest = TLI->getTypeLegalizationCost(DL, Dst);

  if (ST->hasSSE2() && !ST->hasAVX()) {
    int Idx =
        ConvertCostTableLookup(SSE2ConvTbl, ISD, LTDest.second, LTSrc.second);
    if (Idx != -1)
      return LTSrc.first * SSE2ConvTbl[Idx].Cost;
  }

  if (ST->hasAVX512()) {
    int Idx = ConvertCostTableLookup(AVX512ConversionTbl, ISD, LTDest.second,
                                     LTSrc.second);
    if (Idx != -1)
      return AVX512ConversionTbl[Idx].Cost;
  }

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  // The function getSimpleVT only handles simple value types.
  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);

  if (ST->hasAVX2()) {
    int Idx = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
                                     DstTy.getSimpleVT(), SrcTy.getSimpleVT());
    if (Idx != -1)
      return AVX2ConversionTbl[Idx].Cost;
  }

  if (ST->hasAVX()) {
    int Idx = ConvertCostTableLookup(AVXConversionTbl, ISD, DstTy.getSimpleVT(),
                                     SrcTy.getSimpleVT());
    if (Idx != -1)
      return AVXConversionTbl[Idx].Cost;
  }

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}

int X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTbl[] = {
    { ISD::SETCC, MVT::v2f64,  1 },
    { ISD::SETCC, MVT::v4f32,  1 },
    { ISD::SETCC, MVT::v2i64,  1 },
    { ISD::SETCC, MVT::v4i32,  1 },
    { ISD::SETCC, MVT::v8i16,  1 },
    { ISD::SETCC, MVT::v16i8,  1 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTbl[] = {
    { ISD::SETCC, MVT::v4f64,  1 },
    { ISD::SETCC, MVT::v8f32,  1 },
    // AVX1 does not support 8-wide integer compare.
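    // These are roughly costed as two 128-bit compares plus an extract and an
    // insert to reassemble the 256-bit result (hence 4 below).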
    { ISD::SETCC, MVT::v4i64,  4 },
    { ISD::SETCC, MVT::v8i32,  4 },
    { ISD::SETCC, MVT::v16i16, 4 },
    { ISD::SETCC, MVT::v32i8,  4 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX2CostTbl[] = {
    { ISD::SETCC, MVT::v4i64,  1 },
    { ISD::SETCC, MVT::v8i32,  1 },
    { ISD::SETCC, MVT::v16i16, 1 },
    { ISD::SETCC, MVT::v32i8,  1 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX512CostTbl[] = {
    { ISD::SETCC, MVT::v8i64,  1 },
    { ISD::SETCC, MVT::v16i32, 1 },
    { ISD::SETCC, MVT::v8f64,  1 },
    { ISD::SETCC, MVT::v16f32, 1 },
  };

  if (ST->hasAVX512()) {
    int Idx = CostTableLookup(AVX512CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX512CostTbl[Idx].Cost;
  }

  if (ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX2CostTbl[Idx].Cost;
  }

  if (ST->hasAVX()) {
    int Idx = CostTableLookup(AVX1CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX1CostTbl[Idx].Cost;
  }

  if (ST->hasSSE42()) {
    int Idx = CostTableLookup(SSE42CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * SSE42CostTbl[Idx].Cost;
  }

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}

int X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Index != -1U) {
    // Legalize the type.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // Floating point scalars are already located in index #0.
    if (Val->getScalarType()->isFloatingPointTy() && Index == 0)
      return 0;
  }

  return BaseT::getVectorInstrCost(Opcode, Val, Index);
}

int X86TTIImpl::getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
  assert(Ty->isVectorTy() && "Can only scalarize vectors");
  int Cost = 0;

  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    if (Insert)
      Cost += getVectorInstrCost(Instruction::InsertElement, Ty, i);
    if (Extract)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }

  return Cost;
}

int X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                unsigned AddressSpace) {
  // Handle non-power-of-two vectors such as <3 x float>.
  if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
    unsigned NumElem = VTy->getVectorNumElements();

    // Handle a few common cases:
    // <3 x float>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
      // Cost = 64 bit store + extract + 32 bit store.
      return 3;

    // <3 x double>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
      // Cost = 128 bit store + unpack + 64 bit store.
      return 3;

    // Assume that all other non-power-of-two numbers are scalarized.
    if (!isPowerOf2_32(NumElem)) {
      int Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(), Alignment,
                                        AddressSpace);
      int SplitCost = getScalarizationOverhead(Src, Opcode == Instruction::Load,
                                               Opcode == Instruction::Store);
      return NumElem * Cost + SplitCost;
    }
  }

  // Legalize the type.
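  // LT.first roughly gives how many legalized operations the type splits into
  // (e.g. how many pieces a wide vector becomes); LT.second is the legalized
  // value type of each piece.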
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  // Each load/store unit costs 1.
  int Cost = LT.first * 1;

  // On Sandybridge 256bit load/stores are double pumped
  // (but not on Haswell).
  if (LT.second.getSizeInBits() > 128 && !ST->hasAVX2())
    Cost *= 2;

  return Cost;
}

int X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
                                      unsigned Alignment,
                                      unsigned AddressSpace) {
  VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy);
  if (!SrcVTy)
    // To calculate the scalar cost, take the regular cost without the mask.
    return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace);

  unsigned NumElem = SrcVTy->getVectorNumElements();
  VectorType *MaskTy =
      VectorType::get(Type::getInt8Ty(getGlobalContext()), NumElem);
  if ((Opcode == Instruction::Load && !isLegalMaskedLoad(SrcVTy, 1)) ||
      (Opcode == Instruction::Store && !isLegalMaskedStore(SrcVTy, 1)) ||
      !isPowerOf2_32(NumElem)) {
    // Scalarization
    int MaskSplitCost = getScalarizationOverhead(MaskTy, false, true);
    int ScalarCompareCost = getCmpSelInstrCost(
        Instruction::ICmp, Type::getInt8Ty(getGlobalContext()), NULL);
    int BranchCost = getCFInstrCost(Instruction::Br);
    int MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);

    int ValueSplitCost = getScalarizationOverhead(
        SrcVTy, Opcode == Instruction::Load, Opcode == Instruction::Store);
    int MemopCost =
        NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                         Alignment, AddressSpace);
    return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
  int Cost = 0;
  if (LT.second != TLI->getValueType(DL, SrcVTy).getSimpleVT() &&
      LT.second.getVectorNumElements() == NumElem)
    // Promotion requires an expand/truncate for the data and a shuffle for
    // the mask.
    Cost += getShuffleCost(TTI::SK_Alternate, SrcVTy, 0, 0) +
            getShuffleCost(TTI::SK_Alternate, MaskTy, 0, 0);
  else if (LT.second.getVectorNumElements() > NumElem) {
    VectorType *NewMaskTy = VectorType::get(MaskTy->getVectorElementType(),
                                            LT.second.getVectorNumElements());
    // Expanding requires filling the mask with zeroes.
    Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, 0, MaskTy);
  }
  if (!ST->hasAVX512())
    return Cost + LT.first * 4; // Each maskmov costs 4.

  // AVX-512 masked load/store is cheaper.
  return Cost + LT.first;
}

int X86TTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
  // Address computations in vectorized code with non-consecutive addresses will
  // likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
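  // Assume roughly ten vector instructions are needed to hide this extra
  // overhead (a heuristic, not a measured value).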
  unsigned NumVectorInstToHideOverhead = 10;

  if (Ty->isVectorTy() && IsComplex)
    return NumVectorInstToHideOverhead;

  return BaseT::getAddressComputationCost(Ty, IsComplex);
}

int X86TTIImpl::getReductionCost(unsigned Opcode, Type *ValTy,
                                 bool IsPairwise) {

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use that as the cost.

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD,  MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD,  MVT::v8i16, 5 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblPairWise[] = {
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::FADD, MVT::v4f64, 5 },
    { ISD::FADD, MVT::v8f32, 7 },
    { ISD::ADD,  MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD,  MVT::v4i64, 5 }, // The data reported by the IACA tool is "4.8".
    { ISD::ADD,  MVT::v8i16, 5 },
    { ISD::ADD,  MVT::v8i32, 5 },
  };

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD,  MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3".
    { ISD::ADD,  MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3".
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v4f32, 3 },
    { ISD::FADD, MVT::v4f64, 3 },
    { ISD::FADD, MVT::v8f32, 4 },
    { ISD::ADD,  MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "2.8".
    { ISD::ADD,  MVT::v4i64, 3 },
    { ISD::ADD,  MVT::v8i16, 4 },
    { ISD::ADD,  MVT::v8i32, 5 },
  };

  if (IsPairwise) {
    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVX1CostTblPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * AVX1CostTblPairWise[Idx].Cost;
    }

    if (ST->hasSSE42()) {
      int Idx = CostTableLookup(SSE42CostTblPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * SSE42CostTblPairWise[Idx].Cost;
    }
  } else {
    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * AVX1CostTblNoPairWise[Idx].Cost;
    }

    if (ST->hasSSE42()) {
      int Idx = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * SSE42CostTblNoPairWise[Idx].Cost;
    }
  }

  return BaseT::getReductionCost(Opcode, ValTy, IsPairwise);
}

/// \brief Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
int X86TTIImpl::getIntImmCost(int64_t Val) {
  if (Val == 0)
    return TTI::TCC_Free;

  if (isInt<32>(Val))
    return TTI::TCC_Basic;

  return 2 * TTI::TCC_Basic;
}

int X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Never hoist constants larger than 128bit, because this might lead to
  // incorrect code generation or assertions in codegen.
  // FIXME: Create a cost model for types larger than i128 once the codegen
  // issues have been fixed.
  if (BitSize > 128)
    return TTI::TCC_Free;

  if (Imm == 0)
    return TTI::TCC_Free;

  // Sign-extend all constants to a multiple of 64 bits.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1, Cost);
}

int X86TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    int NumConstants = (BitSize + 63) / 64;
    int Cost = X86TTIImpl::getIntImmCost(Imm, Ty);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<int>(TTI::TCC_Free)
               : Cost;
  }

  return X86TTIImpl::getIntImmCost(Imm, Ty);
}

int X86TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return X86TTIImpl::getIntImmCost(Imm, Ty);
}

bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, int Consecutive) {
  int DataWidth = DataTy->getPrimitiveSizeInBits();

  // TODO: AVX512 allows gather/scatter, which works with strided and random
  // accesses as well.
  if ((DataWidth < 32) || (Consecutive == 0))
    return false;
  if (ST->hasAVX512() || ST->hasAVX2())
    return true;
  return false;
}

bool X86TTIImpl::isLegalMaskedStore(Type *DataType, int Consecutive) {
  return isLegalMaskedLoad(DataType, Consecutive);
}

bool X86TTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();

  // Work this as a subsetting of subtarget features.
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // FIXME: This is likely too limiting as it will include subtarget features
  // that we might not care about for inlining, but it is conservatively
  // correct.
  return (CallerBits & CalleeBits) == CalleeBits;
}