//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#include "X86TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"

using namespace llvm;

#define DEBUG_TYPE "x86tti"

//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  //   instructions is inefficient. Once the problem is fixed, we should
  //   call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
}

unsigned X86TTIImpl::getNumberOfRegisters(bool Vector) {
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit()) {
    if (Vector && ST->hasAVX512())
      return 32;
    return 16;
  }
  return 8;
}

unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) {
  if (Vector) {
    if (ST->hasAVX512()) return 512;
    if (ST->hasAVX()) return 256;
    if (ST->hasSSE1()) return 128;
    return 0;
  }

  if (ST->is64Bit())
    return 64;

  return 32;
}

unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // If the loop will not be vectorized, don't interleave the loop.
  // Let the regular unroller handle the loop instead, which saves the
  // overflow check and memory check cost.
  if (VF == 1)
    return 1;

  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}

int X86TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  if (ISD == ISD::SDIV &&
      Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    // On X86, vector signed division by a uniform power-of-two constant is
    // normally expanded to the sequence SRA + SRL + ADD + SRA.
    // The OperandValue properties may not be the same as those of the
    // previous operation; conservatively assume OP_None.
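    // As a rough sketch (assuming a 32-bit element type), "x sdiv 4" becomes:
    //   t0 = ashr x, 31    ; broadcast the sign bit
    //   t1 = lshr t0, 30   ; rounding bias: element-bits - log2(divisor)
    //   t2 = add  x, t1    ; bias negative values upward
    //   r  = ashr t2, 2    ; the actual division
    // which is why the cost below is 2 x AShr + 1 x LShr + 1 x Add.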
    int Cost = 2 * getArithmeticInstrCost(Instruction::AShr, Ty, Op1Info,
                                          Op2Info, TargetTransformInfo::OP_None,
                                          TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::LShr, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Add, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);

    return Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType>
  AVX2UniformConstCostTable[] = {
    { ISD::SRA,  MVT::v4i64,   4 }, // 2 x psrad + shuffle.

    { ISD::SDIV, MVT::v16i16,  6 }, // vpmulhw sequence
    { ISD::UDIV, MVT::v16i16,  6 }, // vpmulhuw sequence
    { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
    { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2UniformConstCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2UniformConstCostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> AVX512CostTable[] = {
    { ISD::SHL,  MVT::v16i32, 1 },
    { ISD::SRL,  MVT::v16i32, 1 },
    { ISD::SRA,  MVT::v16i32, 1 },
    { ISD::SHL,  MVT::v8i64,  1 },
    { ISD::SRL,  MVT::v8i64,  1 },
    { ISD::SRA,  MVT::v8i64,  1 },
  };

  if (ST->hasAVX512()) {
    int Idx = CostTableLookup(AVX512CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX512CostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> AVX2CostTable[] = {
    // Shifts on v4i64/v8i32 on AVX2 are legal even though we declare them
    // custom in order to detect the cases where the shift amount is a scalar.
    { ISD::SHL,  MVT::v4i32,  1 },
    { ISD::SRL,  MVT::v4i32,  1 },
    { ISD::SRA,  MVT::v4i32,  1 },
    { ISD::SHL,  MVT::v8i32,  1 },
    { ISD::SRL,  MVT::v8i32,  1 },
    { ISD::SRA,  MVT::v8i32,  1 },
    { ISD::SHL,  MVT::v2i64,  1 },
    { ISD::SRL,  MVT::v2i64,  1 },
    { ISD::SHL,  MVT::v4i64,  1 },
    { ISD::SRL,  MVT::v4i64,  1 },
  };

  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return LT.first;

    int Idx = CostTableLookup(AVX2CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2CostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> XOPCostTable[] = {
    // 128-bit shifts take 1 cycle, but right shifts require negation
    // beforehand.
    { ISD::SHL,  MVT::v16i8,  1 },
    { ISD::SRL,  MVT::v16i8,  2 },
    { ISD::SRA,  MVT::v16i8,  2 },
    { ISD::SHL,  MVT::v8i16,  1 },
    { ISD::SRL,  MVT::v8i16,  2 },
    { ISD::SRA,  MVT::v8i16,  2 },
    { ISD::SHL,  MVT::v4i32,  1 },
    { ISD::SRL,  MVT::v4i32,  2 },
    { ISD::SRA,  MVT::v4i32,  2 },
    { ISD::SHL,  MVT::v2i64,  1 },
    { ISD::SRL,  MVT::v2i64,  2 },
    { ISD::SRA,  MVT::v2i64,  2 },
    // 256-bit shifts require splitting if AVX2 didn't catch them above.
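    // (Each 256-bit shift splits into two 128-bit ops, so the entries below
    // are simply twice the corresponding 128-bit costs.)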
    { ISD::SHL,  MVT::v32i8,  2 },
    { ISD::SRL,  MVT::v32i8,  4 },
    { ISD::SRA,  MVT::v32i8,  4 },
    { ISD::SHL,  MVT::v16i16, 2 },
    { ISD::SRL,  MVT::v16i16, 4 },
    { ISD::SRA,  MVT::v16i16, 4 },
    { ISD::SHL,  MVT::v8i32,  2 },
    { ISD::SRL,  MVT::v8i32,  4 },
    { ISD::SRA,  MVT::v8i32,  4 },
    { ISD::SHL,  MVT::v4i64,  2 },
    { ISD::SRL,  MVT::v4i64,  4 },
    { ISD::SRA,  MVT::v4i64,  4 },
  };

  // Look for XOP lowering tricks.
  if (ST->hasXOP()) {
    int Idx = CostTableLookup(XOPCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * XOPCostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> AVX2CustomCostTable[] = {
    { ISD::SHL,  MVT::v32i8,     11 }, // vpblendvb sequence.
    { ISD::SHL,  MVT::v16i16,    10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRL,  MVT::v32i8,     11 }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v16i16,    10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRA,  MVT::v32i8,     24 }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v16i16,    10 }, // extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v2i64,      4 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,      4 }, // srl/xor/sub sequence.

    // Vectorizing division is a bad idea. See the SSE2 table for more
    // comments.
    { ISD::SDIV, MVT::v32i8,  32*20 },
    { ISD::SDIV, MVT::v16i16, 16*20 },
    { ISD::SDIV, MVT::v8i32,   8*20 },
    { ISD::SDIV, MVT::v4i64,   4*20 },
    { ISD::UDIV, MVT::v32i8,  32*20 },
    { ISD::UDIV, MVT::v16i16, 16*20 },
    { ISD::UDIV, MVT::v8i32,   8*20 },
    { ISD::UDIV, MVT::v4i64,   4*20 },
  };

  // Look for AVX2 lowering tricks for custom cases.
  if (ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2CustomCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2CustomCostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType>
  SSE2UniformConstCostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // Constant splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i8,  1 }, // psllw.
    { ISD::SHL,  MVT::v8i16,  1 }, // psllw.
    { ISD::SHL,  MVT::v4i32,  1 }, // pslld.
    { ISD::SHL,  MVT::v2i64,  1 }, // psllq.

    { ISD::SRL,  MVT::v16i8,  1 }, // psrlw.
    { ISD::SRL,  MVT::v8i16,  1 }, // psrlw.
    { ISD::SRL,  MVT::v4i32,  1 }, // psrld.
    { ISD::SRL,  MVT::v2i64,  1 }, // psrlq.

    { ISD::SRA,  MVT::v16i8,  4 }, // psrlw, pand, pxor, psubb.
    { ISD::SRA,  MVT::v8i16,  1 }, // psraw.
    { ISD::SRA,  MVT::v4i32,  1 }, // psrad.
    { ISD::SRA,  MVT::v2i64,  4 }, // 2 x psrad + shuffle.

    { ISD::SDIV, MVT::v8i16,  6 }, // pmulhw sequence
    { ISD::UDIV, MVT::v8i16,  6 }, // pmulhuw sequence
    { ISD::SDIV, MVT::v4i32, 19 }, // pmuludq sequence
    { ISD::UDIV, MVT::v4i32, 15 }, // pmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2()) {
    // pmuldq sequence.
    if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 15;

    int Idx = CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * SSE2UniformConstCostTable[Idx].Cost;
  }

  if (ISD == ISD::SHL &&
      Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
    EVT VT = LT.second;
    if ((VT == MVT::v8i16 && ST->hasSSE2()) ||
        (VT == MVT::v4i32 && ST->hasSSE41()))
      // A vector shift left by a non-uniform constant can be lowered into a
      // vector multiply (pmullw/pmulld).
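      // For example, "x shl <0, 1, 2, 3>" is equivalent to
      // "x mul <1, 2, 4, 8>", a single pmullw/pmulld, hence a cost of one
      // legalized operation.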
      return LT.first;
    if (VT == MVT::v4i32 && ST->hasSSE2())
      // A vector shift left by a non-uniform constant is converted into a
      // vector multiply; the new multiply is eventually lowered into a
      // sequence of shuffles and 2 x pmuludq.
      ISD = ISD::MUL;
  }

  static const CostTblEntry<MVT::SimpleValueType> SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // For some cases, where the shift amount is a scalar, we would be able
    // to generate better code. Unfortunately, when this is the case the value
    // (the splat) will get hoisted out of the loop, thereby making it
    // invisible to ISel. The cost model must return worst case assumptions
    // because it is used for vectorization and we don't want to make
    // vectorized code worse than scalar code.
    { ISD::SHL,  MVT::v16i8,    26 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v8i16,    32 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v4i32,   2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v2i64,     4 }, // splat+shuffle sequence.
    { ISD::SHL,  MVT::v4i64,     8 }, // splat+shuffle sequence.

    { ISD::SRL,  MVT::v16i8,    26 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v8i16,    32 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v4i32,    16 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v2i64,     4 }, // splat+shuffle sequence.

    { ISD::SRA,  MVT::v16i8,    54 }, // unpacked cmpgtb sequence.
    { ISD::SRA,  MVT::v8i16,    32 }, // cmpgtb sequence.
    { ISD::SRA,  MVT::v4i32,    16 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v2i64,    12 }, // srl/xor/sub sequence.

    // It is not a good idea to vectorize division. We have to scalarize it
    // and in the process we will often end up having to spill regular
    // registers. The overhead of division is going to dominate most kernels
    // anyway, so try hard to prevent vectorization of division - it is
    // generally a bad idea. Assume somewhat arbitrarily that we have to be
    // able to hide "20 cycles" for each lane.
    { ISD::SDIV, MVT::v16i8, 16*20 },
    { ISD::SDIV, MVT::v8i16,  8*20 },
    { ISD::SDIV, MVT::v4i32,  4*20 },
    { ISD::SDIV, MVT::v2i64,  2*20 },
    { ISD::UDIV, MVT::v16i8, 16*20 },
    { ISD::UDIV, MVT::v8i16,  8*20 },
    { ISD::UDIV, MVT::v4i32,  4*20 },
    { ISD::UDIV, MVT::v2i64,  2*20 },
  };

  if (ST->hasSSE2()) {
    int Idx = CostTableLookup(SSE2CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * SSE2CostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL, MVT::v16i16,  4 },
    { ISD::MUL, MVT::v8i32,   4 },
    { ISD::SUB, MVT::v8i32,   4 },
    { ISD::ADD, MVT::v8i32,   4 },
    { ISD::SUB, MVT::v4i64,   4 },
    { ISD::ADD, MVT::v4i64,   4 },
    // A v4i64 multiply is custom lowered as two split v2i64 vectors that then
    // are lowered as a series of long multiplies (3), shifts (4) and adds (2).
    // Because we believe v4i64 to be a legal type, we must also include the
    // split factor of two in the cost table. Therefore, the cost here is 18
    // instead of 9.
    { ISD::MUL, MVT::v4i64, 18 },
  };

  // Look for AVX1 lowering tricks.
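  // (AVX2 targets were already handled above; AVX1 has no 256-bit integer
  // arithmetic, so these ops split into two 128-bit halves.)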
  if (ST->hasAVX() && !ST->hasAVX2()) {
    EVT VT = LT.second;

    // v16i16 and v8i32 shifts by non-uniform constants are lowered into a
    // sequence of extract + two vector multiply + insert.
    if (ISD == ISD::SHL && (VT == MVT::v8i32 || VT == MVT::v16i16) &&
        Op2Info == TargetTransformInfo::OK_NonUniformConstantValue)
      ISD = ISD::MUL;

    int Idx = CostTableLookup(AVX1CostTable, ISD, VT);
    if (Idx != -1)
      return LT.first * AVX1CostTable[Idx].Cost;
  }

  // Custom lowering of vectors.
  static const CostTblEntry<MVT::SimpleValueType> CustomLowered[] = {
    // A v2i64/v4i64 multiply is custom lowered as a series of long
    // multiplies (3), shifts (4) and adds (2).
    { ISD::MUL, MVT::v2i64, 9 },
    { ISD::MUL, MVT::v4i64, 9 },
  };
  int Idx = CostTableLookup(CustomLowered, ISD, LT.second);
  if (Idx != -1)
    return LT.first * CustomLowered[Idx].Cost;

  // Special lowering of v4i32 mul on SSE2/SSE3: Lower v4i32 mul as
  // 2 x shuffle, 2 x pmuludq, 2 x shuffle.
  if (ISD == ISD::MUL && LT.second == MVT::v4i32 && ST->hasSSE2() &&
      !ST->hasSSE41())
    return LT.first * 6;

  // Fall back to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info);
}

int X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // We only estimate the cost of reverse and alternate shuffles.
  if (Kind != TTI::SK_Reverse && Kind != TTI::SK_Alternate)
    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);

  if (Kind == TTI::SK_Reverse) {
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
    int Cost = 1;
    if (LT.second.getSizeInBits() > 128)
      Cost = 3; // Extract + insert + copy.

    // Multiply by the number of parts.
    return Cost * LT.first;
  }

  if (Kind == TTI::SK_Alternate) {
    // 64-bit packed float vectors (v2f32) are widened to type v4f32.
    // 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

    // The backend knows how to generate a single VEX.256 version of
    // instruction VPBLENDW if the target supports AVX2.
    if (ST->hasAVX2() && LT.second == MVT::v16i16)
      return LT.first;

    static const CostTblEntry<MVT::SimpleValueType> AVXAltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v4i64, 1},  // vblendpd
      {ISD::VECTOR_SHUFFLE, MVT::v4f64, 1},  // vblendpd

      {ISD::VECTOR_SHUFFLE, MVT::v8i32, 1},  // vblendps
      {ISD::VECTOR_SHUFFLE, MVT::v8f32, 1},  // vblendps

      // This shuffle is custom lowered into a sequence of:
      //   2x vextractf128 , 2x vpblendw , 1x vinsertf128
      {ISD::VECTOR_SHUFFLE, MVT::v16i16, 5},

      // This shuffle is custom lowered into a long sequence of:
      //   2x vextractf128 , 4x vpshufb , 2x vpor , 1x vinsertf128
      {ISD::VECTOR_SHUFFLE, MVT::v32i8, 9}
    };

    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVXAltShuffleTbl, ISD::VECTOR_SHUFFLE,
                                LT.second);
      if (Idx != -1)
        return LT.first * AVXAltShuffleTbl[Idx].Cost;
    }

    static const CostTblEntry<MVT::SimpleValueType> SSE41AltShuffleTbl[] = {
      // These are lowered into movsd.
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},

      // Packed float vectors with four elements are lowered into BLENDI dag
      // nodes. A v4i32/v4f32 BLENDI generates a single 'blendps'/'blendpd'.
      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},

      // This shuffle generates a single pshufw.
      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},

      // There is no instruction that matches a v16i8 alternate shuffle.
      // The backend will expand it into the sequence 'pshufb + pshufb + or'.
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3}
    };

    if (ST->hasSSE41()) {
      int Idx = CostTableLookup(SSE41AltShuffleTbl, ISD::VECTOR_SHUFFLE,
                                LT.second);
      if (Idx != -1)
        return LT.first * SSE41AltShuffleTbl[Idx].Cost;
    }

    static const CostTblEntry<MVT::SimpleValueType> SSSE3AltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},  // movsd
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},  // movsd

      // SSE3 doesn't have 'blendps'. The following shuffles are expanded into
      // the sequence 'shufps + pshufd'.
      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},

      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 3},  // pshufb + pshufb + or
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3}   // pshufb + pshufb + or
    };

    if (ST->hasSSSE3()) {
      int Idx = CostTableLookup(SSSE3AltShuffleTbl, ISD::VECTOR_SHUFFLE,
                                LT.second);
      if (Idx != -1)
        return LT.first * SSSE3AltShuffleTbl[Idx].Cost;
    }

    static const CostTblEntry<MVT::SimpleValueType> SSEAltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},  // movsd
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},  // movsd

      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},  // shufps + pshufd
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},  // shufps + pshufd

      // This is expanded into a long sequence of four extract + four insert.
      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 8},  // 4 x pextrw + 4 x pinsrw.

      // 8 x (pinsrw + pextrw + and + movb + movzb + or)
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 48}
    };

    // Fall-back (SSE3 and SSE2).
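    // The tables above are checked from the newest ISA down to this baseline,
    // so the first hit always reflects the cheapest available lowering.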
    int Idx = CostTableLookup(SSEAltShuffleTbl, ISD::VECTOR_SHUFFLE,
                              LT.second);
    if (Idx != -1)
      return LT.first * SSEAltShuffleTbl[Idx].Cost;
    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}

int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVX512ConversionTbl[] = {
    { ISD::FP_EXTEND, MVT::v8f64,  MVT::v8f32,  1 },
    { ISD::FP_EXTEND, MVT::v8f64,  MVT::v16f32, 3 },
    { ISD::FP_ROUND,  MVT::v8f32,  MVT::v8f64,  1 },
    { ISD::FP_ROUND,  MVT::v16f32, MVT::v8f64,  3 },

    { ISD::TRUNCATE,  MVT::v16i8,  MVT::v16i32, 1 },
    { ISD::TRUNCATE,  MVT::v16i16, MVT::v16i32, 1 },
    { ISD::TRUNCATE,  MVT::v8i16,  MVT::v8i64,  1 },
    { ISD::TRUNCATE,  MVT::v8i32,  MVT::v8i64,  1 },
    { ISD::TRUNCATE,  MVT::v16i32, MVT::v8i64,  4 },

    // v16i1 -> v16i32 - load + broadcast
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1,  2 },

    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v16i32, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v16i32, 3 },

    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i1,  3 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i8,  2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i1,   4 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i16,  2 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  1 },
  };

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVX2ConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  2 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  2 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  4 },

    { ISD::FP_EXTEND,   MVT::v8f64,  MVT::v8f32,  3 },
    { ISD::FP_ROUND,    MVT::v8f32,  MVT::v8f64,  3 },

    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  8 },
  };

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVXConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  4 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  4 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  4 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  5 },
    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 4 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  9 },

    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   8 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   8 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  5 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i8,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i16,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  1 },

    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   6 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   5 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  5 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  9 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   7 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  6 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   7 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  6 },
    // The generic code to compute the scalar overhead is currently broken.
    // Work around this limitation by estimating the scalarization overhead
    // here. We have roughly 10 instructions per scalar element.
    // Multiply that by the vector width.
    // FIXME: remove that when PR19268 is fixed.
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  2*10 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  4*10 },

    { ISD::FP_TO_SINT,  MVT::v8i8,   MVT::v8f32,  7 },
    { ISD::FP_TO_SINT,  MVT::v4i8,   MVT::v4f32,  1 },
    // This node is expanded into scalarized operations but BasicTTI is
    // overly optimistic estimating its cost. It computes 3 per element (one
    // vector-extract, one scalar conversion and one vector-insert). The
    // problem is that the inserts form a read-modify-write chain so latency
    // should be factored in too. Inflating the cost per element by 1.
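    // (That gives 4 per element: 8 elements * 4 = 32 and 4 elements * 4 = 16
    // below.)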
    { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f32,  8*4 },
    { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f64,  4*4 },
  };

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  SSE2ConvTbl[] = {
    // These are somewhat magic numbers justified by looking at the output of
    // Intel's IACA, running some kernels and making sure when we take
    // legalization into account the throughput will be overestimated.
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64,  2*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32,  4*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16,  8*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64,  2*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32,  4*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16,  8*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    // There are faster sequences for float conversions.
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32,  8 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8,  8 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8,  8 },
  };

  std::pair<int, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
  std::pair<int, MVT> LTDest = TLI->getTypeLegalizationCost(DL, Dst);

  if (ST->hasSSE2() && !ST->hasAVX()) {
    int Idx =
        ConvertCostTableLookup(SSE2ConvTbl, ISD, LTDest.second, LTSrc.second);
    if (Idx != -1)
      return LTSrc.first * SSE2ConvTbl[Idx].Cost;
  }

  if (ST->hasAVX512()) {
    int Idx = ConvertCostTableLookup(AVX512ConversionTbl, ISD, LTDest.second,
                                     LTSrc.second);
    if (Idx != -1)
      return AVX512ConversionTbl[Idx].Cost;
  }

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  // The function getSimpleVT only handles simple value types.
  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);

  if (ST->hasAVX2()) {
    int Idx = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
                                     DstTy.getSimpleVT(), SrcTy.getSimpleVT());
    if (Idx != -1)
      return AVX2ConversionTbl[Idx].Cost;
  }

  if (ST->hasAVX()) {
    int Idx = ConvertCostTableLookup(AVXConversionTbl, ISD, DstTy.getSimpleVT(),
                                     SrcTy.getSimpleVT());
    if (Idx != -1)
      return AVXConversionTbl[Idx].Cost;
  }

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}

int X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTbl[] = {
    { ISD::SETCC, MVT::v2f64, 1 },
    { ISD::SETCC, MVT::v4f32, 1 },
    { ISD::SETCC, MVT::v2i64, 1 },
    { ISD::SETCC, MVT::v4i32, 1 },
    { ISD::SETCC, MVT::v8i16, 1 },
    { ISD::SETCC, MVT::v16i8, 1 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTbl[] = {
    { ISD::SETCC, MVT::v4f64, 1 },
    { ISD::SETCC, MVT::v8f32, 1 },
    // AVX1 does not support 8-wide integer compare.
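    // These compares split into two 128-bit pcmp* ops plus an extract and an
    // insert, hence a cost of roughly 4.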
    { ISD::SETCC, MVT::v4i64,  4 },
    { ISD::SETCC, MVT::v8i32,  4 },
    { ISD::SETCC, MVT::v16i16, 4 },
    { ISD::SETCC, MVT::v32i8,  4 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX2CostTbl[] = {
    { ISD::SETCC, MVT::v4i64,  1 },
    { ISD::SETCC, MVT::v8i32,  1 },
    { ISD::SETCC, MVT::v16i16, 1 },
    { ISD::SETCC, MVT::v32i8,  1 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX512CostTbl[] = {
    { ISD::SETCC, MVT::v8i64,  1 },
    { ISD::SETCC, MVT::v16i32, 1 },
    { ISD::SETCC, MVT::v8f64,  1 },
    { ISD::SETCC, MVT::v16f32, 1 },
  };

  if (ST->hasAVX512()) {
    int Idx = CostTableLookup(AVX512CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX512CostTbl[Idx].Cost;
  }

  if (ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX2CostTbl[Idx].Cost;
  }

  if (ST->hasAVX()) {
    int Idx = CostTableLookup(AVX1CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX1CostTbl[Idx].Cost;
  }

  if (ST->hasSSE42()) {
    int Idx = CostTableLookup(SSE42CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * SSE42CostTbl[Idx].Cost;
  }

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}

int X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Index != -1U) {
    // Legalize the type.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // Floating point scalars are already located in index #0.
    if (Val->getScalarType()->isFloatingPointTy() && Index == 0)
      return 0;
  }

  return BaseT::getVectorInstrCost(Opcode, Val, Index);
}

int X86TTIImpl::getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
  assert(Ty->isVectorTy() && "Can only scalarize vectors");
  int Cost = 0;

  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    if (Insert)
      Cost += getVectorInstrCost(Instruction::InsertElement, Ty, i);
    if (Extract)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }

  return Cost;
}

int X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                unsigned AddressSpace) {
  // Handle non-power-of-two vectors such as <3 x float>.
  if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
    unsigned NumElem = VTy->getVectorNumElements();

    // Handle a few common cases:
    // <3 x float>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
      // Cost = 64 bit store + extract + 32 bit store.
      return 3;

    // <3 x double>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
      // Cost = 128 bit store + unpack + 64 bit store.
      return 3;

    // Assume that all other non-power-of-two numbers are scalarized.
    if (!isPowerOf2_32(NumElem)) {
      int Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(),
                                        Alignment, AddressSpace);
      int SplitCost = getScalarizationOverhead(Src,
                                               Opcode == Instruction::Load,
                                               Opcode == Instruction::Store);
      return NumElem * Cost + SplitCost;
    }
  }

  // Legalize the type.
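  // (The returned pair holds the legalization cost, in practice the number
  // of legal-type parts the value splits into, and the legalized MVT.)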
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  // Each load/store unit costs 1.
  int Cost = LT.first * 1;

  // On Sandybridge 256-bit load/stores are double pumped
  // (but not on Haswell).
  if (LT.second.getSizeInBits() > 128 && !ST->hasAVX2())
    Cost *= 2;

  return Cost;
}

int X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
                                      unsigned Alignment,
                                      unsigned AddressSpace) {
  VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy);
  if (!SrcVTy)
    // For scalars, take the regular memory-op cost, without the mask.
    return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace);

  unsigned NumElem = SrcVTy->getVectorNumElements();
  VectorType *MaskTy =
      VectorType::get(Type::getInt8Ty(getGlobalContext()), NumElem);
  if ((Opcode == Instruction::Load && !isLegalMaskedLoad(SrcVTy, 1)) ||
      (Opcode == Instruction::Store && !isLegalMaskedStore(SrcVTy, 1)) ||
      !isPowerOf2_32(NumElem)) {
    // Scalarization.
    int MaskSplitCost = getScalarizationOverhead(MaskTy, false, true);
    int ScalarCompareCost = getCmpSelInstrCost(
        Instruction::ICmp, Type::getInt8Ty(getGlobalContext()), nullptr);
    int BranchCost = getCFInstrCost(Instruction::Br);
    int MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);

    int ValueSplitCost = getScalarizationOverhead(
        SrcVTy, Opcode == Instruction::Load, Opcode == Instruction::Store);
    int MemopCost =
        NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                         Alignment, AddressSpace);
    return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
  int Cost = 0;
  if (LT.second != TLI->getValueType(DL, SrcVTy).getSimpleVT() &&
      LT.second.getVectorNumElements() == NumElem)
    // Promotion requires an expand/truncate for the data and a shuffle for
    // the mask.
    Cost += getShuffleCost(TTI::SK_Alternate, SrcVTy, 0, nullptr) +
            getShuffleCost(TTI::SK_Alternate, MaskTy, 0, nullptr);

  else if (LT.second.getVectorNumElements() > NumElem) {
    VectorType *NewMaskTy = VectorType::get(MaskTy->getVectorElementType(),
                                            LT.second.getVectorNumElements());
    // Expanding requires filling the mask with zeroes.
    Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, 0, MaskTy);
  }
  if (!ST->hasAVX512())
    return Cost + LT.first * 4; // Each maskmov costs 4.

  // AVX-512 masked load/store is cheaper.
  return Cost + LT.first;
}

int X86TTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where
  // the computation can more often be merged into the index mode. The
  // resulting extra micro-ops can significantly decrease throughput.
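  // For example, a strided access such as a[3 * i] needs explicit address
  // arithmetic per lane, while the scalar form usually folds into an
  // addressing mode.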
  unsigned NumVectorInstToHideOverhead = 10;

  if (Ty->isVectorTy() && IsComplex)
    return NumVectorInstToHideOverhead;

  return BaseT::getAddressComputationCost(Ty, IsComplex);
}

int X86TTIImpl::getReductionCost(unsigned Opcode, Type *ValTy,
                                 bool IsPairwise) {

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use that as the cost.

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD,  MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD,  MVT::v8i16, 5 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblPairWise[] = {
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::FADD, MVT::v4f64, 5 },
    { ISD::FADD, MVT::v8f32, 7 },
    { ISD::ADD,  MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD,  MVT::v4i64, 5 }, // The data reported by the IACA tool is "4.8".
    { ISD::ADD,  MVT::v8i16, 5 },
    { ISD::ADD,  MVT::v8i32, 5 },
  };

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD,  MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3".
    { ISD::ADD,  MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3".
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v4f32, 3 },
    { ISD::FADD, MVT::v4f64, 3 },
    { ISD::FADD, MVT::v8f32, 4 },
    { ISD::ADD,  MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "2.8".
    { ISD::ADD,  MVT::v4i64, 3 },
    { ISD::ADD,  MVT::v8i16, 4 },
    { ISD::ADD,  MVT::v8i32, 5 },
  };

  if (IsPairwise) {
    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVX1CostTblPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * AVX1CostTblPairWise[Idx].Cost;
    }

    if (ST->hasSSE42()) {
      int Idx = CostTableLookup(SSE42CostTblPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * SSE42CostTblPairWise[Idx].Cost;
    }
  } else {
    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * AVX1CostTblNoPairWise[Idx].Cost;
    }

    if (ST->hasSSE42()) {
      int Idx = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * SSE42CostTblNoPairWise[Idx].Cost;
    }
  }

  return BaseT::getReductionCost(Opcode, ValTy, IsPairwise);
}

/// \brief Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
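/// For example, 0 folds into an xor, an imm32 fits a single mov, and a full
/// 64-bit immediate needs the longer movabs encoding, modeled as two basic
/// ops.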
int X86TTIImpl::getIntImmCost(int64_t Val) {
  if (Val == 0)
    return TTI::TCC_Free;

  if (isInt<32>(Val))
    return TTI::TCC_Basic;

  return 2 * TTI::TCC_Basic;
}

int X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Never hoist constants larger than 128 bits, because this might lead to
  // incorrect code generation or assertions in codegen.
  // FIXME: Create a cost model for types larger than i128 once the codegen
  // issues have been fixed.
  if (BitSize > 128)
    return TTI::TCC_Free;

  if (Imm == 0)
    return TTI::TCC_Free;

  // Sign-extend all constants to a multiple of 64 bits.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1, Cost);
}

int X86TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return
  // TCC_Free here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::And:
    // We support 64-bit ANDs with immediates with 32-bits of leading zeroes
    // by using a 32-bit operation with implicit zero extension. Detect such
    // immediates here as the normal path expects bit 31 to be sign extended.
    if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
      return TTI::TCC_Free;
    // Fallthrough
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
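  // (x86 shift instructions encode the shift amount as an imm8 directly, so
  // the constant never needs to be materialized in a register.)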
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    int NumConstants = (BitSize + 63) / 64;
    int Cost = X86TTIImpl::getIntImmCost(Imm, Ty);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<int>(TTI::TCC_Free)
               : Cost;
  }

  return X86TTIImpl::getIntImmCost(Imm, Ty);
}

int X86TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return
  // TCC_Free here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return X86TTIImpl::getIntImmCost(Imm, Ty);
}

bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, int Consecutive) {
  int DataWidth = DataTy->getPrimitiveSizeInBits();

  // TODO: AVX512 allows gather/scatter, which works with strided and random
  // access patterns as well.
  if ((DataWidth < 32) || (Consecutive == 0))
    return false;
  if (ST->hasAVX512() || ST->hasAVX2())
    return true;
  return false;
}

bool X86TTIImpl::isLegalMaskedStore(Type *DataType, int Consecutive) {
  return isLegalMaskedLoad(DataType, Consecutive);
}

bool X86TTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();

  // Treat this as a subset check on subtarget features: the callee may only
  // be inlined if its features are a subset of the caller's.
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // FIXME: This is likely too limiting as it will include subtarget features
  // that we might not care about for inlining, but it is conservatively
  // correct.
  return (CallerBits & CalleeBits) == CalleeBits;
}