//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//
/// About the cost model numbers used below it's necessary to say the following:
/// the numbers correspond to some "generic" X86 CPU rather than to a concrete
/// CPU model. Usually the numbers correspond to the CPU where the feature first
/// appeared. For example, if we do Subtarget.hasSSE42() in the lookups below,
/// the cost is based on Nehalem as that was the first CPU to support that
/// feature level and thus most likely has the worst case cost.
/// Some examples of other technologies/CPUs:
///   SSE 3   - Pentium4 / Athlon64
///   SSE 4.1 - Penryn
///   SSE 4.2 - Nehalem
///   AVX     - Sandy Bridge
///   AVX2    - Haswell
///   AVX-512 - Xeon Phi / Skylake
/// And some examples of instruction target dependent costs (latency):
///                   divss     sqrtss    rsqrtss
///   AMD K7          11-16     19        3
///   Piledriver      9-24      13-15     5
///   Jaguar          14        16        2
///   Pentium II,III  18        30        2
///   Nehalem         7-14      7-18      3
///   Haswell         10-13     11        5
/// TODO: Develop and implement the target dependent cost model and specialize
/// cost numbers for different Cost Model Targets such as throughput, code size,
/// latency and uop count.
//===----------------------------------------------------------------------===//

#include "X86TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"

using namespace llvm;

#define DEBUG_TYPE "x86tti"

//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  //   instructions is inefficient. Once the problem is fixed, we should
  //   call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
}

unsigned X86TTIImpl::getNumberOfRegisters(bool Vector) {
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit()) {
    if (Vector && ST->hasAVX512())
      return 32;
    return 16;
  }
  return 8;
}

unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) {
  if (Vector) {
    if (ST->hasAVX512())
      return 512;
    if (ST->hasAVX())
      return 256;
    if (ST->hasSSE1())
      return 128;
    return 0;
  }

  if (ST->is64Bit())
    return 64;

  return 32;
}

unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // If the loop will not be vectorized, don't interleave the loop.
  // Let the regular unroll pass unroll the loop, which saves the overflow
  // check and memory check cost.
  if (VF == 1)
    return 1;

  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}

int X86TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty,
    TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
    TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo,
    ArrayRef<const Value *> Args) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry SLMCostTable[] = {
    { ISD::MUL,  MVT::v4i32, 11 }, // pmulld
    { ISD::MUL,  MVT::v8i16, 2  }, // pmullw
    { ISD::MUL,  MVT::v16i8, 14 }, // extend/pmullw/trunc sequence.
    { ISD::FMUL, MVT::f64,   2  }, // mulsd
    { ISD::FMUL, MVT::v2f64, 4  }, // mulpd
    { ISD::FMUL, MVT::v4f32, 2  }, // mulps
    { ISD::FDIV, MVT::f32,   17 }, // divss
    { ISD::FDIV, MVT::v4f32, 39 }, // divps
    { ISD::FDIV, MVT::f64,   32 }, // divsd
    { ISD::FDIV, MVT::v2f64, 69 }, // divpd
    { ISD::FADD, MVT::v2f64, 2  }, // addpd
    { ISD::FSUB, MVT::v2f64, 2  }, // subpd
    // v2i64/v4i64 mul is custom lowered as a series of long
    // multiplies(3), shifts(3) and adds(2).
    // slm muldq version throughput is 2
    { ISD::MUL,  MVT::v2i64, 11 },
  };

  if (ST->isSLM()) {
    if (Args.size() == 2 && ISD == ISD::MUL && LT.second == MVT::v4i32) {
      // Check if the operands can be shrunk into a smaller datatype.
      bool Op1Signed = false;
      unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
      bool Op2Signed = false;
      unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);

      bool signedMode = Op1Signed | Op2Signed;
      unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);

      if (OpMinSize <= 7)
        return LT.first * 3; // pmullw/sext
      if (!signedMode && OpMinSize <= 8)
        return LT.first * 3; // pmullw/zext
      if (OpMinSize <= 15)
        return LT.first * 5; // pmullw/pmulhw/pshuf
      if (!signedMode && OpMinSize <= 16)
        return LT.first * 5; // pmullw/pmulhw/pshuf
    }
    if (const auto *Entry = CostTableLookup(SLMCostTable, ISD,
                                            LT.second)) {
      return LT.first * Entry->Cost;
    }
  }

  if (ISD == ISD::SDIV &&
      Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    // On X86, vector signed division by a power-of-two constant is
    // normally expanded to the sequence SRA + SRL + ADD + SRA.
    // The OperandValue properties may not be the same as those of the
    // previous operation; conservatively assume OP_None.
    int Cost = 2 * getArithmeticInstrCost(Instruction::AShr, Ty, Op1Info,
                                          Op2Info, TargetTransformInfo::OP_None,
                                          TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::LShr, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Add, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);

    return Cost;
  }

  static const CostTblEntry AVX512BWUniformConstCostTable[] = {
    { ISD::SHL,  MVT::v64i8,  2 }, // psllw + pand.
    { ISD::SRL,  MVT::v64i8,  2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v64i8,  4 }, // psrlw, pand, pxor, psubb.

    { ISD::SDIV, MVT::v32i16, 6 }, // vpmulhw sequence
    { ISD::UDIV, MVT::v32i16, 6 }, // vpmulhuw sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasBWI()) {
    if (const auto *Entry = CostTableLookup(AVX512BWUniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512UniformConstCostTable[] = {
    { ISD::SRA,  MVT::v2i64,  1 },
    { ISD::SRA,  MVT::v4i64,  1 },
    { ISD::SRA,  MVT::v8i64,  1 },

    { ISD::SDIV, MVT::v16i32, 15 }, // vpmuldq sequence
    { ISD::UDIV, MVT::v16i32, 15 }, // vpmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX512()) {
    if (const auto *Entry = CostTableLookup(AVX512UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX2UniformConstCostTable[] = {
    { ISD::SHL,  MVT::v32i8,  2 }, // psllw + pand.
    { ISD::SRL,  MVT::v32i8,  2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v32i8,  4 }, // psrlw, pand, pxor, psubb.

    { ISD::SRA,  MVT::v4i64,  4 }, // 2 x psrad + shuffle.

    { ISD::SDIV, MVT::v16i16, 6 },  // vpmulhw sequence
    { ISD::UDIV, MVT::v16i16, 6 },  // vpmulhuw sequence
    { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
    { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX2()) {
    if (const auto *Entry = CostTableLookup(AVX2UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformConstCostTable[] = {
    { ISD::SHL,  MVT::v16i8,  2 }, // psllw + pand.
    { ISD::SRL,  MVT::v16i8,  2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v16i8,  4 }, // psrlw, pand, pxor, psubb.

    { ISD::SHL,  MVT::v32i8,  4 }, // 2*(psllw + pand).
    { ISD::SRL,  MVT::v32i8,  4 }, // 2*(psrlw + pand).
    { ISD::SRA,  MVT::v32i8,  8 }, // 2*(psrlw, pand, pxor, psubb).

    { ISD::SDIV, MVT::v16i16, 12 }, // pmulhw sequence
    { ISD::SDIV, MVT::v8i16,  6 },  // pmulhw sequence
    { ISD::UDIV, MVT::v16i16, 12 }, // pmulhuw sequence
    { ISD::UDIV, MVT::v8i16,  6 },  // pmulhuw sequence
    { ISD::SDIV, MVT::v8i32,  38 }, // pmuludq sequence
    { ISD::SDIV, MVT::v4i32,  19 }, // pmuludq sequence
    { ISD::UDIV, MVT::v8i32,  30 }, // pmuludq sequence
    { ISD::UDIV, MVT::v4i32,  15 }, // pmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2()) {
    // pmuldq sequence.
267 if (ISD == ISD::SDIV && LT.second == MVT::v8i32 && ST->hasAVX()) 268 return LT.first * 30; 269 if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41()) 270 return LT.first * 15; 271 272 if (const auto *Entry = CostTableLookup(SSE2UniformConstCostTable, ISD, 273 LT.second)) 274 return LT.first * Entry->Cost; 275 } 276 277 static const CostTblEntry AVX2UniformCostTable[] = { 278 // Uniform splats are cheaper for the following instructions. 279 { ISD::SHL, MVT::v16i16, 1 }, // psllw. 280 { ISD::SRL, MVT::v16i16, 1 }, // psrlw. 281 { ISD::SRA, MVT::v16i16, 1 }, // psraw. 282 }; 283 284 if (ST->hasAVX2() && 285 ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) || 286 (Op2Info == TargetTransformInfo::OK_UniformValue))) { 287 if (const auto *Entry = 288 CostTableLookup(AVX2UniformCostTable, ISD, LT.second)) 289 return LT.first * Entry->Cost; 290 } 291 292 static const CostTblEntry SSE2UniformCostTable[] = { 293 // Uniform splats are cheaper for the following instructions. 294 { ISD::SHL, MVT::v8i16, 1 }, // psllw. 295 { ISD::SHL, MVT::v4i32, 1 }, // pslld 296 { ISD::SHL, MVT::v2i64, 1 }, // psllq. 297 298 { ISD::SRL, MVT::v8i16, 1 }, // psrlw. 299 { ISD::SRL, MVT::v4i32, 1 }, // psrld. 300 { ISD::SRL, MVT::v2i64, 1 }, // psrlq. 301 302 { ISD::SRA, MVT::v8i16, 1 }, // psraw. 303 { ISD::SRA, MVT::v4i32, 1 }, // psrad. 304 }; 305 306 if (ST->hasSSE2() && 307 ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) || 308 (Op2Info == TargetTransformInfo::OK_UniformValue))) { 309 if (const auto *Entry = 310 CostTableLookup(SSE2UniformCostTable, ISD, LT.second)) 311 return LT.first * Entry->Cost; 312 } 313 314 static const CostTblEntry AVX512DQCostTable[] = { 315 { ISD::MUL, MVT::v2i64, 1 }, 316 { ISD::MUL, MVT::v4i64, 1 }, 317 { ISD::MUL, MVT::v8i64, 1 } 318 }; 319 320 // Look for AVX512DQ lowering tricks for custom cases. 321 if (ST->hasDQI()) 322 if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD, LT.second)) 323 return LT.first * Entry->Cost; 324 325 static const CostTblEntry AVX512BWCostTable[] = { 326 { ISD::SHL, MVT::v8i16, 1 }, // vpsllvw 327 { ISD::SRL, MVT::v8i16, 1 }, // vpsrlvw 328 { ISD::SRA, MVT::v8i16, 1 }, // vpsravw 329 330 { ISD::SHL, MVT::v16i16, 1 }, // vpsllvw 331 { ISD::SRL, MVT::v16i16, 1 }, // vpsrlvw 332 { ISD::SRA, MVT::v16i16, 1 }, // vpsravw 333 334 { ISD::SHL, MVT::v32i16, 1 }, // vpsllvw 335 { ISD::SRL, MVT::v32i16, 1 }, // vpsrlvw 336 { ISD::SRA, MVT::v32i16, 1 }, // vpsravw 337 338 { ISD::SHL, MVT::v64i8, 11 }, // vpblendvb sequence. 339 { ISD::SRL, MVT::v64i8, 11 }, // vpblendvb sequence. 340 { ISD::SRA, MVT::v64i8, 24 }, // vpblendvb sequence. 341 342 { ISD::MUL, MVT::v64i8, 11 }, // extend/pmullw/trunc sequence. 343 { ISD::MUL, MVT::v32i8, 4 }, // extend/pmullw/trunc sequence. 344 { ISD::MUL, MVT::v16i8, 4 }, // extend/pmullw/trunc sequence. 345 346 // Vectorizing division is a bad idea. See the SSE2 table for more comments. 347 { ISD::SDIV, MVT::v64i8, 64*20 }, 348 { ISD::SDIV, MVT::v32i16, 32*20 }, 349 { ISD::UDIV, MVT::v64i8, 64*20 }, 350 { ISD::UDIV, MVT::v32i16, 32*20 } 351 }; 352 353 // Look for AVX512BW lowering tricks for custom cases. 
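  // AVX-512BW provides variable per-element word shifts (vpsllvw / vpsrlvw /
  // vpsravw), which is why the v*i16 shift entries in the table above are a
  // single instruction.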
354 if (ST->hasBWI()) 355 if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD, LT.second)) 356 return LT.first * Entry->Cost; 357 358 static const CostTblEntry AVX512CostTable[] = { 359 { ISD::SHL, MVT::v16i32, 1 }, 360 { ISD::SRL, MVT::v16i32, 1 }, 361 { ISD::SRA, MVT::v16i32, 1 }, 362 363 { ISD::SHL, MVT::v8i64, 1 }, 364 { ISD::SRL, MVT::v8i64, 1 }, 365 366 { ISD::SRA, MVT::v2i64, 1 }, 367 { ISD::SRA, MVT::v4i64, 1 }, 368 { ISD::SRA, MVT::v8i64, 1 }, 369 370 { ISD::MUL, MVT::v32i8, 13 }, // extend/pmullw/trunc sequence. 371 { ISD::MUL, MVT::v16i8, 5 }, // extend/pmullw/trunc sequence. 372 { ISD::MUL, MVT::v16i32, 1 }, // pmulld 373 { ISD::MUL, MVT::v8i64, 8 }, // 3*pmuludq/3*shift/2*add 374 375 // Vectorizing division is a bad idea. See the SSE2 table for more comments. 376 { ISD::SDIV, MVT::v16i32, 16*20 }, 377 { ISD::SDIV, MVT::v8i64, 8*20 }, 378 { ISD::UDIV, MVT::v16i32, 16*20 }, 379 { ISD::UDIV, MVT::v8i64, 8*20 } 380 }; 381 382 if (ST->hasAVX512()) 383 if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second)) 384 return LT.first * Entry->Cost; 385 386 static const CostTblEntry AVX2ShiftCostTable[] = { 387 // Shifts on v4i64/v8i32 on AVX2 is legal even though we declare to 388 // customize them to detect the cases where shift amount is a scalar one. 389 { ISD::SHL, MVT::v4i32, 1 }, 390 { ISD::SRL, MVT::v4i32, 1 }, 391 { ISD::SRA, MVT::v4i32, 1 }, 392 { ISD::SHL, MVT::v8i32, 1 }, 393 { ISD::SRL, MVT::v8i32, 1 }, 394 { ISD::SRA, MVT::v8i32, 1 }, 395 { ISD::SHL, MVT::v2i64, 1 }, 396 { ISD::SRL, MVT::v2i64, 1 }, 397 { ISD::SHL, MVT::v4i64, 1 }, 398 { ISD::SRL, MVT::v4i64, 1 }, 399 }; 400 401 // Look for AVX2 lowering tricks. 402 if (ST->hasAVX2()) { 403 if (ISD == ISD::SHL && LT.second == MVT::v16i16 && 404 (Op2Info == TargetTransformInfo::OK_UniformConstantValue || 405 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue)) 406 // On AVX2, a packed v16i16 shift left by a constant build_vector 407 // is lowered into a vector multiply (vpmullw). 408 return LT.first; 409 410 if (const auto *Entry = CostTableLookup(AVX2ShiftCostTable, ISD, LT.second)) 411 return LT.first * Entry->Cost; 412 } 413 414 static const CostTblEntry XOPShiftCostTable[] = { 415 // 128bit shifts take 1cy, but right shifts require negation beforehand. 416 { ISD::SHL, MVT::v16i8, 1 }, 417 { ISD::SRL, MVT::v16i8, 2 }, 418 { ISD::SRA, MVT::v16i8, 2 }, 419 { ISD::SHL, MVT::v8i16, 1 }, 420 { ISD::SRL, MVT::v8i16, 2 }, 421 { ISD::SRA, MVT::v8i16, 2 }, 422 { ISD::SHL, MVT::v4i32, 1 }, 423 { ISD::SRL, MVT::v4i32, 2 }, 424 { ISD::SRA, MVT::v4i32, 2 }, 425 { ISD::SHL, MVT::v2i64, 1 }, 426 { ISD::SRL, MVT::v2i64, 2 }, 427 { ISD::SRA, MVT::v2i64, 2 }, 428 // 256bit shifts require splitting if AVX2 didn't catch them above. 429 { ISD::SHL, MVT::v32i8, 2 }, 430 { ISD::SRL, MVT::v32i8, 4 }, 431 { ISD::SRA, MVT::v32i8, 4 }, 432 { ISD::SHL, MVT::v16i16, 2 }, 433 { ISD::SRL, MVT::v16i16, 4 }, 434 { ISD::SRA, MVT::v16i16, 4 }, 435 { ISD::SHL, MVT::v8i32, 2 }, 436 { ISD::SRL, MVT::v8i32, 4 }, 437 { ISD::SRA, MVT::v8i32, 4 }, 438 { ISD::SHL, MVT::v4i64, 2 }, 439 { ISD::SRL, MVT::v4i64, 4 }, 440 { ISD::SRA, MVT::v4i64, 4 }, 441 }; 442 443 // Look for XOP lowering tricks. 444 if (ST->hasXOP()) 445 if (const auto *Entry = CostTableLookup(XOPShiftCostTable, ISD, LT.second)) 446 return LT.first * Entry->Cost; 447 448 static const CostTblEntry SSE2UniformShiftCostTable[] = { 449 // Uniform splats are cheaper for the following instructions. 450 { ISD::SHL, MVT::v16i16, 2 }, // psllw. 
451 { ISD::SHL, MVT::v8i32, 2 }, // pslld 452 { ISD::SHL, MVT::v4i64, 2 }, // psllq. 453 454 { ISD::SRL, MVT::v16i16, 2 }, // psrlw. 455 { ISD::SRL, MVT::v8i32, 2 }, // psrld. 456 { ISD::SRL, MVT::v4i64, 2 }, // psrlq. 457 458 { ISD::SRA, MVT::v16i16, 2 }, // psraw. 459 { ISD::SRA, MVT::v8i32, 2 }, // psrad. 460 { ISD::SRA, MVT::v2i64, 4 }, // 2 x psrad + shuffle. 461 { ISD::SRA, MVT::v4i64, 8 }, // 2 x psrad + shuffle. 462 }; 463 464 if (ST->hasSSE2() && 465 ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) || 466 (Op2Info == TargetTransformInfo::OK_UniformValue))) { 467 if (const auto *Entry = 468 CostTableLookup(SSE2UniformShiftCostTable, ISD, LT.second)) 469 return LT.first * Entry->Cost; 470 } 471 472 if (ISD == ISD::SHL && 473 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) { 474 MVT VT = LT.second; 475 // Vector shift left by non uniform constant can be lowered 476 // into vector multiply. 477 if (((VT == MVT::v8i16 || VT == MVT::v4i32) && ST->hasSSE2()) || 478 ((VT == MVT::v16i16 || VT == MVT::v8i32) && ST->hasAVX())) 479 ISD = ISD::MUL; 480 } 481 482 static const CostTblEntry AVX2CostTable[] = { 483 { ISD::SHL, MVT::v32i8, 11 }, // vpblendvb sequence. 484 { ISD::SHL, MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence. 485 486 { ISD::SRL, MVT::v32i8, 11 }, // vpblendvb sequence. 487 { ISD::SRL, MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence. 488 489 { ISD::SRA, MVT::v32i8, 24 }, // vpblendvb sequence. 490 { ISD::SRA, MVT::v16i16, 10 }, // extend/vpsravd/pack sequence. 491 { ISD::SRA, MVT::v2i64, 4 }, // srl/xor/sub sequence. 492 { ISD::SRA, MVT::v4i64, 4 }, // srl/xor/sub sequence. 493 494 { ISD::SUB, MVT::v32i8, 1 }, // psubb 495 { ISD::ADD, MVT::v32i8, 1 }, // paddb 496 { ISD::SUB, MVT::v16i16, 1 }, // psubw 497 { ISD::ADD, MVT::v16i16, 1 }, // paddw 498 { ISD::SUB, MVT::v8i32, 1 }, // psubd 499 { ISD::ADD, MVT::v8i32, 1 }, // paddd 500 { ISD::SUB, MVT::v4i64, 1 }, // psubq 501 { ISD::ADD, MVT::v4i64, 1 }, // paddq 502 503 { ISD::MUL, MVT::v32i8, 17 }, // extend/pmullw/trunc sequence. 504 { ISD::MUL, MVT::v16i8, 7 }, // extend/pmullw/trunc sequence. 505 { ISD::MUL, MVT::v16i16, 1 }, // pmullw 506 { ISD::MUL, MVT::v8i32, 1 }, // pmulld 507 { ISD::MUL, MVT::v4i64, 8 }, // 3*pmuludq/3*shift/2*add 508 509 { ISD::FDIV, MVT::f32, 7 }, // Haswell from http://www.agner.org/ 510 { ISD::FDIV, MVT::v4f32, 7 }, // Haswell from http://www.agner.org/ 511 { ISD::FDIV, MVT::v8f32, 14 }, // Haswell from http://www.agner.org/ 512 { ISD::FDIV, MVT::f64, 14 }, // Haswell from http://www.agner.org/ 513 { ISD::FDIV, MVT::v2f64, 14 }, // Haswell from http://www.agner.org/ 514 { ISD::FDIV, MVT::v4f64, 28 }, // Haswell from http://www.agner.org/ 515 }; 516 517 // Look for AVX2 lowering tricks for custom cases. 518 if (ST->hasAVX2()) 519 if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second)) 520 return LT.first * Entry->Cost; 521 522 static const CostTblEntry AVX1CostTable[] = { 523 // We don't have to scalarize unsupported ops. We can issue two half-sized 524 // operations and we only need to extract the upper YMM half. 525 // Two ops + 1 extract + 1 insert = 4. 
526 { ISD::MUL, MVT::v16i16, 4 }, 527 { ISD::MUL, MVT::v8i32, 4 }, 528 { ISD::SUB, MVT::v32i8, 4 }, 529 { ISD::ADD, MVT::v32i8, 4 }, 530 { ISD::SUB, MVT::v16i16, 4 }, 531 { ISD::ADD, MVT::v16i16, 4 }, 532 { ISD::SUB, MVT::v8i32, 4 }, 533 { ISD::ADD, MVT::v8i32, 4 }, 534 { ISD::SUB, MVT::v4i64, 4 }, 535 { ISD::ADD, MVT::v4i64, 4 }, 536 537 // A v4i64 multiply is custom lowered as two split v2i64 vectors that then 538 // are lowered as a series of long multiplies(3), shifts(3) and adds(2) 539 // Because we believe v4i64 to be a legal type, we must also include the 540 // extract+insert in the cost table. Therefore, the cost here is 18 541 // instead of 8. 542 { ISD::MUL, MVT::v4i64, 18 }, 543 544 { ISD::MUL, MVT::v32i8, 26 }, // extend/pmullw/trunc sequence. 545 546 { ISD::FDIV, MVT::f32, 14 }, // SNB from http://www.agner.org/ 547 { ISD::FDIV, MVT::v4f32, 14 }, // SNB from http://www.agner.org/ 548 { ISD::FDIV, MVT::v8f32, 28 }, // SNB from http://www.agner.org/ 549 { ISD::FDIV, MVT::f64, 22 }, // SNB from http://www.agner.org/ 550 { ISD::FDIV, MVT::v2f64, 22 }, // SNB from http://www.agner.org/ 551 { ISD::FDIV, MVT::v4f64, 44 }, // SNB from http://www.agner.org/ 552 553 // Vectorizing division is a bad idea. See the SSE2 table for more comments. 554 { ISD::SDIV, MVT::v32i8, 32*20 }, 555 { ISD::SDIV, MVT::v16i16, 16*20 }, 556 { ISD::SDIV, MVT::v8i32, 8*20 }, 557 { ISD::SDIV, MVT::v4i64, 4*20 }, 558 { ISD::UDIV, MVT::v32i8, 32*20 }, 559 { ISD::UDIV, MVT::v16i16, 16*20 }, 560 { ISD::UDIV, MVT::v8i32, 8*20 }, 561 { ISD::UDIV, MVT::v4i64, 4*20 }, 562 }; 563 564 if (ST->hasAVX()) 565 if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, LT.second)) 566 return LT.first * Entry->Cost; 567 568 static const CostTblEntry SSE42CostTable[] = { 569 { ISD::FDIV, MVT::f32, 14 }, // Nehalem from http://www.agner.org/ 570 { ISD::FDIV, MVT::v4f32, 14 }, // Nehalem from http://www.agner.org/ 571 { ISD::FDIV, MVT::f64, 22 }, // Nehalem from http://www.agner.org/ 572 { ISD::FDIV, MVT::v2f64, 22 }, // Nehalem from http://www.agner.org/ 573 }; 574 575 if (ST->hasSSE42()) 576 if (const auto *Entry = CostTableLookup(SSE42CostTable, ISD, LT.second)) 577 return LT.first * Entry->Cost; 578 579 static const CostTblEntry SSE41CostTable[] = { 580 { ISD::SHL, MVT::v16i8, 11 }, // pblendvb sequence. 581 { ISD::SHL, MVT::v32i8, 2*11 }, // pblendvb sequence. 582 { ISD::SHL, MVT::v8i16, 14 }, // pblendvb sequence. 583 { ISD::SHL, MVT::v16i16, 2*14 }, // pblendvb sequence. 584 { ISD::SHL, MVT::v4i32, 4 }, // pslld/paddd/cvttps2dq/pmulld 585 { ISD::SHL, MVT::v8i32, 2*4 }, // pslld/paddd/cvttps2dq/pmulld 586 587 { ISD::SRL, MVT::v16i8, 12 }, // pblendvb sequence. 588 { ISD::SRL, MVT::v32i8, 2*12 }, // pblendvb sequence. 589 { ISD::SRL, MVT::v8i16, 14 }, // pblendvb sequence. 590 { ISD::SRL, MVT::v16i16, 2*14 }, // pblendvb sequence. 591 { ISD::SRL, MVT::v4i32, 11 }, // Shift each lane + blend. 592 { ISD::SRL, MVT::v8i32, 2*11 }, // Shift each lane + blend. 593 594 { ISD::SRA, MVT::v16i8, 24 }, // pblendvb sequence. 595 { ISD::SRA, MVT::v32i8, 2*24 }, // pblendvb sequence. 596 { ISD::SRA, MVT::v8i16, 14 }, // pblendvb sequence. 597 { ISD::SRA, MVT::v16i16, 2*14 }, // pblendvb sequence. 598 { ISD::SRA, MVT::v4i32, 12 }, // Shift each lane + blend. 599 { ISD::SRA, MVT::v8i32, 2*12 }, // Shift each lane + blend. 

    { ISD::MUL,  MVT::v4i32,  1 }      // pmulld
  };

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    { ISD::SHL,  MVT::v16i8,  26 },   // cmpgtb sequence.
    { ISD::SHL,  MVT::v8i16,  32 },   // cmpgtb sequence.
    { ISD::SHL,  MVT::v4i32,  2*5 },  // We optimized this using mul.
    { ISD::SHL,  MVT::v2i64,  4 },    // splat+shuffle sequence.
    { ISD::SHL,  MVT::v4i64,  2*4 },  // splat+shuffle sequence.

    { ISD::SRL,  MVT::v16i8,  26 },   // cmpgtb sequence.
    { ISD::SRL,  MVT::v8i16,  32 },   // cmpgtb sequence.
    { ISD::SRL,  MVT::v4i32,  16 },   // Shift each lane + blend.
    { ISD::SRL,  MVT::v2i64,  4 },    // splat+shuffle sequence.
    { ISD::SRL,  MVT::v4i64,  2*4 },  // splat+shuffle sequence.

    { ISD::SRA,  MVT::v16i8,  54 },   // unpacked cmpgtb sequence.
    { ISD::SRA,  MVT::v8i16,  32 },   // cmpgtb sequence.
    { ISD::SRA,  MVT::v4i32,  16 },   // Shift each lane + blend.
    { ISD::SRA,  MVT::v2i64,  12 },   // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,  2*12 }, // srl/xor/sub sequence.

    { ISD::MUL,  MVT::v16i8,  12 },   // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v8i16,  1 },    // pmullw
    { ISD::MUL,  MVT::v4i32,  6 },    // 3*pmuludq/4*shuffle
    { ISD::MUL,  MVT::v2i64,  8 },    // 3*pmuludq/3*shift/2*add

    { ISD::FDIV, MVT::f32,    23 },   // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,  39 },   // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::f64,    38 },   // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,  69 },   // Pentium IV from http://www.agner.org/

    // It is not a good idea to vectorize division. We have to scalarize it and
    // in the process we will often end up having to spill regular
    // registers. The overhead of division is going to dominate most kernels
    // anyway so try hard to prevent vectorization of division - it is
    // generally a bad idea. Assume somewhat arbitrarily that we have to be able
    // to hide "20 cycles" for each lane.
    { ISD::SDIV, MVT::v16i8,  16*20 },
    { ISD::SDIV, MVT::v8i16,  8*20 },
    { ISD::SDIV, MVT::v4i32,  4*20 },
    { ISD::SDIV, MVT::v2i64,  2*20 },
    { ISD::UDIV, MVT::v16i8,  16*20 },
    { ISD::UDIV, MVT::v8i16,  8*20 },
    { ISD::UDIV, MVT::v4i32,  4*20 },
    { ISD::UDIV, MVT::v2i64,  2*20 },
  };

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE1CostTable[] = {
    { ISD::FDIV, MVT::f32,   17 }, // Pentium III from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32, 34 }, // Pentium III from http://www.agner.org/
  };

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info);
}

int X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // 64-bit packed float vectors (v2f32) are widened to type v4f32.
  // 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
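  // Type legalization below gives us both the per-register vector type to look
  // up (LT.second) and how many legal registers the shuffle is split across
  // (LT.first).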
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

  // For Broadcasts we are splatting the first element from the first input
  // register, so we only need to reference that input; all of the output
  // registers are the same.
  if (Kind == TTI::SK_Broadcast)
    LT.first = 1;

  // We are going to permute multiple sources and the result will be in multiple
  // destinations. Provide an accurate cost only for splits where the element
  // type remains the same.
  if (Kind == TTI::SK_PermuteSingleSrc && LT.first != 1) {
    MVT LegalVT = LT.second;
    if (LegalVT.getVectorElementType().getSizeInBits() ==
        Tp->getVectorElementType()->getPrimitiveSizeInBits() &&
        LegalVT.getVectorNumElements() < Tp->getVectorNumElements()) {

      unsigned VecTySize = DL.getTypeStoreSize(Tp);
      unsigned LegalVTSize = LegalVT.getStoreSize();
      // Number of source vectors after legalization:
      unsigned NumOfSrcs = (VecTySize + LegalVTSize - 1) / LegalVTSize;
      // Number of destination vectors after legalization:
      unsigned NumOfDests = LT.first;

      Type *SingleOpTy = VectorType::get(Tp->getVectorElementType(),
                                         LegalVT.getVectorNumElements());

      unsigned NumOfShuffles = (NumOfSrcs - 1) * NumOfDests;
      return NumOfShuffles *
             getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy, 0, nullptr);
    }

    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }

  // For 2-input shuffles, we must account for splitting the 2 inputs into many.
  if (Kind == TTI::SK_PermuteTwoSrc && LT.first != 1) {
    // We assume that source and destination have the same vector type.
    int NumOfDests = LT.first;
    int NumOfShufflesPerDest = LT.first * 2 - 1;
    LT.first = NumOfDests * NumOfShufflesPerDest;
  }

  static const CostTblEntry AVX512VBMIShuffleTbl[] = {
    { TTI::SK_Reverse,          MVT::v64i8,  1 }, // vpermb
    { TTI::SK_Reverse,          MVT::v32i8,  1 }, // vpermb

    { TTI::SK_PermuteSingleSrc, MVT::v64i8,  1 }, // vpermb
    { TTI::SK_PermuteSingleSrc, MVT::v32i8,  1 }, // vpermb

    { TTI::SK_PermuteTwoSrc,    MVT::v64i8,  1 }, // vpermt2b
    { TTI::SK_PermuteTwoSrc,    MVT::v32i8,  1 }, // vpermt2b
    { TTI::SK_PermuteTwoSrc,    MVT::v16i8,  1 }  // vpermt2b
  };

  if (ST->hasVBMI())
    if (const auto *Entry =
            CostTableLookup(AVX512VBMIShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512BWShuffleTbl[] = {
    { TTI::SK_Broadcast,        MVT::v32i16, 1 }, // vpbroadcastw
    { TTI::SK_Broadcast,        MVT::v64i8,  1 }, // vpbroadcastb

    { TTI::SK_Reverse,          MVT::v32i16, 1 }, // vpermw
    { TTI::SK_Reverse,          MVT::v16i16, 1 }, // vpermw
    { TTI::SK_Reverse,          MVT::v64i8,  2 }, // pshufb + vshufi64x2

    { TTI::SK_PermuteSingleSrc, MVT::v32i16, 1 }, // vpermw
    { TTI::SK_PermuteSingleSrc, MVT::v16i16, 1 }, // vpermw
    { TTI::SK_PermuteSingleSrc, MVT::v8i16,  1 }, // vpermw
    { TTI::SK_PermuteSingleSrc, MVT::v64i8,  8 }, // extend to v32i16
    { TTI::SK_PermuteSingleSrc, MVT::v32i8,  3 }, // vpermw + zext/trunc

    { TTI::SK_PermuteTwoSrc,    MVT::v32i16, 1 }, // vpermt2w
    { TTI::SK_PermuteTwoSrc,    MVT::v16i16, 1 }, // vpermt2w
    { TTI::SK_PermuteTwoSrc,    MVT::v8i16,  1 }, // vpermt2w
    { TTI::SK_PermuteTwoSrc,    MVT::v32i8,  3 }, // zext + vpermt2w + trunc
    { TTI::SK_PermuteTwoSrc,    MVT::v64i8, 19 }, // 6 * v32i8 + 1
    { TTI::SK_PermuteTwoSrc,    MVT::v16i8,  3 }  // zext + vpermt2w + trunc
  };

  if (ST->hasBWI())
    if (const auto *Entry =
CostTableLookup(AVX512BWShuffleTbl, Kind, LT.second)) 761 return LT.first * Entry->Cost; 762 763 static const CostTblEntry AVX512ShuffleTbl[] = { 764 { TTI::SK_Broadcast, MVT::v8f64, 1 }, // vbroadcastpd 765 { TTI::SK_Broadcast, MVT::v16f32, 1 }, // vbroadcastps 766 { TTI::SK_Broadcast, MVT::v8i64, 1 }, // vpbroadcastq 767 { TTI::SK_Broadcast, MVT::v16i32, 1 }, // vpbroadcastd 768 769 { TTI::SK_Reverse, MVT::v8f64, 1 }, // vpermpd 770 { TTI::SK_Reverse, MVT::v16f32, 1 }, // vpermps 771 { TTI::SK_Reverse, MVT::v8i64, 1 }, // vpermq 772 { TTI::SK_Reverse, MVT::v16i32, 1 }, // vpermd 773 774 { TTI::SK_PermuteSingleSrc, MVT::v8f64, 1 }, // vpermpd 775 { TTI::SK_PermuteSingleSrc, MVT::v4f64, 1 }, // vpermpd 776 { TTI::SK_PermuteSingleSrc, MVT::v2f64, 1 }, // vpermpd 777 { TTI::SK_PermuteSingleSrc, MVT::v16f32, 1 }, // vpermps 778 { TTI::SK_PermuteSingleSrc, MVT::v8f32, 1 }, // vpermps 779 { TTI::SK_PermuteSingleSrc, MVT::v4f32, 1 }, // vpermps 780 { TTI::SK_PermuteSingleSrc, MVT::v8i64, 1 }, // vpermq 781 { TTI::SK_PermuteSingleSrc, MVT::v4i64, 1 }, // vpermq 782 { TTI::SK_PermuteSingleSrc, MVT::v2i64, 1 }, // vpermq 783 { TTI::SK_PermuteSingleSrc, MVT::v16i32, 1 }, // vpermd 784 { TTI::SK_PermuteSingleSrc, MVT::v8i32, 1 }, // vpermd 785 { TTI::SK_PermuteSingleSrc, MVT::v4i32, 1 }, // vpermd 786 { TTI::SK_PermuteSingleSrc, MVT::v16i8, 1 }, // pshufb 787 788 { TTI::SK_PermuteTwoSrc, MVT::v8f64, 1 }, // vpermt2pd 789 { TTI::SK_PermuteTwoSrc, MVT::v16f32, 1 }, // vpermt2ps 790 { TTI::SK_PermuteTwoSrc, MVT::v8i64, 1 }, // vpermt2q 791 { TTI::SK_PermuteTwoSrc, MVT::v16i32, 1 }, // vpermt2d 792 { TTI::SK_PermuteTwoSrc, MVT::v4f64, 1 }, // vpermt2pd 793 { TTI::SK_PermuteTwoSrc, MVT::v8f32, 1 }, // vpermt2ps 794 { TTI::SK_PermuteTwoSrc, MVT::v4i64, 1 }, // vpermt2q 795 { TTI::SK_PermuteTwoSrc, MVT::v8i32, 1 }, // vpermt2d 796 { TTI::SK_PermuteTwoSrc, MVT::v2f64, 1 }, // vpermt2pd 797 { TTI::SK_PermuteTwoSrc, MVT::v4f32, 1 }, // vpermt2ps 798 { TTI::SK_PermuteTwoSrc, MVT::v2i64, 1 }, // vpermt2q 799 { TTI::SK_PermuteTwoSrc, MVT::v4i32, 1 } // vpermt2d 800 }; 801 802 if (ST->hasAVX512()) 803 if (const auto *Entry = CostTableLookup(AVX512ShuffleTbl, Kind, LT.second)) 804 return LT.first * Entry->Cost; 805 806 static const CostTblEntry AVX2ShuffleTbl[] = { 807 { TTI::SK_Broadcast, MVT::v4f64, 1 }, // vbroadcastpd 808 { TTI::SK_Broadcast, MVT::v8f32, 1 }, // vbroadcastps 809 { TTI::SK_Broadcast, MVT::v4i64, 1 }, // vpbroadcastq 810 { TTI::SK_Broadcast, MVT::v8i32, 1 }, // vpbroadcastd 811 { TTI::SK_Broadcast, MVT::v16i16, 1 }, // vpbroadcastw 812 { TTI::SK_Broadcast, MVT::v32i8, 1 }, // vpbroadcastb 813 814 { TTI::SK_Reverse, MVT::v4f64, 1 }, // vpermpd 815 { TTI::SK_Reverse, MVT::v8f32, 1 }, // vpermps 816 { TTI::SK_Reverse, MVT::v4i64, 1 }, // vpermq 817 { TTI::SK_Reverse, MVT::v8i32, 1 }, // vpermd 818 { TTI::SK_Reverse, MVT::v16i16, 2 }, // vperm2i128 + pshufb 819 { TTI::SK_Reverse, MVT::v32i8, 2 }, // vperm2i128 + pshufb 820 821 { TTI::SK_Alternate, MVT::v16i16, 1 }, // vpblendw 822 { TTI::SK_Alternate, MVT::v32i8, 1 } // vpblendvb 823 }; 824 825 if (ST->hasAVX2()) 826 if (const auto *Entry = CostTableLookup(AVX2ShuffleTbl, Kind, LT.second)) 827 return LT.first * Entry->Cost; 828 829 static const CostTblEntry AVX1ShuffleTbl[] = { 830 { TTI::SK_Broadcast, MVT::v4f64, 2 }, // vperm2f128 + vpermilpd 831 { TTI::SK_Broadcast, MVT::v8f32, 2 }, // vperm2f128 + vpermilps 832 { TTI::SK_Broadcast, MVT::v4i64, 2 }, // vperm2f128 + vpermilpd 833 { TTI::SK_Broadcast, MVT::v8i32, 2 }, // vperm2f128 + vpermilps 834 { 
TTI::SK_Broadcast, MVT::v16i16, 3 }, // vpshuflw + vpshufd + vinsertf128 835 { TTI::SK_Broadcast, MVT::v32i8, 2 }, // vpshufb + vinsertf128 836 837 { TTI::SK_Reverse, MVT::v4f64, 2 }, // vperm2f128 + vpermilpd 838 { TTI::SK_Reverse, MVT::v8f32, 2 }, // vperm2f128 + vpermilps 839 { TTI::SK_Reverse, MVT::v4i64, 2 }, // vperm2f128 + vpermilpd 840 { TTI::SK_Reverse, MVT::v8i32, 2 }, // vperm2f128 + vpermilps 841 { TTI::SK_Reverse, MVT::v16i16, 4 }, // vextractf128 + 2*pshufb 842 // + vinsertf128 843 { TTI::SK_Reverse, MVT::v32i8, 4 }, // vextractf128 + 2*pshufb 844 // + vinsertf128 845 846 { TTI::SK_Alternate, MVT::v4i64, 1 }, // vblendpd 847 { TTI::SK_Alternate, MVT::v4f64, 1 }, // vblendpd 848 { TTI::SK_Alternate, MVT::v8i32, 1 }, // vblendps 849 { TTI::SK_Alternate, MVT::v8f32, 1 }, // vblendps 850 { TTI::SK_Alternate, MVT::v16i16, 3 }, // vpand + vpandn + vpor 851 { TTI::SK_Alternate, MVT::v32i8, 3 } // vpand + vpandn + vpor 852 }; 853 854 if (ST->hasAVX()) 855 if (const auto *Entry = CostTableLookup(AVX1ShuffleTbl, Kind, LT.second)) 856 return LT.first * Entry->Cost; 857 858 static const CostTblEntry SSE41ShuffleTbl[] = { 859 { TTI::SK_Alternate, MVT::v2i64, 1 }, // pblendw 860 { TTI::SK_Alternate, MVT::v2f64, 1 }, // movsd 861 { TTI::SK_Alternate, MVT::v4i32, 1 }, // pblendw 862 { TTI::SK_Alternate, MVT::v4f32, 1 }, // blendps 863 { TTI::SK_Alternate, MVT::v8i16, 1 }, // pblendw 864 { TTI::SK_Alternate, MVT::v16i8, 1 } // pblendvb 865 }; 866 867 if (ST->hasSSE41()) 868 if (const auto *Entry = CostTableLookup(SSE41ShuffleTbl, Kind, LT.second)) 869 return LT.first * Entry->Cost; 870 871 static const CostTblEntry SSSE3ShuffleTbl[] = { 872 { TTI::SK_Broadcast, MVT::v8i16, 1 }, // pshufb 873 { TTI::SK_Broadcast, MVT::v16i8, 1 }, // pshufb 874 875 { TTI::SK_Reverse, MVT::v8i16, 1 }, // pshufb 876 { TTI::SK_Reverse, MVT::v16i8, 1 }, // pshufb 877 878 { TTI::SK_Alternate, MVT::v8i16, 3 }, // pshufb + pshufb + por 879 { TTI::SK_Alternate, MVT::v16i8, 3 } // pshufb + pshufb + por 880 }; 881 882 if (ST->hasSSSE3()) 883 if (const auto *Entry = CostTableLookup(SSSE3ShuffleTbl, Kind, LT.second)) 884 return LT.first * Entry->Cost; 885 886 static const CostTblEntry SSE2ShuffleTbl[] = { 887 { TTI::SK_Broadcast, MVT::v2f64, 1 }, // shufpd 888 { TTI::SK_Broadcast, MVT::v2i64, 1 }, // pshufd 889 { TTI::SK_Broadcast, MVT::v4i32, 1 }, // pshufd 890 { TTI::SK_Broadcast, MVT::v8i16, 2 }, // pshuflw + pshufd 891 { TTI::SK_Broadcast, MVT::v16i8, 3 }, // unpck + pshuflw + pshufd 892 893 { TTI::SK_Reverse, MVT::v2f64, 1 }, // shufpd 894 { TTI::SK_Reverse, MVT::v2i64, 1 }, // pshufd 895 { TTI::SK_Reverse, MVT::v4i32, 1 }, // pshufd 896 { TTI::SK_Reverse, MVT::v8i16, 3 }, // pshuflw + pshufhw + pshufd 897 { TTI::SK_Reverse, MVT::v16i8, 9 }, // 2*pshuflw + 2*pshufhw 898 // + 2*pshufd + 2*unpck + packus 899 900 { TTI::SK_Alternate, MVT::v2i64, 1 }, // movsd 901 { TTI::SK_Alternate, MVT::v2f64, 1 }, // movsd 902 { TTI::SK_Alternate, MVT::v4i32, 2 }, // 2*shufps 903 { TTI::SK_Alternate, MVT::v8i16, 3 }, // pand + pandn + por 904 { TTI::SK_Alternate, MVT::v16i8, 3 } // pand + pandn + por 905 }; 906 907 if (ST->hasSSE2()) 908 if (const auto *Entry = CostTableLookup(SSE2ShuffleTbl, Kind, LT.second)) 909 return LT.first * Entry->Cost; 910 911 static const CostTblEntry SSE1ShuffleTbl[] = { 912 { TTI::SK_Broadcast, MVT::v4f32, 1 }, // shufps 913 { TTI::SK_Reverse, MVT::v4f32, 1 }, // shufps 914 { TTI::SK_Alternate, MVT::v4f32, 2 } // 2*shufps 915 }; 916 917 if (ST->hasSSE1()) 918 if (const auto *Entry = 
CostTableLookup(SSE1ShuffleTbl, Kind, LT.second)) 919 return LT.first * Entry->Cost; 920 921 return BaseT::getShuffleCost(Kind, Tp, Index, SubTp); 922 } 923 924 int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) { 925 int ISD = TLI->InstructionOpcodeToISD(Opcode); 926 assert(ISD && "Invalid opcode"); 927 928 // FIXME: Need a better design of the cost table to handle non-simple types of 929 // potential massive combinations (elem_num x src_type x dst_type). 930 931 static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = { 932 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 }, 933 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 }, 934 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 }, 935 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 }, 936 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 }, 937 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 }, 938 939 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 }, 940 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 }, 941 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 }, 942 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 }, 943 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 }, 944 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 }, 945 946 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 1 }, 947 { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f32, 1 }, 948 { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f32, 1 }, 949 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 }, 950 { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f64, 1 }, 951 { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f64, 1 }, 952 953 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 1 }, 954 { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f32, 1 }, 955 { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f32, 1 }, 956 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 }, 957 { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f64, 1 }, 958 { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f64, 1 }, 959 }; 960 961 // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and 962 // 256-bit wide vectors. 
963 964 static const TypeConversionCostTblEntry AVX512FConversionTbl[] = { 965 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 1 }, 966 { ISD::FP_EXTEND, MVT::v8f64, MVT::v16f32, 3 }, 967 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 1 }, 968 969 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 1 }, 970 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 1 }, 971 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i64, 1 }, 972 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 1 }, 973 974 // v16i1 -> v16i32 - load + broadcast 975 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1, 2 }, 976 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1, 2 }, 977 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 1 }, 978 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 1 }, 979 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 }, 980 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 }, 981 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 1 }, 982 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 1 }, 983 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i32, 1 }, 984 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i32, 1 }, 985 986 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 }, 987 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 }, 988 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i8, 2 }, 989 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 2 }, 990 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 }, 991 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 }, 992 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 }, 993 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 }, 994 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 26 }, 995 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 26 }, 996 997 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 }, 998 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 }, 999 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 2 }, 1000 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 }, 1001 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 2 }, 1002 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i8, 2 }, 1003 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 2 }, 1004 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 5 }, 1005 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 }, 1006 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 }, 1007 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 }, 1008 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 }, 1009 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 2 }, 1010 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 1 }, 1011 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 }, 1012 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 }, 1013 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 }, 1014 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 }, 1015 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 }, 1016 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 5 }, 1017 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 }, 1018 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 12 }, 1019 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 26 }, 1020 1021 { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 }, 1022 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 }, 1023 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 1 }, 1024 { ISD::FP_TO_UINT, MVT::v16i32, MVT::v16f32, 1 }, 1025 }; 1026 1027 static const TypeConversionCostTblEntry AVX2ConversionTbl[] = { 1028 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 3 }, 1029 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 3 }, 1030 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 3 }, 1031 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 3 }, 1032 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 3 }, 1033 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 3 }, 1034 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 }, 1035 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 }, 1036 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 }, 
1037 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 }, 1038 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, 1039 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, 1040 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 }, 1041 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 }, 1042 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 1 }, 1043 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1 }, 1044 1045 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 2 }, 1046 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 2 }, 1047 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 2 }, 1048 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 2 }, 1049 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 2 }, 1050 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 4 }, 1051 1052 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 3 }, 1053 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 3 }, 1054 1055 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 8 }, 1056 }; 1057 1058 static const TypeConversionCostTblEntry AVXConversionTbl[] = { 1059 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 6 }, 1060 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 4 }, 1061 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 7 }, 1062 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 4 }, 1063 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 6 }, 1064 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 4 }, 1065 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 7 }, 1066 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 4 }, 1067 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 }, 1068 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 4 }, 1069 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 6 }, 1070 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, 1071 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 }, 1072 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 4 }, 1073 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 4 }, 1074 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 4 }, 1075 1076 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 4 }, 1077 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 }, 1078 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 }, 1079 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 4 }, 1080 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 4 }, 1081 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 4 }, 1082 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 9 }, 1083 1084 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 }, 1085 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i1, 3 }, 1086 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i1, 8 }, 1087 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 }, 1088 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i8, 3 }, 1089 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 8 }, 1090 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 3 }, 1091 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i16, 3 }, 1092 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 }, 1093 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 }, 1094 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 }, 1095 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 }, 1096 1097 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 7 }, 1098 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i1, 7 }, 1099 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i1, 6 }, 1100 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 2 }, 1101 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 }, 1102 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 5 }, 1103 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 }, 1104 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 }, 1105 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 }, 1106 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 6 }, 1107 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 6 }, 1108 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 6 }, 1109 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 9 }, 1110 // The generic code to compute the scalar overhead is currently broken. 
1111 // Workaround this limitation by estimating the scalarization overhead 1112 // here. We have roughly 10 instructions per scalar element. 1113 // Multiply that by the vector width. 1114 // FIXME: remove that when PR19268 is fixed. 1115 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 10 }, 1116 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 20 }, 1117 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 13 }, 1118 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 13 }, 1119 1120 { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 1 }, 1121 { ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f32, 7 }, 1122 // This node is expanded into scalarized operations but BasicTTI is overly 1123 // optimistic estimating its cost. It computes 3 per element (one 1124 // vector-extract, one scalar conversion and one vector-insert). The 1125 // problem is that the inserts form a read-modify-write chain so latency 1126 // should be factored in too. Inflating the cost per element by 1. 1127 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 8*4 }, 1128 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 4*4 }, 1129 1130 { ISD::FP_EXTEND, MVT::v4f64, MVT::v4f32, 1 }, 1131 { ISD::FP_ROUND, MVT::v4f32, MVT::v4f64, 1 }, 1132 }; 1133 1134 static const TypeConversionCostTblEntry SSE41ConversionTbl[] = { 1135 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 2 }, 1136 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 2 }, 1137 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 2 }, 1138 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 2 }, 1139 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 2 }, 1140 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 2 }, 1141 1142 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i8, 1 }, 1143 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i8, 2 }, 1144 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 1 }, 1145 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 1 }, 1146 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 }, 1147 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 }, 1148 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 2 }, 1149 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 2 }, 1150 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 }, 1151 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 }, 1152 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 4 }, 1153 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 4 }, 1154 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 }, 1155 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 }, 1156 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 2 }, 1157 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 2 }, 1158 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 4 }, 1159 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 4 }, 1160 1161 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 2 }, 1162 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 1 }, 1163 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 1 }, 1164 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 1 }, 1165 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 3 }, 1166 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 3 }, 1167 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 6 }, 1168 1169 }; 1170 1171 static const TypeConversionCostTblEntry SSE2ConversionTbl[] = { 1172 // These are somewhat magic numbers justified by looking at the output of 1173 // Intel's IACA, running some kernels and making sure when we take 1174 // legalization into account the throughput will be overestimated. 
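  // The N*10 entries below appear to model fully scalarized conversions,
  // roughly ten instructions per scalar element (as with the AVX workaround
  // above).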
1175 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 }, 1176 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 }, 1177 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 }, 1178 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 }, 1179 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 }, 1180 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 }, 1181 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 }, 1182 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 }, 1183 1184 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 }, 1185 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 }, 1186 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 }, 1187 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 }, 1188 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 }, 1189 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 8 }, 1190 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 }, 1191 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 }, 1192 1193 { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 3 }, 1194 1195 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i8, 1 }, 1196 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i8, 6 }, 1197 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 }, 1198 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 3 }, 1199 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 4 }, 1200 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 8 }, 1201 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 }, 1202 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 2 }, 1203 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 6 }, 1204 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 6 }, 1205 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 3 }, 1206 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 }, 1207 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 9 }, 1208 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 12 }, 1209 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 }, 1210 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 2 }, 1211 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, 1212 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 10 }, 1213 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 3 }, 1214 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 }, 1215 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 6 }, 1216 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 8 }, 1217 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 3 }, 1218 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 5 }, 1219 1220 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 4 }, 1221 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 2 }, 1222 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 }, 1223 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 3 }, 1224 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 3 }, 1225 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 }, 1226 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 7 }, 1227 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 }, 1228 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 10 }, 1229 }; 1230 1231 std::pair<int, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src); 1232 std::pair<int, MVT> LTDest = TLI->getTypeLegalizationCost(DL, Dst); 1233 1234 if (ST->hasSSE2() && !ST->hasAVX()) { 1235 if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD, 1236 LTDest.second, LTSrc.second)) 1237 return LTSrc.first * Entry->Cost; 1238 } 1239 1240 EVT SrcTy = TLI->getValueType(DL, Src); 1241 EVT DstTy = TLI->getValueType(DL, Dst); 1242 1243 // The function getSimpleVT only handles simple value types. 
1244 if (!SrcTy.isSimple() || !DstTy.isSimple()) 1245 return BaseT::getCastInstrCost(Opcode, Dst, Src); 1246 1247 if (ST->hasDQI()) 1248 if (const auto *Entry = ConvertCostTableLookup(AVX512DQConversionTbl, ISD, 1249 DstTy.getSimpleVT(), 1250 SrcTy.getSimpleVT())) 1251 return Entry->Cost; 1252 1253 if (ST->hasAVX512()) 1254 if (const auto *Entry = ConvertCostTableLookup(AVX512FConversionTbl, ISD, 1255 DstTy.getSimpleVT(), 1256 SrcTy.getSimpleVT())) 1257 return Entry->Cost; 1258 1259 if (ST->hasAVX2()) { 1260 if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD, 1261 DstTy.getSimpleVT(), 1262 SrcTy.getSimpleVT())) 1263 return Entry->Cost; 1264 } 1265 1266 if (ST->hasAVX()) { 1267 if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD, 1268 DstTy.getSimpleVT(), 1269 SrcTy.getSimpleVT())) 1270 return Entry->Cost; 1271 } 1272 1273 if (ST->hasSSE41()) { 1274 if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD, 1275 DstTy.getSimpleVT(), 1276 SrcTy.getSimpleVT())) 1277 return Entry->Cost; 1278 } 1279 1280 if (ST->hasSSE2()) { 1281 if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD, 1282 DstTy.getSimpleVT(), 1283 SrcTy.getSimpleVT())) 1284 return Entry->Cost; 1285 } 1286 1287 return BaseT::getCastInstrCost(Opcode, Dst, Src); 1288 } 1289 1290 int X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) { 1291 // Legalize the type. 1292 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy); 1293 1294 MVT MTy = LT.second; 1295 1296 int ISD = TLI->InstructionOpcodeToISD(Opcode); 1297 assert(ISD && "Invalid opcode"); 1298 1299 static const CostTblEntry SSE2CostTbl[] = { 1300 { ISD::SETCC, MVT::v2i64, 8 }, 1301 { ISD::SETCC, MVT::v4i32, 1 }, 1302 { ISD::SETCC, MVT::v8i16, 1 }, 1303 { ISD::SETCC, MVT::v16i8, 1 }, 1304 }; 1305 1306 static const CostTblEntry SSE42CostTbl[] = { 1307 { ISD::SETCC, MVT::v2f64, 1 }, 1308 { ISD::SETCC, MVT::v4f32, 1 }, 1309 { ISD::SETCC, MVT::v2i64, 1 }, 1310 }; 1311 1312 static const CostTblEntry AVX1CostTbl[] = { 1313 { ISD::SETCC, MVT::v4f64, 1 }, 1314 { ISD::SETCC, MVT::v8f32, 1 }, 1315 // AVX1 does not support 8-wide integer compare. 
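    // A 256-bit integer compare is therefore split into two 128-bit compares
    // plus an extract/insert, hence the cost of 4.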
1316 { ISD::SETCC, MVT::v4i64, 4 }, 1317 { ISD::SETCC, MVT::v8i32, 4 }, 1318 { ISD::SETCC, MVT::v16i16, 4 }, 1319 { ISD::SETCC, MVT::v32i8, 4 }, 1320 }; 1321 1322 static const CostTblEntry AVX2CostTbl[] = { 1323 { ISD::SETCC, MVT::v4i64, 1 }, 1324 { ISD::SETCC, MVT::v8i32, 1 }, 1325 { ISD::SETCC, MVT::v16i16, 1 }, 1326 { ISD::SETCC, MVT::v32i8, 1 }, 1327 }; 1328 1329 static const CostTblEntry AVX512CostTbl[] = { 1330 { ISD::SETCC, MVT::v8i64, 1 }, 1331 { ISD::SETCC, MVT::v16i32, 1 }, 1332 { ISD::SETCC, MVT::v8f64, 1 }, 1333 { ISD::SETCC, MVT::v16f32, 1 }, 1334 }; 1335 1336 if (ST->hasAVX512()) 1337 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy)) 1338 return LT.first * Entry->Cost; 1339 1340 if (ST->hasAVX2()) 1341 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy)) 1342 return LT.first * Entry->Cost; 1343 1344 if (ST->hasAVX()) 1345 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy)) 1346 return LT.first * Entry->Cost; 1347 1348 if (ST->hasSSE42()) 1349 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy)) 1350 return LT.first * Entry->Cost; 1351 1352 if (ST->hasSSE2()) 1353 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy)) 1354 return LT.first * Entry->Cost; 1355 1356 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy); 1357 } 1358 1359 int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy, 1360 ArrayRef<Type *> Tys, FastMathFlags FMF) { 1361 // Costs should match the codegen from: 1362 // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll 1363 // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll 1364 // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll 1365 // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll 1366 // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll 1367 static const CostTblEntry XOPCostTbl[] = { 1368 { ISD::BITREVERSE, MVT::v4i64, 4 }, 1369 { ISD::BITREVERSE, MVT::v8i32, 4 }, 1370 { ISD::BITREVERSE, MVT::v16i16, 4 }, 1371 { ISD::BITREVERSE, MVT::v32i8, 4 }, 1372 { ISD::BITREVERSE, MVT::v2i64, 1 }, 1373 { ISD::BITREVERSE, MVT::v4i32, 1 }, 1374 { ISD::BITREVERSE, MVT::v8i16, 1 }, 1375 { ISD::BITREVERSE, MVT::v16i8, 1 }, 1376 { ISD::BITREVERSE, MVT::i64, 3 }, 1377 { ISD::BITREVERSE, MVT::i32, 3 }, 1378 { ISD::BITREVERSE, MVT::i16, 3 }, 1379 { ISD::BITREVERSE, MVT::i8, 3 } 1380 }; 1381 static const CostTblEntry AVX2CostTbl[] = { 1382 { ISD::BITREVERSE, MVT::v4i64, 5 }, 1383 { ISD::BITREVERSE, MVT::v8i32, 5 }, 1384 { ISD::BITREVERSE, MVT::v16i16, 5 }, 1385 { ISD::BITREVERSE, MVT::v32i8, 5 }, 1386 { ISD::BSWAP, MVT::v4i64, 1 }, 1387 { ISD::BSWAP, MVT::v8i32, 1 }, 1388 { ISD::BSWAP, MVT::v16i16, 1 }, 1389 { ISD::CTLZ, MVT::v4i64, 23 }, 1390 { ISD::CTLZ, MVT::v8i32, 18 }, 1391 { ISD::CTLZ, MVT::v16i16, 14 }, 1392 { ISD::CTLZ, MVT::v32i8, 9 }, 1393 { ISD::CTPOP, MVT::v4i64, 7 }, 1394 { ISD::CTPOP, MVT::v8i32, 11 }, 1395 { ISD::CTPOP, MVT::v16i16, 9 }, 1396 { ISD::CTPOP, MVT::v32i8, 6 }, 1397 { ISD::CTTZ, MVT::v4i64, 10 }, 1398 { ISD::CTTZ, MVT::v8i32, 14 }, 1399 { ISD::CTTZ, MVT::v16i16, 12 }, 1400 { ISD::CTTZ, MVT::v32i8, 9 }, 1401 { ISD::FSQRT, MVT::f32, 7 }, // Haswell from http://www.agner.org/ 1402 { ISD::FSQRT, MVT::v4f32, 7 }, // Haswell from http://www.agner.org/ 1403 { ISD::FSQRT, MVT::v8f32, 14 }, // Haswell from http://www.agner.org/ 1404 { ISD::FSQRT, MVT::f64, 14 }, // Haswell from http://www.agner.org/ 1405 { ISD::FSQRT, MVT::v2f64, 14 }, // Haswell from http://www.agner.org/ 1406 { ISD::FSQRT, MVT::v4f64, 28 }, // Haswell from http://www.agner.org/ 1407 }; 1408 static const CostTblEntry 
    { ISD::BITREVERSE, MVT::v4i64,  10 },
    { ISD::BITREVERSE, MVT::v8i32,  10 },
    { ISD::BITREVERSE, MVT::v16i16, 10 },
    { ISD::BITREVERSE, MVT::v32i8,  10 },
    { ISD::BSWAP,      MVT::v4i64,   4 },
    { ISD::BSWAP,      MVT::v8i32,   4 },
    { ISD::BSWAP,      MVT::v16i16,  4 },
    { ISD::CTLZ,       MVT::v4i64,  46 },
    { ISD::CTLZ,       MVT::v8i32,  36 },
    { ISD::CTLZ,       MVT::v16i16, 28 },
    { ISD::CTLZ,       MVT::v32i8,  18 },
    { ISD::CTPOP,      MVT::v4i64,  14 },
    { ISD::CTPOP,      MVT::v8i32,  22 },
    { ISD::CTPOP,      MVT::v16i16, 18 },
    { ISD::CTPOP,      MVT::v32i8,  12 },
    { ISD::CTTZ,       MVT::v4i64,  20 },
    { ISD::CTTZ,       MVT::v8i32,  28 },
    { ISD::CTTZ,       MVT::v16i16, 24 },
    { ISD::CTTZ,       MVT::v32i8,  18 },
    { ISD::FSQRT,      MVT::f32,    14 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::v4f32,  14 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::v8f32,  28 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::f64,    21 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::v2f64,  21 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::v4f64,  43 }, // SNB from http://www.agner.org/
  };
  static const CostTblEntry SSE42CostTbl[] = {
    { ISD::FSQRT, MVT::f32,   18 }, // Nehalem from http://www.agner.org/
    { ISD::FSQRT, MVT::v4f32, 18 }, // Nehalem from http://www.agner.org/
  };
  static const CostTblEntry SSSE3CostTbl[] = {
    { ISD::BITREVERSE, MVT::v2i64,   5 },
    { ISD::BITREVERSE, MVT::v4i32,   5 },
    { ISD::BITREVERSE, MVT::v8i16,   5 },
    { ISD::BITREVERSE, MVT::v16i8,   5 },
    { ISD::BSWAP,      MVT::v2i64,   1 },
    { ISD::BSWAP,      MVT::v4i32,   1 },
    { ISD::BSWAP,      MVT::v8i16,   1 },
    { ISD::CTLZ,       MVT::v2i64,  23 },
    { ISD::CTLZ,       MVT::v4i32,  18 },
    { ISD::CTLZ,       MVT::v8i16,  14 },
    { ISD::CTLZ,       MVT::v16i8,   9 },
    { ISD::CTPOP,      MVT::v2i64,   7 },
    { ISD::CTPOP,      MVT::v4i32,  11 },
    { ISD::CTPOP,      MVT::v8i16,   9 },
    { ISD::CTPOP,      MVT::v16i8,   6 },
    { ISD::CTTZ,       MVT::v2i64,  10 },
    { ISD::CTTZ,       MVT::v4i32,  14 },
    { ISD::CTTZ,       MVT::v8i16,  12 },
    { ISD::CTTZ,       MVT::v16i8,   9 }
  };
  static const CostTblEntry SSE2CostTbl[] = {
    { ISD::BSWAP, MVT::v2i64,   7 },
    { ISD::BSWAP, MVT::v4i32,   7 },
    { ISD::BSWAP, MVT::v8i16,   7 },
    { ISD::CTLZ,  MVT::v2i64,  25 },
    { ISD::CTLZ,  MVT::v4i32,  26 },
    { ISD::CTLZ,  MVT::v8i16,  20 },
    { ISD::CTLZ,  MVT::v16i8,  17 },
    { ISD::CTPOP, MVT::v2i64,  12 },
    { ISD::CTPOP, MVT::v4i32,  15 },
    { ISD::CTPOP, MVT::v8i16,  13 },
    { ISD::CTPOP, MVT::v16i8,  10 },
    { ISD::CTTZ,  MVT::v2i64,  14 },
    { ISD::CTTZ,  MVT::v4i32,  18 },
    { ISD::CTTZ,  MVT::v8i16,  16 },
    { ISD::CTTZ,  MVT::v16i8,  13 },
    { ISD::FSQRT, MVT::f64,    32 }, // Nehalem from http://www.agner.org/
    { ISD::FSQRT, MVT::v2f64,  32 }, // Nehalem from http://www.agner.org/
  };
  static const CostTblEntry SSE1CostTbl[] = {
    { ISD::FSQRT, MVT::f32,   28 }, // Pentium III from http://www.agner.org/
    { ISD::FSQRT, MVT::v4f32, 56 }, // Pentium III from http://www.agner.org/
  };

  unsigned ISD = ISD::DELETED_NODE;
  switch (IID) {
  default:
    break;
  case Intrinsic::bitreverse:
    ISD = ISD::BITREVERSE;
    break;
  case Intrinsic::bswap:
    ISD = ISD::BSWAP;
    break;
  case Intrinsic::ctlz:
    ISD = ISD::CTLZ;
    break;
  case Intrinsic::ctpop:
    ISD = ISD::CTPOP;
    break;
  case Intrinsic::cttz:
    ISD = ISD::CTTZ;
    break;
  case Intrinsic::sqrt:
    ISD = ISD::FSQRT;
    break;
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);
  MVT MTy = LT.second;

  // Attempt to lookup cost.
  if (ST->hasXOP())
    if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSSE3())
    if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  return BaseT::getIntrinsicInstrCost(IID, RetTy, Tys, FMF);
}

int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                      ArrayRef<Value *> Args,
                                      FastMathFlags FMF) {
  return BaseT::getIntrinsicInstrCost(IID, RetTy, Args, FMF);
}

int X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  Type *ScalarType = Val->getScalarType();

  if (Index != -1U) {
    // Legalize the type.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // Floating point scalars are already located in index #0.
    if (ScalarType->isFloatingPointTy() && Index == 0)
      return 0;
  }

  // Add to the base cost if we know that the extracted element of a vector is
  // destined to be moved to and used in the integer register file.
  int RegisterFileMoveCost = 0;
  if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy())
    RegisterFileMoveCost = 1;

  return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost;
}

int X86TTIImpl::getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
  assert(Ty->isVectorTy() && "Can only scalarize vectors");
  int Cost = 0;

  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    if (Insert)
      Cost += getVectorInstrCost(Instruction::InsertElement, Ty, i);
    if (Extract)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }

  return Cost;
}

int X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                unsigned AddressSpace) {
  // Handle non-power-of-two vectors such as <3 x float>.
  if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
    unsigned NumElem = VTy->getVectorNumElements();

    // Handle a few common cases:
    // <3 x float>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
      // Cost = 64 bit store + extract + 32 bit store.
      return 3;

    // <3 x double>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
      // Cost = 128 bit store + unpack + 64 bit store.
      return 3;

    // Assume that all other non-power-of-two numbers are scalarized.
    if (!isPowerOf2_32(NumElem)) {
      int Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(), Alignment,
                                        AddressSpace);
      int SplitCost = getScalarizationOverhead(Src, Opcode == Instruction::Load,
                                               Opcode == Instruction::Store);
      return NumElem * Cost + SplitCost;
    }
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  // Each load/store unit costs 1.
  int Cost = LT.first * 1;

  // This isn't exactly right. We're using slow unaligned 32-byte accesses as a
  // proxy for a double-pumped AVX memory interface such as on Sandybridge.
  if (LT.second.getStoreSize() == 32 && ST->isUnalignedMem32Slow())
    Cost *= 2;

  return Cost;
}

int X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
                                      unsigned Alignment,
                                      unsigned AddressSpace) {
  VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy);
  if (!SrcVTy)
    // For the scalar case, take the regular memory-op cost without a mask.
    return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace);

  unsigned NumElem = SrcVTy->getVectorNumElements();
  VectorType *MaskTy =
      VectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem);
  if ((Opcode == Instruction::Load && !isLegalMaskedLoad(SrcVTy)) ||
      (Opcode == Instruction::Store && !isLegalMaskedStore(SrcVTy)) ||
      !isPowerOf2_32(NumElem)) {
    // Scalarization.
    int MaskSplitCost = getScalarizationOverhead(MaskTy, false, true);
    int ScalarCompareCost = getCmpSelInstrCost(
        Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr);
    int BranchCost = getCFInstrCost(Instruction::Br);
    int MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);

    int ValueSplitCost = getScalarizationOverhead(
        SrcVTy, Opcode == Instruction::Load, Opcode == Instruction::Store);
    int MemopCost =
        NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                         Alignment, AddressSpace);
    return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
  auto VT = TLI->getValueType(DL, SrcVTy);
  int Cost = 0;
  if (VT.isSimple() && LT.second != VT.getSimpleVT() &&
      LT.second.getVectorNumElements() == NumElem)
    // Promotion requires an expand/truncate for the data and a shuffle for the
    // mask.
    Cost += getShuffleCost(TTI::SK_Alternate, SrcVTy, 0, nullptr) +
            getShuffleCost(TTI::SK_Alternate, MaskTy, 0, nullptr);

  else if (LT.second.getVectorNumElements() > NumElem) {
    VectorType *NewMaskTy = VectorType::get(MaskTy->getVectorElementType(),
                                            LT.second.getVectorNumElements());
    // Expanding requires filling the mask with zeroes.
    Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, 0, MaskTy);
  }
  if (!ST->hasAVX512())
    return Cost + LT.first * 4; // Each maskmov costs 4.

  // AVX-512 masked load/store is cheaper.
  return Cost + LT.first;
}

int X86TTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
                                          const SCEV *Ptr) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  // The cost of a strided access computation is hidden by the addressing
  // modes of X86 regardless of the stride value. We don't believe there is a
  // difference between a constant strided access in general and a constant
  // stride whose value is less than or equal to 64.
  // Even in the case of a (loop-invariant) stride whose value is not known at
  // compile time, the address computation will not incur more than one extra
  // ADD instruction.
  if (Ty->isVectorTy() && SE) {
    if (!BaseT::isStridedAccess(Ptr))
      return NumVectorInstToHideOverhead;
    if (!BaseT::getConstantStrideStep(SE, Ptr))
      return 1;
  }

  return BaseT::getAddressComputationCost(Ty, SE, Ptr);
}

int X86TTIImpl::getReductionCost(unsigned Opcode, Type *ValTy,
                                 bool IsPairwise) {

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use it as the cost.

  static const CostTblEntry SSE42CostTblPairWise[] = {
    { ISD::FADD,  MVT::v2f64,   2 },
    { ISD::FADD,  MVT::v4f32,   4 },
    { ISD::ADD,   MVT::v2i64,   2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,   MVT::v4i32,   3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD,   MVT::v8i16,   5 },
  };

  static const CostTblEntry AVX1CostTblPairWise[] = {
    { ISD::FADD,  MVT::v4f32,   4 },
    { ISD::FADD,  MVT::v4f64,   5 },
    { ISD::FADD,  MVT::v8f32,   7 },
    { ISD::ADD,   MVT::v2i64,   1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD,   MVT::v4i32,   3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD,   MVT::v4i64,   5 }, // The data reported by the IACA tool is "4.8".
    { ISD::ADD,   MVT::v8i16,   5 },
    { ISD::ADD,   MVT::v8i32,   5 },
  };

  static const CostTblEntry SSE42CostTblNoPairWise[] = {
    { ISD::FADD,  MVT::v2f64,   2 },
    { ISD::FADD,  MVT::v4f32,   4 },
    { ISD::ADD,   MVT::v2i64,   2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,   MVT::v4i32,   3 }, // The data reported by the IACA tool is "3.3".
    { ISD::ADD,   MVT::v8i16,   4 }, // The data reported by the IACA tool is "4.3".
  };

  static const CostTblEntry AVX1CostTblNoPairWise[] = {
    { ISD::FADD,  MVT::v4f32,   3 },
    { ISD::FADD,  MVT::v4f64,   3 },
    { ISD::FADD,  MVT::v8f32,   4 },
    { ISD::ADD,   MVT::v2i64,   1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD,   MVT::v4i32,   3 }, // The data reported by the IACA tool is "2.8".
    { ISD::ADD,   MVT::v4i64,   3 },
    { ISD::ADD,   MVT::v8i16,   4 },
    { ISD::ADD,   MVT::v8i32,   5 },
  };

  if (IsPairwise) {
    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTblPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE42())
      if (const auto *Entry = CostTableLookup(SSE42CostTblPairWise, ISD, MTy))
        return LT.first * Entry->Cost;
  } else {
    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE42())
      if (const auto *Entry = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy))
        return LT.first * Entry->Cost;
  }

  return BaseT::getReductionCost(Opcode, ValTy, IsPairwise);
}

/// \brief Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
int X86TTIImpl::getIntImmCost(int64_t Val) {
  if (Val == 0)
    return TTI::TCC_Free;

  if (isInt<32>(Val))
    return TTI::TCC_Basic;

  return 2 * TTI::TCC_Basic;
}

int X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Never hoist constants larger than 128 bits, because this might lead to
  // incorrect code generation or assertions in codegen.
  // FIXME: Create a cost model for types larger than i128 once the codegen
  // issues have been fixed.
  if (BitSize > 128)
    return TTI::TCC_Free;

  if (Imm == 0)
    return TTI::TCC_Free;

  // Sign-extend all constants to a multiple of 64 bits.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1, Cost);
}

int X86TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::ICmp:
    // This is an imperfect hack to prevent constant hoisting of
    // compares that might be trying to check if a 64-bit value fits in
    // 32 bits. The backend can optimize these cases using a right shift by 32.
    // Ideally we would check the compare predicate here. There are also other
    // similar immediates the backend can use shifts for.
    if (Idx == 1 && Imm.getBitWidth() == 64) {
      uint64_t ImmVal = Imm.getZExtValue();
      if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff)
        return TTI::TCC_Free;
    }
    ImmIdx = 1;
    break;
  case Instruction::And:
    // We support 64-bit ANDs with immediates with 32 bits of leading zeroes
    // by using a 32-bit operation with implicit zero extension. Detect such
    // immediates here as the normal path expects bit 31 to be sign extended.
    if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
      return TTI::TCC_Free;
    LLVM_FALLTHROUGH;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::Or:
  case Instruction::Xor:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    int NumConstants = (BitSize + 63) / 64;
    int Cost = X86TTIImpl::getIntImmCost(Imm, Ty);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<int>(TTI::TCC_Free)
               : Cost;
  }

  return X86TTIImpl::getIntImmCost(Imm, Ty);
}

int X86TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return X86TTIImpl::getIntImmCost(Imm, Ty);
}

// Return an average cost of a Gather / Scatter instruction; may be improved
// later.
int X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy, Value *Ptr,
                                unsigned Alignment, unsigned AddressSpace) {

  assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost");
  unsigned VF = SrcVTy->getVectorNumElements();

  // Try to reduce the index size from 64 bits (the default for GEP) to 32
  // bits. This is essential for VF 16: if the index can't be reduced to 32
  // bits, the operation will use 16 x 64-bit indices, which do not fit in a
  // zmm register and need to be split. Also check that the base pointer is
  // the same for all lanes, and that there's at most one variable index.
  auto getIndexSizeInBits = [](Value *Ptr, const DataLayout &DL) {
    unsigned IndexSize = DL.getPointerSizeInBits();
    GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
    if (IndexSize < 64 || !GEP)
      return IndexSize;

    unsigned NumOfVarIndices = 0;
    Value *Ptrs = GEP->getPointerOperand();
    if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs))
      return IndexSize;
    for (unsigned i = 1; i < GEP->getNumOperands(); ++i) {
      if (isa<Constant>(GEP->getOperand(i)))
        continue;
      Type *IndxTy = GEP->getOperand(i)->getType();
      if (IndxTy->isVectorTy())
        IndxTy = IndxTy->getVectorElementType();
      if ((IndxTy->getPrimitiveSizeInBits() == 64 &&
           !isa<SExtInst>(GEP->getOperand(i))) ||
          ++NumOfVarIndices > 1)
        return IndexSize; // 64
    }
    return (unsigned)32;
  };

  // Try to reduce IndexSize to 32 bits for VF 16.
  // By default the IndexSize is equal to the pointer size.
  unsigned IndexSize = (VF >= 16) ? getIndexSizeInBits(Ptr, DL)
                                  : DL.getPointerSizeInBits();

  Type *IndexVTy = VectorType::get(IntegerType::get(SrcVTy->getContext(),
                                                    IndexSize), VF);
  std::pair<int, MVT> IdxsLT = TLI->getTypeLegalizationCost(DL, IndexVTy);
  std::pair<int, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, SrcVTy);
  int SplitFactor = std::max(IdxsLT.first, SrcLT.first);
  if (SplitFactor > 1) {
    // Handle splitting of vectors of pointers.
    Type *SplitSrcTy =
        VectorType::get(SrcVTy->getScalarType(), VF / SplitFactor);
    return SplitFactor * getGSVectorCost(Opcode, SplitSrcTy, Ptr, Alignment,
                                         AddressSpace);
  }

  // The gather / scatter cost is given by Intel architects. It is a rough
  // number since we are looking at one instruction at a time.
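  // As a rough illustration (not a measured number): with the GSOverhead of 2
  // used below, a gather of <8 x i32> is modeled as
  //   2 + 8 * getMemoryOpCost(Load, i32, Alignment, AddressSpace),
  // i.e. the fixed overhead plus one scalar-sized memory op per lane.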
  const int GSOverhead = 2;
  return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                           Alignment, AddressSpace);
}

/// Return the cost of full scalarization of a gather / scatter operation.
///
/// Opcode - Load or Store instruction.
/// SrcVTy - The type of the data vector that should be gathered or scattered.
/// VariableMask - The mask is non-constant at compile time.
/// Alignment - Alignment for one element.
/// AddressSpace - pointer[s] address space.
///
int X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy,
                                bool VariableMask, unsigned Alignment,
                                unsigned AddressSpace) {
  unsigned VF = SrcVTy->getVectorNumElements();

  int MaskUnpackCost = 0;
  if (VariableMask) {
    VectorType *MaskTy =
        VectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF);
    MaskUnpackCost = getScalarizationOverhead(MaskTy, false, true);
    int ScalarCompareCost =
        getCmpSelInstrCost(Instruction::ICmp,
                           Type::getInt1Ty(SrcVTy->getContext()), nullptr);
    int BranchCost = getCFInstrCost(Instruction::Br);
    MaskUnpackCost += VF * (BranchCost + ScalarCompareCost);
  }

  // The cost of the scalar loads/stores.
  int MemoryOpCost = VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                          Alignment, AddressSpace);

  int InsertExtractCost = 0;
  if (Opcode == Instruction::Load)
    for (unsigned i = 0; i < VF; ++i)
      // Add the cost of inserting each scalar load into the vector.
      InsertExtractCost +=
          getVectorInstrCost(Instruction::InsertElement, SrcVTy, i);
  else
    for (unsigned i = 0; i < VF; ++i)
      // Add the cost of extracting each element out of the data vector.
      InsertExtractCost +=
          getVectorInstrCost(Instruction::ExtractElement, SrcVTy, i);

  return MemoryOpCost + MaskUnpackCost + InsertExtractCost;
}

/// Calculate the cost of a Gather / Scatter operation.
int X86TTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *SrcVTy,
                                       Value *Ptr, bool VariableMask,
                                       unsigned Alignment) {
  assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter");
  unsigned VF = SrcVTy->getVectorNumElements();
  PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
  if (!PtrTy && Ptr->getType()->isVectorTy())
    PtrTy = dyn_cast<PointerType>(Ptr->getType()->getVectorElementType());
  assert(PtrTy && "Unexpected type for Ptr argument");
  unsigned AddressSpace = PtrTy->getAddressSpace();

  bool Scalarize = false;
  if ((Opcode == Instruction::Load && !isLegalMaskedGather(SrcVTy)) ||
      (Opcode == Instruction::Store && !isLegalMaskedScatter(SrcVTy)))
    Scalarize = true;
  // Gather / Scatter for vector 2 is not profitable on KNL / SKX.
  // A vector-4 gather/scatter instruction does not exist on KNL.
  // We can extend it to 8 elements, but zeroing the upper bits of
  // the mask vector will add more instructions. Right now we give the scalar
  // cost of vector-4 for KNL. TODO: Check, maybe the gather/scatter
  // instruction is better in the VariableMask case.
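  // For example, a <4 x double> gather on KNL (which lacks VLX) takes the
  // scalarized path below, while on SKX (VLX available) it keeps the vector
  // gather cost.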
  if (VF == 2 || (VF == 4 && !ST->hasVLX()))
    Scalarize = true;

  if (Scalarize)
    return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment,
                           AddressSpace);

  return getGSVectorCost(Opcode, SrcVTy, Ptr, Alignment, AddressSpace);
}

bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy) {
  Type *ScalarTy = DataTy->getScalarType();
  int DataWidth = isa<PointerType>(ScalarTy) ?
    DL.getPointerSizeInBits() : ScalarTy->getPrimitiveSizeInBits();

  return ((DataWidth == 32 || DataWidth == 64) && ST->hasAVX()) ||
         ((DataWidth == 8 || DataWidth == 16) && ST->hasBWI());
}

bool X86TTIImpl::isLegalMaskedStore(Type *DataType) {
  return isLegalMaskedLoad(DataType);
}

bool X86TTIImpl::isLegalMaskedGather(Type *DataTy) {
  // This function is currently called in two cases: from the Loop Vectorizer
  // and from the Scalarizer.
  // When the Loop Vectorizer asks about the legality of the feature, the
  // vectorization factor is not calculated yet. The Loop Vectorizer sends a
  // scalar type and the decision is based on the width of the scalar element.
  // Later on, the cost model will estimate usage of this intrinsic based on
  // the vector type.
  // The Scalarizer asks again about legality. It sends a vector type.
  // In this case we can reject non-power-of-2 vectors.
  if (isa<VectorType>(DataTy) && !isPowerOf2_32(DataTy->getVectorNumElements()))
    return false;
  Type *ScalarTy = DataTy->getScalarType();
  int DataWidth = isa<PointerType>(ScalarTy) ?
    DL.getPointerSizeInBits() : ScalarTy->getPrimitiveSizeInBits();

  // AVX-512 allows gather and scatter.
  return (DataWidth == 32 || DataWidth == 64) && ST->hasAVX512();
}

bool X86TTIImpl::isLegalMaskedScatter(Type *DataType) {
  return isLegalMaskedGather(DataType);
}

bool X86TTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();

  // Work this as a subsetting of subtarget features.
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // FIXME: This is likely too limiting as it will include subtarget features
  // that we might not care about for inlining, but it is conservatively
  // correct.
  return (CallerBits & CalleeBits) == CalleeBits;
}

bool X86TTIImpl::enableInterleavedAccessVectorization() {
  // TODO: We expect this to be beneficial regardless of arch,
  // but there are currently some unexplained performance artifacts on Atom.
  // As a temporary solution, disable on Atom.
  return !(ST->isAtom() || ST->isSLM());
}

// Get an estimate for interleaved load/store operations and strided loads.
// \p Indices contains the indices for a strided load.
// \p Factor - the factor of interleaving.
// AVX-512 provides 3-src shuffles that significantly reduce the cost.
int X86TTIImpl::getInterleavedMemoryOpCostAVX512(unsigned Opcode, Type *VecTy,
                                                 unsigned Factor,
                                                 ArrayRef<unsigned> Indices,
                                                 unsigned Alignment,
                                                 unsigned AddressSpace) {

  // VecTy for an interleaved memory op is <VF*Factor x Elt>.
  // So, for VF = 4, Interleave Factor = 3, Element type = i32 we have
  // VecTy = <12 x i32>.
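  // Continuing that example as a rough sketch (assuming AVX-512 legalization
  // widens <12 x i32> to v16i32): VecTySize = 48 bytes, LegalVTSize = 64
  // bytes, so the NumOfMemOps computed below is 1 and a single 512-bit load
  // or store covers the whole interleaved group.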

  // Calculate the number of memory operations (NumOfMemOps) required
  // to load/store the VecTy.
  MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
  unsigned VecTySize = DL.getTypeStoreSize(VecTy);
  unsigned LegalVTSize = LegalVT.getStoreSize();
  unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;

  // Get the cost of one memory operation.
  Type *SingleMemOpTy = VectorType::get(VecTy->getVectorElementType(),
                                        LegalVT.getVectorNumElements());
  unsigned MemOpCost =
      getMemoryOpCost(Opcode, SingleMemOpTy, Alignment, AddressSpace);

  if (Opcode == Instruction::Load) {
    // The kind of shuffle depends on the number of loaded values.
    // If we load the entire data in one register, we can use a 1-src shuffle.
    // Otherwise, we'll merge 2 sources in each operation.
    TTI::ShuffleKind ShuffleKind =
        (NumOfMemOps > 1) ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc;

    unsigned ShuffleCost =
        getShuffleCost(ShuffleKind, SingleMemOpTy, 0, nullptr);

    unsigned NumOfLoadsInInterleaveGrp =
        Indices.size() ? Indices.size() : Factor;
    Type *ResultTy = VectorType::get(VecTy->getVectorElementType(),
                                     VecTy->getVectorNumElements() / Factor);
    unsigned NumOfResults =
        getTLI()->getTypeLegalizationCost(DL, ResultTy).first *
        NumOfLoadsInInterleaveGrp;

    // About half of the loads may be folded into shuffles when we have only
    // one result. If we have more than one result, we do not fold loads at
    // all.
    unsigned NumOfUnfoldedLoads =
        NumOfResults > 1 ? NumOfMemOps : NumOfMemOps / 2;

    // Get the number of shuffle operations per result.
    unsigned NumOfShufflesPerResult =
        std::max((unsigned)1, (unsigned)(NumOfMemOps - 1));

    // The SK_PermuteTwoSrc shuffle clobbers one of the source operands.
    // When we have more than one destination, we need additional instructions
    // to keep the sources.
    unsigned NumOfMoves = 0;
    if (NumOfResults > 1 && ShuffleKind == TTI::SK_PermuteTwoSrc)
      NumOfMoves = NumOfResults * NumOfShufflesPerResult / 2;

    int Cost = NumOfResults * NumOfShufflesPerResult * ShuffleCost +
               NumOfUnfoldedLoads * MemOpCost + NumOfMoves;

    return Cost;
  }

  // Store.
  assert(Opcode == Instruction::Store &&
         "Expected Store Instruction at this point");

  // There are no strided stores at the moment, and a store can't be folded
  // into a shuffle.
  unsigned NumOfSources = Factor; // The number of values to be merged.
  unsigned ShuffleCost =
      getShuffleCost(TTI::SK_PermuteTwoSrc, SingleMemOpTy, 0, nullptr);
  unsigned NumOfShufflesPerStore = NumOfSources - 1;

  // The SK_PermuteTwoSrc shuffle clobbers one of the source operands.
  // We need additional instructions to keep the sources.
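  // A rough example of the computation below: with Factor = 4 there are
  // NumOfShufflesPerStore = 3 two-source shuffles feeding each memory op, and
  // half of the NumOfMemOps * 3 shuffles are assumed to need an extra move to
  // preserve a source.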
  unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2;
  int Cost = NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) +
             NumOfMoves;
  return Cost;
}

int X86TTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                           unsigned Factor,
                                           ArrayRef<unsigned> Indices,
                                           unsigned Alignment,
                                           unsigned AddressSpace) {
  auto isSupportedOnAVX512 = [](Type *VecTy, bool &RequiresBW) {
    RequiresBW = false;
    Type *EltTy = VecTy->getVectorElementType();
    if (EltTy->isFloatTy() || EltTy->isDoubleTy() || EltTy->isIntegerTy(64) ||
        EltTy->isIntegerTy(32) || EltTy->isPointerTy())
      return true;
    if (EltTy->isIntegerTy(16) || EltTy->isIntegerTy(8)) {
      RequiresBW = true;
      return true;
    }
    return false;
  };
  bool RequiresBW;
  bool HasAVX512Solution = isSupportedOnAVX512(VecTy, RequiresBW);
  if (ST->hasAVX512() && HasAVX512Solution && (!RequiresBW || ST->hasBWI()))
    return getInterleavedMemoryOpCostAVX512(Opcode, VecTy, Factor, Indices,
                                            Alignment, AddressSpace);
  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace);
}