//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//
/// A note about the cost model numbers used below: the numbers correspond to
/// some "generic" X86 CPU rather than to a concrete CPU model. Usually the
/// numbers correspond to the CPU where the feature first appeared. For
/// example, if we do Subtarget.hasSSE42() in the lookups below, the cost is
/// based on Nehalem as that was the first CPU to support that feature level
/// and thus most likely has the worst-case cost.
/// Some examples of other technologies/CPUs:
///   SSE 3   - Pentium4 / Athlon64
///   SSE 4.1 - Penryn
///   SSE 4.2 - Nehalem
///   AVX     - Sandy Bridge
///   AVX2    - Haswell
///   AVX-512 - Xeon Phi / Skylake
/// And some examples of instruction target dependent costs (latency):
///                   divss     sqrtss     rsqrtss
///   AMD K7          11-16     19         3
///   Piledriver      9-24      13-15      5
///   Jaguar          14        16         2
///   Pentium II,III  18        30         2
///   Nehalem         7-14      7-18       3
///   Haswell         10-13     11         5
/// TODO: Develop and implement the target dependent cost model and
/// specialize cost numbers for different Cost Model Targets such as throughput,
/// code size, latency and uop count.
//===----------------------------------------------------------------------===//

#include "X86TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"

using namespace llvm;

#define DEBUG_TYPE "x86tti"

//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  //   instructions is inefficient. Once the problem is fixed, we should
  //   call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
}
unsigned X86TTIImpl::getNumberOfRegisters(bool Vector) {
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit()) {
    if (Vector && ST->hasAVX512())
      return 32;
    return 16;
  }
  return 8;
}

unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) {
  if (Vector) {
    if (ST->hasAVX512())
      return 512;
    if (ST->hasAVX())
      return 256;
    if (ST->hasSSE1())
      return 128;
    return 0;
  }

  if (ST->is64Bit())
    return 64;

  return 32;
}

unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // If the loop will not be vectorized, don't interleave the loop.
  // Let the regular unroller handle the loop instead, which saves the
  // overflow check and memory check cost.
  if (VF == 1)
    return 1;

  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}

int X86TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty,
    TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
    TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo,
    ArrayRef<const Value *> Args) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");
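  // Illustrative note (example assumed, not from the source): LT.first is the
  // number of legal-type operations the IR type splits into, and every table
  // hit below is scaled by it. For instance, on an SSE2-only target a
  // <8 x i32> operation legalizes to two <4 x i32> halves, so LT.first == 2
  // and a v4i32 table entry is counted twice.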
  static const CostTblEntry SLMCostTable[] = {
    { ISD::MUL,  MVT::v4i32, 11 }, // pmulld
    { ISD::MUL,  MVT::v8i16,  2 }, // pmullw
    { ISD::MUL,  MVT::v16i8, 14 }, // extend/pmullw/trunc sequence.
    { ISD::FMUL, MVT::f64,    2 }, // mulsd
    { ISD::FMUL, MVT::v2f64,  4 }, // mulpd
    { ISD::FMUL, MVT::v4f32,  2 }, // mulps
    { ISD::FDIV, MVT::f32,   17 }, // divss
    { ISD::FDIV, MVT::v4f32, 39 }, // divps
    { ISD::FDIV, MVT::f64,   32 }, // divsd
    { ISD::FDIV, MVT::v2f64, 69 }, // divpd
    { ISD::FADD, MVT::v2f64,  2 }, // addpd
    { ISD::FSUB, MVT::v2f64,  2 }, // subpd
    // v2i64/v4i64 mul is custom lowered as a series of long
    // multiplies(3), shifts(3) and adds(2).
    // SLM muldq version throughput is 2.
    { ISD::MUL,  MVT::v2i64, 11 },
  };

  if (ST->isSLM()) {
    if (Args.size() == 2 && ISD == ISD::MUL && LT.second == MVT::v4i32) {
      // Check if the operands can be shrunk into a smaller datatype.
      bool Op1Signed = false;
      unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
      bool Op2Signed = false;
      unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);

      bool signedMode = Op1Signed | Op2Signed;
      unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);

      if (OpMinSize <= 7)
        return LT.first * 3; // pmullw/sext
      if (!signedMode && OpMinSize <= 8)
        return LT.first * 3; // pmullw/zext
      if (OpMinSize <= 15)
        return LT.first * 5; // pmullw/pmulhw/pshuf
      if (!signedMode && OpMinSize <= 16)
        return LT.first * 5; // pmullw/pmulhw/pshuf
    }
    if (const auto *Entry = CostTableLookup(SLMCostTable, ISD,
                                            LT.second)) {
      return LT.first * Entry->Cost;
    }
  }

  if (ISD == ISD::SDIV &&
      Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    // On X86, vector signed division by a power-of-two constant is
    // normally expanded to the sequence SRA + SRL + ADD + SRA.
    // The OperandValue properties may not be the same as those of the
    // previous operation; conservatively assume OP_None.
    int Cost = 2 * getArithmeticInstrCost(Instruction::AShr, Ty, Op1Info,
                                          Op2Info, TargetTransformInfo::OP_None,
                                          TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::LShr, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Add, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);

    return Cost;
  }
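  // Worked example (illustrative only): for a uniform power-of-two divisor,
  // the block above prices an IR `sdiv <4 x i32> %x, <i32 4, ...>` as
  //   2 * Cost(AShr, v4i32) + Cost(LShr, v4i32) + Cost(Add, v4i32),
  // i.e. the SRA + SRL + ADD + SRA expansion, instead of falling through to
  // the deliberately punitive vector-division table entries further below.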
  static const CostTblEntry AVX512BWUniformConstCostTable[] = {
    { ISD::SHL,  MVT::v64i8,   2 }, // psllw + pand.
    { ISD::SRL,  MVT::v64i8,   2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v64i8,   4 }, // psrlw, pand, pxor, psubb.

    { ISD::SDIV, MVT::v32i16,  6 }, // vpmulhw sequence
    { ISD::UDIV, MVT::v32i16,  6 }, // vpmulhuw sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasBWI()) {
    if (const auto *Entry = CostTableLookup(AVX512BWUniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512UniformConstCostTable[] = {
    { ISD::SRA,  MVT::v2i64,   1 },
    { ISD::SRA,  MVT::v4i64,   1 },
    { ISD::SRA,  MVT::v8i64,   1 },

    { ISD::SDIV, MVT::v16i32, 15 }, // vpmuldq sequence
    { ISD::UDIV, MVT::v16i32, 15 }, // vpmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX512()) {
    if (const auto *Entry = CostTableLookup(AVX512UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX2UniformConstCostTable[] = {
    { ISD::SHL,  MVT::v32i8,   2 }, // psllw + pand.
    { ISD::SRL,  MVT::v32i8,   2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v32i8,   4 }, // psrlw, pand, pxor, psubb.

    { ISD::SRA,  MVT::v4i64,   4 }, // 2 x psrad + shuffle.

    { ISD::SDIV, MVT::v16i16,  6 }, // vpmulhw sequence
    { ISD::UDIV, MVT::v16i16,  6 }, // vpmulhuw sequence
    { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
    { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX2()) {
    if (const auto *Entry = CostTableLookup(AVX2UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformConstCostTable[] = {
    { ISD::SHL,  MVT::v16i8,   2 }, // psllw + pand.
    { ISD::SRL,  MVT::v16i8,   2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v16i8,   4 }, // psrlw, pand, pxor, psubb.

    { ISD::SHL,  MVT::v32i8,   4 }, // 2*(psllw + pand).
    { ISD::SRL,  MVT::v32i8,   4 }, // 2*(psrlw + pand).
    { ISD::SRA,  MVT::v32i8,   8 }, // 2*(psrlw, pand, pxor, psubb).

    { ISD::SDIV, MVT::v16i16, 12 }, // pmulhw sequence
    { ISD::SDIV, MVT::v8i16,   6 }, // pmulhw sequence
    { ISD::UDIV, MVT::v16i16, 12 }, // pmulhuw sequence
    { ISD::UDIV, MVT::v8i16,   6 }, // pmulhuw sequence
    { ISD::SDIV, MVT::v8i32,  38 }, // pmuludq sequence
    { ISD::SDIV, MVT::v4i32,  19 }, // pmuludq sequence
    { ISD::UDIV, MVT::v8i32,  30 }, // pmuludq sequence
    { ISD::UDIV, MVT::v4i32,  15 }, // pmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2()) {
    // pmuldq sequence.
    if (ISD == ISD::SDIV && LT.second == MVT::v8i32 && ST->hasAVX())
      return LT.first * 30;
    if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 15;

    if (const auto *Entry = CostTableLookup(SSE2UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }
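  // Illustrative example (not from the source): a `udiv <8 x i16>` by a
  // uniform constant on an SSE2-only target hits the v8i16 entry above and
  // costs 6 (pmulhuw sequence). For <16 x i16> the same query costs 12,
  // either as LT.first == 2 halves of the v8i16 entry on SSE2, or via the
  // explicit v16i16 entry when the 256-bit type is legal (AVX).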
  static const CostTblEntry AVX2UniformCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL, MVT::v16i16, 1 }, // psllw.
    { ISD::SRL, MVT::v16i16, 1 }, // psrlw.
    { ISD::SRA, MVT::v16i16, 1 }, // psraw.
  };

  if (ST->hasAVX2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(AVX2UniformCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL, MVT::v8i16, 1 }, // psllw.
    { ISD::SHL, MVT::v4i32, 1 }, // pslld.
    { ISD::SHL, MVT::v2i64, 1 }, // psllq.

    { ISD::SRL, MVT::v8i16, 1 }, // psrlw.
    { ISD::SRL, MVT::v4i32, 1 }, // psrld.
    { ISD::SRL, MVT::v2i64, 1 }, // psrlq.

    { ISD::SRA, MVT::v8i16, 1 }, // psraw.
    { ISD::SRA, MVT::v4i32, 1 }, // psrad.
  };

  if (ST->hasSSE2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(SSE2UniformCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512DQCostTable[] = {
    { ISD::MUL, MVT::v2i64, 1 },
    { ISD::MUL, MVT::v4i64, 1 },
    { ISD::MUL, MVT::v8i64, 1 }
  };

  // Look for AVX512DQ lowering tricks for custom cases.
  if (ST->hasDQI())
    if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512BWCostTable[] = {
    { ISD::SHL,  MVT::v8i16,      1 }, // vpsllvw
    { ISD::SRL,  MVT::v8i16,      1 }, // vpsrlvw
    { ISD::SRA,  MVT::v8i16,      1 }, // vpsravw

    { ISD::SHL,  MVT::v16i16,     1 }, // vpsllvw
    { ISD::SRL,  MVT::v16i16,     1 }, // vpsrlvw
    { ISD::SRA,  MVT::v16i16,     1 }, // vpsravw

    { ISD::SHL,  MVT::v32i16,     1 }, // vpsllvw
    { ISD::SRL,  MVT::v32i16,     1 }, // vpsrlvw
    { ISD::SRA,  MVT::v32i16,     1 }, // vpsravw

    { ISD::SHL,  MVT::v64i8,     11 }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v64i8,     11 }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v64i8,     24 }, // vpblendvb sequence.

    { ISD::MUL,  MVT::v64i8,     11 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v32i8,      4 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i8,      4 }, // extend/pmullw/trunc sequence.

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV, MVT::v64i8,  64*20 },
    { ISD::SDIV, MVT::v32i16, 32*20 },
    { ISD::UDIV, MVT::v64i8,  64*20 },
    { ISD::UDIV, MVT::v32i16, 32*20 }
  };

  // Look for AVX512BW lowering tricks for custom cases.
  if (ST->hasBWI())
    if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512CostTable[] = {
    { ISD::SHL,  MVT::v16i32,     1 },
    { ISD::SRL,  MVT::v16i32,     1 },
    { ISD::SRA,  MVT::v16i32,     1 },

    { ISD::SHL,  MVT::v8i64,      1 },
    { ISD::SRL,  MVT::v8i64,      1 },

    { ISD::SRA,  MVT::v2i64,      1 },
    { ISD::SRA,  MVT::v4i64,      1 },
    { ISD::SRA,  MVT::v8i64,      1 },

    { ISD::MUL,  MVT::v32i8,     13 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i8,      5 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i32,     1 }, // pmulld
    { ISD::MUL,  MVT::v8i64,      8 }, // 3*pmuludq/3*shift/2*add

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV, MVT::v16i32, 16*20 },
    { ISD::SDIV, MVT::v8i64,   8*20 },
    { ISD::UDIV, MVT::v16i32, 16*20 },
    { ISD::UDIV, MVT::v8i64,   8*20 }
  };

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX2ShiftCostTable[] = {
    // Shifts on v4i64/v8i32 on AVX2 are legal even though we declare them
    // custom, so we can detect the cases where the shift amount is a scalar
    // splat.
    { ISD::SHL, MVT::v4i32, 1 },
    { ISD::SRL, MVT::v4i32, 1 },
    { ISD::SRA, MVT::v4i32, 1 },
    { ISD::SHL, MVT::v8i32, 1 },
    { ISD::SRL, MVT::v8i32, 1 },
    { ISD::SRA, MVT::v8i32, 1 },
    { ISD::SHL, MVT::v2i64, 1 },
    { ISD::SRL, MVT::v2i64, 1 },
    { ISD::SHL, MVT::v4i64, 1 },
    { ISD::SRL, MVT::v4i64, 1 },
  };

  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return LT.first;

    if (const auto *Entry = CostTableLookup(AVX2ShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry XOPShiftCostTable[] = {
    // 128-bit shifts take 1 cycle, but right shifts require negation
    // beforehand.
    { ISD::SHL, MVT::v16i8,  1 },
    { ISD::SRL, MVT::v16i8,  2 },
    { ISD::SRA, MVT::v16i8,  2 },
    { ISD::SHL, MVT::v8i16,  1 },
    { ISD::SRL, MVT::v8i16,  2 },
    { ISD::SRA, MVT::v8i16,  2 },
    { ISD::SHL, MVT::v4i32,  1 },
    { ISD::SRL, MVT::v4i32,  2 },
    { ISD::SRA, MVT::v4i32,  2 },
    { ISD::SHL, MVT::v2i64,  1 },
    { ISD::SRL, MVT::v2i64,  2 },
    { ISD::SRA, MVT::v2i64,  2 },
    // 256-bit shifts require splitting if AVX2 didn't catch them above.
    { ISD::SHL, MVT::v32i8,  2 },
    { ISD::SRL, MVT::v32i8,  4 },
    { ISD::SRA, MVT::v32i8,  4 },
    { ISD::SHL, MVT::v16i16, 2 },
    { ISD::SRL, MVT::v16i16, 4 },
    { ISD::SRA, MVT::v16i16, 4 },
    { ISD::SHL, MVT::v8i32,  2 },
    { ISD::SRL, MVT::v8i32,  4 },
    { ISD::SRA, MVT::v8i32,  4 },
    { ISD::SHL, MVT::v4i64,  2 },
    { ISD::SRL, MVT::v4i64,  4 },
    { ISD::SRA, MVT::v4i64,  4 },
  };

  // Look for XOP lowering tricks.
  if (ST->hasXOP())
    if (const auto *Entry = CostTableLookup(XOPShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE2UniformShiftCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL, MVT::v16i16, 2 }, // psllw.
    { ISD::SHL, MVT::v8i32,  2 }, // pslld.
    { ISD::SHL, MVT::v4i64,  2 }, // psllq.

    { ISD::SRL, MVT::v16i16, 2 }, // psrlw.
    { ISD::SRL, MVT::v8i32,  2 }, // psrld.
    { ISD::SRL, MVT::v4i64,  2 }, // psrlq.

    { ISD::SRA, MVT::v16i16, 2 }, // psraw.
    { ISD::SRA, MVT::v8i32,  2 }, // psrad.
    { ISD::SRA, MVT::v2i64,  4 }, // 2 x psrad + shuffle.
    { ISD::SRA, MVT::v4i64,  8 }, // 2 x psrad + shuffle.
  };

  if (ST->hasSSE2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(SSE2UniformShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  if (ISD == ISD::SHL &&
      Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
    MVT VT = LT.second;
    // A vector shift left by a non-uniform constant can be lowered into a
    // vector multiply.
    if (((VT == MVT::v8i16 || VT == MVT::v4i32) && ST->hasSSE2()) ||
        ((VT == MVT::v16i16 || VT == MVT::v8i32) && ST->hasAVX()))
      ISD = ISD::MUL;
  }
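  // Illustrative consequence of the remap above (example assumed, not from
  // the source): once ISD becomes ISD::MUL, the remaining table lookups price
  // the shift as a multiply, so e.g. `shl <8 x i16> %x, <non-uniform
  // constants>` on SSE2 resolves to the v8i16 pmullw entry (cost 1) in the
  // SSE2 table below.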
  static const CostTblEntry AVX2CostTable[] = {
    { ISD::SHL,  MVT::v32i8,  11 }, // vpblendvb sequence.
    { ISD::SHL,  MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRL,  MVT::v32i8,  11 }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRA,  MVT::v32i8,  24 }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v16i16, 10 }, // extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v2i64,   4 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,   4 }, // srl/xor/sub sequence.

    { ISD::SUB,  MVT::v32i8,   1 }, // psubb
    { ISD::ADD,  MVT::v32i8,   1 }, // paddb
    { ISD::SUB,  MVT::v16i16,  1 }, // psubw
    { ISD::ADD,  MVT::v16i16,  1 }, // paddw
    { ISD::SUB,  MVT::v8i32,   1 }, // psubd
    { ISD::ADD,  MVT::v8i32,   1 }, // paddd
    { ISD::SUB,  MVT::v4i64,   1 }, // psubq
    { ISD::ADD,  MVT::v4i64,   1 }, // paddq

    { ISD::MUL,  MVT::v32i8,  17 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i8,   7 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i16,  1 }, // pmullw
    { ISD::MUL,  MVT::v8i32,   1 }, // pmulld
    { ISD::MUL,  MVT::v4i64,   8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FDIV, MVT::f32,     7 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,   7 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v8f32,  14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::f64,    14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,  14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v4f64,  28 }, // Haswell from http://www.agner.org/
  };

  // Look for AVX2 lowering tricks for custom cases.
  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL,  MVT::v16i16,     4 },
    { ISD::MUL,  MVT::v8i32,      4 },
    { ISD::SUB,  MVT::v32i8,      4 },
    { ISD::ADD,  MVT::v32i8,      4 },
    { ISD::SUB,  MVT::v16i16,     4 },
    { ISD::ADD,  MVT::v16i16,     4 },
    { ISD::SUB,  MVT::v8i32,      4 },
    { ISD::ADD,  MVT::v8i32,      4 },
    { ISD::SUB,  MVT::v4i64,      4 },
    { ISD::ADD,  MVT::v4i64,      4 },

    // A v4i64 multiply is custom lowered as two split v2i64 vectors that then
    // are lowered as a series of long multiplies(3), shifts(3) and adds(2).
    // Because we believe v4i64 to be a legal type, we must also include the
    // extract+insert in the cost table. Therefore, the cost here is 18
    // instead of 8.
    { ISD::MUL,  MVT::v4i64,     18 },

    { ISD::MUL,  MVT::v32i8,     26 }, // extend/pmullw/trunc sequence.

    { ISD::FDIV, MVT::f32,       14 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,     14 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v8f32,     28 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::f64,       22 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,     22 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v4f64,     44 }, // SNB from http://www.agner.org/

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV, MVT::v32i8,  32*20 },
    { ISD::SDIV, MVT::v16i16, 16*20 },
    { ISD::SDIV, MVT::v8i32,   8*20 },
    { ISD::SDIV, MVT::v4i64,   4*20 },
    { ISD::UDIV, MVT::v32i8,  32*20 },
    { ISD::UDIV, MVT::v16i16, 16*20 },
    { ISD::UDIV, MVT::v8i32,   8*20 },
    { ISD::UDIV, MVT::v4i64,   4*20 },
  };

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry SSE42CostTable[] = {
    { ISD::FDIV, MVT::f32,   14 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32, 14 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::f64,   22 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64, 22 }, // Nehalem from http://www.agner.org/
  };

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE41CostTable[] = {
    { ISD::SHL, MVT::v16i8,    11 }, // pblendvb sequence.
    { ISD::SHL, MVT::v32i8,  2*11 }, // pblendvb sequence.
    { ISD::SHL, MVT::v8i16,    14 }, // pblendvb sequence.
    { ISD::SHL, MVT::v16i16, 2*14 }, // pblendvb sequence.
    { ISD::SHL, MVT::v4i32,     4 }, // pslld/paddd/cvttps2dq/pmulld
    { ISD::SHL, MVT::v8i32,   2*4 }, // pslld/paddd/cvttps2dq/pmulld

    { ISD::SRL, MVT::v16i8,    12 }, // pblendvb sequence.
    { ISD::SRL, MVT::v32i8,  2*12 }, // pblendvb sequence.
    { ISD::SRL, MVT::v8i16,    14 }, // pblendvb sequence.
    { ISD::SRL, MVT::v16i16, 2*14 }, // pblendvb sequence.
    { ISD::SRL, MVT::v4i32,    11 }, // Shift each lane + blend.
    { ISD::SRL, MVT::v8i32,  2*11 }, // Shift each lane + blend.

    { ISD::SRA, MVT::v16i8,    24 }, // pblendvb sequence.
    { ISD::SRA, MVT::v32i8,  2*24 }, // pblendvb sequence.
    { ISD::SRA, MVT::v8i16,    14 }, // pblendvb sequence.
    { ISD::SRA, MVT::v16i16, 2*14 }, // pblendvb sequence.
    { ISD::SRA, MVT::v4i32,    12 }, // Shift each lane + blend.
    { ISD::SRA, MVT::v8i32,  2*12 }, // Shift each lane + blend.

    { ISD::MUL, MVT::v4i32,     1 }  // pmulld
  };

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    { ISD::SHL,  MVT::v16i8,     26 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v8i16,     32 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v4i32,    2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v8i32,  2*2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v2i64,      4 }, // splat+shuffle sequence.
    { ISD::SHL,  MVT::v4i64,    2*4 }, // splat+shuffle sequence.

    { ISD::SRL,  MVT::v16i8,     26 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v8i16,     32 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v4i32,     16 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v2i64,      4 }, // splat+shuffle sequence.
    { ISD::SRL,  MVT::v4i64,    2*4 }, // splat+shuffle sequence.

    { ISD::SRA,  MVT::v16i8,     54 }, // unpacked cmpgtb sequence.
    { ISD::SRA,  MVT::v8i16,     32 }, // cmpgtb sequence.
    { ISD::SRA,  MVT::v4i32,     16 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v2i64,     12 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,   2*12 }, // srl/xor/sub sequence.

    { ISD::MUL,  MVT::v16i8,     12 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v8i16,      1 }, // pmullw
    { ISD::MUL,  MVT::v4i32,      6 }, // 3*pmuludq/4*shuffle
    { ISD::MUL,  MVT::v2i64,      8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FDIV, MVT::f32,       23 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,     39 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::f64,       38 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,     69 }, // Pentium IV from http://www.agner.org/

    // It is not a good idea to vectorize division. We have to scalarize it and
    // in the process we will often end up having to spill regular registers.
    // The overhead of division is going to dominate most kernels anyways, so
    // try hard to prevent vectorization of division - it is generally a bad
    // idea. Assume somewhat arbitrarily that we have to be able to hide "20
    // cycles" for each lane.
    { ISD::SDIV, MVT::v16i8,  16*20 },
    { ISD::SDIV, MVT::v8i16,   8*20 },
    { ISD::SDIV, MVT::v4i32,   4*20 },
    { ISD::SDIV, MVT::v2i64,   2*20 },
    { ISD::UDIV, MVT::v16i8,  16*20 },
    { ISD::UDIV, MVT::v8i16,   8*20 },
    { ISD::UDIV, MVT::v4i32,   4*20 },
    { ISD::UDIV, MVT::v2i64,   2*20 },
  };
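  // Worked example (illustrative): the 20-cycles-per-lane convention above
  // makes a <4 x i32> sdiv cost 4*20 = 80 on SSE2, dwarfing any scalar
  // alternative so the vectorizers keep division scalar.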
  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE1CostTable[] = {
    { ISD::FDIV, MVT::f32,   17 }, // Pentium III from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32, 34 }, // Pentium III from http://www.agner.org/
  };

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info);
}

int X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // 64-bit packed float vectors (v2f32) are widened to type v4f32.
  // 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

  // For broadcasts we are splatting the first element from the first input
  // register, so we only need to reference that input; all the output
  // registers are the same.
  if (Kind == TTI::SK_Broadcast)
    LT.first = 1;
  // We are going to permute multiple sources and the result will be in
  // multiple destinations. We provide an accurate cost only for splits where
  // the element type remains the same.
  if (Kind == TTI::SK_PermuteSingleSrc && LT.first != 1) {
    MVT LegalVT = LT.second;
    if (LegalVT.getVectorElementType().getSizeInBits() ==
            Tp->getVectorElementType()->getPrimitiveSizeInBits() &&
        LegalVT.getVectorNumElements() < Tp->getVectorNumElements()) {

      unsigned VecTySize = DL.getTypeStoreSize(Tp);
      unsigned LegalVTSize = LegalVT.getStoreSize();
      // Number of source vectors after legalization:
      unsigned NumOfSrcs = (VecTySize + LegalVTSize - 1) / LegalVTSize;
      // Number of destination vectors after legalization:
      unsigned NumOfDests = LT.first;

      Type *SingleOpTy = VectorType::get(Tp->getVectorElementType(),
                                         LegalVT.getVectorNumElements());

      unsigned NumOfShuffles = (NumOfSrcs - 1) * NumOfDests;
      return NumOfShuffles *
             getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy, 0, nullptr);
    }

    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }

  // For 2-input shuffles, we must account for splitting the 2 inputs into
  // many.
  if (Kind == TTI::SK_PermuteTwoSrc && LT.first != 1) {
    // We assume that source and destination have the same vector type.
    int NumOfDests = LT.first;
    int NumOfShufflesPerDest = LT.first * 2 - 1;
    LT.first = NumOfDests * NumOfShufflesPerDest;
  }
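  // Worked example (illustrative, derived from the formulas above): a
  // single-source permute of <16 x i32> on AVX2 legalizes to two v8i32
  // registers, so NumOfSrcs == NumOfDests == 2 and the cost becomes
  // (2 - 1) * 2 = 2 two-source v8i32 shuffles. For a two-source permute with
  // LT.first == 2, each destination needs 2*2 - 1 = 3 shuffles, giving
  // LT.first = 2 * 3 = 6 before the per-type table lookups below.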
  static const CostTblEntry AVX512VBMIShuffleTbl[] = {
    { TTI::SK_Reverse,          MVT::v64i8,  1 }, // vpermb
    { TTI::SK_Reverse,          MVT::v32i8,  1 }, // vpermb

    { TTI::SK_PermuteSingleSrc, MVT::v64i8,  1 }, // vpermb
    { TTI::SK_PermuteSingleSrc, MVT::v32i8,  1 }, // vpermb

    { TTI::SK_PermuteTwoSrc,    MVT::v64i8,  1 }, // vpermt2b
    { TTI::SK_PermuteTwoSrc,    MVT::v32i8,  1 }, // vpermt2b
    { TTI::SK_PermuteTwoSrc,    MVT::v16i8,  1 }  // vpermt2b
  };

  if (ST->hasVBMI())
    if (const auto *Entry =
            CostTableLookup(AVX512VBMIShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512BWShuffleTbl[] = {
    { TTI::SK_Broadcast,        MVT::v32i16, 1 }, // vpbroadcastw
    { TTI::SK_Broadcast,        MVT::v64i8,  1 }, // vpbroadcastb

    { TTI::SK_Reverse,          MVT::v32i16, 1 }, // vpermw
    { TTI::SK_Reverse,          MVT::v16i16, 1 }, // vpermw
    { TTI::SK_Reverse,          MVT::v64i8,  2 }, // pshufb + vshufi64x2

    { TTI::SK_PermuteSingleSrc, MVT::v32i16, 1 }, // vpermw
    { TTI::SK_PermuteSingleSrc, MVT::v16i16, 1 }, // vpermw
    { TTI::SK_PermuteSingleSrc, MVT::v8i16,  1 }, // vpermw
    { TTI::SK_PermuteSingleSrc, MVT::v64i8,  8 }, // extend to v32i16
    { TTI::SK_PermuteSingleSrc, MVT::v32i8,  3 }, // vpermw + zext/trunc

    { TTI::SK_PermuteTwoSrc,    MVT::v32i16, 1 }, // vpermt2w
    { TTI::SK_PermuteTwoSrc,    MVT::v16i16, 1 }, // vpermt2w
    { TTI::SK_PermuteTwoSrc,    MVT::v8i16,  1 }, // vpermt2w
    { TTI::SK_PermuteTwoSrc,    MVT::v32i8,  3 }, // zext + vpermt2w + trunc
    { TTI::SK_PermuteTwoSrc,    MVT::v64i8, 19 }, // 6 * v32i8 + 1
    { TTI::SK_PermuteTwoSrc,    MVT::v16i8,  3 }  // zext + vpermt2w + trunc
  };

  if (ST->hasBWI())
    if (const auto *Entry =
            CostTableLookup(AVX512BWShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512ShuffleTbl[] = {
    { TTI::SK_Broadcast,        MVT::v8f64,  1 }, // vbroadcastpd
    { TTI::SK_Broadcast,        MVT::v16f32, 1 }, // vbroadcastps
    { TTI::SK_Broadcast,        MVT::v8i64,  1 }, // vpbroadcastq
    { TTI::SK_Broadcast,        MVT::v16i32, 1 }, // vpbroadcastd

    { TTI::SK_Reverse,          MVT::v8f64,  1 }, // vpermpd
    { TTI::SK_Reverse,          MVT::v16f32, 1 }, // vpermps
    { TTI::SK_Reverse,          MVT::v8i64,  1 }, // vpermq
    { TTI::SK_Reverse,          MVT::v16i32, 1 }, // vpermd

    { TTI::SK_PermuteSingleSrc, MVT::v8f64,  1 }, // vpermpd
    { TTI::SK_PermuteSingleSrc, MVT::v4f64,  1 }, // vpermpd
    { TTI::SK_PermuteSingleSrc, MVT::v2f64,  1 }, // vpermpd
    { TTI::SK_PermuteSingleSrc, MVT::v16f32, 1 }, // vpermps
    { TTI::SK_PermuteSingleSrc, MVT::v8f32,  1 }, // vpermps
    { TTI::SK_PermuteSingleSrc, MVT::v4f32,  1 }, // vpermps
    { TTI::SK_PermuteSingleSrc, MVT::v8i64,  1 }, // vpermq
    { TTI::SK_PermuteSingleSrc, MVT::v4i64,  1 }, // vpermq
    { TTI::SK_PermuteSingleSrc, MVT::v2i64,  1 }, // vpermq
    { TTI::SK_PermuteSingleSrc, MVT::v16i32, 1 }, // vpermd
    { TTI::SK_PermuteSingleSrc, MVT::v8i32,  1 }, // vpermd
    { TTI::SK_PermuteSingleSrc, MVT::v4i32,  1 }, // vpermd
    { TTI::SK_PermuteSingleSrc, MVT::v16i8,  1 }, // pshufb

    { TTI::SK_PermuteTwoSrc,    MVT::v8f64,  1 }, // vpermt2pd
    { TTI::SK_PermuteTwoSrc,    MVT::v16f32, 1 }, // vpermt2ps
    { TTI::SK_PermuteTwoSrc,    MVT::v8i64,  1 }, // vpermt2q
    { TTI::SK_PermuteTwoSrc,    MVT::v16i32, 1 }, // vpermt2d
    { TTI::SK_PermuteTwoSrc,    MVT::v4f64,  1 }, // vpermt2pd
    { TTI::SK_PermuteTwoSrc,    MVT::v8f32,  1 }, // vpermt2ps
    { TTI::SK_PermuteTwoSrc,    MVT::v4i64,  1 }, // vpermt2q
    { TTI::SK_PermuteTwoSrc,    MVT::v8i32,  1 }, // vpermt2d
    { TTI::SK_PermuteTwoSrc,    MVT::v2f64,  1 }, // vpermt2pd
    { TTI::SK_PermuteTwoSrc,    MVT::v4f32,  1 }, // vpermt2ps
    { TTI::SK_PermuteTwoSrc,    MVT::v2i64,  1 }, // vpermt2q
    { TTI::SK_PermuteTwoSrc,    MVT::v4i32,  1 }  // vpermt2d
  };

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry AVX2ShuffleTbl[] = {
    { TTI::SK_Broadcast, MVT::v4f64,  1 }, // vbroadcastpd
    { TTI::SK_Broadcast, MVT::v8f32,  1 }, // vbroadcastps
    { TTI::SK_Broadcast, MVT::v4i64,  1 }, // vpbroadcastq
    { TTI::SK_Broadcast, MVT::v8i32,  1 }, // vpbroadcastd
    { TTI::SK_Broadcast, MVT::v16i16, 1 }, // vpbroadcastw
    { TTI::SK_Broadcast, MVT::v32i8,  1 }, // vpbroadcastb

    { TTI::SK_Reverse,   MVT::v4f64,  1 }, // vpermpd
    { TTI::SK_Reverse,   MVT::v8f32,  1 }, // vpermps
    { TTI::SK_Reverse,   MVT::v4i64,  1 }, // vpermq
    { TTI::SK_Reverse,   MVT::v8i32,  1 }, // vpermd
    { TTI::SK_Reverse,   MVT::v16i16, 2 }, // vperm2i128 + pshufb
    { TTI::SK_Reverse,   MVT::v32i8,  2 }, // vperm2i128 + pshufb

    { TTI::SK_Alternate, MVT::v16i16, 1 }, // vpblendw
    { TTI::SK_Alternate, MVT::v32i8,  1 }  // vpblendvb
  };

  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX1ShuffleTbl[] = {
    { TTI::SK_Broadcast, MVT::v4f64,  2 }, // vperm2f128 + vpermilpd
    { TTI::SK_Broadcast, MVT::v8f32,  2 }, // vperm2f128 + vpermilps
    { TTI::SK_Broadcast, MVT::v4i64,  2 }, // vperm2f128 + vpermilpd
    { TTI::SK_Broadcast, MVT::v8i32,  2 }, // vperm2f128 + vpermilps
    { TTI::SK_Broadcast, MVT::v16i16, 3 }, // vpshuflw + vpshufd + vinsertf128
    { TTI::SK_Broadcast, MVT::v32i8,  2 }, // vpshufb + vinsertf128

    { TTI::SK_Reverse,   MVT::v4f64,  2 }, // vperm2f128 + vpermilpd
    { TTI::SK_Reverse,   MVT::v8f32,  2 }, // vperm2f128 + vpermilps
    { TTI::SK_Reverse,   MVT::v4i64,  2 }, // vperm2f128 + vpermilpd
    { TTI::SK_Reverse,   MVT::v8i32,  2 }, // vperm2f128 + vpermilps
    { TTI::SK_Reverse,   MVT::v16i16, 4 }, // vextractf128 + 2*pshufb
                                           // + vinsertf128
    { TTI::SK_Reverse,   MVT::v32i8,  4 }, // vextractf128 + 2*pshufb
                                           // + vinsertf128

    { TTI::SK_Alternate, MVT::v4i64,  1 }, // vblendpd
    { TTI::SK_Alternate, MVT::v4f64,  1 }, // vblendpd
    { TTI::SK_Alternate, MVT::v8i32,  1 }, // vblendps
    { TTI::SK_Alternate, MVT::v8f32,  1 }, // vblendps
    { TTI::SK_Alternate, MVT::v16i16, 3 }, // vpand + vpandn + vpor
    { TTI::SK_Alternate, MVT::v32i8,  3 }  // vpand + vpandn + vpor
  };

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry SSE41ShuffleTbl[] = {
    { TTI::SK_Alternate, MVT::v2i64,  1 }, // pblendw
    { TTI::SK_Alternate, MVT::v2f64,  1 }, // movsd
    { TTI::SK_Alternate, MVT::v4i32,  1 }, // pblendw
    { TTI::SK_Alternate, MVT::v4f32,  1 }, // blendps
    { TTI::SK_Alternate, MVT::v8i16,  1 }, // pblendw
    { TTI::SK_Alternate, MVT::v16i8,  1 }  // pblendvb
  };

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSSE3ShuffleTbl[] = {
    { TTI::SK_Broadcast, MVT::v8i16,  1 }, // pshufb
    { TTI::SK_Broadcast, MVT::v16i8,  1 }, // pshufb

    { TTI::SK_Reverse,   MVT::v8i16,  1 }, // pshufb
    { TTI::SK_Reverse,   MVT::v16i8,  1 }, // pshufb

    { TTI::SK_Alternate, MVT::v8i16,  3 }, // pshufb + pshufb + por
    { TTI::SK_Alternate, MVT::v16i8,  3 }  // pshufb + pshufb + por
  };

  if (ST->hasSSSE3())
    if (const auto *Entry = CostTableLookup(SSSE3ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE2ShuffleTbl[] = {
    { TTI::SK_Broadcast, MVT::v2f64,  1 }, // shufpd
    { TTI::SK_Broadcast, MVT::v2i64,  1 }, // pshufd
    { TTI::SK_Broadcast, MVT::v4i32,  1 }, // pshufd
    { TTI::SK_Broadcast, MVT::v8i16,  2 }, // pshuflw + pshufd
    { TTI::SK_Broadcast, MVT::v16i8,  3 }, // unpck + pshuflw + pshufd

    { TTI::SK_Reverse,   MVT::v2f64,  1 }, // shufpd
    { TTI::SK_Reverse,   MVT::v2i64,  1 }, // pshufd
    { TTI::SK_Reverse,   MVT::v4i32,  1 }, // pshufd
    { TTI::SK_Reverse,   MVT::v8i16,  3 }, // pshuflw + pshufhw + pshufd
    { TTI::SK_Reverse,   MVT::v16i8,  9 }, // 2*pshuflw + 2*pshufhw
                                           // + 2*pshufd + 2*unpck + packus

    { TTI::SK_Alternate, MVT::v2i64,  1 }, // movsd
    { TTI::SK_Alternate, MVT::v2f64,  1 }, // movsd
    { TTI::SK_Alternate, MVT::v4i32,  2 }, // 2*shufps
    { TTI::SK_Alternate, MVT::v8i16,  3 }, // pand + pandn + por
    { TTI::SK_Alternate, MVT::v16i8,  3 }  // pand + pandn + por
  };

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE1ShuffleTbl[] = {
    { TTI::SK_Broadcast, MVT::v4f32, 1 }, // shufps
    { TTI::SK_Reverse,   MVT::v4f32, 1 }, // shufps
    { TTI::SK_Alternate, MVT::v4f32, 2 }  // 2*shufps
  };

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}
int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // FIXME: Need a better design of the cost table to handle non-simple types
  // with potentially massive combinations (elem_num x src_type x dst_type).

  static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = {
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 },
    { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },

    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 },
    { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },

    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 1 },
    { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f32, 1 },
    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
    { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f64, 1 },
    { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f64, 1 },

    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },
    { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f64, 1 },
    { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f64, 1 },
  };

  // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and
  // 256-bit wide vectors.

  static const TypeConversionCostTblEntry AVX512FConversionTbl[] = {
    { ISD::FP_EXTEND, MVT::v8f64,   MVT::v8f32,  1 },
    { ISD::FP_EXTEND, MVT::v8f64,   MVT::v16f32, 3 },
    { ISD::FP_ROUND,  MVT::v8f32,   MVT::v8f64,  1 },

    { ISD::TRUNCATE,  MVT::v16i8,   MVT::v16i32, 1 },
    { ISD::TRUNCATE,  MVT::v16i16,  MVT::v16i32, 1 },
    { ISD::TRUNCATE,  MVT::v8i16,   MVT::v8i64,  1 },
    { ISD::TRUNCATE,  MVT::v8i32,   MVT::v8i64,  1 },

    // v16i1 -> v16i32 - load + broadcast
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1,  2 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i32,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i32,  1 },

    { ISD::SINT_TO_FP, MVT::v8f64,  MVT::v8i1,   4 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i1,  3 },
    { ISD::SINT_TO_FP, MVT::v8f64,  MVT::v8i8,   2 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8,  2 },
    { ISD::SINT_TO_FP, MVT::v8f64,  MVT::v8i16,  2 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
    { ISD::SINT_TO_FP, MVT::v8f64,  MVT::v8i32,  1 },

    { ISD::UINT_TO_FP, MVT::v8f64,  MVT::v8i1,   4 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i1,  3 },
    { ISD::UINT_TO_FP, MVT::v2f64,  MVT::v2i8,   2 },
    { ISD::UINT_TO_FP, MVT::v4f64,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP, MVT::v8f32,  MVT::v8i8,   2 },
    { ISD::UINT_TO_FP, MVT::v8f64,  MVT::v8i8,   2 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8,  2 },
    { ISD::UINT_TO_FP, MVT::v2f64,  MVT::v2i16,  5 },
    { ISD::UINT_TO_FP, MVT::v4f64,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP, MVT::v8f32,  MVT::v8i16,  2 },
    { ISD::UINT_TO_FP, MVT::v8f64,  MVT::v8i16,  2 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 },
    { ISD::UINT_TO_FP, MVT::v2f32,  MVT::v2i32,  2 },
    { ISD::UINT_TO_FP, MVT::v2f64,  MVT::v2i32,  1 },
    { ISD::UINT_TO_FP, MVT::v4f32,  MVT::v4i32,  1 },
    { ISD::UINT_TO_FP, MVT::v4f64,  MVT::v4i32,  1 },
    { ISD::UINT_TO_FP, MVT::v8f32,  MVT::v8i32,  1 },
    { ISD::UINT_TO_FP, MVT::v8f64,  MVT::v8i32,  1 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
    { ISD::UINT_TO_FP, MVT::v2f32,  MVT::v2i64,  5 },
    { ISD::UINT_TO_FP, MVT::v2f64,  MVT::v2i64,  5 },
    { ISD::UINT_TO_FP, MVT::v4f64,  MVT::v4i64, 12 },
    { ISD::UINT_TO_FP, MVT::v8f32,  MVT::v8i64, 26 },
    { ISD::UINT_TO_FP, MVT::v8f64,  MVT::v8i64, 26 },

    { ISD::FP_TO_UINT, MVT::v2i32,  MVT::v2f32,  1 },
    { ISD::FP_TO_UINT, MVT::v4i32,  MVT::v4f32,  1 },
    { ISD::FP_TO_UINT, MVT::v8i32,  MVT::v8f32,  1 },
    { ISD::FP_TO_UINT, MVT::v16i32, MVT::v16f32, 1 },
  };
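  // Illustrative lookup example (assumed, not from the source): a
  // `sitofp <8 x i32> %x to <8 x double>` query on an AVX512F target matches
  // the (SINT_TO_FP, v8f64, v8i32) entry above and returns cost 1; targets
  // without the feature fall through to the narrower tables below.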
  static const TypeConversionCostTblEntry AVX2ConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,  3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,  3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,  3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32, 1 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32, 1 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64, 2 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64, 2 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64, 2 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32, 2 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32, 2 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64, 4 },

    { ISD::FP_EXTEND,   MVT::v8f64,  MVT::v8f32, 3 },
    { ISD::FP_ROUND,    MVT::v8f32,  MVT::v8f64, 3 },

    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32, 8 },
  };

  static const TypeConversionCostTblEntry AVXConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   4 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  4 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  4 },

    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 4 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  4 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  5 },
    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  9 },

    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   3 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   8 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i8,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   3 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   8 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i16,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  3 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  5 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  1 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  1 },

    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   7 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   7 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   6 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   5 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  5 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i32,  6 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  6 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  6 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  9 },
    // The generic code to compute the scalar overhead is currently broken.
    // Workaround this limitation by estimating the scalarization overhead
    // here. We have roughly 10 instructions per scalar element.
    // Multiply that by the vector width.
    // FIXME: remove that when PR19268 is fixed.
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64, 10 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64, 20 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i64, 13 },

    { ISD::FP_TO_SINT,  MVT::v4i8,   MVT::v4f32,  1 },
    { ISD::FP_TO_SINT,  MVT::v8i8,   MVT::v8f32,  7 },
    // This node is expanded into scalarized operations but BasicTTI is overly
    // optimistic estimating its cost. It computes 3 per element (one
    // vector-extract, one scalar conversion and one vector-insert). The
    // problem is that the inserts form a read-modify-write chain so latency
    // should be factored in too. Inflating the cost per element by 1.
    { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f32,  8*4 },
    { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f64,  4*4 },

    { ISD::FP_EXTEND,   MVT::v4f64,  MVT::v4f32,  1 },
    { ISD::FP_ROUND,    MVT::v4f32,  MVT::v4f64,  1 },
  };
  static const TypeConversionCostTblEntry SSE41ConversionTbl[] = {
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   2 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   2 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  2 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  2 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  2 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  2 },

    { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i8,   2 },
    { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i8,   1 },
    { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i8,   1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   2 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   2 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  2 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  4 },
    { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i16,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  2 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 4 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 4 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i16,  2 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i16,  1 },
    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i32,  1 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i32,  1 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  3 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  3 },
    { ISD::TRUNCATE,    MVT::v16i16, MVT::v16i32, 6 },
  };
  static const TypeConversionCostTblEntry SSE2ConversionTbl[] = {
    // These are somewhat magic numbers justified by looking at the output of
    // Intel's IACA, running some kernels and making sure when we take
    // legalization into account the throughput will be overestimated.
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8,     8 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16,    15 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16,  8*10 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32,     5 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32,  4*10 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64,    15 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64,  2*10 },

    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8,     8 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16,    15 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16,  8*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32,  4*10 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32,     8 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64,  2*10 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64,    15 },

    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64,     3 },

    { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i8,    1 },
    { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i8,    6 },
    { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i8,    2 },
    { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i8,    3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,    4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,    8 },
    { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i8,    1 },
    { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i8,    2 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,    6 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,    6 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,   4 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,   9 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  12 },
    { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i16,   1 },
    { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i16,   2 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,   3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  10 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,   4 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16,  6 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16,  8 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,   3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,   5 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i16,   4 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i16,   2 },
    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16,  3 },
    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i32,   3 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i32,   3 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,   4 },
    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i32,  7 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,   5 },
    { ISD::TRUNCATE,    MVT::v16i16, MVT::v16i32, 10 },
  };

  std::pair<int, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
  std::pair<int, MVT> LTDest = TLI->getTypeLegalizationCost(DL, Dst);

  if (ST->hasSSE2() && !ST->hasAVX()) {
    if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return LTSrc.first * Entry->Cost;
  }
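  // Note (observation on the code above, phrased as an aside): this early
  // path for pre-AVX SSE2 targets looks up the *legalized* types and scales
  // by LTSrc.first, whereas the simple-VT lookups below consult the original
  // types and return the raw table cost.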
  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  // The function getSimpleVT only handles simple value types.
  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);

  if (ST->hasDQI())
    if (const auto *Entry = ConvertCostTableLookup(AVX512DQConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;

  if (ST->hasAVX512())
    if (const auto *Entry = ConvertCostTableLookup(AVX512FConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;

  if (ST->hasAVX2()) {
    if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  if (ST->hasAVX()) {
    if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  if (ST->hasSSE41()) {
    if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  if (ST->hasSSE2()) {
    if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}

int X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry SSE2CostTbl[] = {
    { ISD::SETCC, MVT::v2i64,  8 },
    { ISD::SETCC, MVT::v4i32,  1 },
    { ISD::SETCC, MVT::v8i16,  1 },
    { ISD::SETCC, MVT::v16i8,  1 },
  };

  static const CostTblEntry SSE42CostTbl[] = {
    { ISD::SETCC, MVT::v2f64,  1 },
    { ISD::SETCC, MVT::v4f32,  1 },
    { ISD::SETCC, MVT::v2i64,  1 },
  };

  static const CostTblEntry AVX1CostTbl[] = {
    { ISD::SETCC, MVT::v4f64,  1 },
    { ISD::SETCC, MVT::v8f32,  1 },
    // AVX1 does not support 8-wide integer compare.
    { ISD::SETCC, MVT::v4i64,  4 },
    { ISD::SETCC, MVT::v8i32,  4 },
    { ISD::SETCC, MVT::v16i16, 4 },
    { ISD::SETCC, MVT::v32i8,  4 },
  };

  static const CostTblEntry AVX2CostTbl[] = {
    { ISD::SETCC, MVT::v4i64,  1 },
    { ISD::SETCC, MVT::v8i32,  1 },
    { ISD::SETCC, MVT::v16i16, 1 },
    { ISD::SETCC, MVT::v32i8,  1 },
  };

  static const CostTblEntry AVX512CostTbl[] = {
    { ISD::SETCC, MVT::v8i64,  1 },
    { ISD::SETCC, MVT::v16i32, 1 },
    { ISD::SETCC, MVT::v8f64,  1 },
    { ISD::SETCC, MVT::v16f32, 1 },
  };
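  // Worked example (illustrative): on a plain SSE2 target, a setcc on
  // <32 x i8> legalizes to two v16i8 compares (LT.first == 2), so the v16i8
  // entry above (cost 1) yields a total cost of 2 in the lookups below.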
  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}

int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                      ArrayRef<Type *> Tys, FastMathFlags FMF) {
  // Costs should match the codegen from:
  // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll
  // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll
  // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll
  // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll
  // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll
  static const CostTblEntry XOPCostTbl[] = {
    { ISD::BITREVERSE, MVT::v4i64,   4 },
    { ISD::BITREVERSE, MVT::v8i32,   4 },
    { ISD::BITREVERSE, MVT::v16i16,  4 },
    { ISD::BITREVERSE, MVT::v32i8,   4 },
    { ISD::BITREVERSE, MVT::v2i64,   1 },
    { ISD::BITREVERSE, MVT::v4i32,   1 },
    { ISD::BITREVERSE, MVT::v8i16,   1 },
    { ISD::BITREVERSE, MVT::v16i8,   1 },
    { ISD::BITREVERSE, MVT::i64,     3 },
    { ISD::BITREVERSE, MVT::i32,     3 },
    { ISD::BITREVERSE, MVT::i16,     3 },
    { ISD::BITREVERSE, MVT::i8,      3 }
  };
  static const CostTblEntry AVX2CostTbl[] = {
    { ISD::BITREVERSE, MVT::v4i64,   5 },
    { ISD::BITREVERSE, MVT::v8i32,   5 },
    { ISD::BITREVERSE, MVT::v16i16,  5 },
    { ISD::BITREVERSE, MVT::v32i8,   5 },
    { ISD::BSWAP,      MVT::v4i64,   1 },
    { ISD::BSWAP,      MVT::v8i32,   1 },
    { ISD::BSWAP,      MVT::v16i16,  1 },
    { ISD::CTLZ,       MVT::v4i64,  23 },
    { ISD::CTLZ,       MVT::v8i32,  18 },
    { ISD::CTLZ,       MVT::v16i16, 14 },
    { ISD::CTLZ,       MVT::v32i8,   9 },
    { ISD::CTPOP,      MVT::v4i64,   7 },
    { ISD::CTPOP,      MVT::v8i32,  11 },
    { ISD::CTPOP,      MVT::v16i16,  9 },
    { ISD::CTPOP,      MVT::v32i8,   6 },
    { ISD::CTTZ,       MVT::v4i64,  10 },
    { ISD::CTTZ,       MVT::v8i32,  14 },
    { ISD::CTTZ,       MVT::v16i16, 12 },
    { ISD::CTTZ,       MVT::v32i8,   9 },
    { ISD::FSQRT,      MVT::f32,     7 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::v4f32,   7 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::v8f32,  14 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::f64,    14 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::v2f64,  14 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::v4f64,  28 }, // Haswell from http://www.agner.org/
  };
  static const CostTblEntry AVX1CostTbl[] = {
    { ISD::BITREVERSE, MVT::v4i64,  10 },
    { ISD::BITREVERSE, MVT::v8i32,  10 },
    { ISD::BITREVERSE, MVT::v16i16, 10 },
    { ISD::BITREVERSE, MVT::v32i8,  10 },
    { ISD::BSWAP,      MVT::v4i64,   4 },
    { ISD::BSWAP,      MVT::v8i32,   4 },
    { ISD::BSWAP,      MVT::v16i16,  4 },
    { ISD::CTLZ,       MVT::v4i64,  46 },
    { ISD::CTLZ,       MVT::v8i32,  36 },
    { ISD::CTLZ,       MVT::v16i16, 28 },
    { ISD::CTLZ,       MVT::v32i8,  18 },
    { ISD::CTPOP,      MVT::v4i64,  14 },
    { ISD::CTPOP,      MVT::v8i32,  22 },
    { ISD::CTPOP,      MVT::v16i16, 18 },
    { ISD::CTPOP,      MVT::v32i8,  12 },
    { ISD::CTTZ,       MVT::v4i64,  20 },
    { ISD::CTTZ,       MVT::v8i32,  28 },
    { ISD::CTTZ,       MVT::v16i16, 24 },
    { ISD::CTTZ,       MVT::v32i8,  18 },
    { ISD::FSQRT,      MVT::f32,    14 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::v4f32,  14 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::v8f32,  28 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::f64,    21 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::v2f64,  21 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::v4f64,  43 }, // SNB from http://www.agner.org/
  };
  static const CostTblEntry SSE42CostTbl[] = {
    { ISD::FSQRT, MVT::f32,   18 }, // Nehalem from http://www.agner.org/
    { ISD::FSQRT, MVT::v4f32, 18 }, // Nehalem from http://www.agner.org/
  };
  static const CostTblEntry SSSE3CostTbl[] = {
    { ISD::BITREVERSE, MVT::v2i64,   5 },
    { ISD::BITREVERSE, MVT::v4i32,   5 },
    { ISD::BITREVERSE, MVT::v8i16,   5 },
    { ISD::BITREVERSE, MVT::v16i8,   5 },
    { ISD::BSWAP,      MVT::v2i64,   1 },
    { ISD::BSWAP,      MVT::v4i32,   1 },
    { ISD::BSWAP,      MVT::v8i16,   1 },
    { ISD::CTLZ,       MVT::v2i64,  23 },
    { ISD::CTLZ,       MVT::v4i32,  18 },
    { ISD::CTLZ,       MVT::v8i16,  14 },
    { ISD::CTLZ,       MVT::v16i8,   9 },
    { ISD::CTPOP,      MVT::v2i64,   7 },
    { ISD::CTPOP,      MVT::v4i32,  11 },
    { ISD::CTPOP,      MVT::v8i16,   9 },
    { ISD::CTPOP,      MVT::v16i8,   6 },
    { ISD::CTTZ,       MVT::v2i64,  10 },
    { ISD::CTTZ,       MVT::v4i32,  14 },
    { ISD::CTTZ,       MVT::v8i16,  12 },
    { ISD::CTTZ,       MVT::v16i8,   9 }
  };
  static const CostTblEntry SSE2CostTbl[] = {
    { ISD::BSWAP, MVT::v2i64,  7 },
    { ISD::BSWAP, MVT::v4i32,  7 },
    { ISD::BSWAP, MVT::v8i16,  7 },
    { ISD::CTLZ,  MVT::v2i64, 25 },
    { ISD::CTLZ,  MVT::v4i32, 26 },
    { ISD::CTLZ,  MVT::v8i16, 20 },
    { ISD::CTLZ,  MVT::v16i8, 17 },
    { ISD::CTPOP, MVT::v2i64, 12 },
    { ISD::CTPOP, MVT::v4i32, 15 },
    { ISD::CTPOP, MVT::v8i16, 13 },
    { ISD::CTPOP, MVT::v16i8, 10 },
    { ISD::CTTZ,  MVT::v2i64, 14 },
    { ISD::CTTZ,  MVT::v4i32, 18 },
    { ISD::CTTZ,  MVT::v8i16, 16 },
    { ISD::CTTZ,  MVT::v16i8, 13 },
    { ISD::FSQRT, MVT::f64,   32 }, // Nehalem from http://www.agner.org/
    { ISD::FSQRT, MVT::v2f64, 32 }, // Nehalem from http://www.agner.org/
  };
  static const CostTblEntry SSE1CostTbl[] = {
    { ISD::FSQRT, MVT::f32,   28 }, // Pentium III from http://www.agner.org/
    { ISD::FSQRT, MVT::v4f32, 56 }, // Pentium III from http://www.agner.org/
  };
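  // Illustrative mapping example (assumed, the table lookups themselves fall
  // outside this excerpt): a call to @llvm.ctpop.v4i32 maps to ISD::CTPOP in
  // the switch below and, on an SSSE3 target, would resolve to the
  // (CTPOP, v4i32) entry above with cost 11.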

int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                      ArrayRef<Type *> Tys, FastMathFlags FMF) {
  // Costs should match the codegen from:
  // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll
  // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll
  // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll
  // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll
  // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll
  static const CostTblEntry XOPCostTbl[] = {
    { ISD::BITREVERSE, MVT::v4i64,   4 },
    { ISD::BITREVERSE, MVT::v8i32,   4 },
    { ISD::BITREVERSE, MVT::v16i16,  4 },
    { ISD::BITREVERSE, MVT::v32i8,   4 },
    { ISD::BITREVERSE, MVT::v2i64,   1 },
    { ISD::BITREVERSE, MVT::v4i32,   1 },
    { ISD::BITREVERSE, MVT::v8i16,   1 },
    { ISD::BITREVERSE, MVT::v16i8,   1 },
    { ISD::BITREVERSE, MVT::i64,     3 },
    { ISD::BITREVERSE, MVT::i32,     3 },
    { ISD::BITREVERSE, MVT::i16,     3 },
    { ISD::BITREVERSE, MVT::i8,      3 }
  };
  static const CostTblEntry AVX2CostTbl[] = {
    { ISD::BITREVERSE, MVT::v4i64,   5 },
    { ISD::BITREVERSE, MVT::v8i32,   5 },
    { ISD::BITREVERSE, MVT::v16i16,  5 },
    { ISD::BITREVERSE, MVT::v32i8,   5 },
    { ISD::BSWAP,      MVT::v4i64,   1 },
    { ISD::BSWAP,      MVT::v8i32,   1 },
    { ISD::BSWAP,      MVT::v16i16,  1 },
    { ISD::CTLZ,       MVT::v4i64,  23 },
    { ISD::CTLZ,       MVT::v8i32,  18 },
    { ISD::CTLZ,       MVT::v16i16, 14 },
    { ISD::CTLZ,       MVT::v32i8,   9 },
    { ISD::CTPOP,      MVT::v4i64,   7 },
    { ISD::CTPOP,      MVT::v8i32,  11 },
    { ISD::CTPOP,      MVT::v16i16,  9 },
    { ISD::CTPOP,      MVT::v32i8,   6 },
    { ISD::CTTZ,       MVT::v4i64,  10 },
    { ISD::CTTZ,       MVT::v8i32,  14 },
    { ISD::CTTZ,       MVT::v16i16, 12 },
    { ISD::CTTZ,       MVT::v32i8,   9 },
    { ISD::FSQRT,      MVT::f32,     7 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::v4f32,   7 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::v8f32,  14 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::f64,    14 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::v2f64,  14 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::v4f64,  28 }, // Haswell from http://www.agner.org/
  };
  static const CostTblEntry AVX1CostTbl[] = {
    { ISD::BITREVERSE, MVT::v4i64,  10 },
    { ISD::BITREVERSE, MVT::v8i32,  10 },
    { ISD::BITREVERSE, MVT::v16i16, 10 },
    { ISD::BITREVERSE, MVT::v32i8,  10 },
    { ISD::BSWAP,      MVT::v4i64,   4 },
    { ISD::BSWAP,      MVT::v8i32,   4 },
    { ISD::BSWAP,      MVT::v16i16,  4 },
    { ISD::CTLZ,       MVT::v4i64,  46 },
    { ISD::CTLZ,       MVT::v8i32,  36 },
    { ISD::CTLZ,       MVT::v16i16, 28 },
    { ISD::CTLZ,       MVT::v32i8,  18 },
    { ISD::CTPOP,      MVT::v4i64,  14 },
    { ISD::CTPOP,      MVT::v8i32,  22 },
    { ISD::CTPOP,      MVT::v16i16, 18 },
    { ISD::CTPOP,      MVT::v32i8,  12 },
    { ISD::CTTZ,       MVT::v4i64,  20 },
    { ISD::CTTZ,       MVT::v8i32,  28 },
    { ISD::CTTZ,       MVT::v16i16, 24 },
    { ISD::CTTZ,       MVT::v32i8,  18 },
    { ISD::FSQRT,      MVT::f32,    14 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::v4f32,  14 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::v8f32,  28 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::f64,    21 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::v2f64,  21 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::v4f64,  43 }, // SNB from http://www.agner.org/
  };
  static const CostTblEntry SSE42CostTbl[] = {
    { ISD::FSQRT, MVT::f32,   18 }, // Nehalem from http://www.agner.org/
    { ISD::FSQRT, MVT::v4f32, 18 }, // Nehalem from http://www.agner.org/
  };
  static const CostTblEntry SSSE3CostTbl[] = {
    { ISD::BITREVERSE, MVT::v2i64,  5 },
    { ISD::BITREVERSE, MVT::v4i32,  5 },
    { ISD::BITREVERSE, MVT::v8i16,  5 },
    { ISD::BITREVERSE, MVT::v16i8,  5 },
    { ISD::BSWAP,      MVT::v2i64,  1 },
    { ISD::BSWAP,      MVT::v4i32,  1 },
    { ISD::BSWAP,      MVT::v8i16,  1 },
    { ISD::CTLZ,       MVT::v2i64, 23 },
    { ISD::CTLZ,       MVT::v4i32, 18 },
    { ISD::CTLZ,       MVT::v8i16, 14 },
    { ISD::CTLZ,       MVT::v16i8,  9 },
    { ISD::CTPOP,      MVT::v2i64,  7 },
    { ISD::CTPOP,      MVT::v4i32, 11 },
    { ISD::CTPOP,      MVT::v8i16,  9 },
    { ISD::CTPOP,      MVT::v16i8,  6 },
    { ISD::CTTZ,       MVT::v2i64, 10 },
    { ISD::CTTZ,       MVT::v4i32, 14 },
    { ISD::CTTZ,       MVT::v8i16, 12 },
    { ISD::CTTZ,       MVT::v16i8,  9 }
  };
  static const CostTblEntry SSE2CostTbl[] = {
    { ISD::BSWAP, MVT::v2i64,  7 },
    { ISD::BSWAP, MVT::v4i32,  7 },
    { ISD::BSWAP, MVT::v8i16,  7 },
    { ISD::CTLZ,  MVT::v2i64, 25 },
    { ISD::CTLZ,  MVT::v4i32, 26 },
    { ISD::CTLZ,  MVT::v8i16, 20 },
    { ISD::CTLZ,  MVT::v16i8, 17 },
    { ISD::CTPOP, MVT::v2i64, 12 },
    { ISD::CTPOP, MVT::v4i32, 15 },
    { ISD::CTPOP, MVT::v8i16, 13 },
    { ISD::CTPOP, MVT::v16i8, 10 },
    { ISD::CTTZ,  MVT::v2i64, 14 },
    { ISD::CTTZ,  MVT::v4i32, 18 },
    { ISD::CTTZ,  MVT::v8i16, 16 },
    { ISD::CTTZ,  MVT::v16i8, 13 },
    { ISD::FSQRT, MVT::f64,   32 }, // Nehalem from http://www.agner.org/
    { ISD::FSQRT, MVT::v2f64, 32 }, // Nehalem from http://www.agner.org/
  };
  static const CostTblEntry SSE1CostTbl[] = {
    { ISD::FSQRT, MVT::f32,   28 }, // Pentium III from http://www.agner.org/
    { ISD::FSQRT, MVT::v4f32, 56 }, // Pentium III from http://www.agner.org/
  };

  unsigned ISD = ISD::DELETED_NODE;
  switch (IID) {
  default:
    break;
  case Intrinsic::bitreverse:
    ISD = ISD::BITREVERSE;
    break;
  case Intrinsic::bswap:
    ISD = ISD::BSWAP;
    break;
  case Intrinsic::ctlz:
    ISD = ISD::CTLZ;
    break;
  case Intrinsic::ctpop:
    ISD = ISD::CTPOP;
    break;
  case Intrinsic::cttz:
    ISD = ISD::CTTZ;
    break;
  case Intrinsic::sqrt:
    ISD = ISD::FSQRT;
    break;
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);
  MVT MTy = LT.second;

  // Attempt to look up the cost.
  if (ST->hasXOP())
    if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSSE3())
    if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  return BaseT::getIntrinsicInstrCost(IID, RetTy, Tys, FMF);
}
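
// Worked example of the lookups above (table values, not measurements):
// llvm.ctpop.v4i32 legalizes to a single v4i32 op (LT.first == 1), so an
// SSSE3 target returns the SSSE3 entry of 11 while a plain SSE2 target falls
// through to the SSE2 entry of 15; a v8i32 ctpop on SSE2 is first split into
// two v4i32 halves, doubling the cost via LT.first == 2.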

int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                      ArrayRef<Value *> Args,
                                      FastMathFlags FMF) {
  return BaseT::getIntrinsicInstrCost(IID, RetTy, Args, FMF);
}

int X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  Type *ScalarType = Val->getScalarType();

  if (Index != -1U) {
    // Legalize the type.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // Floating point scalars are already located in index #0.
    if (ScalarType->isFloatingPointTy() && Index == 0)
      return 0;
  }

  // Add to the base cost if we know that the extracted element of a vector is
  // destined to be moved to and used in the integer register file.
  int RegisterFileMoveCost = 0;
  if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy())
    RegisterFileMoveCost = 1;

  return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost;
}

int X86TTIImpl::getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
  assert(Ty->isVectorTy() && "Can only scalarize vectors");
  int Cost = 0;

  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    if (Insert)
      Cost += getVectorInstrCost(Instruction::InsertElement, Ty, i);
    if (Extract)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }

  return Cost;
}
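
// For instance, fully scalarizing a <4 x float> value for a load (Insert ==
// true, Extract == false) sums the cost of four insertelement operations,
// one per lane, with the per-lane cost supplied by getVectorInstrCost above.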

int X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                unsigned AddressSpace) {
  // Handle non-power-of-two vectors such as <3 x float>.
  if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
    unsigned NumElem = VTy->getVectorNumElements();

    // Handle a few common cases:
    // <3 x float>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
      // Cost = 64 bit store + extract + 32 bit store.
      return 3;

    // <3 x double>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
      // Cost = 128 bit store + unpack + 64 bit store.
      return 3;

    // Assume that all other non-power-of-two numbers are scalarized.
    if (!isPowerOf2_32(NumElem)) {
      int Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(), Alignment,
                                        AddressSpace);
      int SplitCost = getScalarizationOverhead(Src, Opcode == Instruction::Load,
                                               Opcode == Instruction::Store);
      return NumElem * Cost + SplitCost;
    }
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  // Each load/store unit costs 1.
  int Cost = LT.first * 1;

  // This isn't exactly right. We're using slow unaligned 32-byte accesses as a
  // proxy for a double-pumped AVX memory interface such as on Sandybridge.
  if (LT.second.getStoreSize() == 32 && ST->isUnalignedMem32Slow())
    Cost *= 2;

  return Cost;
}

int X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
                                      unsigned Alignment,
                                      unsigned AddressSpace) {
  VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy);
  if (!SrcVTy)
    // For a scalar, take the regular cost without the mask.
    return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace);

  unsigned NumElem = SrcVTy->getVectorNumElements();
  VectorType *MaskTy =
      VectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem);
  if ((Opcode == Instruction::Load && !isLegalMaskedLoad(SrcVTy)) ||
      (Opcode == Instruction::Store && !isLegalMaskedStore(SrcVTy)) ||
      !isPowerOf2_32(NumElem)) {
    // Scalarization.
    int MaskSplitCost = getScalarizationOverhead(MaskTy, false, true);
    int ScalarCompareCost = getCmpSelInstrCost(
        Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr);
    int BranchCost = getCFInstrCost(Instruction::Br);
    int MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);

    int ValueSplitCost = getScalarizationOverhead(
        SrcVTy, Opcode == Instruction::Load, Opcode == Instruction::Store);
    int MemopCost =
        NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                         Alignment, AddressSpace);
    return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
  auto VT = TLI->getValueType(DL, SrcVTy);
  int Cost = 0;
  if (VT.isSimple() && LT.second != VT.getSimpleVT() &&
      LT.second.getVectorNumElements() == NumElem)
    // Promotion requires an expand/truncate for the data and a shuffle for
    // the mask.
    Cost += getShuffleCost(TTI::SK_Alternate, SrcVTy, 0, nullptr) +
            getShuffleCost(TTI::SK_Alternate, MaskTy, 0, nullptr);

  else if (LT.second.getVectorNumElements() > NumElem) {
    VectorType *NewMaskTy = VectorType::get(MaskTy->getVectorElementType(),
                                            LT.second.getVectorNumElements());
    // Expanding requires filling the mask with zeroes.
    Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, 0, MaskTy);
  }
  if (!ST->hasAVX512())
    return Cost + LT.first * 4; // Each maskmov costs 4.

  // AVX-512 masked load/store is cheaper.
  return Cost + LT.first;
}
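
// Rough illustration of the legal (non-scalarized) path above: a masked load
// of <8 x float> on an AVX target legalizes to one v8f32 op (LT.first == 1)
// and, lacking AVX-512, is charged LT.first * 4 for the maskmov, whereas an
// AVX-512 target pays only LT.first for its native masked move.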

int X86TTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
                                          const SCEV *Ptr) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  // Cost modeling of strided access computation is hidden by the indexing
  // modes of X86 regardless of the stride value. We don't believe that there
  // is a difference between constant strided access in general and a constant
  // stride whose value is less than or equal to 64.
  // Even in the case of a (loop-invariant) stride whose value is not known at
  // compile time, the address computation will not incur more than one extra
  // ADD instruction.
  if (Ty->isVectorTy() && SE) {
    if (!BaseT::isStridedAccess(Ptr))
      return NumVectorInstToHideOverhead;
    if (!BaseT::getConstantStrideStep(SE, Ptr))
      return 1;
  }

  return BaseT::getAddressComputationCost(Ty, SE, Ptr);
}
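
// In other words: a vector access with no recognizable stride is charged 10
// to reflect the per-lane address arithmetic, a strided access whose stride
// is loop invariant but not a compile-time constant is charged a single extra
// ADD, and constant strides fall through to the base implementation.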

int X86TTIImpl::getReductionCost(unsigned Opcode, Type *ValTy,
                                 bool IsPairwise) {

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use it as the cost.

  static const CostTblEntry SSE42CostTblPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD,  MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD,  MVT::v8i16, 5 },
  };

  static const CostTblEntry AVX1CostTblPairWise[] = {
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::FADD, MVT::v4f64, 5 },
    { ISD::FADD, MVT::v8f32, 7 },
    { ISD::ADD,  MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD,  MVT::v4i64, 5 }, // The data reported by the IACA tool is "4.8".
    { ISD::ADD,  MVT::v8i16, 5 },
    { ISD::ADD,  MVT::v8i32, 5 },
  };

  static const CostTblEntry SSE42CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD,  MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3".
    { ISD::ADD,  MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3".
  };

  static const CostTblEntry AVX1CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v4f32, 3 },
    { ISD::FADD, MVT::v4f64, 3 },
    { ISD::FADD, MVT::v8f32, 4 },
    { ISD::ADD,  MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "2.8".
    { ISD::ADD,  MVT::v4i64, 3 },
    { ISD::ADD,  MVT::v8i16, 4 },
    { ISD::ADD,  MVT::v8i32, 5 },
  };

  if (IsPairwise) {
    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTblPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE42())
      if (const auto *Entry = CostTableLookup(SSE42CostTblPairWise, ISD, MTy))
        return LT.first * Entry->Cost;
  } else {
    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE42())
      if (const auto *Entry = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy))
        return LT.first * Entry->Cost;
  }

  return BaseT::getReductionCost(Opcode, ValTy, IsPairwise);
}

/// \brief Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
int X86TTIImpl::getIntImmCost(int64_t Val) {
  if (Val == 0)
    return TTI::TCC_Free;

  if (isInt<32>(Val))
    return TTI::TCC_Basic;

  return 2 * TTI::TCC_Basic;
}

int X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Never hoist constants larger than 128 bits, because this might lead to
  // incorrect code generation or assertions in codegen.
  // FIXME: Create a cost model for types larger than i128 once the codegen
  // issues have been fixed.
  if (BitSize > 128)
    return TTI::TCC_Free;

  if (Imm == 0)
    return TTI::TCC_Free;

  // Sign-extend all constants to a multiple of 64 bits.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1, Cost);
}
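
// Worked example of the chunked costing above: an i64 constant such as
// 0x100000000 does not fit in a sign-extended 32-bit immediate, so
// getIntImmCost(int64_t) charges 2 * TCC_Basic for the 64-bit (movabsq-style)
// materialization, while 42 costs TCC_Basic and 0 is TCC_Free; an i128
// constant is split into two 64-bit chunks whose costs are summed.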

int X86TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::ICmp:
    // This is an imperfect hack to prevent constant hoisting of
    // compares that might be trying to check if a 64-bit value fits in
    // 32 bits. The backend can optimize these cases using a right shift by 32.
    // Ideally we would check the compare predicate here. There are also other
    // similar immediates for which the backend can use shifts.
    if (Idx == 1 && Imm.getBitWidth() == 64) {
      uint64_t ImmVal = Imm.getZExtValue();
      if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff)
        return TTI::TCC_Free;
    }
    ImmIdx = 1;
    break;
  case Instruction::And:
    // We support 64-bit ANDs with immediates with 32 bits of leading zeroes
    // by using a 32-bit operation with implicit zero extension. Detect such
    // immediates here as the normal path expects bit 31 to be sign extended.
    if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
      return TTI::TCC_Free;
    LLVM_FALLTHROUGH;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::Or:
  case Instruction::Xor:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    int NumConstants = (BitSize + 63) / 64;
    int Cost = X86TTIImpl::getIntImmCost(Imm, Ty);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<int>(TTI::TCC_Free)
               : Cost;
  }

  return X86TTIImpl::getIntImmCost(Imm, Ty);
}
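
// For instance, under the rules above 'and i64 %x, 4294967295' keeps its
// immediate free because it can be encoded as a 32-bit operation with
// implicit zero extension, while 'add i64 %x, 0x1122334455667788' reports the
// full materialization cost and therefore stays a candidate for hoisting.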

int X86TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) ||
        (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) ||
        (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return X86TTIImpl::getIntImmCost(Imm, Ty);
}

// Return an average cost of a Gather / Scatter instruction; may be improved
// later.
int X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy, Value *Ptr,
                                unsigned Alignment, unsigned AddressSpace) {

  assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost");
  unsigned VF = SrcVTy->getVectorNumElements();

  // Try to reduce the index size from 64 bits (the default for a GEP) to 32
  // bits. This is essential for VF 16: if the index can't be reduced to 32
  // bits, the operation uses 16 x 64-bit indices, which do not fit in a zmm
  // register and need to be split. Also check that the base pointer is the
  // same for all lanes, and that there's at most one variable index.
  auto getIndexSizeInBits = [](Value *Ptr, const DataLayout &DL) {
    unsigned IndexSize = DL.getPointerSizeInBits();
    GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
    if (IndexSize < 64 || !GEP)
      return IndexSize;

    unsigned NumOfVarIndices = 0;
    Value *Ptrs = GEP->getPointerOperand();
    if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs))
      return IndexSize;
    for (unsigned i = 1; i < GEP->getNumOperands(); ++i) {
      if (isa<Constant>(GEP->getOperand(i)))
        continue;
      Type *IndxTy = GEP->getOperand(i)->getType();
      if (IndxTy->isVectorTy())
        IndxTy = IndxTy->getVectorElementType();
      if ((IndxTy->getPrimitiveSizeInBits() == 64 &&
           !isa<SExtInst>(GEP->getOperand(i))) ||
          ++NumOfVarIndices > 1)
        return IndexSize; // 64
    }
    return (unsigned)32;
  };

  // Try to reduce IndexSize to 32 bits for VF >= 16.
  // By default the IndexSize is equal to the pointer size.
  unsigned IndexSize = (VF >= 16) ? getIndexSizeInBits(Ptr, DL)
                                  : DL.getPointerSizeInBits();

  Type *IndexVTy = VectorType::get(IntegerType::get(SrcVTy->getContext(),
                                                    IndexSize), VF);
  std::pair<int, MVT> IdxsLT = TLI->getTypeLegalizationCost(DL, IndexVTy);
  std::pair<int, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, SrcVTy);
  int SplitFactor = std::max(IdxsLT.first, SrcLT.first);
  if (SplitFactor > 1) {
    // Handle the splitting of a vector of pointers.
    Type *SplitSrcTy =
        VectorType::get(SrcVTy->getScalarType(), VF / SplitFactor);
    return SplitFactor * getGSVectorCost(Opcode, SplitSrcTy, Ptr, Alignment,
                                         AddressSpace);
  }

  // The gather / scatter cost is given by Intel architects. It is a rough
  // number since we are looking at one instruction at a time.
  const int GSOverhead = 2;
  return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                           Alignment, AddressSpace);
}
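
// Sketch of the formula above, assuming a 512-bit capable target: a gather of
// <16 x float> whose indices can be narrowed to 32 bits uses a v16i32 index
// vector, nothing needs splitting (SplitFactor == 1), and the cost comes out
// as GSOverhead (2) plus 16 times the scalar f32 load cost.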

/// Return the cost of full scalarization of a gather / scatter operation.
///
/// Opcode - Load or Store instruction.
/// SrcVTy - The type of the data vector that should be gathered or scattered.
/// VariableMask - The mask is non-constant at compile time.
/// Alignment - Alignment for one element.
/// AddressSpace - pointer[s] address space.
///
int X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy,
                                bool VariableMask, unsigned Alignment,
                                unsigned AddressSpace) {
  unsigned VF = SrcVTy->getVectorNumElements();

  int MaskUnpackCost = 0;
  if (VariableMask) {
    VectorType *MaskTy =
        VectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF);
    MaskUnpackCost = getScalarizationOverhead(MaskTy, false, true);
    int ScalarCompareCost =
        getCmpSelInstrCost(Instruction::ICmp,
                           Type::getInt1Ty(SrcVTy->getContext()), nullptr);
    int BranchCost = getCFInstrCost(Instruction::Br);
    MaskUnpackCost += VF * (BranchCost + ScalarCompareCost);
  }

  // The cost of the scalar loads/stores.
  int MemoryOpCost = VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                          Alignment, AddressSpace);

  int InsertExtractCost = 0;
  if (Opcode == Instruction::Load)
    for (unsigned i = 0; i < VF; ++i)
      // Add the cost of inserting each scalar load into the vector.
      InsertExtractCost +=
          getVectorInstrCost(Instruction::InsertElement, SrcVTy, i);
  else
    for (unsigned i = 0; i < VF; ++i)
      // Add the cost of extracting each element out of the data vector.
      InsertExtractCost +=
          getVectorInstrCost(Instruction::ExtractElement, SrcVTy, i);

  return MemoryOpCost + MaskUnpackCost + InsertExtractCost;
}
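
// Example composition of the scalarized cost above: a gather of <4 x float>
// with a variable mask pays for extracting the four i1 mask bits, four
// compare-plus-branch sequences, four scalar f32 loads, and four
// insertelement operations to rebuild the result vector.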

/// Calculate the cost of a Gather / Scatter operation.
int X86TTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *SrcVTy,
                                       Value *Ptr, bool VariableMask,
                                       unsigned Alignment) {
  assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter");
  unsigned VF = SrcVTy->getVectorNumElements();
  PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
  if (!PtrTy && Ptr->getType()->isVectorTy())
    PtrTy = dyn_cast<PointerType>(Ptr->getType()->getVectorElementType());
  assert(PtrTy && "Unexpected type for Ptr argument");
  unsigned AddressSpace = PtrTy->getAddressSpace();

  bool Scalarize = false;
  if ((Opcode == Instruction::Load && !isLegalMaskedGather(SrcVTy)) ||
      (Opcode == Instruction::Store && !isLegalMaskedScatter(SrcVTy)))
    Scalarize = true;
  // A gather / scatter of a 2-element vector is not profitable on KNL / SKX.
  // A 4-element gather/scatter instruction does not exist on KNL. We can
  // extend it to 8 elements, but zeroing the upper bits of the mask vector
  // will add more instructions. Right now we give the scalarized cost for
  // 4-element vectors on KNL. TODO: Check, maybe the gather/scatter
  // instruction is better in the VariableMask case.
  if (VF == 2 || (VF == 4 && !ST->hasVLX()))
    Scalarize = true;

  if (Scalarize)
    return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment,
                           AddressSpace);

  return getGSVectorCost(Opcode, SrcVTy, Ptr, Alignment, AddressSpace);
}

bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy) {
  Type *ScalarTy = DataTy->getScalarType();
  int DataWidth = isa<PointerType>(ScalarTy) ?
    DL.getPointerSizeInBits() : ScalarTy->getPrimitiveSizeInBits();

  return ((DataWidth == 32 || DataWidth == 64) && ST->hasAVX()) ||
         ((DataWidth == 8 || DataWidth == 16) && ST->hasBWI());
}

bool X86TTIImpl::isLegalMaskedStore(Type *DataType) {
  return isLegalMaskedLoad(DataType);
}

bool X86TTIImpl::isLegalMaskedGather(Type *DataTy) {
  // This function is currently called in two cases: from the Loop Vectorizer
  // and from the Scalarizer.
  // When the Loop Vectorizer asks about the legality of the feature, the
  // vectorization factor is not calculated yet. The Loop Vectorizer sends a
  // scalar type and the decision is based on the width of the scalar element.
  // Later on, the cost model will estimate the usage of this intrinsic based
  // on the vector type.
  // The Scalarizer asks again about legality. It sends a vector type.
  // In this case we can reject non-power-of-2 vectors.
  if (isa<VectorType>(DataTy) && !isPowerOf2_32(DataTy->getVectorNumElements()))
    return false;
  Type *ScalarTy = DataTy->getScalarType();
  int DataWidth = isa<PointerType>(ScalarTy) ?
    DL.getPointerSizeInBits() : ScalarTy->getPrimitiveSizeInBits();

  // AVX-512 allows gather and scatter.
  return (DataWidth == 32 || DataWidth == 64) && ST->hasAVX512();
}

bool X86TTIImpl::isLegalMaskedScatter(Type *DataType) {
  return isLegalMaskedGather(DataType);
}

bool X86TTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();

  // Work this as a subsetting of subtarget features.
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // FIXME: This is likely too limiting as it will include subtarget features
  // that we might not care about for inlining, but it is conservatively
  // correct.
  return (CallerBits & CalleeBits) == CalleeBits;
}

bool X86TTIImpl::enableInterleavedAccessVectorization() {
  // TODO: We expect this to be beneficial regardless of arch,
  // but there are currently some unexplained performance artifacts on Atom.
  // As a temporary solution, disable on Atom.
  return !(ST->isAtom() || ST->isSLM());
}

// Get an estimate for interleaved load/store operations and strided loads.
// \p Indices contains the indices for a strided load.
// \p Factor - the factor of interleaving.
// AVX-512 provides 3-src shuffles that significantly reduce the cost.
int X86TTIImpl::getInterleavedMemoryOpCostAVX512(unsigned Opcode, Type *VecTy,
                                                 unsigned Factor,
                                                 ArrayRef<unsigned> Indices,
                                                 unsigned Alignment,
                                                 unsigned AddressSpace) {

  // VecTy for an interleaved memop is <VF*Factor x Elt>.
  // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
  // VecTy = <12 x i32>.
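  // Continuing that example, and assuming <12 x i32> widens to one v16i32 on
  // a 512-bit target: VecTySize is 48 bytes, LegalVTSize is 64 bytes, so
  // NumOfMemOps below evaluates to ceil(48 / 64) == 1 memory operation.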

  // Calculate the number of memory operations (NumOfMemOps) required
  // to load/store the VecTy.
  MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
  unsigned VecTySize = DL.getTypeStoreSize(VecTy);
  unsigned LegalVTSize = LegalVT.getStoreSize();
  unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;

  // Get the cost of one memory operation.
  Type *SingleMemOpTy = VectorType::get(VecTy->getVectorElementType(),
                                        LegalVT.getVectorNumElements());
  unsigned MemOpCost =
      getMemoryOpCost(Opcode, SingleMemOpTy, Alignment, AddressSpace);

  if (Opcode == Instruction::Load) {
    // The kind of shuffle depends on the number of loaded values.
    // If we load the entire data in one register, we can use a 1-src shuffle.
    // Otherwise, we'll merge 2 sources in each operation.
    TTI::ShuffleKind ShuffleKind =
        (NumOfMemOps > 1) ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc;

    unsigned ShuffleCost =
        getShuffleCost(ShuffleKind, SingleMemOpTy, 0, nullptr);

    unsigned NumOfLoadsInInterleaveGrp =
        Indices.size() ? Indices.size() : Factor;
    Type *ResultTy = VectorType::get(VecTy->getVectorElementType(),
                                     VecTy->getVectorNumElements() / Factor);
    unsigned NumOfResults =
        getTLI()->getTypeLegalizationCost(DL, ResultTy).first *
        NumOfLoadsInInterleaveGrp;

    // About half of the loads may be folded into shuffles when we have only
    // one result. If we have more than one result, we do not fold loads at
    // all.
    unsigned NumOfUnfoldedLoads =
        NumOfResults > 1 ? NumOfMemOps : NumOfMemOps / 2;

    // Get the number of shuffle operations per result.
    unsigned NumOfShufflesPerResult =
        std::max((unsigned)1, (unsigned)(NumOfMemOps - 1));

    // The SK_PermuteTwoSrc shuffle clobbers one of its source operands. When
    // we have more than one destination, we need additional instructions to
    // keep the sources intact.
    unsigned NumOfMoves = 0;
    if (NumOfResults > 1 && ShuffleKind == TTI::SK_PermuteTwoSrc)
      NumOfMoves = NumOfResults * NumOfShufflesPerResult / 2;

    int Cost = NumOfResults * NumOfShufflesPerResult * ShuffleCost +
               NumOfUnfoldedLoads * MemOpCost + NumOfMoves;

    return Cost;
  }

  // Store.
  assert(Opcode == Instruction::Store &&
         "Expected Store Instruction at this point");

  // There are no strided stores at the moment, and a store can't be folded
  // into a shuffle.
  unsigned NumOfSources = Factor; // The number of values to be merged.
  unsigned ShuffleCost =
      getShuffleCost(TTI::SK_PermuteTwoSrc, SingleMemOpTy, 0, nullptr);
  unsigned NumOfShufflesPerStore = NumOfSources - 1;

  // The SK_PermuteTwoSrc shuffle clobbers one of its source operands. We need
  // additional instructions to keep the sources intact.
  unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2;
  int Cost = NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) +
             NumOfMoves;
  return Cost;
}
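
// Rough instance of the store formula above, assuming <12 x i32> widens to a
// single v16i32 memop on a 512-bit target: NumOfMemOps == 1 and
// NumOfShufflesPerStore == 2, so the estimate is one store, two two-source
// shuffles to interleave the three sources, plus one move to preserve a
// clobbered source.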

int X86TTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                           unsigned Factor,
                                           ArrayRef<unsigned> Indices,
                                           unsigned Alignment,
                                           unsigned AddressSpace) {
  auto isSupportedOnAVX512 = [](Type *VecTy, bool &RequiresBW) {
    RequiresBW = false;
    Type *EltTy = VecTy->getVectorElementType();
    if (EltTy->isFloatTy() || EltTy->isDoubleTy() || EltTy->isIntegerTy(64) ||
        EltTy->isIntegerTy(32) || EltTy->isPointerTy())
      return true;
    if (EltTy->isIntegerTy(16) || EltTy->isIntegerTy(8)) {
      RequiresBW = true;
      return true;
    }
    return false;
  };
  bool RequiresBW;
  bool HasAVX512Solution = isSupportedOnAVX512(VecTy, RequiresBW);
  if (ST->hasAVX512() && HasAVX512Solution && (!RequiresBW || ST->hasBWI()))
    return getInterleavedMemoryOpCostAVX512(Opcode, VecTy, Factor, Indices,
                                            Alignment, AddressSpace);
  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace);
}