//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//
/// A note about the cost model numbers used below: the numbers correspond to
/// some "generic" X86 CPU rather than to a concrete CPU model. Usually the
/// numbers correspond to the CPU where the feature first appeared. For
/// example, if we check Subtarget.hasSSE42() in the lookups below, the cost
/// is based on Nehalem, as that was the first CPU to support that feature
/// level and thus most likely has the worst-case cost.
/// Some examples of other technologies/CPUs:
///   SSE 3   - Pentium4 / Athlon64
///   SSE 4.1 - Penryn
///   SSE 4.2 - Nehalem
///   AVX     - Sandy Bridge
///   AVX2    - Haswell
///   AVX-512 - Xeon Phi / Skylake
/// And some examples of target-dependent instruction costs (latency):
///                   divss     sqrtss    rsqrtss
///   AMD K7          11-16     19        3
///   Piledriver      9-24      13-15     5
///   Jaguar          14        16        2
///   Pentium II,III  18        30        2
///   Nehalem         7-14      7-18      3
///   Haswell         10-13     11        5
/// TODO: Develop and implement the target-dependent cost model and
/// specialize cost numbers for different cost model targets such as
/// throughput, code size, latency and uop count.
//===----------------------------------------------------------------------===//

#include "X86TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"

using namespace llvm;

#define DEBUG_TYPE "x86tti"

//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  //   instructions is inefficient. Once the problem is fixed, we should
  //   call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
}
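// Illustrative note (a sketch, not derived from this file): with +popcnt a
// scalar population count such as
//   %c = call i32 @llvm.ctpop.i32(i32 %x)
// selects to a single POPCNT instruction, which is why we report
// PSK_FastHardware above; without the feature the DAG expands ctpop into a
// shift/mask/add sequence, matching PSK_Software.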
unsigned X86TTIImpl::getNumberOfRegisters(bool Vector) {
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit()) {
    if (Vector && ST->hasAVX512())
      return 32;
    return 16;
  }
  return 8;
}

unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) {
  if (Vector) {
    if (ST->hasAVX512())
      return 512;
    if (ST->hasAVX())
      return 256;
    if (ST->hasSSE1())
      return 128;
    return 0;
  }

  if (ST->is64Bit())
    return 64;

  return 32;
}

unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // If the loop will not be vectorized, don't interleave the loop.
  // Let the regular unroller unroll the loop instead, which saves the
  // overflow check and memory check cost.
  if (VF == 1)
    return 1;

  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}

int X86TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty,
    TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
    TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo,
    ArrayRef<const Value *> Args) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry SLMCostTable[] = {
    { ISD::MUL,  MVT::v4i32, 11 }, // pmulld
    { ISD::MUL,  MVT::v8i16, 2  }, // pmullw
    { ISD::MUL,  MVT::v16i8, 14 }, // extend/pmullw/trunc sequence.
    { ISD::FMUL, MVT::f64,   2  }, // mulsd
    { ISD::FMUL, MVT::v2f64, 4  }, // mulpd
    { ISD::FMUL, MVT::v4f32, 2  }, // mulps
    { ISD::FDIV, MVT::f32,   17 }, // divss
    { ISD::FDIV, MVT::v4f32, 39 }, // divps
    { ISD::FDIV, MVT::f64,   32 }, // divsd
    { ISD::FDIV, MVT::v2f64, 69 }, // divpd
    { ISD::FADD, MVT::v2f64, 2  }, // addpd
    { ISD::FSUB, MVT::v2f64, 2  }, // subpd
    // v2i64/v4i64 mul is custom lowered as a series of long
    // multiplies(3), shifts(3) and adds(2).
    // SLM muldq version throughput is 2.
    { ISD::MUL,  MVT::v2i64, 11 },
  };

  if (ST->isSLM()) {
    if (Args.size() == 2 && ISD == ISD::MUL && LT.second == MVT::v4i32) {
      // Check if the operands can be shrunk into a smaller datatype.
      bool Op1Signed = false;
      unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
      bool Op2Signed = false;
      unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);

      bool signedMode = Op1Signed | Op2Signed;
      unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);

      if (OpMinSize <= 7)
        return LT.first * 3; // pmullw/sext
      if (!signedMode && OpMinSize <= 8)
        return LT.first * 3; // pmullw/zext
      if (OpMinSize <= 15)
        return LT.first * 5; // pmullw/pmulhw/pshuf
      if (!signedMode && OpMinSize <= 16)
        return LT.first * 5; // pmullw/pmulhw/pshuf
    }
    if (const auto *Entry = CostTableLookup(SLMCostTable, ISD,
                                            LT.second)) {
      return LT.first * Entry->Cost;
    }
  }
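  // Worked example (informal, assumed subtarget): on SLM a v8i32 multiply is
  // not a legal type, so it legalizes to two v4i32 halves, i.e. LT = {2,
  // v4i32}; the pmulld entry of 11 above then yields a reported cost of
  // 2 * 11 = 22.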
  if (ISD == ISD::SDIV &&
      Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    // On X86, vector signed division by a power-of-two constant is
    // normally expanded to the sequence SRA + SRL + ADD + SRA.
    // The OperandValue properties may not be the same as those of the
    // previous operation; conservatively assume OP_None.
    int Cost = 2 * getArithmeticInstrCost(Instruction::AShr, Ty, Op1Info,
                                          Op2Info, TargetTransformInfo::OP_None,
                                          TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::LShr, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Add, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);

    return Cost;
  }

  static const CostTblEntry AVX512BWUniformConstCostTable[] = {
    { ISD::SHL,  MVT::v64i8,   2 }, // psllw + pand.
    { ISD::SRL,  MVT::v64i8,   2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v64i8,   4 }, // psrlw, pand, pxor, psubb.

    { ISD::SDIV, MVT::v32i16,  6 }, // vpmulhw sequence
    { ISD::UDIV, MVT::v32i16,  6 }, // vpmulhuw sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasBWI()) {
    if (const auto *Entry = CostTableLookup(AVX512BWUniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512UniformConstCostTable[] = {
    { ISD::SRA,  MVT::v2i64,   1 },
    { ISD::SRA,  MVT::v4i64,   1 },
    { ISD::SRA,  MVT::v8i64,   1 },

    { ISD::SDIV, MVT::v16i32, 15 }, // vpmuldq sequence
    { ISD::UDIV, MVT::v16i32, 15 }, // vpmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX512()) {
    if (const auto *Entry = CostTableLookup(AVX512UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX2UniformConstCostTable[] = {
    { ISD::SHL,  MVT::v32i8,   2 }, // psllw + pand.
    { ISD::SRL,  MVT::v32i8,   2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v32i8,   4 }, // psrlw, pand, pxor, psubb.

    { ISD::SRA,  MVT::v4i64,   4 }, // 2 x psrad + shuffle.

    { ISD::SDIV, MVT::v16i16,  6 }, // vpmulhw sequence
    { ISD::UDIV, MVT::v16i16,  6 }, // vpmulhuw sequence
    { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
    { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX2()) {
    if (const auto *Entry = CostTableLookup(AVX2UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }
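  // Illustrative sketch (hedged, not generated by this code): a divide by a
  // uniform constant such as
  //   %d = sdiv <16 x i16> %x, <i16 7, i16 7, ... i16 7>
  // is lowered via a pmulhw-based magic-number sequence, which is why the
  // constant-divisor entries above (cost 6) are far cheaper than the
  // scalarized SDIV/UDIV entries in the tables further down.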
  static const CostTblEntry SSE2UniformConstCostTable[] = {
    { ISD::SHL,  MVT::v16i8,   2 }, // psllw + pand.
    { ISD::SRL,  MVT::v16i8,   2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v16i8,   4 }, // psrlw, pand, pxor, psubb.

    { ISD::SHL,  MVT::v32i8,   4 }, // 2*(psllw + pand).
    { ISD::SRL,  MVT::v32i8,   4 }, // 2*(psrlw + pand).
    { ISD::SRA,  MVT::v32i8,   8 }, // 2*(psrlw, pand, pxor, psubb).

    { ISD::SDIV, MVT::v16i16, 12 }, // pmulhw sequence
    { ISD::SDIV, MVT::v8i16,   6 }, // pmulhw sequence
    { ISD::UDIV, MVT::v16i16, 12 }, // pmulhuw sequence
    { ISD::UDIV, MVT::v8i16,   6 }, // pmulhuw sequence
    { ISD::SDIV, MVT::v8i32,  38 }, // pmuludq sequence
    { ISD::SDIV, MVT::v4i32,  19 }, // pmuludq sequence
    { ISD::UDIV, MVT::v8i32,  30 }, // pmuludq sequence
    { ISD::UDIV, MVT::v4i32,  15 }, // pmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2()) {
    // pmuldq sequence.
    if (ISD == ISD::SDIV && LT.second == MVT::v8i32 && ST->hasAVX())
      return LT.first * 30;
    if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 15;

    if (const auto *Entry = CostTableLookup(SSE2UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX2UniformCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL, MVT::v16i16, 1 }, // psllw.
    { ISD::SRL, MVT::v16i16, 1 }, // psrlw.
    { ISD::SRA, MVT::v16i16, 1 }, // psraw.
  };

  if (ST->hasAVX2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(AVX2UniformCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL, MVT::v8i16, 1 }, // psllw.
    { ISD::SHL, MVT::v4i32, 1 }, // pslld
    { ISD::SHL, MVT::v2i64, 1 }, // psllq.

    { ISD::SRL, MVT::v8i16, 1 }, // psrlw.
    { ISD::SRL, MVT::v4i32, 1 }, // psrld.
    { ISD::SRL, MVT::v2i64, 1 }, // psrlq.

    { ISD::SRA, MVT::v8i16, 1 }, // psraw.
    { ISD::SRA, MVT::v4i32, 1 }, // psrad.
  };

  if (ST->hasSSE2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(SSE2UniformCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512DQCostTable[] = {
    { ISD::MUL,  MVT::v2i64, 1 },
    { ISD::MUL,  MVT::v4i64, 1 },
    { ISD::MUL,  MVT::v8i64, 1 }
  };

  // Look for AVX512DQ lowering tricks for custom cases.
  if (ST->hasDQI())
    if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
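  // For reference (informal): the DQ entries above reflect that AVX-512DQ has
  // a native 64-bit element multiply (vpmullq), so v2i64/v4i64/v8i64 ISD::MUL
  // no longer needs the 3*pmuludq/3*shift/2*add emulation that is costed at 8
  // in the AVX512/AVX2/SSE2 tables below.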
  static const CostTblEntry AVX512BWCostTable[] = {
    { ISD::SHL,  MVT::v8i16,      1 }, // vpsllvw
    { ISD::SRL,  MVT::v8i16,      1 }, // vpsrlvw
    { ISD::SRA,  MVT::v8i16,      1 }, // vpsravw

    { ISD::SHL,  MVT::v16i16,     1 }, // vpsllvw
    { ISD::SRL,  MVT::v16i16,     1 }, // vpsrlvw
    { ISD::SRA,  MVT::v16i16,     1 }, // vpsravw

    { ISD::SHL,  MVT::v32i16,     1 }, // vpsllvw
    { ISD::SRL,  MVT::v32i16,     1 }, // vpsrlvw
    { ISD::SRA,  MVT::v32i16,     1 }, // vpsravw

    { ISD::SHL,  MVT::v64i8,     11 }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v64i8,     11 }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v64i8,     24 }, // vpblendvb sequence.

    { ISD::MUL,  MVT::v64i8,     11 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v32i8,      4 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i8,      4 }, // extend/pmullw/trunc sequence.

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV, MVT::v64i8,  64*20 },
    { ISD::SDIV, MVT::v32i16, 32*20 },
    { ISD::UDIV, MVT::v64i8,  64*20 },
    { ISD::UDIV, MVT::v32i16, 32*20 }
  };

  // Look for AVX512BW lowering tricks for custom cases.
  if (ST->hasBWI())
    if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512CostTable[] = {
    { ISD::SHL,  MVT::v16i32,     1 },
    { ISD::SRL,  MVT::v16i32,     1 },
    { ISD::SRA,  MVT::v16i32,     1 },

    { ISD::SHL,  MVT::v8i64,      1 },
    { ISD::SRL,  MVT::v8i64,      1 },

    { ISD::SRA,  MVT::v2i64,      1 },
    { ISD::SRA,  MVT::v4i64,      1 },
    { ISD::SRA,  MVT::v8i64,      1 },

    { ISD::MUL,  MVT::v32i8,     13 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i8,      5 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i32,     1 }, // pmulld
    { ISD::MUL,  MVT::v8i64,      8 }, // 3*pmuludq/3*shift/2*add

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV, MVT::v16i32, 16*20 },
    { ISD::SDIV, MVT::v8i64,   8*20 },
    { ISD::UDIV, MVT::v16i32, 16*20 },
    { ISD::UDIV, MVT::v8i64,   8*20 }
  };

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX2ShiftCostTable[] = {
    // Shifts on v4i64/v8i32 are legal on AVX2; we only mark them as custom
    // so we can detect the cases where the shift amount is a splatted scalar.
    { ISD::SHL, MVT::v4i32, 1 },
    { ISD::SRL, MVT::v4i32, 1 },
    { ISD::SRA, MVT::v4i32, 1 },
    { ISD::SHL, MVT::v8i32, 1 },
    { ISD::SRL, MVT::v8i32, 1 },
    { ISD::SRA, MVT::v8i32, 1 },
    { ISD::SHL, MVT::v2i64, 1 },
    { ISD::SRL, MVT::v2i64, 1 },
    { ISD::SHL, MVT::v4i64, 1 },
    { ISD::SRL, MVT::v4i64, 1 },
  };

  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return LT.first;

    if (const auto *Entry = CostTableLookup(AVX2ShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry XOPShiftCostTable[] = {
    // 128bit shifts take 1cy, but right shifts require negation beforehand.
    { ISD::SHL, MVT::v16i8,  1 },
    { ISD::SRL, MVT::v16i8,  2 },
    { ISD::SRA, MVT::v16i8,  2 },
    { ISD::SHL, MVT::v8i16,  1 },
    { ISD::SRL, MVT::v8i16,  2 },
    { ISD::SRA, MVT::v8i16,  2 },
    { ISD::SHL, MVT::v4i32,  1 },
    { ISD::SRL, MVT::v4i32,  2 },
    { ISD::SRA, MVT::v4i32,  2 },
    { ISD::SHL, MVT::v2i64,  1 },
    { ISD::SRL, MVT::v2i64,  2 },
    { ISD::SRA, MVT::v2i64,  2 },
    // 256bit shifts require splitting if AVX2 didn't catch them above.
    { ISD::SHL, MVT::v32i8,  2 },
    { ISD::SRL, MVT::v32i8,  4 },
    { ISD::SRA, MVT::v32i8,  4 },
    { ISD::SHL, MVT::v16i16, 2 },
    { ISD::SRL, MVT::v16i16, 4 },
    { ISD::SRA, MVT::v16i16, 4 },
    { ISD::SHL, MVT::v8i32,  2 },
    { ISD::SRL, MVT::v8i32,  4 },
    { ISD::SRA, MVT::v8i32,  4 },
    { ISD::SHL, MVT::v4i64,  2 },
    { ISD::SRL, MVT::v4i64,  4 },
    { ISD::SRA, MVT::v4i64,  4 },
  };

  // Look for XOP lowering tricks.
  if (ST->hasXOP())
    if (const auto *Entry = CostTableLookup(XOPShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
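  // Worked example (informal): a v2i64 arithmetic shift right by a splatted
  // amount is charged 4 by the uniform-shift table below (2 x psrad +
  // shuffle), while a fully variable v2i64 SRA falls through to the generic
  // SSE2 table and is charged 12 for its srl/xor/sub sequence.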
  static const CostTblEntry SSE2UniformShiftCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL, MVT::v16i16, 2 }, // psllw.
    { ISD::SHL, MVT::v8i32,  2 }, // pslld
    { ISD::SHL, MVT::v4i64,  2 }, // psllq.

    { ISD::SRL, MVT::v16i16, 2 }, // psrlw.
    { ISD::SRL, MVT::v8i32,  2 }, // psrld.
    { ISD::SRL, MVT::v4i64,  2 }, // psrlq.

    { ISD::SRA, MVT::v16i16, 2 }, // psraw.
    { ISD::SRA, MVT::v8i32,  2 }, // psrad.
    { ISD::SRA, MVT::v2i64,  4 }, // 2 x psrad + shuffle.
    { ISD::SRA, MVT::v4i64,  8 }, // 2 x psrad + shuffle.
  };

  if (ST->hasSSE2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(SSE2UniformShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  if (ISD == ISD::SHL &&
      Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
    MVT VT = LT.second;
    // A vector shift left by a non-uniform constant can be lowered into a
    // vector multiply.
    if (((VT == MVT::v8i16 || VT == MVT::v4i32) && ST->hasSSE2()) ||
        ((VT == MVT::v16i16 || VT == MVT::v8i32) && ST->hasAVX()))
      ISD = ISD::MUL;
  }

  static const CostTblEntry AVX2CostTable[] = {
    { ISD::SHL,  MVT::v32i8,   11 }, // vpblendvb sequence.
    { ISD::SHL,  MVT::v16i16,  10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRL,  MVT::v32i8,   11 }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v16i16,  10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRA,  MVT::v32i8,   24 }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v16i16,  10 }, // extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v2i64,    4 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,    4 }, // srl/xor/sub sequence.

    { ISD::SUB,  MVT::v32i8,    1 }, // psubb
    { ISD::ADD,  MVT::v32i8,    1 }, // paddb
    { ISD::SUB,  MVT::v16i16,   1 }, // psubw
    { ISD::ADD,  MVT::v16i16,   1 }, // paddw
    { ISD::SUB,  MVT::v8i32,    1 }, // psubd
    { ISD::ADD,  MVT::v8i32,    1 }, // paddd
    { ISD::SUB,  MVT::v4i64,    1 }, // psubq
    { ISD::ADD,  MVT::v4i64,    1 }, // paddq

    { ISD::MUL,  MVT::v32i8,   17 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i8,    7 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i16,   1 }, // pmullw
    { ISD::MUL,  MVT::v8i32,    1 }, // pmulld
    { ISD::MUL,  MVT::v4i64,    8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FDIV, MVT::f32,      7 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,    7 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v8f32,   14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::f64,     14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,   14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v4f64,   28 }, // Haswell from http://www.agner.org/
  };

  // Look for AVX2 lowering tricks for custom cases.
  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
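  // Illustrative example (hedged): on an SSE4.1-only target a shift by a
  // non-uniform constant vector such as
  //   %s = shl <4 x i32> %x, <i32 1, i32 2, i32 3, i32 4>
  // is remapped to ISD::MUL above and therefore picks up the single pmulld
  // cost from the SSE41 table below instead of a variable-shift sequence.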
  static const CostTblEntry AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL,  MVT::v16i16,    4 },
    { ISD::MUL,  MVT::v8i32,     4 },
    { ISD::SUB,  MVT::v32i8,     4 },
    { ISD::ADD,  MVT::v32i8,     4 },
    { ISD::SUB,  MVT::v16i16,    4 },
    { ISD::ADD,  MVT::v16i16,    4 },
    { ISD::SUB,  MVT::v8i32,     4 },
    { ISD::ADD,  MVT::v8i32,     4 },
    { ISD::SUB,  MVT::v4i64,     4 },
    { ISD::ADD,  MVT::v4i64,     4 },

    // A v4i64 multiply is custom lowered as two split v2i64 vectors that then
    // are lowered as a series of long multiplies(3), shifts(3) and adds(2).
    // Because we believe v4i64 to be a legal type, we must also include the
    // extract+insert in the cost table. Therefore, the cost here is 18
    // instead of 8.
    { ISD::MUL,  MVT::v4i64,    18 },

    { ISD::MUL,  MVT::v32i8,    26 }, // extend/pmullw/trunc sequence.

    { ISD::FDIV, MVT::f32,      14 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,    14 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v8f32,    28 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::f64,      22 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,    22 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v4f64,    44 }, // SNB from http://www.agner.org/

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV, MVT::v32i8,  32*20 },
    { ISD::SDIV, MVT::v16i16, 16*20 },
    { ISD::SDIV, MVT::v8i32,   8*20 },
    { ISD::SDIV, MVT::v4i64,   4*20 },
    { ISD::UDIV, MVT::v32i8,  32*20 },
    { ISD::UDIV, MVT::v16i16, 16*20 },
    { ISD::UDIV, MVT::v8i32,   8*20 },
    { ISD::UDIV, MVT::v4i64,   4*20 },
  };

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE42CostTable[] = {
    { ISD::FDIV, MVT::f32,   14 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32, 14 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::f64,   22 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64, 22 }, // Nehalem from http://www.agner.org/
  };

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
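  // Worked example (informal): an IR-level fdiv on <8 x float> is costed as
  // 2 * 14 = 28 on SSE4.2 (v8f32 splits into two v4f32 divps), 28 on AVX1
  // (single v8f32 entry, SNB timing), and 14 on AVX2 (Haswell timing),
  // illustrating how LT.first combines with the per-generation tables.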
  static const CostTblEntry SSE41CostTable[] = {
    { ISD::SHL,  MVT::v16i8,    11 }, // pblendvb sequence.
    { ISD::SHL,  MVT::v32i8,  2*11 }, // pblendvb sequence.
    { ISD::SHL,  MVT::v8i16,    14 }, // pblendvb sequence.
    { ISD::SHL,  MVT::v16i16, 2*14 }, // pblendvb sequence.
    { ISD::SHL,  MVT::v4i32,     4 }, // pslld/paddd/cvttps2dq/pmulld
    { ISD::SHL,  MVT::v8i32,   2*4 }, // pslld/paddd/cvttps2dq/pmulld

    { ISD::SRL,  MVT::v16i8,    12 }, // pblendvb sequence.
    { ISD::SRL,  MVT::v32i8,  2*12 }, // pblendvb sequence.
    { ISD::SRL,  MVT::v8i16,    14 }, // pblendvb sequence.
    { ISD::SRL,  MVT::v16i16, 2*14 }, // pblendvb sequence.
    { ISD::SRL,  MVT::v4i32,    11 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v8i32,  2*11 }, // Shift each lane + blend.

    { ISD::SRA,  MVT::v16i8,    24 }, // pblendvb sequence.
    { ISD::SRA,  MVT::v32i8,  2*24 }, // pblendvb sequence.
    { ISD::SRA,  MVT::v8i16,    14 }, // pblendvb sequence.
    { ISD::SRA,  MVT::v16i16, 2*14 }, // pblendvb sequence.
    { ISD::SRA,  MVT::v4i32,    12 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v8i32,  2*12 }, // Shift each lane + blend.

    { ISD::MUL,  MVT::v4i32,     1 }  // pmulld
  };

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    { ISD::SHL,  MVT::v16i8,    26 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v8i16,    32 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v4i32,   2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v2i64,     4 }, // splat+shuffle sequence.
    { ISD::SHL,  MVT::v4i64,   2*4 }, // splat+shuffle sequence.

    { ISD::SRL,  MVT::v16i8,    26 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v8i16,    32 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v4i32,    16 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v2i64,     4 }, // splat+shuffle sequence.
    { ISD::SRL,  MVT::v4i64,   2*4 }, // splat+shuffle sequence.

    { ISD::SRA,  MVT::v16i8,    54 }, // unpacked cmpgtb sequence.
    { ISD::SRA,  MVT::v8i16,    32 }, // cmpgtb sequence.
    { ISD::SRA,  MVT::v4i32,    16 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v2i64,    12 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,  2*12 }, // srl/xor/sub sequence.

    { ISD::MUL,  MVT::v16i8,    12 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v8i16,     1 }, // pmullw
    { ISD::MUL,  MVT::v4i32,     6 }, // 3*pmuludq/4*shuffle
    { ISD::MUL,  MVT::v2i64,     8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FDIV, MVT::f32,      23 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,    39 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::f64,      38 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,    69 }, // Pentium IV from http://www.agner.org/

    // It is not a good idea to vectorize division. We have to scalarize it and
    // in the process we will often end up having to spill regular registers.
    // The overhead of division is going to dominate most kernels anyway, so
    // try hard to prevent vectorization of division - it is generally a bad
    // idea. Assume somewhat arbitrarily that we have to be able to hide "20
    // cycles" for each lane.
    { ISD::SDIV, MVT::v16i8, 16*20 },
    { ISD::SDIV, MVT::v8i16,  8*20 },
    { ISD::SDIV, MVT::v4i32,  4*20 },
    { ISD::SDIV, MVT::v2i64,  2*20 },
    { ISD::UDIV, MVT::v16i8, 16*20 },
    { ISD::UDIV, MVT::v8i16,  8*20 },
    { ISD::UDIV, MVT::v4i32,  4*20 },
    { ISD::UDIV, MVT::v2i64,  2*20 },
  };

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE1CostTable[] = {
    { ISD::FDIV, MVT::f32,   17 }, // Pentium III from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32, 34 }, // Pentium III from http://www.agner.org/
  };

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info);
}
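// Worked example for the shuffle costs below (informal): on AVX2 a broadcast
// shuffle of an <8 x i32> vector, e.g.
//   %b = shufflevector <8 x i32> %v, <8 x i32> undef, <8 x i32> zeroinitializer
// maps to TTI::SK_Broadcast with LT = {1, v8i32} and is charged 1
// (vpbroadcastd), while the same shuffle on an AVX1-only target is charged 2
// (vperm2f128 + vpermilps).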
int X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // 64-bit packed float vectors (v2f32) are widened to type v4f32.
  // 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

  // For broadcasts we are splatting the first element from the first input
  // register, so we only need to reference that input and all the output
  // registers are the same.
  if (Kind == TTI::SK_Broadcast)
    LT.first = 1;

  // We are going to permute multiple sources and the result will be in
  // multiple destinations. We provide an accurate cost only for splits where
  // the element type remains the same.
  if (Kind == TTI::SK_PermuteSingleSrc && LT.first != 1) {
    MVT LegalVT = LT.second;
    if (LegalVT.getVectorElementType().getSizeInBits() ==
            Tp->getVectorElementType()->getPrimitiveSizeInBits() &&
        LegalVT.getVectorNumElements() < Tp->getVectorNumElements()) {

      unsigned VecTySize = DL.getTypeStoreSize(Tp);
      unsigned LegalVTSize = LegalVT.getStoreSize();
      // Number of source vectors after legalization:
      unsigned NumOfSrcs = (VecTySize + LegalVTSize - 1) / LegalVTSize;
      // Number of destination vectors after legalization:
      unsigned NumOfDests = LT.first;

      Type *SingleOpTy = VectorType::get(Tp->getVectorElementType(),
                                         LegalVT.getVectorNumElements());

      unsigned NumOfShuffles = (NumOfSrcs - 1) * NumOfDests;
      return NumOfShuffles *
             getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy, 0, nullptr);
    }

    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }

  // For 2-input shuffles, we must account for splitting the 2 inputs into
  // many.
  if (Kind == TTI::SK_PermuteTwoSrc && LT.first != 1) {
    // We assume that source and destination have the same vector type.
    int NumOfDests = LT.first;
    int NumOfShufflesPerDest = LT.first * 2 - 1;
    LT.first = NumOfDests * NumOfShufflesPerDest;
  }

  static const CostTblEntry AVX512VBMIShuffleTbl[] = {
    { TTI::SK_Reverse,          MVT::v64i8,  1 }, // vpermb
    { TTI::SK_Reverse,          MVT::v32i8,  1 }, // vpermb

    { TTI::SK_PermuteSingleSrc, MVT::v64i8,  1 }, // vpermb
    { TTI::SK_PermuteSingleSrc, MVT::v32i8,  1 }, // vpermb

    { TTI::SK_PermuteTwoSrc,    MVT::v64i8,  1 }, // vpermt2b
    { TTI::SK_PermuteTwoSrc,    MVT::v32i8,  1 }, // vpermt2b
    { TTI::SK_PermuteTwoSrc,    MVT::v16i8,  1 }  // vpermt2b
  };

  if (ST->hasVBMI())
    if (const auto *Entry =
            CostTableLookup(AVX512VBMIShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512BWShuffleTbl[] = {
    { TTI::SK_Broadcast,        MVT::v32i16, 1 }, // vpbroadcastw
    { TTI::SK_Broadcast,        MVT::v64i8,  1 }, // vpbroadcastb

    { TTI::SK_Reverse,          MVT::v32i16, 1 }, // vpermw
    { TTI::SK_Reverse,          MVT::v16i16, 1 }, // vpermw
    { TTI::SK_Reverse,          MVT::v64i8,  2 }, // pshufb + vshufi64x2

    { TTI::SK_PermuteSingleSrc, MVT::v32i16, 1 }, // vpermw
    { TTI::SK_PermuteSingleSrc, MVT::v16i16, 1 }, // vpermw
    { TTI::SK_PermuteSingleSrc, MVT::v8i16,  1 }, // vpermw
    { TTI::SK_PermuteSingleSrc, MVT::v64i8,  8 }, // extend to v32i16
    { TTI::SK_PermuteSingleSrc, MVT::v32i8,  3 }, // vpermw + zext/trunc

    { TTI::SK_PermuteTwoSrc,    MVT::v32i16, 1 }, // vpermt2w
    { TTI::SK_PermuteTwoSrc,    MVT::v16i16, 1 }, // vpermt2w
    { TTI::SK_PermuteTwoSrc,    MVT::v8i16,  1 }, // vpermt2w
    { TTI::SK_PermuteTwoSrc,    MVT::v32i8,  3 }, // zext + vpermt2w + trunc
    { TTI::SK_PermuteTwoSrc,    MVT::v64i8, 19 }, // 6 * v32i8 + 1
    { TTI::SK_PermuteTwoSrc,    MVT::v16i8,  3 }  // zext + vpermt2w + trunc
  };

  if (ST->hasBWI())
    if (const auto *Entry =
CostTableLookup(AVX512BWShuffleTbl, Kind, LT.second)) 761 return LT.first * Entry->Cost; 762 763 static const CostTblEntry AVX512ShuffleTbl[] = { 764 { TTI::SK_Broadcast, MVT::v8f64, 1 }, // vbroadcastpd 765 { TTI::SK_Broadcast, MVT::v16f32, 1 }, // vbroadcastps 766 { TTI::SK_Broadcast, MVT::v8i64, 1 }, // vpbroadcastq 767 { TTI::SK_Broadcast, MVT::v16i32, 1 }, // vpbroadcastd 768 769 { TTI::SK_Reverse, MVT::v8f64, 1 }, // vpermpd 770 { TTI::SK_Reverse, MVT::v16f32, 1 }, // vpermps 771 { TTI::SK_Reverse, MVT::v8i64, 1 }, // vpermq 772 { TTI::SK_Reverse, MVT::v16i32, 1 }, // vpermd 773 774 { TTI::SK_PermuteSingleSrc, MVT::v8f64, 1 }, // vpermpd 775 { TTI::SK_PermuteSingleSrc, MVT::v4f64, 1 }, // vpermpd 776 { TTI::SK_PermuteSingleSrc, MVT::v2f64, 1 }, // vpermpd 777 { TTI::SK_PermuteSingleSrc, MVT::v16f32, 1 }, // vpermps 778 { TTI::SK_PermuteSingleSrc, MVT::v8f32, 1 }, // vpermps 779 { TTI::SK_PermuteSingleSrc, MVT::v4f32, 1 }, // vpermps 780 { TTI::SK_PermuteSingleSrc, MVT::v8i64, 1 }, // vpermq 781 { TTI::SK_PermuteSingleSrc, MVT::v4i64, 1 }, // vpermq 782 { TTI::SK_PermuteSingleSrc, MVT::v2i64, 1 }, // vpermq 783 { TTI::SK_PermuteSingleSrc, MVT::v16i32, 1 }, // vpermd 784 { TTI::SK_PermuteSingleSrc, MVT::v8i32, 1 }, // vpermd 785 { TTI::SK_PermuteSingleSrc, MVT::v4i32, 1 }, // vpermd 786 { TTI::SK_PermuteSingleSrc, MVT::v16i8, 1 }, // pshufb 787 788 { TTI::SK_PermuteTwoSrc, MVT::v8f64, 1 }, // vpermt2pd 789 { TTI::SK_PermuteTwoSrc, MVT::v16f32, 1 }, // vpermt2ps 790 { TTI::SK_PermuteTwoSrc, MVT::v8i64, 1 }, // vpermt2q 791 { TTI::SK_PermuteTwoSrc, MVT::v16i32, 1 }, // vpermt2d 792 { TTI::SK_PermuteTwoSrc, MVT::v4f64, 1 }, // vpermt2pd 793 { TTI::SK_PermuteTwoSrc, MVT::v8f32, 1 }, // vpermt2ps 794 { TTI::SK_PermuteTwoSrc, MVT::v4i64, 1 }, // vpermt2q 795 { TTI::SK_PermuteTwoSrc, MVT::v8i32, 1 }, // vpermt2d 796 { TTI::SK_PermuteTwoSrc, MVT::v2f64, 1 }, // vpermt2pd 797 { TTI::SK_PermuteTwoSrc, MVT::v4f32, 1 }, // vpermt2ps 798 { TTI::SK_PermuteTwoSrc, MVT::v2i64, 1 }, // vpermt2q 799 { TTI::SK_PermuteTwoSrc, MVT::v4i32, 1 } // vpermt2d 800 }; 801 802 if (ST->hasAVX512()) 803 if (const auto *Entry = CostTableLookup(AVX512ShuffleTbl, Kind, LT.second)) 804 return LT.first * Entry->Cost; 805 806 static const CostTblEntry AVX2ShuffleTbl[] = { 807 { TTI::SK_Broadcast, MVT::v4f64, 1 }, // vbroadcastpd 808 { TTI::SK_Broadcast, MVT::v8f32, 1 }, // vbroadcastps 809 { TTI::SK_Broadcast, MVT::v4i64, 1 }, // vpbroadcastq 810 { TTI::SK_Broadcast, MVT::v8i32, 1 }, // vpbroadcastd 811 { TTI::SK_Broadcast, MVT::v16i16, 1 }, // vpbroadcastw 812 { TTI::SK_Broadcast, MVT::v32i8, 1 }, // vpbroadcastb 813 814 { TTI::SK_Reverse, MVT::v4f64, 1 }, // vpermpd 815 { TTI::SK_Reverse, MVT::v8f32, 1 }, // vpermps 816 { TTI::SK_Reverse, MVT::v4i64, 1 }, // vpermq 817 { TTI::SK_Reverse, MVT::v8i32, 1 }, // vpermd 818 { TTI::SK_Reverse, MVT::v16i16, 2 }, // vperm2i128 + pshufb 819 { TTI::SK_Reverse, MVT::v32i8, 2 }, // vperm2i128 + pshufb 820 821 { TTI::SK_Alternate, MVT::v16i16, 1 }, // vpblendw 822 { TTI::SK_Alternate, MVT::v32i8, 1 }, // vpblendvb 823 824 { TTI::SK_PermuteSingleSrc, MVT::v4i64, 1 }, // vpermq 825 { TTI::SK_PermuteSingleSrc, MVT::v8i32, 1 }, // vpermd 826 { TTI::SK_PermuteSingleSrc, MVT::v16i16, 4 }, // vperm2i128 + 2 * vpshufb 827 // + vpblendvb 828 { TTI::SK_PermuteSingleSrc, MVT::v32i8, 4 } // vperm2i128 + 2 * vpshufb 829 // + vpblendvb 830 }; 831 832 if (ST->hasAVX2()) 833 if (const auto *Entry = CostTableLookup(AVX2ShuffleTbl, Kind, LT.second)) 834 return LT.first * Entry->Cost; 835 836 static 
const CostTblEntry AVX1ShuffleTbl[] = { 837 { TTI::SK_Broadcast, MVT::v4f64, 2 }, // vperm2f128 + vpermilpd 838 { TTI::SK_Broadcast, MVT::v8f32, 2 }, // vperm2f128 + vpermilps 839 { TTI::SK_Broadcast, MVT::v4i64, 2 }, // vperm2f128 + vpermilpd 840 { TTI::SK_Broadcast, MVT::v8i32, 2 }, // vperm2f128 + vpermilps 841 { TTI::SK_Broadcast, MVT::v16i16, 3 }, // vpshuflw + vpshufd + vinsertf128 842 { TTI::SK_Broadcast, MVT::v32i8, 2 }, // vpshufb + vinsertf128 843 844 { TTI::SK_Reverse, MVT::v4f64, 2 }, // vperm2f128 + vpermilpd 845 { TTI::SK_Reverse, MVT::v8f32, 2 }, // vperm2f128 + vpermilps 846 { TTI::SK_Reverse, MVT::v4i64, 2 }, // vperm2f128 + vpermilpd 847 { TTI::SK_Reverse, MVT::v8i32, 2 }, // vperm2f128 + vpermilps 848 { TTI::SK_Reverse, MVT::v16i16, 4 }, // vextractf128 + 2*pshufb 849 // + vinsertf128 850 { TTI::SK_Reverse, MVT::v32i8, 4 }, // vextractf128 + 2*pshufb 851 // + vinsertf128 852 853 { TTI::SK_Alternate, MVT::v4i64, 1 }, // vblendpd 854 { TTI::SK_Alternate, MVT::v4f64, 1 }, // vblendpd 855 { TTI::SK_Alternate, MVT::v8i32, 1 }, // vblendps 856 { TTI::SK_Alternate, MVT::v8f32, 1 }, // vblendps 857 { TTI::SK_Alternate, MVT::v16i16, 3 }, // vpand + vpandn + vpor 858 { TTI::SK_Alternate, MVT::v32i8, 3 } // vpand + vpandn + vpor 859 }; 860 861 if (ST->hasAVX()) 862 if (const auto *Entry = CostTableLookup(AVX1ShuffleTbl, Kind, LT.second)) 863 return LT.first * Entry->Cost; 864 865 static const CostTblEntry SSE41ShuffleTbl[] = { 866 { TTI::SK_Alternate, MVT::v2i64, 1 }, // pblendw 867 { TTI::SK_Alternate, MVT::v2f64, 1 }, // movsd 868 { TTI::SK_Alternate, MVT::v4i32, 1 }, // pblendw 869 { TTI::SK_Alternate, MVT::v4f32, 1 }, // blendps 870 { TTI::SK_Alternate, MVT::v8i16, 1 }, // pblendw 871 { TTI::SK_Alternate, MVT::v16i8, 1 } // pblendvb 872 }; 873 874 if (ST->hasSSE41()) 875 if (const auto *Entry = CostTableLookup(SSE41ShuffleTbl, Kind, LT.second)) 876 return LT.first * Entry->Cost; 877 878 static const CostTblEntry SSSE3ShuffleTbl[] = { 879 { TTI::SK_Broadcast, MVT::v8i16, 1 }, // pshufb 880 { TTI::SK_Broadcast, MVT::v16i8, 1 }, // pshufb 881 882 { TTI::SK_Reverse, MVT::v8i16, 1 }, // pshufb 883 { TTI::SK_Reverse, MVT::v16i8, 1 }, // pshufb 884 885 { TTI::SK_Alternate, MVT::v8i16, 3 }, // pshufb + pshufb + por 886 { TTI::SK_Alternate, MVT::v16i8, 3 }, // pshufb + pshufb + por 887 888 { TTI::SK_PermuteSingleSrc, MVT::v8i16, 1 }, // pshufb 889 { TTI::SK_PermuteSingleSrc, MVT::v16i8, 1 } // pshufb 890 }; 891 892 if (ST->hasSSSE3()) 893 if (const auto *Entry = CostTableLookup(SSSE3ShuffleTbl, Kind, LT.second)) 894 return LT.first * Entry->Cost; 895 896 static const CostTblEntry SSE2ShuffleTbl[] = { 897 { TTI::SK_Broadcast, MVT::v2f64, 1 }, // shufpd 898 { TTI::SK_Broadcast, MVT::v2i64, 1 }, // pshufd 899 { TTI::SK_Broadcast, MVT::v4i32, 1 }, // pshufd 900 { TTI::SK_Broadcast, MVT::v8i16, 2 }, // pshuflw + pshufd 901 { TTI::SK_Broadcast, MVT::v16i8, 3 }, // unpck + pshuflw + pshufd 902 903 { TTI::SK_Reverse, MVT::v2f64, 1 }, // shufpd 904 { TTI::SK_Reverse, MVT::v2i64, 1 }, // pshufd 905 { TTI::SK_Reverse, MVT::v4i32, 1 }, // pshufd 906 { TTI::SK_Reverse, MVT::v8i16, 3 }, // pshuflw + pshufhw + pshufd 907 { TTI::SK_Reverse, MVT::v16i8, 9 }, // 2*pshuflw + 2*pshufhw 908 // + 2*pshufd + 2*unpck + packus 909 910 { TTI::SK_Alternate, MVT::v2i64, 1 }, // movsd 911 { TTI::SK_Alternate, MVT::v2f64, 1 }, // movsd 912 { TTI::SK_Alternate, MVT::v4i32, 2 }, // 2*shufps 913 { TTI::SK_Alternate, MVT::v8i16, 3 }, // pand + pandn + por 914 { TTI::SK_Alternate, MVT::v16i8, 3 }, // pand + pandn + por 
915 916 { TTI::SK_PermuteSingleSrc, MVT::v2i64, 1 }, // pshufd 917 { TTI::SK_PermuteSingleSrc, MVT::v4i32, 1 } // pshufd 918 }; 919 920 if (ST->hasSSE2()) 921 if (const auto *Entry = CostTableLookup(SSE2ShuffleTbl, Kind, LT.second)) 922 return LT.first * Entry->Cost; 923 924 static const CostTblEntry SSE1ShuffleTbl[] = { 925 { TTI::SK_Broadcast, MVT::v4f32, 1 }, // shufps 926 { TTI::SK_Reverse, MVT::v4f32, 1 }, // shufps 927 { TTI::SK_Alternate, MVT::v4f32, 2 } // 2*shufps 928 }; 929 930 if (ST->hasSSE1()) 931 if (const auto *Entry = CostTableLookup(SSE1ShuffleTbl, Kind, LT.second)) 932 return LT.first * Entry->Cost; 933 934 return BaseT::getShuffleCost(Kind, Tp, Index, SubTp); 935 } 936 937 int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) { 938 int ISD = TLI->InstructionOpcodeToISD(Opcode); 939 assert(ISD && "Invalid opcode"); 940 941 // FIXME: Need a better design of the cost table to handle non-simple types of 942 // potential massive combinations (elem_num x src_type x dst_type). 943 944 static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = { 945 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 }, 946 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 }, 947 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 }, 948 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 }, 949 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 }, 950 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 }, 951 952 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 }, 953 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 }, 954 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 }, 955 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 }, 956 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 }, 957 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 }, 958 959 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 1 }, 960 { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f32, 1 }, 961 { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f32, 1 }, 962 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 }, 963 { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f64, 1 }, 964 { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f64, 1 }, 965 966 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 1 }, 967 { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f32, 1 }, 968 { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f32, 1 }, 969 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 }, 970 { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f64, 1 }, 971 { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f64, 1 }, 972 }; 973 974 // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and 975 // 256-bit wide vectors. 
976 977 static const TypeConversionCostTblEntry AVX512FConversionTbl[] = { 978 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 1 }, 979 { ISD::FP_EXTEND, MVT::v8f64, MVT::v16f32, 3 }, 980 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 1 }, 981 982 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 1 }, 983 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 1 }, 984 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i64, 1 }, 985 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 1 }, 986 987 // v16i1 -> v16i32 - load + broadcast 988 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1, 2 }, 989 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1, 2 }, 990 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 1 }, 991 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 1 }, 992 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 }, 993 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 }, 994 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 1 }, 995 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 1 }, 996 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i32, 1 }, 997 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i32, 1 }, 998 999 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 }, 1000 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 }, 1001 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i8, 2 }, 1002 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 2 }, 1003 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 }, 1004 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 }, 1005 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 }, 1006 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 }, 1007 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 26 }, 1008 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 26 }, 1009 1010 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 }, 1011 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 }, 1012 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 2 }, 1013 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 }, 1014 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 2 }, 1015 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i8, 2 }, 1016 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 2 }, 1017 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 5 }, 1018 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 }, 1019 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 }, 1020 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 }, 1021 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 }, 1022 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 2 }, 1023 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 1 }, 1024 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 }, 1025 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 }, 1026 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 }, 1027 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 }, 1028 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 }, 1029 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 5 }, 1030 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 }, 1031 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 12 }, 1032 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 26 }, 1033 1034 { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 }, 1035 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 }, 1036 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 1 }, 1037 { ISD::FP_TO_UINT, MVT::v16i32, MVT::v16f32, 1 }, 1038 }; 1039 1040 static const TypeConversionCostTblEntry AVX2ConversionTbl[] = { 1041 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 3 }, 1042 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 3 }, 1043 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 3 }, 1044 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 3 }, 1045 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 3 }, 1046 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 3 }, 1047 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 }, 1048 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 }, 1049 { ISD::SIGN_EXTEND, MVT::v16i16, 
MVT::v16i8, 1 }, 1050 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 }, 1051 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, 1052 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, 1053 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 }, 1054 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 }, 1055 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 1 }, 1056 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1 }, 1057 1058 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 2 }, 1059 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 2 }, 1060 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 2 }, 1061 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 2 }, 1062 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 2 }, 1063 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 4 }, 1064 1065 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 3 }, 1066 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 3 }, 1067 1068 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 8 }, 1069 }; 1070 1071 static const TypeConversionCostTblEntry AVXConversionTbl[] = { 1072 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 6 }, 1073 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 4 }, 1074 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 7 }, 1075 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 4 }, 1076 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 6 }, 1077 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 4 }, 1078 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 7 }, 1079 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 4 }, 1080 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 }, 1081 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 4 }, 1082 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 6 }, 1083 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, 1084 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 }, 1085 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 4 }, 1086 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 4 }, 1087 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 4 }, 1088 1089 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 4 }, 1090 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 }, 1091 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 }, 1092 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 4 }, 1093 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 4 }, 1094 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 4 }, 1095 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 9 }, 1096 1097 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 }, 1098 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i1, 3 }, 1099 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i1, 8 }, 1100 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 }, 1101 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i8, 3 }, 1102 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 8 }, 1103 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 3 }, 1104 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i16, 3 }, 1105 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 }, 1106 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 }, 1107 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 }, 1108 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 }, 1109 1110 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 7 }, 1111 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i1, 7 }, 1112 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i1, 6 }, 1113 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 2 }, 1114 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 }, 1115 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 5 }, 1116 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 }, 1117 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 }, 1118 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 }, 1119 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 6 }, 1120 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 6 }, 1121 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 6 }, 1122 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 9 }, 1123 // The generic code to compute the scalar overhead is 
currently broken. 1124 // Workaround this limitation by estimating the scalarization overhead 1125 // here. We have roughly 10 instructions per scalar element. 1126 // Multiply that by the vector width. 1127 // FIXME: remove that when PR19268 is fixed. 1128 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 10 }, 1129 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 20 }, 1130 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 13 }, 1131 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 13 }, 1132 1133 { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 1 }, 1134 { ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f32, 7 }, 1135 // This node is expanded into scalarized operations but BasicTTI is overly 1136 // optimistic estimating its cost. It computes 3 per element (one 1137 // vector-extract, one scalar conversion and one vector-insert). The 1138 // problem is that the inserts form a read-modify-write chain so latency 1139 // should be factored in too. Inflating the cost per element by 1. 1140 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 8*4 }, 1141 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 4*4 }, 1142 1143 { ISD::FP_EXTEND, MVT::v4f64, MVT::v4f32, 1 }, 1144 { ISD::FP_ROUND, MVT::v4f32, MVT::v4f64, 1 }, 1145 }; 1146 1147 static const TypeConversionCostTblEntry SSE41ConversionTbl[] = { 1148 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 2 }, 1149 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 2 }, 1150 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 2 }, 1151 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 2 }, 1152 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 2 }, 1153 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 2 }, 1154 1155 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i8, 1 }, 1156 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i8, 2 }, 1157 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 1 }, 1158 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 1 }, 1159 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 }, 1160 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 }, 1161 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 2 }, 1162 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 2 }, 1163 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 }, 1164 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 }, 1165 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 4 }, 1166 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 4 }, 1167 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 }, 1168 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 }, 1169 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 2 }, 1170 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 2 }, 1171 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 4 }, 1172 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 4 }, 1173 1174 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 2 }, 1175 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 1 }, 1176 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 1 }, 1177 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 1 }, 1178 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 3 }, 1179 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 3 }, 1180 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 6 }, 1181 1182 }; 1183 1184 static const TypeConversionCostTblEntry SSE2ConversionTbl[] = { 1185 // These are somewhat magic numbers justified by looking at the output of 1186 // Intel's IACA, running some kernels and making sure when we take 1187 // legalization into account the throughput will be overestimated. 
1188 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 }, 1189 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 }, 1190 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 }, 1191 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 }, 1192 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 }, 1193 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 }, 1194 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 }, 1195 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 }, 1196 1197 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 }, 1198 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 }, 1199 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 }, 1200 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 }, 1201 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 }, 1202 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 8 }, 1203 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 }, 1204 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 }, 1205 1206 { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 3 }, 1207 1208 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i8, 1 }, 1209 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i8, 6 }, 1210 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 }, 1211 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 3 }, 1212 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 4 }, 1213 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 8 }, 1214 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 }, 1215 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 2 }, 1216 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 6 }, 1217 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 6 }, 1218 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 3 }, 1219 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 }, 1220 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 9 }, 1221 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 12 }, 1222 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 }, 1223 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 2 }, 1224 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, 1225 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 10 }, 1226 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 3 }, 1227 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 }, 1228 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 6 }, 1229 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 8 }, 1230 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 3 }, 1231 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 5 }, 1232 1233 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 4 }, 1234 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 2 }, 1235 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 }, 1236 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 3 }, 1237 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 3 }, 1238 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 }, 1239 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 7 }, 1240 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 }, 1241 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 10 }, 1242 }; 1243 1244 std::pair<int, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src); 1245 std::pair<int, MVT> LTDest = TLI->getTypeLegalizationCost(DL, Dst); 1246 1247 if (ST->hasSSE2() && !ST->hasAVX()) { 1248 if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD, 1249 LTDest.second, LTSrc.second)) 1250 return LTSrc.first * Entry->Cost; 1251 } 1252 1253 EVT SrcTy = TLI->getValueType(DL, Src); 1254 EVT DstTy = TLI->getValueType(DL, Dst); 1255 1256 // The function getSimpleVT only handles simple value types. 
1257 if (!SrcTy.isSimple() || !DstTy.isSimple()) 1258 return BaseT::getCastInstrCost(Opcode, Dst, Src); 1259 1260 if (ST->hasDQI()) 1261 if (const auto *Entry = ConvertCostTableLookup(AVX512DQConversionTbl, ISD, 1262 DstTy.getSimpleVT(), 1263 SrcTy.getSimpleVT())) 1264 return Entry->Cost; 1265 1266 if (ST->hasAVX512()) 1267 if (const auto *Entry = ConvertCostTableLookup(AVX512FConversionTbl, ISD, 1268 DstTy.getSimpleVT(), 1269 SrcTy.getSimpleVT())) 1270 return Entry->Cost; 1271 1272 if (ST->hasAVX2()) { 1273 if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD, 1274 DstTy.getSimpleVT(), 1275 SrcTy.getSimpleVT())) 1276 return Entry->Cost; 1277 } 1278 1279 if (ST->hasAVX()) { 1280 if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD, 1281 DstTy.getSimpleVT(), 1282 SrcTy.getSimpleVT())) 1283 return Entry->Cost; 1284 } 1285 1286 if (ST->hasSSE41()) { 1287 if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD, 1288 DstTy.getSimpleVT(), 1289 SrcTy.getSimpleVT())) 1290 return Entry->Cost; 1291 } 1292 1293 if (ST->hasSSE2()) { 1294 if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD, 1295 DstTy.getSimpleVT(), 1296 SrcTy.getSimpleVT())) 1297 return Entry->Cost; 1298 } 1299 1300 return BaseT::getCastInstrCost(Opcode, Dst, Src); 1301 } 1302 1303 int X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) { 1304 // Legalize the type. 1305 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy); 1306 1307 MVT MTy = LT.second; 1308 1309 int ISD = TLI->InstructionOpcodeToISD(Opcode); 1310 assert(ISD && "Invalid opcode"); 1311 1312 static const CostTblEntry SSE2CostTbl[] = { 1313 { ISD::SETCC, MVT::v2i64, 8 }, 1314 { ISD::SETCC, MVT::v4i32, 1 }, 1315 { ISD::SETCC, MVT::v8i16, 1 }, 1316 { ISD::SETCC, MVT::v16i8, 1 }, 1317 }; 1318 1319 static const CostTblEntry SSE42CostTbl[] = { 1320 { ISD::SETCC, MVT::v2f64, 1 }, 1321 { ISD::SETCC, MVT::v4f32, 1 }, 1322 { ISD::SETCC, MVT::v2i64, 1 }, 1323 }; 1324 1325 static const CostTblEntry AVX1CostTbl[] = { 1326 { ISD::SETCC, MVT::v4f64, 1 }, 1327 { ISD::SETCC, MVT::v8f32, 1 }, 1328 // AVX1 does not support 8-wide integer compare. 
1329 { ISD::SETCC, MVT::v4i64, 4 }, 1330 { ISD::SETCC, MVT::v8i32, 4 }, 1331 { ISD::SETCC, MVT::v16i16, 4 }, 1332 { ISD::SETCC, MVT::v32i8, 4 }, 1333 }; 1334 1335 static const CostTblEntry AVX2CostTbl[] = { 1336 { ISD::SETCC, MVT::v4i64, 1 }, 1337 { ISD::SETCC, MVT::v8i32, 1 }, 1338 { ISD::SETCC, MVT::v16i16, 1 }, 1339 { ISD::SETCC, MVT::v32i8, 1 }, 1340 }; 1341 1342 static const CostTblEntry AVX512CostTbl[] = { 1343 { ISD::SETCC, MVT::v8i64, 1 }, 1344 { ISD::SETCC, MVT::v16i32, 1 }, 1345 { ISD::SETCC, MVT::v8f64, 1 }, 1346 { ISD::SETCC, MVT::v16f32, 1 }, 1347 }; 1348 1349 if (ST->hasAVX512()) 1350 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy)) 1351 return LT.first * Entry->Cost; 1352 1353 if (ST->hasAVX2()) 1354 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy)) 1355 return LT.first * Entry->Cost; 1356 1357 if (ST->hasAVX()) 1358 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy)) 1359 return LT.first * Entry->Cost; 1360 1361 if (ST->hasSSE42()) 1362 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy)) 1363 return LT.first * Entry->Cost; 1364 1365 if (ST->hasSSE2()) 1366 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy)) 1367 return LT.first * Entry->Cost; 1368 1369 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy); 1370 } 1371 1372 int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy, 1373 ArrayRef<Type *> Tys, FastMathFlags FMF, 1374 unsigned ScalarizationCostPassed) { 1375 // Costs should match the codegen from: 1376 // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll 1377 // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll 1378 // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll 1379 // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll 1380 // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll 1381 static const CostTblEntry XOPCostTbl[] = { 1382 { ISD::BITREVERSE, MVT::v4i64, 4 }, 1383 { ISD::BITREVERSE, MVT::v8i32, 4 }, 1384 { ISD::BITREVERSE, MVT::v16i16, 4 }, 1385 { ISD::BITREVERSE, MVT::v32i8, 4 }, 1386 { ISD::BITREVERSE, MVT::v2i64, 1 }, 1387 { ISD::BITREVERSE, MVT::v4i32, 1 }, 1388 { ISD::BITREVERSE, MVT::v8i16, 1 }, 1389 { ISD::BITREVERSE, MVT::v16i8, 1 }, 1390 { ISD::BITREVERSE, MVT::i64, 3 }, 1391 { ISD::BITREVERSE, MVT::i32, 3 }, 1392 { ISD::BITREVERSE, MVT::i16, 3 }, 1393 { ISD::BITREVERSE, MVT::i8, 3 } 1394 }; 1395 static const CostTblEntry AVX2CostTbl[] = { 1396 { ISD::BITREVERSE, MVT::v4i64, 5 }, 1397 { ISD::BITREVERSE, MVT::v8i32, 5 }, 1398 { ISD::BITREVERSE, MVT::v16i16, 5 }, 1399 { ISD::BITREVERSE, MVT::v32i8, 5 }, 1400 { ISD::BSWAP, MVT::v4i64, 1 }, 1401 { ISD::BSWAP, MVT::v8i32, 1 }, 1402 { ISD::BSWAP, MVT::v16i16, 1 }, 1403 { ISD::CTLZ, MVT::v4i64, 23 }, 1404 { ISD::CTLZ, MVT::v8i32, 18 }, 1405 { ISD::CTLZ, MVT::v16i16, 14 }, 1406 { ISD::CTLZ, MVT::v32i8, 9 }, 1407 { ISD::CTPOP, MVT::v4i64, 7 }, 1408 { ISD::CTPOP, MVT::v8i32, 11 }, 1409 { ISD::CTPOP, MVT::v16i16, 9 }, 1410 { ISD::CTPOP, MVT::v32i8, 6 }, 1411 { ISD::CTTZ, MVT::v4i64, 10 }, 1412 { ISD::CTTZ, MVT::v8i32, 14 }, 1413 { ISD::CTTZ, MVT::v16i16, 12 }, 1414 { ISD::CTTZ, MVT::v32i8, 9 }, 1415 { ISD::FSQRT, MVT::f32, 7 }, // Haswell from http://www.agner.org/ 1416 { ISD::FSQRT, MVT::v4f32, 7 }, // Haswell from http://www.agner.org/ 1417 { ISD::FSQRT, MVT::v8f32, 14 }, // Haswell from http://www.agner.org/ 1418 { ISD::FSQRT, MVT::f64, 14 }, // Haswell from http://www.agner.org/ 1419 { ISD::FSQRT, MVT::v2f64, 14 }, // Haswell from http://www.agner.org/ 1420 { ISD::FSQRT, MVT::v4f64, 28 }, // Haswell from http://www.agner.org/ 
  };
  static const CostTblEntry AVX1CostTbl[] = {
    { ISD::BITREVERSE, MVT::v4i64, 10 },
    { ISD::BITREVERSE, MVT::v8i32, 10 },
    { ISD::BITREVERSE, MVT::v16i16, 10 },
    { ISD::BITREVERSE, MVT::v32i8, 10 },
    { ISD::BSWAP, MVT::v4i64, 4 },
    { ISD::BSWAP, MVT::v8i32, 4 },
    { ISD::BSWAP, MVT::v16i16, 4 },
    { ISD::CTLZ, MVT::v4i64, 46 },
    { ISD::CTLZ, MVT::v8i32, 36 },
    { ISD::CTLZ, MVT::v16i16, 28 },
    { ISD::CTLZ, MVT::v32i8, 18 },
    { ISD::CTPOP, MVT::v4i64, 14 },
    { ISD::CTPOP, MVT::v8i32, 22 },
    { ISD::CTPOP, MVT::v16i16, 18 },
    { ISD::CTPOP, MVT::v32i8, 12 },
    { ISD::CTTZ, MVT::v4i64, 20 },
    { ISD::CTTZ, MVT::v8i32, 28 },
    { ISD::CTTZ, MVT::v16i16, 24 },
    { ISD::CTTZ, MVT::v32i8, 18 },
    { ISD::FSQRT, MVT::f32, 14 }, // SNB from http://www.agner.org/
    { ISD::FSQRT, MVT::v4f32, 14 }, // SNB from http://www.agner.org/
    { ISD::FSQRT, MVT::v8f32, 28 }, // SNB from http://www.agner.org/
    { ISD::FSQRT, MVT::f64, 21 }, // SNB from http://www.agner.org/
    { ISD::FSQRT, MVT::v2f64, 21 }, // SNB from http://www.agner.org/
    { ISD::FSQRT, MVT::v4f64, 43 }, // SNB from http://www.agner.org/
  };
  static const CostTblEntry SSE42CostTbl[] = {
    { ISD::FSQRT, MVT::f32, 18 }, // Nehalem from http://www.agner.org/
    { ISD::FSQRT, MVT::v4f32, 18 }, // Nehalem from http://www.agner.org/
  };
  static const CostTblEntry SSSE3CostTbl[] = {
    { ISD::BITREVERSE, MVT::v2i64, 5 },
    { ISD::BITREVERSE, MVT::v4i32, 5 },
    { ISD::BITREVERSE, MVT::v8i16, 5 },
    { ISD::BITREVERSE, MVT::v16i8, 5 },
    { ISD::BSWAP, MVT::v2i64, 1 },
    { ISD::BSWAP, MVT::v4i32, 1 },
    { ISD::BSWAP, MVT::v8i16, 1 },
    { ISD::CTLZ, MVT::v2i64, 23 },
    { ISD::CTLZ, MVT::v4i32, 18 },
    { ISD::CTLZ, MVT::v8i16, 14 },
    { ISD::CTLZ, MVT::v16i8, 9 },
    { ISD::CTPOP, MVT::v2i64, 7 },
    { ISD::CTPOP, MVT::v4i32, 11 },
    { ISD::CTPOP, MVT::v8i16, 9 },
    { ISD::CTPOP, MVT::v16i8, 6 },
    { ISD::CTTZ, MVT::v2i64, 10 },
    { ISD::CTTZ, MVT::v4i32, 14 },
    { ISD::CTTZ, MVT::v8i16, 12 },
    { ISD::CTTZ, MVT::v16i8, 9 }
  };
  static const CostTblEntry SSE2CostTbl[] = {
    { ISD::BITREVERSE, MVT::v2i64, 29 },
    { ISD::BITREVERSE, MVT::v4i32, 27 },
    { ISD::BITREVERSE, MVT::v8i16, 27 },
    { ISD::BITREVERSE, MVT::v16i8, 20 },
    { ISD::BSWAP, MVT::v2i64, 7 },
    { ISD::BSWAP, MVT::v4i32, 7 },
    { ISD::BSWAP, MVT::v8i16, 7 },
    { ISD::CTLZ, MVT::v2i64, 25 },
    { ISD::CTLZ, MVT::v4i32, 26 },
    { ISD::CTLZ, MVT::v8i16, 20 },
    { ISD::CTLZ, MVT::v16i8, 17 },
    { ISD::CTPOP, MVT::v2i64, 12 },
    { ISD::CTPOP, MVT::v4i32, 15 },
    { ISD::CTPOP, MVT::v8i16, 13 },
    { ISD::CTPOP, MVT::v16i8, 10 },
    { ISD::CTTZ, MVT::v2i64, 14 },
    { ISD::CTTZ, MVT::v4i32, 18 },
    { ISD::CTTZ, MVT::v8i16, 16 },
    { ISD::CTTZ, MVT::v16i8, 13 },
    { ISD::FSQRT, MVT::f64, 32 }, // Nehalem from http://www.agner.org/
    { ISD::FSQRT, MVT::v2f64, 32 }, // Nehalem from http://www.agner.org/
  };
  static const CostTblEntry SSE1CostTbl[] = {
    { ISD::FSQRT, MVT::f32, 28 }, // Pentium III from http://www.agner.org/
    { ISD::FSQRT, MVT::v4f32, 56 }, // Pentium III from http://www.agner.org/
  };
  static const CostTblEntry X64CostTbl[] = { // 64-bit targets
    { ISD::BITREVERSE, MVT::i64, 14 }
  };
  static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
    { ISD::BITREVERSE, MVT::i32, 14 },
    { ISD::BITREVERSE, MVT::i16, 14 },
    { ISD::BITREVERSE, MVT::i8, 11 }
  };
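
  // Illustrative example of the lookup cascade below: on an AVX2 subtarget,
  // a ctpop on <8 x i32> legalizes to v8i32 with LT.first == 1, finds no XOP
  // entry, and hits the AVX2 table entry { ISD::CTPOP, MVT::v8i32, 11 },
  // giving a cost of 1 * 11 = 11.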
  unsigned ISD = ISD::DELETED_NODE;
  switch (IID) {
  default:
    break;
  case Intrinsic::bitreverse:
    ISD = ISD::BITREVERSE;
    break;
  case Intrinsic::bswap:
    ISD = ISD::BSWAP;
    break;
  case Intrinsic::ctlz:
    ISD = ISD::CTLZ;
    break;
  case Intrinsic::ctpop:
    ISD = ISD::CTPOP;
    break;
  case Intrinsic::cttz:
    ISD = ISD::CTTZ;
    break;
  case Intrinsic::sqrt:
    ISD = ISD::FSQRT;
    break;
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);
  MVT MTy = LT.second;

  // Attempt to look up the cost.
  if (ST->hasXOP())
    if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSSE3())
    if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->is64Bit())
    if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
    return LT.first * Entry->Cost;

  return BaseT::getIntrinsicInstrCost(IID, RetTy, Tys, FMF,
                                      ScalarizationCostPassed);
}

int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                      ArrayRef<Value *> Args,
                                      FastMathFlags FMF, unsigned VF) {
  return BaseT::getIntrinsicInstrCost(IID, RetTy, Args, FMF, VF);
}

int X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  Type *ScalarType = Val->getScalarType();

  if (Index != -1U) {
    // Legalize the type.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // Floating point scalars are already located in index #0.
    if (ScalarType->isFloatingPointTy() && Index == 0)
      return 0;
  }

  // Add to the base cost if we know that the extracted element of a vector is
  // destined to be moved to and used in the integer register file.
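  // For example, an extractelement whose result is a pointer ends up being
  // used in a GPR, so the pointer-element case below is charged one extra
  // register-file move on top of the base cost.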
  int RegisterFileMoveCost = 0;
  if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy())
    RegisterFileMoveCost = 1;

  return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost;
}

int X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                unsigned AddressSpace) {
  // Handle non-power-of-two vectors such as <3 x float>.
  if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
    unsigned NumElem = VTy->getVectorNumElements();

    // Handle a few common cases:
    // <3 x float>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
      // Cost = 64 bit store + extract + 32 bit store.
      return 3;

    // <3 x double>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
      // Cost = 128 bit store + unpack + 64 bit store.
      return 3;

    // Assume that all other non-power-of-two numbers are scalarized.
    if (!isPowerOf2_32(NumElem)) {
      int Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(), Alignment,
                                        AddressSpace);
      int SplitCost = getScalarizationOverhead(Src, Opcode == Instruction::Load,
                                               Opcode == Instruction::Store);
      return NumElem * Cost + SplitCost;
    }
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  // Each load/store unit costs 1.
  int Cost = LT.first * 1;

  // This isn't exactly right. We're using slow unaligned 32-byte accesses as a
  // proxy for a double-pumped AVX memory interface such as on Sandybridge.
  if (LT.second.getStoreSize() == 32 && ST->isUnalignedMem32Slow())
    Cost *= 2;

  return Cost;
}

int X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
                                      unsigned Alignment,
                                      unsigned AddressSpace) {
  VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy);
  if (!SrcVTy)
    // For a scalar type, take the regular memory op cost, without a mask.
    return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace);

  unsigned NumElem = SrcVTy->getVectorNumElements();
  VectorType *MaskTy =
      VectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem);
  if ((Opcode == Instruction::Load && !isLegalMaskedLoad(SrcVTy)) ||
      (Opcode == Instruction::Store && !isLegalMaskedStore(SrcVTy)) ||
      !isPowerOf2_32(NumElem)) {
    // Scalarization.
    int MaskSplitCost = getScalarizationOverhead(MaskTy, false, true);
    int ScalarCompareCost = getCmpSelInstrCost(
        Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr);
    int BranchCost = getCFInstrCost(Instruction::Br);
    int MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);

    int ValueSplitCost = getScalarizationOverhead(
        SrcVTy, Opcode == Instruction::Load, Opcode == Instruction::Store);
    int MemopCost =
        NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                         Alignment, AddressSpace);
    return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
  auto VT = TLI->getValueType(DL, SrcVTy);
  int Cost = 0;
  if (VT.isSimple() && LT.second != VT.getSimpleVT() &&
      LT.second.getVectorNumElements() == NumElem)
    // Promotion requires an expand/truncate for the data and a shuffle for the
    // mask.
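    // (Which types hit this path depends on the subtarget's legalization
    // rules; the idea is that a same-element-count promotion touches both the
    // data vector and the mask vector, hence the two shuffle costs below.)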
    Cost += getShuffleCost(TTI::SK_Alternate, SrcVTy, 0, nullptr) +
            getShuffleCost(TTI::SK_Alternate, MaskTy, 0, nullptr);

  else if (LT.second.getVectorNumElements() > NumElem) {
    VectorType *NewMaskTy = VectorType::get(MaskTy->getVectorElementType(),
                                            LT.second.getVectorNumElements());
    // Expanding requires filling the mask with zeroes.
    Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, 0, MaskTy);
  }
  if (!ST->hasAVX512())
    return Cost + LT.first * 4; // Each maskmov costs 4.

  // AVX-512 masked load/store is cheaper.
  return Cost + LT.first;
}

int X86TTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
                                          const SCEV *Ptr) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  // The cost of strided access computation is hidden by the indexing modes of
  // X86 regardless of the stride value. We don't believe that there is a
  // difference between constant strided access in general and a constant
  // stride value which is less than or equal to 64.
  // Even in the case of a (loop invariant) stride whose value is not known at
  // compile time, the address computation will not incur more than one extra
  // ADD instruction.
  if (Ty->isVectorTy() && SE) {
    if (!BaseT::isStridedAccess(Ptr))
      return NumVectorInstToHideOverhead;
    if (!BaseT::getConstantStrideStep(SE, Ptr))
      return 1;
  }

  return BaseT::getAddressComputationCost(Ty, SE, Ptr);
}

int X86TTIImpl::getReductionCost(unsigned Opcode, Type *ValTy,
                                 bool IsPairwise) {

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use it as the cost.

  static const CostTblEntry SSE42CostTblPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD, MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD, MVT::v8i16, 5 },
  };

  static const CostTblEntry AVX1CostTblPairWise[] = {
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::FADD, MVT::v4f64, 5 },
    { ISD::FADD, MVT::v8f32, 7 },
    { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD, MVT::v4i64, 5 }, // The data reported by the IACA tool is "4.8".
    { ISD::ADD, MVT::v8i16, 5 },
    { ISD::ADD, MVT::v8i32, 5 },
  };

  static const CostTblEntry SSE42CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD, MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3".
    { ISD::ADD, MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3".
  };

  static const CostTblEntry AVX1CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v4f32, 3 },
    { ISD::FADD, MVT::v4f64, 3 },
    { ISD::FADD, MVT::v8f32, 4 },
    { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "2.8".
    { ISD::ADD, MVT::v4i64, 3 },
    { ISD::ADD, MVT::v8i16, 4 },
    { ISD::ADD, MVT::v8i32, 5 },
  };

  if (IsPairwise) {
    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTblPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE42())
      if (const auto *Entry = CostTableLookup(SSE42CostTblPairWise, ISD, MTy))
        return LT.first * Entry->Cost;
  } else {
    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE42())
      if (const auto *Entry = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy))
        return LT.first * Entry->Cost;
  }

  return BaseT::getReductionCost(Opcode, ValTy, IsPairwise);
}

/// \brief Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
int X86TTIImpl::getIntImmCost(int64_t Val) {
  if (Val == 0)
    return TTI::TCC_Free;

  if (isInt<32>(Val))
    return TTI::TCC_Basic;

  return 2 * TTI::TCC_Basic;
}

int X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Never hoist constants larger than 128 bits, because this might lead to
  // incorrect code generation or assertions in codegen.
  // FIXME: Create a cost model for types larger than i128 once the codegen
  // issues have been fixed.
  if (BitSize > 128)
    return TTI::TCC_Free;

  if (Imm == 0)
    return TTI::TCC_Free;

  // Sign-extend all constants to a multiple of 64 bits.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1, Cost);
}

int X86TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
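    // (Returning a non-free cost for operand 0 marks the base address as
    // worth hoisting, so constant hoisting keeps a single materialized base
    // rather than re-folding it into every offset.)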
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::ICmp:
    // This is an imperfect hack to prevent constant hoisting of
    // compares that might be trying to check if a 64-bit value fits in
    // 32 bits. The backend can optimize these cases using a right shift by 32.
    // Ideally we would check the compare predicate here. There are also other
    // similar immediates the backend can use shifts for.
    if (Idx == 1 && Imm.getBitWidth() == 64) {
      uint64_t ImmVal = Imm.getZExtValue();
      if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff)
        return TTI::TCC_Free;
    }
    ImmIdx = 1;
    break;
  case Instruction::And:
    // We support 64-bit ANDs with immediates with 32 bits of leading zeroes
    // by using a 32-bit operation with implicit zero extension. Detect such
    // immediates here as the normal path expects bit 31 to be sign extended.
    if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
      return TTI::TCC_Free;
    LLVM_FALLTHROUGH;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::Or:
  case Instruction::Xor:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    int NumConstants = (BitSize + 63) / 64;
    int Cost = X86TTIImpl::getIntImmCost(Imm, Ty);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<int>(TTI::TCC_Free)
               : Cost;
  }

  return X86TTIImpl::getIntImmCost(Imm, Ty);
}

int X86TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return X86TTIImpl::getIntImmCost(Imm, Ty);
}

// Return an average cost of a Gather / Scatter instruction; this may be
// improved later.
int X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy, Value *Ptr,
                                unsigned Alignment, unsigned AddressSpace) {

  assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost");
  unsigned VF = SrcVTy->getVectorNumElements();

  // Try to reduce the index size from 64 bits (the default for GEP) to 32
  // bits. This is essential for VF 16. If the index can't be reduced to 32
  // bits, the operation will use 16 x 64 indices, which do not fit in a zmm
  // and need to be split. Also check that the base pointer is the same for
  // all lanes, and that there's at most one variable index.
  auto getIndexSizeInBits = [](Value *Ptr, const DataLayout &DL) {
    unsigned IndexSize = DL.getPointerSizeInBits();
    GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
    if (IndexSize < 64 || !GEP)
      return IndexSize;

    unsigned NumOfVarIndices = 0;
    Value *Ptrs = GEP->getPointerOperand();
    if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs))
      return IndexSize;
    for (unsigned i = 1; i < GEP->getNumOperands(); ++i) {
      if (isa<Constant>(GEP->getOperand(i)))
        continue;
      Type *IndxTy = GEP->getOperand(i)->getType();
      if (IndxTy->isVectorTy())
        IndxTy = IndxTy->getVectorElementType();
      if ((IndxTy->getPrimitiveSizeInBits() == 64 &&
           !isa<SExtInst>(GEP->getOperand(i))) ||
          ++NumOfVarIndices > 1)
        return IndexSize; // 64
    }
    return (unsigned)32;
  };

  // Try to reduce IndexSize to 32 bits for a 16-element vector.
  // By default the IndexSize is equal to the pointer size.
  unsigned IndexSize = (VF >= 16) ? getIndexSizeInBits(Ptr, DL)
                                  : DL.getPointerSizeInBits();

  Type *IndexVTy = VectorType::get(
      IntegerType::get(SrcVTy->getContext(), IndexSize), VF);
  std::pair<int, MVT> IdxsLT = TLI->getTypeLegalizationCost(DL, IndexVTy);
  std::pair<int, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, SrcVTy);
  int SplitFactor = std::max(IdxsLT.first, SrcLT.first);
  if (SplitFactor > 1) {
    // Handle splitting of the vector of pointers.
    Type *SplitSrcTy =
        VectorType::get(SrcVTy->getScalarType(), VF / SplitFactor);
    return SplitFactor * getGSVectorCost(Opcode, SplitSrcTy, Ptr, Alignment,
                                         AddressSpace);
  }

  // The gather / scatter cost is given by Intel architects. It is a rough
  // number since we are looking at one instruction at a time.
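  // As a rough illustration (assuming the type is not split), a v16f32 gather
  // is charged GSOverhead plus 16 times the scalar load cost computed below.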
  const int GSOverhead = 2;
  return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                           Alignment, AddressSpace);
}

/// Return the cost of full scalarization of a gather / scatter operation.
///
/// Opcode - Load or Store instruction.
/// SrcVTy - The type of the data vector that should be gathered or scattered.
/// VariableMask - The mask is non-constant at compile time.
/// Alignment - Alignment for one element.
/// AddressSpace - pointer[s] address space.
///
int X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy,
                                bool VariableMask, unsigned Alignment,
                                unsigned AddressSpace) {
  unsigned VF = SrcVTy->getVectorNumElements();

  int MaskUnpackCost = 0;
  if (VariableMask) {
    VectorType *MaskTy =
        VectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF);
    MaskUnpackCost = getScalarizationOverhead(MaskTy, false, true);
    int ScalarCompareCost =
        getCmpSelInstrCost(Instruction::ICmp,
                           Type::getInt1Ty(SrcVTy->getContext()), nullptr);
    int BranchCost = getCFInstrCost(Instruction::Br);
    MaskUnpackCost += VF * (BranchCost + ScalarCompareCost);
  }

  // The cost of the scalar loads/stores.
  int MemoryOpCost = VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                          Alignment, AddressSpace);

  int InsertExtractCost = 0;
  if (Opcode == Instruction::Load)
    for (unsigned i = 0; i < VF; ++i)
      // Add the cost of inserting each scalar load into the vector.
      InsertExtractCost +=
          getVectorInstrCost(Instruction::InsertElement, SrcVTy, i);
  else
    for (unsigned i = 0; i < VF; ++i)
      // Add the cost of extracting each element out of the data vector.
      InsertExtractCost +=
          getVectorInstrCost(Instruction::ExtractElement, SrcVTy, i);

  return MemoryOpCost + MaskUnpackCost + InsertExtractCost;
}

/// Calculate the cost of a Gather / Scatter operation.
int X86TTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *SrcVTy,
                                       Value *Ptr, bool VariableMask,
                                       unsigned Alignment) {
  assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter");
  unsigned VF = SrcVTy->getVectorNumElements();
  PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
  if (!PtrTy && Ptr->getType()->isVectorTy())
    PtrTy = dyn_cast<PointerType>(Ptr->getType()->getVectorElementType());
  assert(PtrTy && "Unexpected type for Ptr argument");
  unsigned AddressSpace = PtrTy->getAddressSpace();

  bool Scalarize = false;
  if ((Opcode == Instruction::Load && !isLegalMaskedGather(SrcVTy)) ||
      (Opcode == Instruction::Store && !isLegalMaskedScatter(SrcVTy)))
    Scalarize = true;
  // Gather / Scatter for a 2-element vector is not profitable on KNL / SKX.
  // A 4-element gather/scatter instruction does not exist on KNL.
  // We could extend it to 8 elements, but zeroing the upper bits of
  // the mask vector will add more instructions. Right now we give the scalar
  // cost of a 4-element vector on KNL. TODO: check whether the gather/scatter
  // instruction is better in the VariableMask case.
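  // For example, a 4-element gather is scalarized on KNL (no VLX), while on a
  // VLX-capable subtarget such as SKX it takes the vector path through
  // getGSVectorCost below.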
  if (VF == 2 || (VF == 4 && !ST->hasVLX()))
    Scalarize = true;

  if (Scalarize)
    return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment,
                           AddressSpace);

  return getGSVectorCost(Opcode, SrcVTy, Ptr, Alignment, AddressSpace);
}

bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy) {
  Type *ScalarTy = DataTy->getScalarType();
  int DataWidth = isa<PointerType>(ScalarTy) ?
    DL.getPointerSizeInBits() : ScalarTy->getPrimitiveSizeInBits();

  return ((DataWidth == 32 || DataWidth == 64) && ST->hasAVX()) ||
         ((DataWidth == 8 || DataWidth == 16) && ST->hasBWI());
}

bool X86TTIImpl::isLegalMaskedStore(Type *DataType) {
  return isLegalMaskedLoad(DataType);
}

bool X86TTIImpl::isLegalMaskedGather(Type *DataTy) {
  // This function is currently called in two cases: from the Loop Vectorizer
  // and from the Scalarizer.
  // When the Loop Vectorizer asks about legality of the feature,
  // the vectorization factor is not calculated yet. The Loop Vectorizer
  // sends a scalar type and the decision is based on the width of the
  // scalar element.
  // Later on, the cost model will estimate the usage of this intrinsic based
  // on the vector type.
  // The Scalarizer asks again about legality. It sends a vector type.
  // In this case we can reject non-power-of-2 vectors.
  if (isa<VectorType>(DataTy) && !isPowerOf2_32(DataTy->getVectorNumElements()))
    return false;
  Type *ScalarTy = DataTy->getScalarType();
  int DataWidth = isa<PointerType>(ScalarTy) ?
    DL.getPointerSizeInBits() : ScalarTy->getPrimitiveSizeInBits();

  // AVX-512 allows gather and scatter.
  return (DataWidth == 32 || DataWidth == 64) && ST->hasAVX512();
}

bool X86TTIImpl::isLegalMaskedScatter(Type *DataType) {
  return isLegalMaskedGather(DataType);
}

bool X86TTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();

  // Treat this as a subset check on the subtarget features.
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // FIXME: This is likely too limiting as it will include subtarget features
  // that we might not care about for inlining, but it is conservatively
  // correct.
  return (CallerBits & CalleeBits) == CalleeBits;
}

bool X86TTIImpl::enableInterleavedAccessVectorization() {
  // TODO: We expect this to be beneficial regardless of arch,
  // but there are currently some unexplained performance artifacts on Atom.
  // As a temporary solution, disable on Atom.
  return !(ST->isAtom());
}

// Get an estimate for interleaved load/store operations and strided loads.
// \p Indices contains the indices for a strided load.
// \p Factor - the factor of interleaving.
// AVX-512 provides 3-src shuffles that significantly reduce the cost.
int X86TTIImpl::getInterleavedMemoryOpCostAVX512(unsigned Opcode, Type *VecTy,
                                                 unsigned Factor,
                                                 ArrayRef<unsigned> Indices,
                                                 unsigned Alignment,
                                                 unsigned AddressSpace) {

  // VecTy for an interleaved memop is <VF*Factor x Elt>.
  // So, for VF=4, Interleave Factor = 3, Element type = i32, we have
  // VecTy = <12 x i32>.
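  // Continuing that example on AVX-512 (illustrative): <12 x i32> stores 48
  // bytes and will typically be widened to a single legal v16i32 (64 bytes),
  // so the NumOfMemOps computed below is (48 + 64 - 1) / 64 = 1.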

  // Calculate the number of memory operations (NumOfMemOps) required to
  // load/store the VecTy.
  MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
  unsigned VecTySize = DL.getTypeStoreSize(VecTy);
  unsigned LegalVTSize = LegalVT.getStoreSize();
  unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;

  // Get the cost of one memory operation.
  Type *SingleMemOpTy = VectorType::get(VecTy->getVectorElementType(),
                                        LegalVT.getVectorNumElements());
  unsigned MemOpCost =
      getMemoryOpCost(Opcode, SingleMemOpTy, Alignment, AddressSpace);

  if (Opcode == Instruction::Load) {
    // The kind of shuffle depends on the number of loaded values.
    // If we load the entire data in one register, we can use a 1-src shuffle.
    // Otherwise, we'll merge 2 sources in each operation.
    TTI::ShuffleKind ShuffleKind =
        (NumOfMemOps > 1) ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc;

    unsigned ShuffleCost =
        getShuffleCost(ShuffleKind, SingleMemOpTy, 0, nullptr);

    unsigned NumOfLoadsInInterleaveGrp =
        Indices.size() ? Indices.size() : Factor;
    Type *ResultTy = VectorType::get(VecTy->getVectorElementType(),
                                     VecTy->getVectorNumElements() / Factor);
    unsigned NumOfResults =
        getTLI()->getTypeLegalizationCost(DL, ResultTy).first *
        NumOfLoadsInInterleaveGrp;

    // About half of the loads may be folded into shuffles when we have only
    // one result. If we have more than one result, we do not fold loads at
    // all.
    unsigned NumOfUnfoldedLoads =
        NumOfResults > 1 ? NumOfMemOps : NumOfMemOps / 2;

    // Get the number of shuffle operations per result.
    unsigned NumOfShufflesPerResult =
        std::max((unsigned)1, (unsigned)(NumOfMemOps - 1));

    // The SK_PermuteTwoSrc shuffle clobbers one of the source operands.
    // When we have more than one destination, we need additional instructions
    // to keep the sources.
    unsigned NumOfMoves = 0;
    if (NumOfResults > 1 && ShuffleKind == TTI::SK_PermuteTwoSrc)
      NumOfMoves = NumOfResults * NumOfShufflesPerResult / 2;

    int Cost = NumOfResults * NumOfShufflesPerResult * ShuffleCost +
               NumOfUnfoldedLoads * MemOpCost + NumOfMoves;

    return Cost;
  }

  // Store.
  assert(Opcode == Instruction::Store &&
         "Expected Store Instruction at this point");

  // There are no strided stores at the moment, and a store can't be folded
  // into a shuffle.
  unsigned NumOfSources = Factor; // The number of values to be merged.
  unsigned ShuffleCost =
      getShuffleCost(TTI::SK_PermuteTwoSrc, SingleMemOpTy, 0, nullptr);
  unsigned NumOfShufflesPerStore = NumOfSources - 1;

  // The SK_PermuteTwoSrc shuffle clobbers one of the source operands.
  // We need additional instructions to keep the sources.
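  // Rough store example (illustrative): with Factor = 3 there are
  // NumOfShufflesPerStore = 2 two-source shuffles per memory operation, so
  // each of the NumOfMemOps stores is charged MemOpCost + 2 * ShuffleCost,
  // plus the moves computed below.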
  unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2;
  int Cost = NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) +
             NumOfMoves;
  return Cost;
}

int X86TTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                           unsigned Factor,
                                           ArrayRef<unsigned> Indices,
                                           unsigned Alignment,
                                           unsigned AddressSpace) {
  auto isSupportedOnAVX512 = [](Type *VecTy, bool &RequiresBW) {
    RequiresBW = false;
    Type *EltTy = VecTy->getVectorElementType();
    if (EltTy->isFloatTy() || EltTy->isDoubleTy() || EltTy->isIntegerTy(64) ||
        EltTy->isIntegerTy(32) || EltTy->isPointerTy())
      return true;
    if (EltTy->isIntegerTy(16) || EltTy->isIntegerTy(8)) {
      RequiresBW = true;
      return true;
    }
    return false;
  };
  bool RequiresBW;
  bool HasAVX512Solution = isSupportedOnAVX512(VecTy, RequiresBW);
  if (ST->hasAVX512() && HasAVX512Solution && (!RequiresBW || ST->hasBWI()))
    return getInterleavedMemoryOpCostAVX512(Opcode, VecTy, Factor, Indices,
                                            Alignment, AddressSpace);
  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace);
}