//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//
/// A note about the Cost Model numbers used below: the numbers correspond to
/// some "generic" X86 CPU rather than to a concrete CPU model. Usually the
/// numbers correspond to the CPU where the feature first appeared. For
/// example, if we do Subtarget.hasSSE42() in the lookups below, the cost is
/// based on Nehalem as that was the first CPU to support that feature level
/// and thus most likely has the worst case cost.
/// Some examples of other technologies/CPUs:
///   SSE 3   - Pentium4 / Athlon64
///   SSE 4.1 - Penryn
///   SSE 4.2 - Nehalem
///   AVX     - Sandy Bridge
///   AVX2    - Haswell
///   AVX-512 - Xeon Phi / Skylake
/// And some examples of instruction target dependent costs (latency):
///                   divss   sqrtss   rsqrtss
///   AMD K7          11-16   19       3
///   Piledriver      9-24    13-15    5
///   Jaguar          14      16       2
///   Pentium II,III  18      30       2
///   Nehalem         7-14    7-18     3
///   Haswell         10-13   11       5
/// TODO: Develop and implement the target dependent cost model and
/// specialize cost numbers for different Cost Model Targets such as throughput,
/// code size, latency and uop count.
//===----------------------------------------------------------------------===//

#include "X86TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

#define DEBUG_TYPE "x86tti"

//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  //   instructions is inefficient. Once the problem is fixed, we should
  //   call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ?
             TTI::PSK_FastHardware : TTI::PSK_Software;
}

llvm::Optional<unsigned> X86TTIImpl::getCacheSize(
    TargetTransformInfo::CacheLevel Level) const {
  switch (Level) {
  case TargetTransformInfo::CacheLevel::L1D:
    //   - Penryn
    //   - Nehalem
    //   - Westmere
    //   - Sandy Bridge
    //   - Ivy Bridge
    //   - Haswell
    //   - Broadwell
    //   - Skylake
    //   - Kabylake
    return 32 * 1024;  //  32 KByte
  case TargetTransformInfo::CacheLevel::L2D:
    //   - Penryn
    //   - Nehalem
    //   - Westmere
    //   - Sandy Bridge
    //   - Ivy Bridge
    //   - Haswell
    //   - Broadwell
    //   - Skylake
    //   - Kabylake
    return 256 * 1024; // 256 KByte
  }

  llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
}

llvm::Optional<unsigned> X86TTIImpl::getCacheAssociativity(
    TargetTransformInfo::CacheLevel Level) const {
  //   - Penryn
  //   - Nehalem
  //   - Westmere
  //   - Sandy Bridge
  //   - Ivy Bridge
  //   - Haswell
  //   - Broadwell
  //   - Skylake
  //   - Kabylake
  switch (Level) {
  case TargetTransformInfo::CacheLevel::L1D:
    LLVM_FALLTHROUGH;
  case TargetTransformInfo::CacheLevel::L2D:
    return 8;
  }

  llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
}

unsigned X86TTIImpl::getNumberOfRegisters(bool Vector) {
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit()) {
    if (Vector && ST->hasAVX512())
      return 32;
    return 16;
  }
  return 8;
}

unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) const {
  unsigned PreferVectorWidth = ST->getPreferVectorWidth();
  if (Vector) {
    if (ST->hasAVX512() && PreferVectorWidth >= 512)
      return 512;
    if (ST->hasAVX() && PreferVectorWidth >= 256)
      return 256;
    if (ST->hasSSE1() && PreferVectorWidth >= 128)
      return 128;
    return 0;
  }

  if (ST->is64Bit())
    return 64;

  return 32;
}

unsigned X86TTIImpl::getLoadStoreVecRegBitWidth(unsigned) const {
  return getRegisterBitWidth(true);
}

unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // If the loop will not be vectorized, don't interleave the loop.
  // Let the regular unroller unroll the loop instead, which saves the
  // overflow-check and memory-check cost.
  if (VF == 1)
    return 1;

  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}

int X86TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty,
    TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
    TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo,
    ArrayRef<const Value *> Args) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry GLMCostTable[] = {
    { ISD::FDIV,  MVT::f32,   18 }, // divss
    { ISD::FDIV,  MVT::v4f32, 35 }, // divps
    { ISD::FDIV,  MVT::f64,   33 }, // divsd
    { ISD::FDIV,  MVT::v2f64, 65 }, // divpd
  };

  if (ST->isGLM())
    if (const auto *Entry = CostTableLookup(GLMCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SLMCostTable[] = {
    { ISD::MUL,   MVT::v4i32, 11 }, // pmulld
    { ISD::MUL,   MVT::v8i16,  2 }, // pmullw
    { ISD::MUL,   MVT::v16i8, 14 }, // extend/pmullw/trunc sequence.
    { ISD::FMUL,  MVT::f64,    2 }, // mulsd
    { ISD::FMUL,  MVT::v2f64,  4 }, // mulpd
    { ISD::FMUL,  MVT::v4f32,  2 }, // mulps
    { ISD::FDIV,  MVT::f32,   17 }, // divss
    { ISD::FDIV,  MVT::v4f32, 39 }, // divps
    { ISD::FDIV,  MVT::f64,   32 }, // divsd
    { ISD::FDIV,  MVT::v2f64, 69 }, // divpd
    { ISD::FADD,  MVT::v2f64,  2 }, // addpd
    { ISD::FSUB,  MVT::v2f64,  2 }, // subpd
    // v2i64/v4i64 mul is custom lowered as a series of long:
    // multiplies(3), shifts(3) and adds(2)
    // slm muldq version throughput is 2 and addq throughput 4
    // thus: 3X2 (muldq throughput) + 3X1 (shift throughput) +
    // 3X4 (addq throughput) = 17
    { ISD::MUL,   MVT::v2i64, 17 },
    // slm addq\subq throughput is 4
    { ISD::ADD,   MVT::v2i64,  4 },
    { ISD::SUB,   MVT::v2i64,  4 },
  };

  if (ST->isSLM()) {
    if (Args.size() == 2 && ISD == ISD::MUL && LT.second == MVT::v4i32) {
      // Check if the operands can be shrunk into a smaller datatype.
      bool Op1Signed = false;
      unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
      bool Op2Signed = false;
      unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);

      bool signedMode = Op1Signed | Op2Signed;
      unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);

      if (OpMinSize <= 7)
        return LT.first * 3; // pmullw/sext
      if (!signedMode && OpMinSize <= 8)
        return LT.first * 3; // pmullw/zext
      if (OpMinSize <= 15)
        return LT.first * 5; // pmullw/pmulhw/pshuf
      if (!signedMode && OpMinSize <= 16)
        return LT.first * 5; // pmullw/pmulhw/pshuf
    }

    if (const auto *Entry = CostTableLookup(SLMCostTable, ISD,
                                            LT.second)) {
      return LT.first * Entry->Cost;
    }
  }

  if (ISD == ISD::SDIV &&
      Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    // On X86, vector signed division by a power-of-two constant is
    // normally expanded to the sequence SRA + SRL + ADD + SRA.
    // The OperandValue properties may not be the same as those of the
    // previous operation; conservatively assume OP_None.
    int Cost = 2 * getArithmeticInstrCost(Instruction::AShr, Ty, Op1Info,
                                          Op2Info, TargetTransformInfo::OP_None,
                                          TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::LShr, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Add, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);

    return Cost;
  }

  static const CostTblEntry AVX512BWUniformConstCostTable[] = {
    { ISD::SHL,  MVT::v64i8,   2 }, // psllw + pand.
    { ISD::SRL,  MVT::v64i8,   2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v64i8,   4 }, // psrlw, pand, pxor, psubb.

    { ISD::SDIV, MVT::v32i16,  6 }, // vpmulhw sequence
    { ISD::UDIV, MVT::v32i16,  6 }, // vpmulhuw sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasBWI()) {
    if (const auto *Entry = CostTableLookup(AVX512BWUniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512UniformConstCostTable[] = {
    { ISD::SRA,  MVT::v2i64,   1 },
    { ISD::SRA,  MVT::v4i64,   1 },
    { ISD::SRA,  MVT::v8i64,   1 },

    { ISD::SDIV, MVT::v16i32, 15 }, // vpmuldq sequence
    { ISD::UDIV, MVT::v16i32, 15 }, // vpmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX512()) {
    if (const auto *Entry = CostTableLookup(AVX512UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX2UniformConstCostTable[] = {
    { ISD::SHL,  MVT::v32i8,   2 }, // psllw + pand.
    { ISD::SRL,  MVT::v32i8,   2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v32i8,   4 }, // psrlw, pand, pxor, psubb.

    { ISD::SRA,  MVT::v4i64,   4 }, // 2 x psrad + shuffle.

    { ISD::SDIV, MVT::v16i16,  6 }, // vpmulhw sequence
    { ISD::UDIV, MVT::v16i16,  6 }, // vpmulhuw sequence
    { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
    { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX2()) {
    if (const auto *Entry = CostTableLookup(AVX2UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformConstCostTable[] = {
    { ISD::SHL,  MVT::v16i8,     2 }, // psllw + pand.
    { ISD::SRL,  MVT::v16i8,     2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v16i8,     4 }, // psrlw, pand, pxor, psubb.

    { ISD::SHL,  MVT::v32i8,   4+2 }, // 2*(psllw + pand) + split.
    { ISD::SRL,  MVT::v32i8,   4+2 }, // 2*(psrlw + pand) + split.
    { ISD::SRA,  MVT::v32i8,   8+2 }, // 2*(psrlw, pand, pxor, psubb) + split.

    { ISD::SDIV, MVT::v16i16, 12+2 }, // 2*pmulhw sequence + split.
    { ISD::SDIV, MVT::v8i16,     6 }, // pmulhw sequence
    { ISD::UDIV, MVT::v16i16, 12+2 }, // 2*pmulhuw sequence + split.
    { ISD::UDIV, MVT::v8i16,     6 }, // pmulhuw sequence
    { ISD::SDIV, MVT::v8i32,  38+2 }, // 2*pmuludq sequence + split.
    { ISD::SDIV, MVT::v4i32,    19 }, // pmuludq sequence
    { ISD::UDIV, MVT::v8i32,  30+2 }, // 2*pmuludq sequence + split.
    { ISD::UDIV, MVT::v4i32,    15 }, // pmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2()) {
    // pmuldq sequence.
    if (ISD == ISD::SDIV && LT.second == MVT::v8i32 && ST->hasAVX())
      return LT.first * 32;
    if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 15;

    // XOP has faster vXi8 shifts.
    if ((ISD != ISD::SHL && ISD != ISD::SRL && ISD != ISD::SRA) ||
        !ST->hasXOP())
      if (const auto *Entry =
              CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second))
        return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX2UniformCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i16, 1 }, // psllw.
    { ISD::SRL,  MVT::v16i16, 1 }, // psrlw.
    { ISD::SRA,  MVT::v16i16, 1 }, // psraw.
  };

  if (ST->hasAVX2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(AVX2UniformCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v8i16, 1 }, // psllw.
    { ISD::SHL,  MVT::v4i32, 1 }, // pslld
    { ISD::SHL,  MVT::v2i64, 1 }, // psllq.

    { ISD::SRL,  MVT::v8i16, 1 }, // psrlw.
    { ISD::SRL,  MVT::v4i32, 1 }, // psrld.
    { ISD::SRL,  MVT::v2i64, 1 }, // psrlq.

    { ISD::SRA,  MVT::v8i16, 1 }, // psraw.
    { ISD::SRA,  MVT::v4i32, 1 }, // psrad.
  };

  if (ST->hasSSE2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(SSE2UniformCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512DQCostTable[] = {
    { ISD::MUL,  MVT::v2i64, 1 },
    { ISD::MUL,  MVT::v4i64, 1 },
    { ISD::MUL,  MVT::v8i64, 1 }
  };

  // Look for AVX512DQ lowering tricks for custom cases.
  if (ST->hasDQI())
    if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512BWCostTable[] = {
    { ISD::SHL,   MVT::v8i16,   1 }, // vpsllvw
    { ISD::SRL,   MVT::v8i16,   1 }, // vpsrlvw
    { ISD::SRA,   MVT::v8i16,   1 }, // vpsravw

    { ISD::SHL,   MVT::v16i16,  1 }, // vpsllvw
    { ISD::SRL,   MVT::v16i16,  1 }, // vpsrlvw
    { ISD::SRA,   MVT::v16i16,  1 }, // vpsravw

    { ISD::SHL,   MVT::v32i16,  1 }, // vpsllvw
    { ISD::SRL,   MVT::v32i16,  1 }, // vpsrlvw
    { ISD::SRA,   MVT::v32i16,  1 }, // vpsravw

    { ISD::SHL,   MVT::v64i8,  11 }, // vpblendvb sequence.
    { ISD::SRL,   MVT::v64i8,  11 }, // vpblendvb sequence.
    { ISD::SRA,   MVT::v64i8,  24 }, // vpblendvb sequence.

    { ISD::MUL,   MVT::v64i8,  11 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,   MVT::v32i8,   4 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,   MVT::v16i8,   4 }, // extend/pmullw/trunc sequence.
  };

  // Look for AVX512BW lowering tricks for custom cases.
  if (ST->hasBWI())
    if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512CostTable[] = {
    { ISD::SHL,     MVT::v16i32,  1 },
    { ISD::SRL,     MVT::v16i32,  1 },
    { ISD::SRA,     MVT::v16i32,  1 },

    { ISD::SHL,     MVT::v8i64,   1 },
    { ISD::SRL,     MVT::v8i64,   1 },

    { ISD::SRA,     MVT::v2i64,   1 },
    { ISD::SRA,     MVT::v4i64,   1 },
    { ISD::SRA,     MVT::v8i64,   1 },

    { ISD::MUL,     MVT::v32i8,  13 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,     MVT::v16i8,   5 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,     MVT::v16i32,  1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,     MVT::v8i32,   1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,     MVT::v4i32,   1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,     MVT::v8i64,   8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FADD,    MVT::v8f64,   1 }, // Skylake from http://www.agner.org/
    { ISD::FSUB,    MVT::v8f64,   1 }, // Skylake from http://www.agner.org/
    { ISD::FMUL,    MVT::v8f64,   1 }, // Skylake from http://www.agner.org/

    { ISD::FADD,    MVT::v16f32,  1 }, // Skylake from http://www.agner.org/
    { ISD::FSUB,    MVT::v16f32,  1 }, // Skylake from http://www.agner.org/
    { ISD::FMUL,    MVT::v16f32,  1 }, // Skylake from http://www.agner.org/
  };

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX2ShiftCostTable[] = {
    // Shifts on v4i64/v8i32 are legal on AVX2 even though we declare them
    // custom, so that we can detect the cases where the shift amount is a
    // scalar.
    { ISD::SHL,     MVT::v4i32,  1 },
    { ISD::SRL,     MVT::v4i32,  1 },
    { ISD::SRA,     MVT::v4i32,  1 },
    { ISD::SHL,     MVT::v8i32,  1 },
    { ISD::SRL,     MVT::v8i32,  1 },
    { ISD::SRA,     MVT::v8i32,  1 },
    { ISD::SHL,     MVT::v2i64,  1 },
    { ISD::SRL,     MVT::v2i64,  1 },
    { ISD::SHL,     MVT::v4i64,  1 },
    { ISD::SRL,     MVT::v4i64,  1 },
  };

  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return getArithmeticInstrCost(Instruction::Mul, Ty, Op1Info, Op2Info,
                                    TargetTransformInfo::OP_None,
                                    TargetTransformInfo::OP_None);

    if (const auto *Entry = CostTableLookup(AVX2ShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry XOPShiftCostTable[] = {
    // 128bit shifts take 1cy, but right shifts require negation beforehand.
    { ISD::SHL,     MVT::v16i8,    1 },
    { ISD::SRL,     MVT::v16i8,    2 },
    { ISD::SRA,     MVT::v16i8,    2 },
    { ISD::SHL,     MVT::v8i16,    1 },
    { ISD::SRL,     MVT::v8i16,    2 },
    { ISD::SRA,     MVT::v8i16,    2 },
    { ISD::SHL,     MVT::v4i32,    1 },
    { ISD::SRL,     MVT::v4i32,    2 },
    { ISD::SRA,     MVT::v4i32,    2 },
    { ISD::SHL,     MVT::v2i64,    1 },
    { ISD::SRL,     MVT::v2i64,    2 },
    { ISD::SRA,     MVT::v2i64,    2 },
    // 256bit shifts require splitting if AVX2 didn't catch them above.
    { ISD::SHL,     MVT::v32i8,  2+2 },
    { ISD::SRL,     MVT::v32i8,  4+2 },
    { ISD::SRA,     MVT::v32i8,  4+2 },
    { ISD::SHL,     MVT::v16i16, 2+2 },
    { ISD::SRL,     MVT::v16i16, 4+2 },
    { ISD::SRA,     MVT::v16i16, 4+2 },
    { ISD::SHL,     MVT::v8i32,  2+2 },
    { ISD::SRL,     MVT::v8i32,  4+2 },
    { ISD::SRA,     MVT::v8i32,  4+2 },
    { ISD::SHL,     MVT::v4i64,  2+2 },
    { ISD::SRL,     MVT::v4i64,  4+2 },
    { ISD::SRA,     MVT::v4i64,  4+2 },
  };

  // Look for XOP lowering tricks.
  if (ST->hasXOP())
    if (const auto *Entry = CostTableLookup(XOPShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE2UniformShiftCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i16, 2+2 }, // 2*psllw + split.
    { ISD::SHL,  MVT::v8i32,  2+2 }, // 2*pslld + split.
    { ISD::SHL,  MVT::v4i64,  2+2 }, // 2*psllq + split.

    { ISD::SRL,  MVT::v16i16, 2+2 }, // 2*psrlw + split.
    { ISD::SRL,  MVT::v8i32,  2+2 }, // 2*psrld + split.
    { ISD::SRL,  MVT::v4i64,  2+2 }, // 2*psrlq + split.

    { ISD::SRA,  MVT::v16i16, 2+2 }, // 2*psraw + split.
    { ISD::SRA,  MVT::v8i32,  2+2 }, // 2*psrad + split.
    { ISD::SRA,  MVT::v2i64,    4 }, // 2*psrad + shuffle.
    { ISD::SRA,  MVT::v4i64,  8+2 }, // 2*(2*psrad + shuffle) + split.
  };

  if (ST->hasSSE2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {

    // Handle AVX2 uniform v4i64 ISD::SRA, it's not worth a table.
    if (ISD == ISD::SRA && LT.second == MVT::v4i64 && ST->hasAVX2())
      return LT.first * 4; // 2*psrad + shuffle.

    if (const auto *Entry =
            CostTableLookup(SSE2UniformShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  if (ISD == ISD::SHL &&
      Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
    MVT VT = LT.second;
    // A vector shift left by a non-uniform constant can be lowered into a
    // vector multiply.
    if (((VT == MVT::v8i16 || VT == MVT::v4i32) && ST->hasSSE2()) ||
        ((VT == MVT::v16i16 || VT == MVT::v8i32) && ST->hasAVX()))
      ISD = ISD::MUL;
  }

  static const CostTblEntry AVX2CostTable[] = {
    { ISD::SHL,  MVT::v32i8,  11 }, // vpblendvb sequence.
    { ISD::SHL,  MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRL,  MVT::v32i8,  11 }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRA,  MVT::v32i8,  24 }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v16i16, 10 }, // extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v2i64,   4 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,   4 }, // srl/xor/sub sequence.

    { ISD::SUB,  MVT::v32i8,   1 }, // psubb
    { ISD::ADD,  MVT::v32i8,   1 }, // paddb
    { ISD::SUB,  MVT::v16i16,  1 }, // psubw
    { ISD::ADD,  MVT::v16i16,  1 }, // paddw
    { ISD::SUB,  MVT::v8i32,   1 }, // psubd
    { ISD::ADD,  MVT::v8i32,   1 }, // paddd
    { ISD::SUB,  MVT::v4i64,   1 }, // psubq
    { ISD::ADD,  MVT::v4i64,   1 }, // paddq

    { ISD::MUL,  MVT::v32i8,  17 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i8,   7 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i16,  1 }, // pmullw
    { ISD::MUL,  MVT::v8i32,   2 }, // pmulld (Haswell from agner.org)
    { ISD::MUL,  MVT::v4i64,   8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FADD, MVT::v4f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FADD, MVT::v8f32,   1 }, // Haswell from http://www.agner.org/
    { ISD::FSUB, MVT::v4f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FSUB, MVT::v8f32,   1 }, // Haswell from http://www.agner.org/
    { ISD::FMUL, MVT::v4f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FMUL, MVT::v8f32,   1 }, // Haswell from http://www.agner.org/

    { ISD::FDIV, MVT::f32,     7 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,   7 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v8f32,  14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::f64,    14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,  14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v4f64,  28 }, // Haswell from http://www.agner.org/
  };

  // Look for AVX2 lowering tricks for custom cases.
  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL,     MVT::v16i16,  4 },
    { ISD::MUL,     MVT::v8i32,   4 },
    { ISD::SUB,     MVT::v32i8,   4 },
    { ISD::ADD,     MVT::v32i8,   4 },
    { ISD::SUB,     MVT::v16i16,  4 },
    { ISD::ADD,     MVT::v16i16,  4 },
    { ISD::SUB,     MVT::v8i32,   4 },
    { ISD::ADD,     MVT::v8i32,   4 },
    { ISD::SUB,     MVT::v4i64,   4 },
    { ISD::ADD,     MVT::v4i64,   4 },

    // A v4i64 multiply is custom lowered as two split v2i64 vectors that then
    // are lowered as a series of long multiplies(3), shifts(3) and adds(2)
    // Because we believe v4i64 to be a legal type, we must also include the
    // extract+insert in the cost table. Therefore, the cost here is 18
    // instead of 8.
    { ISD::MUL,     MVT::v4i64,  18 },

    { ISD::MUL,     MVT::v32i8,  26 }, // extend/pmullw/trunc sequence.

    { ISD::FDIV,    MVT::f32,    14 }, // SNB from http://www.agner.org/
    { ISD::FDIV,    MVT::v4f32,  14 }, // SNB from http://www.agner.org/
    { ISD::FDIV,    MVT::v8f32,  28 }, // SNB from http://www.agner.org/
    { ISD::FDIV,    MVT::f64,    22 }, // SNB from http://www.agner.org/
    { ISD::FDIV,    MVT::v2f64,  22 }, // SNB from http://www.agner.org/
    { ISD::FDIV,    MVT::v4f64,  44 }, // SNB from http://www.agner.org/
  };

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE42CostTable[] = {
    { ISD::FADD, MVT::f64,    1 }, // Nehalem from http://www.agner.org/
    { ISD::FADD, MVT::f32,    1 }, // Nehalem from http://www.agner.org/
    { ISD::FADD, MVT::v2f64,  1 }, // Nehalem from http://www.agner.org/
    { ISD::FADD, MVT::v4f32,  1 }, // Nehalem from http://www.agner.org/

    { ISD::FSUB, MVT::f64,    1 }, // Nehalem from http://www.agner.org/
    { ISD::FSUB, MVT::f32,    1 }, // Nehalem from http://www.agner.org/
    { ISD::FSUB, MVT::v2f64,  1 }, // Nehalem from http://www.agner.org/
    { ISD::FSUB, MVT::v4f32,  1 }, // Nehalem from http://www.agner.org/

    { ISD::FMUL, MVT::f64,    1 }, // Nehalem from http://www.agner.org/
    { ISD::FMUL, MVT::f32,    1 }, // Nehalem from http://www.agner.org/
    { ISD::FMUL, MVT::v2f64,  1 }, // Nehalem from http://www.agner.org/
    { ISD::FMUL, MVT::v4f32,  1 }, // Nehalem from http://www.agner.org/

    { ISD::FDIV, MVT::f32,   14 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32, 14 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::f64,   22 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64, 22 }, // Nehalem from http://www.agner.org/
  };

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE41CostTable[] = {
    { ISD::SHL,  MVT::v16i8,      11 }, // pblendvb sequence.
    { ISD::SHL,  MVT::v32i8,  2*11+2 }, // pblendvb sequence + split.
    { ISD::SHL,  MVT::v8i16,      14 }, // pblendvb sequence.
    { ISD::SHL,  MVT::v16i16, 2*14+2 }, // pblendvb sequence + split.
    { ISD::SHL,  MVT::v4i32,       4 }, // pslld/paddd/cvttps2dq/pmulld
    { ISD::SHL,  MVT::v8i32,   2*4+2 }, // pslld/paddd/cvttps2dq/pmulld + split

    { ISD::SRL,  MVT::v16i8,      12 }, // pblendvb sequence.
    { ISD::SRL,  MVT::v32i8,  2*12+2 }, // pblendvb sequence + split.
    { ISD::SRL,  MVT::v8i16,      14 }, // pblendvb sequence.
    { ISD::SRL,  MVT::v16i16, 2*14+2 }, // pblendvb sequence + split.
    { ISD::SRL,  MVT::v4i32,      11 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v8i32,  2*11+2 }, // Shift each lane + blend + split.

    { ISD::SRA,  MVT::v16i8,      24 }, // pblendvb sequence.
    { ISD::SRA,  MVT::v32i8,  2*24+2 }, // pblendvb sequence + split.
    { ISD::SRA,  MVT::v8i16,      14 }, // pblendvb sequence.
    { ISD::SRA,  MVT::v16i16, 2*14+2 }, // pblendvb sequence + split.
    { ISD::SRA,  MVT::v4i32,      12 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v8i32,  2*12+2 }, // Shift each lane + blend + split.

    { ISD::MUL,  MVT::v4i32,       2 }  // pmulld (Nehalem from agner.org)
  };

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    { ISD::SHL,  MVT::v16i8,      26 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v8i16,      32 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v4i32,     2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v2i64,       4 }, // splat+shuffle sequence.
    { ISD::SHL,  MVT::v4i64,   2*4+2 }, // splat+shuffle sequence + split.

    { ISD::SRL,  MVT::v16i8,      26 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v8i16,      32 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v4i32,      16 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v2i64,       4 }, // splat+shuffle sequence.
    { ISD::SRL,  MVT::v4i64,   2*4+2 }, // splat+shuffle sequence + split.

    { ISD::SRA,  MVT::v16i8,      54 }, // unpacked cmpgtb sequence.
    { ISD::SRA,  MVT::v8i16,      32 }, // cmpgtb sequence.
    { ISD::SRA,  MVT::v4i32,      16 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v2i64,      12 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,  2*12+2 }, // srl/xor/sub sequence+split.

    { ISD::MUL,  MVT::v16i8,      12 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v8i16,       1 }, // pmullw
    { ISD::MUL,  MVT::v4i32,       6 }, // 3*pmuludq/4*shuffle
    { ISD::MUL,  MVT::v2i64,       8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FDIV, MVT::f32,        23 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,      39 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::f64,        38 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,      69 }, // Pentium IV from http://www.agner.org/
  };

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE1CostTable[] = {
    { ISD::FDIV, MVT::f32,   17 }, // Pentium III from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32, 34 }, // Pentium III from http://www.agner.org/
  };

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  // It is not a good idea to vectorize division. We have to scalarize it and
  // in the process we will often end up having to spill regular registers.
  // The overhead of division is going to dominate most kernels anyway, so try
  // hard to prevent vectorization of division - it is generally a bad idea.
  // Assume somewhat arbitrarily that we have to be able to hide "20 cycles"
  // for each lane.
  if ((ISD == ISD::SDIV || ISD == ISD::UDIV) && LT.second.isVector()) {
    int ScalarCost = getArithmeticInstrCost(
        Opcode, Ty->getScalarType(), Op1Info, Op2Info,
        TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
    return 20 * LT.first * LT.second.getVectorNumElements() * ScalarCost;
  }

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info);
}

int X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // 64-bit packed float vectors (v2f32) are widened to type v4f32.
  // 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

  // For broadcasts we are splatting the first element from the first input
  // register, so we only need to reference that input; all of the output
  // registers are the same.
  if (Kind == TTI::SK_Broadcast)
    LT.first = 1;

  // We are going to permute multiple sources and the result will end up in
  // multiple destinations. Provide an accurate cost only for splits where
  // the element type remains the same.
  if (Kind == TTI::SK_PermuteSingleSrc && LT.first != 1) {
    MVT LegalVT = LT.second;
    if (LegalVT.isVector() &&
        LegalVT.getVectorElementType().getSizeInBits() ==
            Tp->getVectorElementType()->getPrimitiveSizeInBits() &&
        LegalVT.getVectorNumElements() < Tp->getVectorNumElements()) {

      unsigned VecTySize = DL.getTypeStoreSize(Tp);
      unsigned LegalVTSize = LegalVT.getStoreSize();
      // Number of source vectors after legalization:
      unsigned NumOfSrcs = (VecTySize + LegalVTSize - 1) / LegalVTSize;
      // Number of destination vectors after legalization:
      unsigned NumOfDests = LT.first;

      Type *SingleOpTy = VectorType::get(Tp->getVectorElementType(),
                                         LegalVT.getVectorNumElements());

      unsigned NumOfShuffles = (NumOfSrcs - 1) * NumOfDests;
      return NumOfShuffles *
             getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy, 0, nullptr);
    }

    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }

  // For 2-input shuffles, we must account for splitting the 2 inputs into many.
  if (Kind == TTI::SK_PermuteTwoSrc && LT.first != 1) {
    // We assume that source and destination have the same vector type.
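    // A rough sketch of the assumption behind the numbers below: after
    // legalization each of the two sources is split into LT.first registers,
    // so building every destination register may need to combine pieces from
    // up to 2 * LT.first source registers, i.e. roughly (2 * LT.first - 1)
    // two-input shuffles per destination.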
    int NumOfDests = LT.first;
    int NumOfShufflesPerDest = LT.first * 2 - 1;
    LT.first = NumOfDests * NumOfShufflesPerDest;
  }

  static const CostTblEntry AVX512VBMIShuffleTbl[] = {
    { TTI::SK_Reverse,          MVT::v64i8, 1 }, // vpermb
    { TTI::SK_Reverse,          MVT::v32i8, 1 }, // vpermb

    { TTI::SK_PermuteSingleSrc, MVT::v64i8, 1 }, // vpermb
    { TTI::SK_PermuteSingleSrc, MVT::v32i8, 1 }, // vpermb

    { TTI::SK_PermuteTwoSrc,    MVT::v64i8, 1 }, // vpermt2b
    { TTI::SK_PermuteTwoSrc,    MVT::v32i8, 1 }, // vpermt2b
    { TTI::SK_PermuteTwoSrc,    MVT::v16i8, 1 }  // vpermt2b
  };

  if (ST->hasVBMI())
    if (const auto *Entry =
            CostTableLookup(AVX512VBMIShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512BWShuffleTbl[] = {
    { TTI::SK_Broadcast, MVT::v32i16, 1 }, // vpbroadcastw
    { TTI::SK_Broadcast, MVT::v64i8,  1 }, // vpbroadcastb

    { TTI::SK_Reverse, MVT::v32i16, 1 }, // vpermw
    { TTI::SK_Reverse, MVT::v16i16, 1 }, // vpermw
    { TTI::SK_Reverse, MVT::v64i8,  2 }, // pshufb + vshufi64x2

    { TTI::SK_PermuteSingleSrc, MVT::v32i16, 1 }, // vpermw
    { TTI::SK_PermuteSingleSrc, MVT::v16i16, 1 }, // vpermw
    { TTI::SK_PermuteSingleSrc, MVT::v8i16,  1 }, // vpermw
    { TTI::SK_PermuteSingleSrc, MVT::v64i8,  8 }, // extend to v32i16
    { TTI::SK_PermuteSingleSrc, MVT::v32i8,  3 }, // vpermw + zext/trunc

    { TTI::SK_PermuteTwoSrc, MVT::v32i16,  1 }, // vpermt2w
    { TTI::SK_PermuteTwoSrc, MVT::v16i16,  1 }, // vpermt2w
    { TTI::SK_PermuteTwoSrc, MVT::v8i16,   1 }, // vpermt2w
    { TTI::SK_PermuteTwoSrc, MVT::v32i8,   3 }, // zext + vpermt2w + trunc
    { TTI::SK_PermuteTwoSrc, MVT::v64i8,  19 }, // 6 * v32i8 + 1
    { TTI::SK_PermuteTwoSrc, MVT::v16i8,   3 }  // zext + vpermt2w + trunc
  };

  if (ST->hasBWI())
    if (const auto *Entry =
            CostTableLookup(AVX512BWShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512ShuffleTbl[] = {
    { TTI::SK_Broadcast, MVT::v8f64,  1 }, // vbroadcastpd
    { TTI::SK_Broadcast, MVT::v16f32, 1 }, // vbroadcastps
    { TTI::SK_Broadcast, MVT::v8i64,  1 }, // vpbroadcastq
    { TTI::SK_Broadcast, MVT::v16i32, 1 }, // vpbroadcastd

    { TTI::SK_Reverse, MVT::v8f64,  1 }, // vpermpd
    { TTI::SK_Reverse, MVT::v16f32, 1 }, // vpermps
    { TTI::SK_Reverse, MVT::v8i64,  1 }, // vpermq
    { TTI::SK_Reverse, MVT::v16i32, 1 }, // vpermd

    { TTI::SK_PermuteSingleSrc, MVT::v8f64,  1 }, // vpermpd
    { TTI::SK_PermuteSingleSrc, MVT::v4f64,  1 }, // vpermpd
    { TTI::SK_PermuteSingleSrc, MVT::v2f64,  1 }, // vpermpd
    { TTI::SK_PermuteSingleSrc, MVT::v16f32, 1 }, // vpermps
    { TTI::SK_PermuteSingleSrc, MVT::v8f32,  1 }, // vpermps
    { TTI::SK_PermuteSingleSrc, MVT::v4f32,  1 }, // vpermps
    { TTI::SK_PermuteSingleSrc, MVT::v8i64,  1 }, // vpermq
    { TTI::SK_PermuteSingleSrc, MVT::v4i64,  1 }, // vpermq
    { TTI::SK_PermuteSingleSrc, MVT::v2i64,  1 }, // vpermq
    { TTI::SK_PermuteSingleSrc, MVT::v16i32, 1 }, // vpermd
    { TTI::SK_PermuteSingleSrc, MVT::v8i32,  1 }, // vpermd
    { TTI::SK_PermuteSingleSrc, MVT::v4i32,  1 }, // vpermd
    { TTI::SK_PermuteSingleSrc, MVT::v16i8,  1 }, // pshufb

    { TTI::SK_PermuteTwoSrc, MVT::v8f64,  1 }, // vpermt2pd
    { TTI::SK_PermuteTwoSrc, MVT::v16f32, 1 }, // vpermt2ps
    { TTI::SK_PermuteTwoSrc, MVT::v8i64,  1 }, // vpermt2q
    { TTI::SK_PermuteTwoSrc, MVT::v16i32, 1 }, // vpermt2d
    { TTI::SK_PermuteTwoSrc, MVT::v4f64,  1 }, // vpermt2pd
    { TTI::SK_PermuteTwoSrc, MVT::v8f32,  1 }, // vpermt2ps
    { TTI::SK_PermuteTwoSrc, MVT::v4i64,  1 }, // vpermt2q
    { TTI::SK_PermuteTwoSrc, MVT::v8i32,  1 }, // vpermt2d
    { TTI::SK_PermuteTwoSrc, MVT::v2f64,  1 }, // vpermt2pd
    { TTI::SK_PermuteTwoSrc, MVT::v4f32,  1 }, // vpermt2ps
    { TTI::SK_PermuteTwoSrc, MVT::v2i64,  1 }, // vpermt2q
    { TTI::SK_PermuteTwoSrc, MVT::v4i32,  1 }  // vpermt2d
  };

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX2ShuffleTbl[] = {
    { TTI::SK_Broadcast, MVT::v4f64,  1 }, // vbroadcastpd
    { TTI::SK_Broadcast, MVT::v8f32,  1 }, // vbroadcastps
    { TTI::SK_Broadcast, MVT::v4i64,  1 }, // vpbroadcastq
    { TTI::SK_Broadcast, MVT::v8i32,  1 }, // vpbroadcastd
    { TTI::SK_Broadcast, MVT::v16i16, 1 }, // vpbroadcastw
    { TTI::SK_Broadcast, MVT::v32i8,  1 }, // vpbroadcastb

    { TTI::SK_Reverse, MVT::v4f64,  1 }, // vpermpd
    { TTI::SK_Reverse, MVT::v8f32,  1 }, // vpermps
    { TTI::SK_Reverse, MVT::v4i64,  1 }, // vpermq
    { TTI::SK_Reverse, MVT::v8i32,  1 }, // vpermd
    { TTI::SK_Reverse, MVT::v16i16, 2 }, // vperm2i128 + pshufb
    { TTI::SK_Reverse, MVT::v32i8,  2 }, // vperm2i128 + pshufb

    { TTI::SK_Alternate, MVT::v16i16, 1 }, // vpblendw
    { TTI::SK_Alternate, MVT::v32i8,  1 }, // vpblendvb

    { TTI::SK_PermuteSingleSrc, MVT::v4f64,  1 }, // vpermpd
    { TTI::SK_PermuteSingleSrc, MVT::v8f32,  1 }, // vpermps
    { TTI::SK_PermuteSingleSrc, MVT::v4i64,  1 }, // vpermq
    { TTI::SK_PermuteSingleSrc, MVT::v8i32,  1 }, // vpermd
    { TTI::SK_PermuteSingleSrc, MVT::v16i16, 4 }, // vperm2i128 + 2*vpshufb
                                                  // + vpblendvb
    { TTI::SK_PermuteSingleSrc, MVT::v32i8,  4 }, // vperm2i128 + 2*vpshufb
                                                  // + vpblendvb

    { TTI::SK_PermuteTwoSrc, MVT::v4f64,  3 }, // 2*vpermpd + vblendpd
    { TTI::SK_PermuteTwoSrc, MVT::v8f32,  3 }, // 2*vpermps + vblendps
    { TTI::SK_PermuteTwoSrc, MVT::v4i64,  3 }, // 2*vpermq + vpblendd
    { TTI::SK_PermuteTwoSrc, MVT::v8i32,  3 }, // 2*vpermd + vpblendd
    { TTI::SK_PermuteTwoSrc, MVT::v16i16, 7 }, // 2*vperm2i128 + 4*vpshufb
                                               // + vpblendvb
    { TTI::SK_PermuteTwoSrc, MVT::v32i8,  7 }, // 2*vperm2i128 + 4*vpshufb
                                               // + vpblendvb
  };

  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry XOPShuffleTbl[] = {
    { TTI::SK_PermuteSingleSrc, MVT::v4f64,  2 }, // vperm2f128 + vpermil2pd
    { TTI::SK_PermuteSingleSrc, MVT::v8f32,  2 }, // vperm2f128 + vpermil2ps
    { TTI::SK_PermuteSingleSrc, MVT::v4i64,  2 }, // vperm2f128 + vpermil2pd
    { TTI::SK_PermuteSingleSrc, MVT::v8i32,  2 }, // vperm2f128 + vpermil2ps
    { TTI::SK_PermuteSingleSrc, MVT::v16i16, 4 }, // vextractf128 + 2*vpperm
                                                  // + vinsertf128
    { TTI::SK_PermuteSingleSrc, MVT::v32i8,  4 }, // vextractf128 + 2*vpperm
                                                  // + vinsertf128

    { TTI::SK_PermuteTwoSrc, MVT::v16i16, 9 }, // 2*vextractf128 + 6*vpperm
                                               // + vinsertf128
    { TTI::SK_PermuteTwoSrc, MVT::v8i16,  1 }, // vpperm
    { TTI::SK_PermuteTwoSrc, MVT::v32i8,  9 }, // 2*vextractf128 + 6*vpperm
                                               // + vinsertf128
    { TTI::SK_PermuteTwoSrc, MVT::v16i8,  1 }, // vpperm
  };

  if (ST->hasXOP())
    if (const auto *Entry = CostTableLookup(XOPShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX1ShuffleTbl[] = {
    { TTI::SK_Broadcast, MVT::v4f64,  2 }, // vperm2f128 + vpermilpd
    { TTI::SK_Broadcast, MVT::v8f32,  2 }, // vperm2f128 + vpermilps
    { TTI::SK_Broadcast, MVT::v4i64,  2 }, // vperm2f128 + vpermilpd
    { TTI::SK_Broadcast, MVT::v8i32,  2 }, // vperm2f128 + vpermilps
    { TTI::SK_Broadcast, MVT::v16i16, 3 }, // vpshuflw + vpshufd + vinsertf128
    { TTI::SK_Broadcast, MVT::v32i8,  2 }, // vpshufb + vinsertf128

    { TTI::SK_Reverse, MVT::v4f64,  2 }, // vperm2f128 + vpermilpd
    { TTI::SK_Reverse, MVT::v8f32,  2 }, // vperm2f128 + vpermilps
    { TTI::SK_Reverse, MVT::v4i64,  2 }, // vperm2f128 + vpermilpd
    { TTI::SK_Reverse, MVT::v8i32,  2 }, // vperm2f128 + vpermilps
    { TTI::SK_Reverse, MVT::v16i16, 4 }, // vextractf128 + 2*pshufb
                                         // + vinsertf128
    { TTI::SK_Reverse, MVT::v32i8,  4 }, // vextractf128 + 2*pshufb
                                         // + vinsertf128

    { TTI::SK_Alternate, MVT::v4i64,  1 }, // vblendpd
    { TTI::SK_Alternate, MVT::v4f64,  1 }, // vblendpd
    { TTI::SK_Alternate, MVT::v8i32,  1 }, // vblendps
    { TTI::SK_Alternate, MVT::v8f32,  1 }, // vblendps
    { TTI::SK_Alternate, MVT::v16i16, 3 }, // vpand + vpandn + vpor
    { TTI::SK_Alternate, MVT::v32i8,  3 }, // vpand + vpandn + vpor

    { TTI::SK_PermuteSingleSrc, MVT::v4f64,  3 }, // 2*vperm2f128 + vshufpd
    { TTI::SK_PermuteSingleSrc, MVT::v4i64,  3 }, // 2*vperm2f128 + vshufpd
    { TTI::SK_PermuteSingleSrc, MVT::v8f32,  4 }, // 2*vperm2f128 + 2*vshufps
    { TTI::SK_PermuteSingleSrc, MVT::v8i32,  4 }, // 2*vperm2f128 + 2*vshufps
    { TTI::SK_PermuteSingleSrc, MVT::v16i16, 8 }, // vextractf128 + 4*pshufb
                                                  // + 2*por + vinsertf128
    { TTI::SK_PermuteSingleSrc, MVT::v32i8,  8 }, // vextractf128 + 4*pshufb
                                                  // + 2*por + vinsertf128

    { TTI::SK_PermuteTwoSrc, MVT::v4f64,   4 }, // 2*vperm2f128 + 2*vshufpd
    { TTI::SK_PermuteTwoSrc, MVT::v8f32,   4 }, // 2*vperm2f128 + 2*vshufps
    { TTI::SK_PermuteTwoSrc, MVT::v4i64,   4 }, // 2*vperm2f128 + 2*vshufpd
    { TTI::SK_PermuteTwoSrc, MVT::v8i32,   4 }, // 2*vperm2f128 + 2*vshufps
    { TTI::SK_PermuteTwoSrc, MVT::v16i16, 15 }, // 2*vextractf128 + 8*pshufb
                                                // + 4*por + vinsertf128
    { TTI::SK_PermuteTwoSrc, MVT::v32i8,  15 }, // 2*vextractf128 + 8*pshufb
                                                // + 4*por + vinsertf128
  };

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE41ShuffleTbl[] = {
    { TTI::SK_Alternate, MVT::v2i64, 1 }, // pblendw
    { TTI::SK_Alternate, MVT::v2f64, 1 }, // movsd
    { TTI::SK_Alternate, MVT::v4i32, 1 }, // pblendw
    { TTI::SK_Alternate, MVT::v4f32, 1 }, // blendps
    { TTI::SK_Alternate, MVT::v8i16, 1 }, // pblendw
    { TTI::SK_Alternate, MVT::v16i8, 1 }  // pblendvb
  };

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSSE3ShuffleTbl[] = {
    { TTI::SK_Broadcast, MVT::v8i16, 1 }, // pshufb
    { TTI::SK_Broadcast, MVT::v16i8, 1 }, // pshufb

    { TTI::SK_Reverse, MVT::v8i16, 1 }, // pshufb
    { TTI::SK_Reverse, MVT::v16i8, 1 }, // pshufb

    { TTI::SK_Alternate, MVT::v8i16, 3 }, // 2*pshufb + por
    { TTI::SK_Alternate, MVT::v16i8, 3 }, // 2*pshufb + por

    { TTI::SK_PermuteSingleSrc, MVT::v8i16, 1 }, // pshufb
    { TTI::SK_PermuteSingleSrc, MVT::v16i8, 1 }, // pshufb

    { TTI::SK_PermuteTwoSrc, MVT::v8i16, 3 }, // 2*pshufb + por
    { TTI::SK_PermuteTwoSrc, MVT::v16i8, 3 }, // 2*pshufb + por
  };

  if (ST->hasSSSE3())
    if (const auto *Entry = CostTableLookup(SSSE3ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE2ShuffleTbl[] = {
    { TTI::SK_Broadcast, MVT::v2f64, 1 }, // shufpd
    { TTI::SK_Broadcast, MVT::v2i64, 1 }, // pshufd
    { TTI::SK_Broadcast, MVT::v4i32, 1 }, // pshufd
    { TTI::SK_Broadcast, MVT::v8i16, 2 }, // pshuflw + pshufd
    { TTI::SK_Broadcast, MVT::v16i8, 3 }, // unpck + pshuflw + pshufd

    { TTI::SK_Reverse, MVT::v2f64, 1 }, // shufpd
    { TTI::SK_Reverse, MVT::v2i64, 1 }, // pshufd
    { TTI::SK_Reverse, MVT::v4i32, 1 }, // pshufd
    { TTI::SK_Reverse, MVT::v8i16, 3 }, // pshuflw + pshufhw + pshufd
    { TTI::SK_Reverse, MVT::v16i8, 9 }, // 2*pshuflw + 2*pshufhw
                                        // + 2*pshufd + 2*unpck + packus

    { TTI::SK_Alternate, MVT::v2i64, 1 }, // movsd
    { TTI::SK_Alternate, MVT::v2f64, 1 }, // movsd
    { TTI::SK_Alternate, MVT::v4i32, 2 }, // 2*shufps
    { TTI::SK_Alternate, MVT::v8i16, 3 }, // pand + pandn + por
    { TTI::SK_Alternate, MVT::v16i8, 3 }, // pand + pandn + por

    { TTI::SK_PermuteSingleSrc, MVT::v2f64,  1 }, // shufpd
    { TTI::SK_PermuteSingleSrc, MVT::v2i64,  1 }, // pshufd
    { TTI::SK_PermuteSingleSrc, MVT::v4i32,  1 }, // pshufd
    { TTI::SK_PermuteSingleSrc, MVT::v8i16,  5 }, // 2*pshuflw + 2*pshufhw
                                                  // + pshufd/unpck
    { TTI::SK_PermuteSingleSrc, MVT::v16i8, 10 }, // 2*pshuflw + 2*pshufhw
                                                  // + 2*pshufd + 2*unpck
                                                  // + 2*packus

    { TTI::SK_PermuteTwoSrc, MVT::v2f64,  1 }, // shufpd
    { TTI::SK_PermuteTwoSrc, MVT::v2i64,  1 }, // shufpd
    { TTI::SK_PermuteTwoSrc, MVT::v4i32,  2 }, // 2*{unpck,movsd,pshufd}
    { TTI::SK_PermuteTwoSrc, MVT::v8i16,  8 }, // blend+permute
    { TTI::SK_PermuteTwoSrc, MVT::v16i8, 13 }, // blend+permute
  };

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE1ShuffleTbl[] = {
    { TTI::SK_Broadcast,        MVT::v4f32, 1 }, // shufps
    { TTI::SK_Reverse,          MVT::v4f32, 1 }, // shufps
    { TTI::SK_Alternate,        MVT::v4f32, 2 }, // 2*shufps
    { TTI::SK_PermuteSingleSrc, MVT::v4f32, 1 }, // shufps
    { TTI::SK_PermuteTwoSrc,    MVT::v4f32, 2 }, // 2*shufps
  };

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}

int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                 const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // FIXME: Need a better design of the cost table to handle non-simple types
  // with potentially massive combinations (elem_num x src_type x dst_type).

  static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = {
    { ISD::SINT_TO_FP,  MVT::v2f32,  MVT::v2i64,  1 },
    { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i64,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  1 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i64,  1 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i64,  1 },

    { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i64,  1 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  1 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i64,  1 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  1 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i64,  1 },
    { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i64,  1 },

    { ISD::FP_TO_SINT,  MVT::v2i64,  MVT::v2f32,  1 },
    { ISD::FP_TO_SINT,  MVT::v4i64,  MVT::v4f32,  1 },
    { ISD::FP_TO_SINT,  MVT::v8i64,  MVT::v8f32,  1 },
    { ISD::FP_TO_SINT,  MVT::v2i64,  MVT::v2f64,  1 },
    { ISD::FP_TO_SINT,  MVT::v4i64,  MVT::v4f64,  1 },
    { ISD::FP_TO_SINT,  MVT::v8i64,  MVT::v8f64,  1 },

    { ISD::FP_TO_UINT,  MVT::v2i64,  MVT::v2f32,  1 },
    { ISD::FP_TO_UINT,  MVT::v4i64,  MVT::v4f32,  1 },
    { ISD::FP_TO_UINT,  MVT::v8i64,  MVT::v8f32,  1 },
    { ISD::FP_TO_UINT,  MVT::v2i64,  MVT::v2f64,  1 },
    { ISD::FP_TO_UINT,  MVT::v4i64,  MVT::v4f64,  1 },
    { ISD::FP_TO_UINT,  MVT::v8i64,  MVT::v8f64,  1 },
  };

  // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and
  // 256-bit wide vectors.

  static const TypeConversionCostTblEntry AVX512FConversionTbl[] = {
    { ISD::FP_EXTEND, MVT::v8f64,  MVT::v8f32,  1 },
    { ISD::FP_EXTEND, MVT::v8f64,  MVT::v16f32, 3 },
    { ISD::FP_ROUND,  MVT::v8f32,  MVT::v8f64,  1 },

    { ISD::TRUNCATE,  MVT::v16i8,  MVT::v16i32, 1 },
    { ISD::TRUNCATE,  MVT::v16i16, MVT::v16i32, 1 },
    { ISD::TRUNCATE,  MVT::v8i16,  MVT::v8i64,  1 },
    { ISD::TRUNCATE,  MVT::v8i32,  MVT::v8i64,  1 },

    // v16i1 -> v16i32 - load + broadcast
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1,  2 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i32,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i32,  1 },

    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i1,   4 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i1,  3 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i8,   2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i8,  2 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i16,  2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  1 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i64, 26 },
    { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i64, 26 },

    { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i1,   4 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i1,  3 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i8,  2 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i16,  5 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i32,  2 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i32,  1 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  1 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  1 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  1 },
    { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  1 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i32, 1 },
    { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i64,  5 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  5 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64, 12 },
    { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i64, 26 },

    { ISD::FP_TO_UINT,  MVT::v2i32,  MVT::v2f32,  1 },
    { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f32,  1 },
    { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f32,  1 },
    { ISD::FP_TO_UINT,  MVT::v8i16,  MVT::v8f64,  2 },
    { ISD::FP_TO_UINT,  MVT::v8i8,   MVT::v8f64,  2 },
    { ISD::FP_TO_UINT,  MVT::v16i32, MVT::v16f32, 1 },
    { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v16f32, 2 },
    { ISD::FP_TO_UINT,  MVT::v16i8,  MVT::v16f32, 2 },
  };

  static const TypeConversionCostTblEntry AVX2ConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  2 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  2 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  4 },

    { ISD::FP_EXTEND,   MVT::v8f64,  MVT::v8f32,  3 },
    { ISD::FP_ROUND,    MVT::v8f32,  MVT::v8f64,  3 },

    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  8 },
  };

  static const TypeConversionCostTblEntry AVXConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   4 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  4 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  4 },

    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 4 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  4 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  5 },
    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  9 },

    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   3 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   8 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i8,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   3 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   8 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i16,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  3 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  5 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  1 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  1 },

    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   7 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   7 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   6 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   5 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  5 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i32,  6 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  6 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  6 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  9 },
    // The generic code to compute the scalar overhead is currently broken.
    // Workaround this limitation by estimating the scalarization overhead
    // here. We have roughly 10 instructions per scalar element.
    // Multiply that by the vector width.
    // FIXME: remove that when PR19268 is fixed.
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64, 10 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64, 20 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i64, 13 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i64, 13 },

    { ISD::FP_TO_SINT,  MVT::v4i8,   MVT::v4f32,  1 },
    { ISD::FP_TO_SINT,  MVT::v8i8,   MVT::v8f32,  7 },
    // This node is expanded into scalarized operations but BasicTTI is overly
    // optimistic estimating its cost. It computes 3 per element (one
    // vector-extract, one scalar conversion and one vector-insert). The
    // problem is that the inserts form a read-modify-write chain so latency
    // should be factored in too. Inflating the cost per element by 1.
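    // For example, the v8f32 -> v8i32 entry below assumes 8 elements at
    // (3 + 1) = 4 each, i.e. 8*4.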
    { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f32, 8*4 },
    { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f64, 4*4 },

    { ISD::FP_EXTEND,   MVT::v4f64,  MVT::v4f32,  1 },
    { ISD::FP_ROUND,    MVT::v4f32,  MVT::v4f64,  1 },
  };

  static const TypeConversionCostTblEntry SSE41ConversionTbl[] = {
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   2 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   2 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  2 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  2 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  2 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  2 },

    { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i8,   2 },
    { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i8,   1 },
    { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i8,   1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   2 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   2 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  2 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  4 },
    { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i16,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  2 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 4 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 4 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i16,  2 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i16,  1 },
    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i32,  1 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i32,  1 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  3 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  3 },
    { ISD::TRUNCATE,    MVT::v16i16, MVT::v16i32, 6 },
  };

  static const TypeConversionCostTblEntry SSE2ConversionTbl[] = {
    // These are somewhat magic numbers justified by looking at the output of
    // Intel's IACA, running some kernels and making sure when we take
    // legalization into account the throughput will be overestimated.
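    // The N*10 entries below appear to follow the same rough estimate of
    // ~10 instructions per scalarized element noted for PR19268 above.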
1353 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 }, 1354 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 }, 1355 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 }, 1356 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 }, 1357 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 }, 1358 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 }, 1359 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 }, 1360 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 }, 1361 1362 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 }, 1363 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 }, 1364 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 }, 1365 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 }, 1366 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 }, 1367 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 8 }, 1368 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 }, 1369 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 }, 1370 1371 { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 3 }, 1372 1373 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i8, 1 }, 1374 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i8, 6 }, 1375 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 }, 1376 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 3 }, 1377 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 4 }, 1378 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 8 }, 1379 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 }, 1380 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 2 }, 1381 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 6 }, 1382 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 6 }, 1383 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 3 }, 1384 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 }, 1385 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 9 }, 1386 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 12 }, 1387 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 }, 1388 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 2 }, 1389 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, 1390 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 10 }, 1391 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 3 }, 1392 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 }, 1393 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 6 }, 1394 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 8 }, 1395 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 3 }, 1396 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 5 }, 1397 1398 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 4 }, 1399 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 2 }, 1400 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 }, 1401 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 3 }, 1402 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 3 }, 1403 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 }, 1404 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 7 }, 1405 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 }, 1406 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 10 }, 1407 }; 1408 1409 std::pair<int, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src); 1410 std::pair<int, MVT> LTDest = TLI->getTypeLegalizationCost(DL, Dst); 1411 1412 if (ST->hasSSE2() && !ST->hasAVX()) { 1413 if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD, 1414 LTDest.second, LTSrc.second)) 1415 return LTSrc.first * Entry->Cost; 1416 } 1417 1418 EVT SrcTy = TLI->getValueType(DL, Src); 1419 EVT DstTy = TLI->getValueType(DL, Dst); 1420 1421 // The function getSimpleVT only handles simple value types. 
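// If either type is not simple we fall back to the generic cost; otherwise the
// tables below are consulted from the most specific feature level (DQI,
// AVX-512, AVX2, AVX, SSE4.1, SSE2) down to the base implementation.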
1422 if (!SrcTy.isSimple() || !DstTy.isSimple()) 1423 return BaseT::getCastInstrCost(Opcode, Dst, Src); 1424 1425 if (ST->hasDQI()) 1426 if (const auto *Entry = ConvertCostTableLookup(AVX512DQConversionTbl, ISD, 1427 DstTy.getSimpleVT(), 1428 SrcTy.getSimpleVT())) 1429 return Entry->Cost; 1430 1431 if (ST->hasAVX512()) 1432 if (const auto *Entry = ConvertCostTableLookup(AVX512FConversionTbl, ISD, 1433 DstTy.getSimpleVT(), 1434 SrcTy.getSimpleVT())) 1435 return Entry->Cost; 1436 1437 if (ST->hasAVX2()) { 1438 if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD, 1439 DstTy.getSimpleVT(), 1440 SrcTy.getSimpleVT())) 1441 return Entry->Cost; 1442 } 1443 1444 if (ST->hasAVX()) { 1445 if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD, 1446 DstTy.getSimpleVT(), 1447 SrcTy.getSimpleVT())) 1448 return Entry->Cost; 1449 } 1450 1451 if (ST->hasSSE41()) { 1452 if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD, 1453 DstTy.getSimpleVT(), 1454 SrcTy.getSimpleVT())) 1455 return Entry->Cost; 1456 } 1457 1458 if (ST->hasSSE2()) { 1459 if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD, 1460 DstTy.getSimpleVT(), 1461 SrcTy.getSimpleVT())) 1462 return Entry->Cost; 1463 } 1464 1465 return BaseT::getCastInstrCost(Opcode, Dst, Src, I); 1466 } 1467 1468 int X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, 1469 const Instruction *I) { 1470 // Legalize the type. 1471 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy); 1472 1473 MVT MTy = LT.second; 1474 1475 int ISD = TLI->InstructionOpcodeToISD(Opcode); 1476 assert(ISD && "Invalid opcode"); 1477 1478 static const CostTblEntry SSE2CostTbl[] = { 1479 { ISD::SETCC, MVT::v2i64, 8 }, 1480 { ISD::SETCC, MVT::v4i32, 1 }, 1481 { ISD::SETCC, MVT::v8i16, 1 }, 1482 { ISD::SETCC, MVT::v16i8, 1 }, 1483 }; 1484 1485 static const CostTblEntry SSE42CostTbl[] = { 1486 { ISD::SETCC, MVT::v2f64, 1 }, 1487 { ISD::SETCC, MVT::v4f32, 1 }, 1488 { ISD::SETCC, MVT::v2i64, 1 }, 1489 }; 1490 1491 static const CostTblEntry AVX1CostTbl[] = { 1492 { ISD::SETCC, MVT::v4f64, 1 }, 1493 { ISD::SETCC, MVT::v8f32, 1 }, 1494 // AVX1 does not support 8-wide integer compare. 
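// Each 256-bit integer compare is therefore split into two 128-bit compares
// plus extract/insert, which is roughly where the cost of 4 below comes from.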
1495 { ISD::SETCC, MVT::v4i64, 4 }, 1496 { ISD::SETCC, MVT::v8i32, 4 }, 1497 { ISD::SETCC, MVT::v16i16, 4 }, 1498 { ISD::SETCC, MVT::v32i8, 4 }, 1499 }; 1500 1501 static const CostTblEntry AVX2CostTbl[] = { 1502 { ISD::SETCC, MVT::v4i64, 1 }, 1503 { ISD::SETCC, MVT::v8i32, 1 }, 1504 { ISD::SETCC, MVT::v16i16, 1 }, 1505 { ISD::SETCC, MVT::v32i8, 1 }, 1506 }; 1507 1508 static const CostTblEntry AVX512CostTbl[] = { 1509 { ISD::SETCC, MVT::v8i64, 1 }, 1510 { ISD::SETCC, MVT::v16i32, 1 }, 1511 { ISD::SETCC, MVT::v8f64, 1 }, 1512 { ISD::SETCC, MVT::v16f32, 1 }, 1513 }; 1514 1515 static const CostTblEntry AVX512BWCostTbl[] = { 1516 { ISD::SETCC, MVT::v32i16, 1 }, 1517 { ISD::SETCC, MVT::v64i8, 1 }, 1518 }; 1519 1520 if (ST->hasBWI()) 1521 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy)) 1522 return LT.first * Entry->Cost; 1523 1524 if (ST->hasAVX512()) 1525 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy)) 1526 return LT.first * Entry->Cost; 1527 1528 if (ST->hasAVX2()) 1529 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy)) 1530 return LT.first * Entry->Cost; 1531 1532 if (ST->hasAVX()) 1533 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy)) 1534 return LT.first * Entry->Cost; 1535 1536 if (ST->hasSSE42()) 1537 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy)) 1538 return LT.first * Entry->Cost; 1539 1540 if (ST->hasSSE2()) 1541 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy)) 1542 return LT.first * Entry->Cost; 1543 1544 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I); 1545 } 1546 1547 unsigned X86TTIImpl::getAtomicMemIntrinsicMaxElementSize() const { return 16; } 1548 1549 int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy, 1550 ArrayRef<Type *> Tys, FastMathFlags FMF, 1551 unsigned ScalarizationCostPassed) { 1552 // Costs should match the codegen from: 1553 // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll 1554 // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll 1555 // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll 1556 // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll 1557 // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll 1558 static const CostTblEntry AVX512CDCostTbl[] = { 1559 { ISD::CTLZ, MVT::v8i64, 1 }, 1560 { ISD::CTLZ, MVT::v16i32, 1 }, 1561 { ISD::CTLZ, MVT::v32i16, 8 }, 1562 { ISD::CTLZ, MVT::v64i8, 20 }, 1563 { ISD::CTLZ, MVT::v4i64, 1 }, 1564 { ISD::CTLZ, MVT::v8i32, 1 }, 1565 { ISD::CTLZ, MVT::v16i16, 4 }, 1566 { ISD::CTLZ, MVT::v32i8, 10 }, 1567 { ISD::CTLZ, MVT::v2i64, 1 }, 1568 { ISD::CTLZ, MVT::v4i32, 1 }, 1569 { ISD::CTLZ, MVT::v8i16, 4 }, 1570 { ISD::CTLZ, MVT::v16i8, 4 }, 1571 }; 1572 static const CostTblEntry AVX512BWCostTbl[] = { 1573 { ISD::BITREVERSE, MVT::v8i64, 5 }, 1574 { ISD::BITREVERSE, MVT::v16i32, 5 }, 1575 { ISD::BITREVERSE, MVT::v32i16, 5 }, 1576 { ISD::BITREVERSE, MVT::v64i8, 5 }, 1577 { ISD::CTLZ, MVT::v8i64, 23 }, 1578 { ISD::CTLZ, MVT::v16i32, 22 }, 1579 { ISD::CTLZ, MVT::v32i16, 18 }, 1580 { ISD::CTLZ, MVT::v64i8, 17 }, 1581 { ISD::CTPOP, MVT::v8i64, 7 }, 1582 { ISD::CTPOP, MVT::v16i32, 11 }, 1583 { ISD::CTPOP, MVT::v32i16, 9 }, 1584 { ISD::CTPOP, MVT::v64i8, 6 }, 1585 { ISD::CTTZ, MVT::v8i64, 10 }, 1586 { ISD::CTTZ, MVT::v16i32, 14 }, 1587 { ISD::CTTZ, MVT::v32i16, 12 }, 1588 { ISD::CTTZ, MVT::v64i8, 9 }, 1589 }; 1590 static const CostTblEntry AVX512CostTbl[] = { 1591 { ISD::BITREVERSE, MVT::v8i64, 36 }, 1592 { ISD::BITREVERSE, MVT::v16i32, 24 }, 1593 { ISD::CTLZ, MVT::v8i64, 29 }, 1594 { ISD::CTLZ, MVT::v16i32, 35 }, 1595 { 
ISD::CTPOP, MVT::v8i64, 16 }, 1596 { ISD::CTPOP, MVT::v16i32, 24 }, 1597 { ISD::CTTZ, MVT::v8i64, 20 }, 1598 { ISD::CTTZ, MVT::v16i32, 28 }, 1599 }; 1600 static const CostTblEntry XOPCostTbl[] = { 1601 { ISD::BITREVERSE, MVT::v4i64, 4 }, 1602 { ISD::BITREVERSE, MVT::v8i32, 4 }, 1603 { ISD::BITREVERSE, MVT::v16i16, 4 }, 1604 { ISD::BITREVERSE, MVT::v32i8, 4 }, 1605 { ISD::BITREVERSE, MVT::v2i64, 1 }, 1606 { ISD::BITREVERSE, MVT::v4i32, 1 }, 1607 { ISD::BITREVERSE, MVT::v8i16, 1 }, 1608 { ISD::BITREVERSE, MVT::v16i8, 1 }, 1609 { ISD::BITREVERSE, MVT::i64, 3 }, 1610 { ISD::BITREVERSE, MVT::i32, 3 }, 1611 { ISD::BITREVERSE, MVT::i16, 3 }, 1612 { ISD::BITREVERSE, MVT::i8, 3 } 1613 }; 1614 static const CostTblEntry AVX2CostTbl[] = { 1615 { ISD::BITREVERSE, MVT::v4i64, 5 }, 1616 { ISD::BITREVERSE, MVT::v8i32, 5 }, 1617 { ISD::BITREVERSE, MVT::v16i16, 5 }, 1618 { ISD::BITREVERSE, MVT::v32i8, 5 }, 1619 { ISD::BSWAP, MVT::v4i64, 1 }, 1620 { ISD::BSWAP, MVT::v8i32, 1 }, 1621 { ISD::BSWAP, MVT::v16i16, 1 }, 1622 { ISD::CTLZ, MVT::v4i64, 23 }, 1623 { ISD::CTLZ, MVT::v8i32, 18 }, 1624 { ISD::CTLZ, MVT::v16i16, 14 }, 1625 { ISD::CTLZ, MVT::v32i8, 9 }, 1626 { ISD::CTPOP, MVT::v4i64, 7 }, 1627 { ISD::CTPOP, MVT::v8i32, 11 }, 1628 { ISD::CTPOP, MVT::v16i16, 9 }, 1629 { ISD::CTPOP, MVT::v32i8, 6 }, 1630 { ISD::CTTZ, MVT::v4i64, 10 }, 1631 { ISD::CTTZ, MVT::v8i32, 14 }, 1632 { ISD::CTTZ, MVT::v16i16, 12 }, 1633 { ISD::CTTZ, MVT::v32i8, 9 }, 1634 { ISD::FSQRT, MVT::f32, 7 }, // Haswell from http://www.agner.org/ 1635 { ISD::FSQRT, MVT::v4f32, 7 }, // Haswell from http://www.agner.org/ 1636 { ISD::FSQRT, MVT::v8f32, 14 }, // Haswell from http://www.agner.org/ 1637 { ISD::FSQRT, MVT::f64, 14 }, // Haswell from http://www.agner.org/ 1638 { ISD::FSQRT, MVT::v2f64, 14 }, // Haswell from http://www.agner.org/ 1639 { ISD::FSQRT, MVT::v4f64, 28 }, // Haswell from http://www.agner.org/ 1640 }; 1641 static const CostTblEntry AVX1CostTbl[] = { 1642 { ISD::BITREVERSE, MVT::v4i64, 12 }, // 2 x 128-bit Op + extract/insert 1643 { ISD::BITREVERSE, MVT::v8i32, 12 }, // 2 x 128-bit Op + extract/insert 1644 { ISD::BITREVERSE, MVT::v16i16, 12 }, // 2 x 128-bit Op + extract/insert 1645 { ISD::BITREVERSE, MVT::v32i8, 12 }, // 2 x 128-bit Op + extract/insert 1646 { ISD::BSWAP, MVT::v4i64, 4 }, 1647 { ISD::BSWAP, MVT::v8i32, 4 }, 1648 { ISD::BSWAP, MVT::v16i16, 4 }, 1649 { ISD::CTLZ, MVT::v4i64, 48 }, // 2 x 128-bit Op + extract/insert 1650 { ISD::CTLZ, MVT::v8i32, 38 }, // 2 x 128-bit Op + extract/insert 1651 { ISD::CTLZ, MVT::v16i16, 30 }, // 2 x 128-bit Op + extract/insert 1652 { ISD::CTLZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert 1653 { ISD::CTPOP, MVT::v4i64, 16 }, // 2 x 128-bit Op + extract/insert 1654 { ISD::CTPOP, MVT::v8i32, 24 }, // 2 x 128-bit Op + extract/insert 1655 { ISD::CTPOP, MVT::v16i16, 20 }, // 2 x 128-bit Op + extract/insert 1656 { ISD::CTPOP, MVT::v32i8, 14 }, // 2 x 128-bit Op + extract/insert 1657 { ISD::CTTZ, MVT::v4i64, 22 }, // 2 x 128-bit Op + extract/insert 1658 { ISD::CTTZ, MVT::v8i32, 30 }, // 2 x 128-bit Op + extract/insert 1659 { ISD::CTTZ, MVT::v16i16, 26 }, // 2 x 128-bit Op + extract/insert 1660 { ISD::CTTZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert 1661 { ISD::FSQRT, MVT::f32, 14 }, // SNB from http://www.agner.org/ 1662 { ISD::FSQRT, MVT::v4f32, 14 }, // SNB from http://www.agner.org/ 1663 { ISD::FSQRT, MVT::v8f32, 28 }, // SNB from http://www.agner.org/ 1664 { ISD::FSQRT, MVT::f64, 21 }, // SNB from http://www.agner.org/ 1665 { ISD::FSQRT, MVT::v2f64, 21 }, // SNB from 
http://www.agner.org/ 1666 { ISD::FSQRT, MVT::v4f64, 43 }, // SNB from http://www.agner.org/ 1667 }; 1668 static const CostTblEntry GLMCostTbl[] = { 1669 { ISD::FSQRT, MVT::f32, 19 }, // sqrtss 1670 { ISD::FSQRT, MVT::v4f32, 37 }, // sqrtps 1671 { ISD::FSQRT, MVT::f64, 34 }, // sqrtsd 1672 { ISD::FSQRT, MVT::v2f64, 67 }, // sqrtpd 1673 }; 1674 static const CostTblEntry SLMCostTbl[] = { 1675 { ISD::FSQRT, MVT::f32, 20 }, // sqrtss 1676 { ISD::FSQRT, MVT::v4f32, 40 }, // sqrtps 1677 { ISD::FSQRT, MVT::f64, 35 }, // sqrtsd 1678 { ISD::FSQRT, MVT::v2f64, 70 }, // sqrtpd 1679 }; 1680 static const CostTblEntry SSE42CostTbl[] = { 1681 { ISD::FSQRT, MVT::f32, 18 }, // Nehalem from http://www.agner.org/ 1682 { ISD::FSQRT, MVT::v4f32, 18 }, // Nehalem from http://www.agner.org/ 1683 }; 1684 static const CostTblEntry SSSE3CostTbl[] = { 1685 { ISD::BITREVERSE, MVT::v2i64, 5 }, 1686 { ISD::BITREVERSE, MVT::v4i32, 5 }, 1687 { ISD::BITREVERSE, MVT::v8i16, 5 }, 1688 { ISD::BITREVERSE, MVT::v16i8, 5 }, 1689 { ISD::BSWAP, MVT::v2i64, 1 }, 1690 { ISD::BSWAP, MVT::v4i32, 1 }, 1691 { ISD::BSWAP, MVT::v8i16, 1 }, 1692 { ISD::CTLZ, MVT::v2i64, 23 }, 1693 { ISD::CTLZ, MVT::v4i32, 18 }, 1694 { ISD::CTLZ, MVT::v8i16, 14 }, 1695 { ISD::CTLZ, MVT::v16i8, 9 }, 1696 { ISD::CTPOP, MVT::v2i64, 7 }, 1697 { ISD::CTPOP, MVT::v4i32, 11 }, 1698 { ISD::CTPOP, MVT::v8i16, 9 }, 1699 { ISD::CTPOP, MVT::v16i8, 6 }, 1700 { ISD::CTTZ, MVT::v2i64, 10 }, 1701 { ISD::CTTZ, MVT::v4i32, 14 }, 1702 { ISD::CTTZ, MVT::v8i16, 12 }, 1703 { ISD::CTTZ, MVT::v16i8, 9 } 1704 }; 1705 static const CostTblEntry SSE2CostTbl[] = { 1706 { ISD::BITREVERSE, MVT::v2i64, 29 }, 1707 { ISD::BITREVERSE, MVT::v4i32, 27 }, 1708 { ISD::BITREVERSE, MVT::v8i16, 27 }, 1709 { ISD::BITREVERSE, MVT::v16i8, 20 }, 1710 { ISD::BSWAP, MVT::v2i64, 7 }, 1711 { ISD::BSWAP, MVT::v4i32, 7 }, 1712 { ISD::BSWAP, MVT::v8i16, 7 }, 1713 { ISD::CTLZ, MVT::v2i64, 25 }, 1714 { ISD::CTLZ, MVT::v4i32, 26 }, 1715 { ISD::CTLZ, MVT::v8i16, 20 }, 1716 { ISD::CTLZ, MVT::v16i8, 17 }, 1717 { ISD::CTPOP, MVT::v2i64, 12 }, 1718 { ISD::CTPOP, MVT::v4i32, 15 }, 1719 { ISD::CTPOP, MVT::v8i16, 13 }, 1720 { ISD::CTPOP, MVT::v16i8, 10 }, 1721 { ISD::CTTZ, MVT::v2i64, 14 }, 1722 { ISD::CTTZ, MVT::v4i32, 18 }, 1723 { ISD::CTTZ, MVT::v8i16, 16 }, 1724 { ISD::CTTZ, MVT::v16i8, 13 }, 1725 { ISD::FSQRT, MVT::f64, 32 }, // Nehalem from http://www.agner.org/ 1726 { ISD::FSQRT, MVT::v2f64, 32 }, // Nehalem from http://www.agner.org/ 1727 }; 1728 static const CostTblEntry SSE1CostTbl[] = { 1729 { ISD::FSQRT, MVT::f32, 28 }, // Pentium III from http://www.agner.org/ 1730 { ISD::FSQRT, MVT::v4f32, 56 }, // Pentium III from http://www.agner.org/ 1731 }; 1732 static const CostTblEntry X64CostTbl[] = { // 64-bit targets 1733 { ISD::BITREVERSE, MVT::i64, 14 } 1734 }; 1735 static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets 1736 { ISD::BITREVERSE, MVT::i32, 14 }, 1737 { ISD::BITREVERSE, MVT::i16, 14 }, 1738 { ISD::BITREVERSE, MVT::i8, 11 } 1739 }; 1740 1741 unsigned ISD = ISD::DELETED_NODE; 1742 switch (IID) { 1743 default: 1744 break; 1745 case Intrinsic::bitreverse: 1746 ISD = ISD::BITREVERSE; 1747 break; 1748 case Intrinsic::bswap: 1749 ISD = ISD::BSWAP; 1750 break; 1751 case Intrinsic::ctlz: 1752 ISD = ISD::CTLZ; 1753 break; 1754 case Intrinsic::ctpop: 1755 ISD = ISD::CTPOP; 1756 break; 1757 case Intrinsic::cttz: 1758 ISD = ISD::CTTZ; 1759 break; 1760 case Intrinsic::sqrt: 1761 ISD = ISD::FSQRT; 1762 break; 1763 } 1764 1765 // Legalize the type. 
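// LT.first is roughly the number of legal-typed pieces RetTy splits into;
// every table hit below is scaled by it (LT.first * Entry->Cost).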
1766 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy); 1767 MVT MTy = LT.second; 1768 1769 // Attempt to lookup cost. 1770 if (ST->isGLM()) 1771 if (const auto *Entry = CostTableLookup(GLMCostTbl, ISD, MTy)) 1772 return LT.first * Entry->Cost; 1773 1774 if (ST->isSLM()) 1775 if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy)) 1776 return LT.first * Entry->Cost; 1777 1778 if (ST->hasCDI()) 1779 if (const auto *Entry = CostTableLookup(AVX512CDCostTbl, ISD, MTy)) 1780 return LT.first * Entry->Cost; 1781 1782 if (ST->hasBWI()) 1783 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy)) 1784 return LT.first * Entry->Cost; 1785 1786 if (ST->hasAVX512()) 1787 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy)) 1788 return LT.first * Entry->Cost; 1789 1790 if (ST->hasXOP()) 1791 if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy)) 1792 return LT.first * Entry->Cost; 1793 1794 if (ST->hasAVX2()) 1795 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy)) 1796 return LT.first * Entry->Cost; 1797 1798 if (ST->hasAVX()) 1799 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy)) 1800 return LT.first * Entry->Cost; 1801 1802 if (ST->hasSSE42()) 1803 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy)) 1804 return LT.first * Entry->Cost; 1805 1806 if (ST->hasSSSE3()) 1807 if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy)) 1808 return LT.first * Entry->Cost; 1809 1810 if (ST->hasSSE2()) 1811 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy)) 1812 return LT.first * Entry->Cost; 1813 1814 if (ST->hasSSE1()) 1815 if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy)) 1816 return LT.first * Entry->Cost; 1817 1818 if (ST->is64Bit()) 1819 if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy)) 1820 return LT.first * Entry->Cost; 1821 1822 if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy)) 1823 return LT.first * Entry->Cost; 1824 1825 return BaseT::getIntrinsicInstrCost(IID, RetTy, Tys, FMF, ScalarizationCostPassed); 1826 } 1827 1828 int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy, 1829 ArrayRef<Value *> Args, FastMathFlags FMF, unsigned VF) { 1830 return BaseT::getIntrinsicInstrCost(IID, RetTy, Args, FMF, VF); 1831 } 1832 1833 int X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) { 1834 assert(Val->isVectorTy() && "This must be a vector type"); 1835 1836 Type *ScalarType = Val->getScalarType(); 1837 1838 if (Index != -1U) { 1839 // Legalize the type. 1840 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val); 1841 1842 // This type is legalized to a scalar type. 1843 if (!LT.second.isVector()) 1844 return 0; 1845 1846 // The type may be split. Normalize the index to the new type. 1847 unsigned Width = LT.second.getVectorNumElements(); 1848 Index = Index % Width; 1849 1850 // Floating point scalars are already located in index #0. 1851 if (ScalarType->isFloatingPointTy() && Index == 0) 1852 return 0; 1853 } 1854 1855 // Add to the base cost if we know that the extracted element of a vector is 1856 // destined to be moved to and used in the integer register file. 
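// For example, extracting a pointer element always lands in a GPR, so one
// extra register-file move is charged below.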
1857 int RegisterFileMoveCost = 0; 1858 if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy()) 1859 RegisterFileMoveCost = 1; 1860 1861 return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost; 1862 } 1863 1864 int X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment, 1865 unsigned AddressSpace, const Instruction *I) { 1866 // Handle non-power-of-two vectors such as <3 x float> 1867 if (VectorType *VTy = dyn_cast<VectorType>(Src)) { 1868 unsigned NumElem = VTy->getVectorNumElements(); 1869 1870 // Handle a few common cases: 1871 // <3 x float> 1872 if (NumElem == 3 && VTy->getScalarSizeInBits() == 32) 1873 // Cost = 64 bit store + extract + 32 bit store. 1874 return 3; 1875 1876 // <3 x double> 1877 if (NumElem == 3 && VTy->getScalarSizeInBits() == 64) 1878 // Cost = 128 bit store + unpack + 64 bit store. 1879 return 3; 1880 1881 // Assume that all other non-power-of-two numbers are scalarized. 1882 if (!isPowerOf2_32(NumElem)) { 1883 int Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(), Alignment, 1884 AddressSpace); 1885 int SplitCost = getScalarizationOverhead(Src, Opcode == Instruction::Load, 1886 Opcode == Instruction::Store); 1887 return NumElem * Cost + SplitCost; 1888 } 1889 } 1890 1891 // Legalize the type. 1892 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src); 1893 assert((Opcode == Instruction::Load || Opcode == Instruction::Store) && 1894 "Invalid Opcode"); 1895 1896 // Each load/store unit costs 1. 1897 int Cost = LT.first * 1; 1898 1899 // This isn't exactly right. We're using slow unaligned 32-byte accesses as a 1900 // proxy for a double-pumped AVX memory interface such as on Sandybridge. 1901 if (LT.second.getStoreSize() == 32 && ST->isUnalignedMem32Slow()) 1902 Cost *= 2; 1903 1904 return Cost; 1905 } 1906 1907 int X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy, 1908 unsigned Alignment, 1909 unsigned AddressSpace) { 1910 VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy); 1911 if (!SrcVTy) 1912 // To calculate scalar take the regular cost, without mask 1913 return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace); 1914 1915 unsigned NumElem = SrcVTy->getVectorNumElements(); 1916 VectorType *MaskTy = 1917 VectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem); 1918 if ((Opcode == Instruction::Load && !isLegalMaskedLoad(SrcVTy)) || 1919 (Opcode == Instruction::Store && !isLegalMaskedStore(SrcVTy)) || 1920 !isPowerOf2_32(NumElem)) { 1921 // Scalarization 1922 int MaskSplitCost = getScalarizationOverhead(MaskTy, false, true); 1923 int ScalarCompareCost = getCmpSelInstrCost( 1924 Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr); 1925 int BranchCost = getCFInstrCost(Instruction::Br); 1926 int MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost); 1927 1928 int ValueSplitCost = getScalarizationOverhead( 1929 SrcVTy, Opcode == Instruction::Load, Opcode == Instruction::Store); 1930 int MemopCost = 1931 NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(), 1932 Alignment, AddressSpace); 1933 return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost; 1934 } 1935 1936 // Legalize the type. 1937 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy); 1938 auto VT = TLI->getValueType(DL, SrcVTy); 1939 int Cost = 0; 1940 if (VT.isSimple() && LT.second != VT.getSimpleVT() && 1941 LT.second.getVectorNumElements() == NumElem) 1942 // Promotion requires expand/truncate for data and a shuffle for mask. 
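// The two SK_Alternate shuffles below model that promotion on the data and
// mask types. Without AVX-512 each legal-width masked load/store is then
// costed at 4, so a type that legalizes into LT.first pieces adds LT.first * 4.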
1943     Cost += getShuffleCost(TTI::SK_Alternate, SrcVTy, 0, nullptr) +
1944             getShuffleCost(TTI::SK_Alternate, MaskTy, 0, nullptr);
1945
1946   else if (LT.second.getVectorNumElements() > NumElem) {
1947     VectorType *NewMaskTy = VectorType::get(MaskTy->getVectorElementType(),
1948                                             LT.second.getVectorNumElements());
1949     // Expanding requires filling the mask with zeroes.
1950     Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, 0, MaskTy);
1951   }
1952   if (!ST->hasAVX512())
1953     return Cost + LT.first*4; // Each maskmov costs 4
1954
1955   // AVX-512 masked load/store is cheaper.
1956   return Cost+LT.first;
1957 }
1958
1959 int X86TTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
1960                                           const SCEV *Ptr) {
1961   // Address computations in vectorized code with non-consecutive addresses will
1962   // likely result in more instructions compared to scalar code where the
1963   // computation can more often be merged into the index mode. The resulting
1964   // extra micro-ops can significantly decrease throughput.
1965   unsigned NumVectorInstToHideOverhead = 10;
1966
1967   // The cost of a strided access computation is hidden by the indexing
1968   // modes of X86 regardless of the stride value. We don't believe there is
1969   // a difference between a constant strided access in general and one whose
1970   // constant stride value is less than or equal to 64.
1971   // Even in the case of (loop invariant) stride whose value is not known at
1972   // compile time, the address computation will not incur more than one extra
1973   // ADD instruction.
1974   if (Ty->isVectorTy() && SE) {
1975     if (!BaseT::isStridedAccess(Ptr))
1976       return NumVectorInstToHideOverhead;
1977     if (!BaseT::getConstantStrideStep(SE, Ptr))
1978       return 1;
1979   }
1980
1981   return BaseT::getAddressComputationCost(Ty, SE, Ptr);
1982 }
1983
1984 int X86TTIImpl::getArithmeticReductionCost(unsigned Opcode, Type *ValTy,
1985                                            bool IsPairwise) {
1986
1987   std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
1988
1989   MVT MTy = LT.second;
1990
1991   int ISD = TLI->InstructionOpcodeToISD(Opcode);
1992   assert(ISD && "Invalid opcode");
1993
1994   // We use the Intel Architecture Code Analyzer (IACA) to measure the
1995   // throughput and use that as the cost.
1996
1997   static const CostTblEntry SSE42CostTblPairWise[] = {
1998     { ISD::FADD, MVT::v2f64, 2 },
1999     { ISD::FADD, MVT::v4f32, 4 },
2000     { ISD::ADD, MVT::v2i64, 2 },  // The data reported by the IACA tool is "1.6".
2001     { ISD::ADD, MVT::v4i32, 3 },  // The data reported by the IACA tool is "3.5".
2002     { ISD::ADD, MVT::v8i16, 5 },
2003   };
2004
2005   static const CostTblEntry AVX1CostTblPairWise[] = {
2006     { ISD::FADD, MVT::v4f32, 4 },
2007     { ISD::FADD, MVT::v4f64, 5 },
2008     { ISD::FADD, MVT::v8f32, 7 },
2009     { ISD::ADD, MVT::v2i64, 1 },  // The data reported by the IACA tool is "1.5".
2010     { ISD::ADD, MVT::v4i32, 3 },  // The data reported by the IACA tool is "3.5".
2011     { ISD::ADD, MVT::v4i64, 5 },  // The data reported by the IACA tool is "4.8".
2012     { ISD::ADD, MVT::v8i16, 5 },
2013     { ISD::ADD, MVT::v8i32, 5 },
2014   };
2015
2016   static const CostTblEntry SSE42CostTblNoPairWise[] = {
2017     { ISD::FADD, MVT::v2f64, 2 },
2018     { ISD::FADD, MVT::v4f32, 4 },
2019     { ISD::ADD, MVT::v2i64, 2 },  // The data reported by the IACA tool is "1.6".
2020     { ISD::ADD, MVT::v4i32, 3 },  // The data reported by the IACA tool is "3.3".
2021     { ISD::ADD, MVT::v8i16, 4 },  // The data reported by the IACA tool is "4.3".
2022 }; 2023 2024 static const CostTblEntry AVX1CostTblNoPairWise[] = { 2025 { ISD::FADD, MVT::v4f32, 3 }, 2026 { ISD::FADD, MVT::v4f64, 3 }, 2027 { ISD::FADD, MVT::v8f32, 4 }, 2028 { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5". 2029 { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "2.8". 2030 { ISD::ADD, MVT::v4i64, 3 }, 2031 { ISD::ADD, MVT::v8i16, 4 }, 2032 { ISD::ADD, MVT::v8i32, 5 }, 2033 }; 2034 2035 if (IsPairwise) { 2036 if (ST->hasAVX()) 2037 if (const auto *Entry = CostTableLookup(AVX1CostTblPairWise, ISD, MTy)) 2038 return LT.first * Entry->Cost; 2039 2040 if (ST->hasSSE42()) 2041 if (const auto *Entry = CostTableLookup(SSE42CostTblPairWise, ISD, MTy)) 2042 return LT.first * Entry->Cost; 2043 } else { 2044 if (ST->hasAVX()) 2045 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy)) 2046 return LT.first * Entry->Cost; 2047 2048 if (ST->hasSSE42()) 2049 if (const auto *Entry = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy)) 2050 return LT.first * Entry->Cost; 2051 } 2052 2053 return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwise); 2054 } 2055 2056 int X86TTIImpl::getMinMaxReductionCost(Type *ValTy, Type *CondTy, 2057 bool IsPairwise, bool IsUnsigned) { 2058 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy); 2059 2060 MVT MTy = LT.second; 2061 2062 int ISD; 2063 if (ValTy->isIntOrIntVectorTy()) { 2064 ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN; 2065 } else { 2066 assert(ValTy->isFPOrFPVectorTy() && 2067 "Expected float point or integer vector type."); 2068 ISD = ISD::FMINNUM; 2069 } 2070 2071 // We use the Intel Architecture Code Analyzer(IACA) to measure the throughput 2072 // and make it as the cost. 2073 2074 static const CostTblEntry SSE42CostTblPairWise[] = { 2075 {ISD::FMINNUM, MVT::v2f64, 3}, 2076 {ISD::FMINNUM, MVT::v4f32, 2}, 2077 {ISD::SMIN, MVT::v2i64, 7}, // The data reported by the IACA is "6.8" 2078 {ISD::UMIN, MVT::v2i64, 8}, // The data reported by the IACA is "8.6" 2079 {ISD::SMIN, MVT::v4i32, 1}, // The data reported by the IACA is "1.5" 2080 {ISD::UMIN, MVT::v4i32, 2}, // The data reported by the IACA is "1.8" 2081 {ISD::SMIN, MVT::v8i16, 2}, 2082 {ISD::UMIN, MVT::v8i16, 2}, 2083 }; 2084 2085 static const CostTblEntry AVX1CostTblPairWise[] = { 2086 {ISD::FMINNUM, MVT::v4f32, 1}, 2087 {ISD::FMINNUM, MVT::v4f64, 1}, 2088 {ISD::FMINNUM, MVT::v8f32, 2}, 2089 {ISD::SMIN, MVT::v2i64, 3}, 2090 {ISD::UMIN, MVT::v2i64, 3}, 2091 {ISD::SMIN, MVT::v4i32, 1}, 2092 {ISD::UMIN, MVT::v4i32, 1}, 2093 {ISD::SMIN, MVT::v8i16, 1}, 2094 {ISD::UMIN, MVT::v8i16, 1}, 2095 {ISD::SMIN, MVT::v8i32, 3}, 2096 {ISD::UMIN, MVT::v8i32, 3}, 2097 }; 2098 2099 static const CostTblEntry AVX2CostTblPairWise[] = { 2100 {ISD::SMIN, MVT::v4i64, 2}, 2101 {ISD::UMIN, MVT::v4i64, 2}, 2102 {ISD::SMIN, MVT::v8i32, 1}, 2103 {ISD::UMIN, MVT::v8i32, 1}, 2104 {ISD::SMIN, MVT::v16i16, 1}, 2105 {ISD::UMIN, MVT::v16i16, 1}, 2106 {ISD::SMIN, MVT::v32i8, 2}, 2107 {ISD::UMIN, MVT::v32i8, 2}, 2108 }; 2109 2110 static const CostTblEntry AVX512CostTblPairWise[] = { 2111 {ISD::FMINNUM, MVT::v8f64, 1}, 2112 {ISD::FMINNUM, MVT::v16f32, 2}, 2113 {ISD::SMIN, MVT::v8i64, 2}, 2114 {ISD::UMIN, MVT::v8i64, 2}, 2115 {ISD::SMIN, MVT::v16i32, 1}, 2116 {ISD::UMIN, MVT::v16i32, 1}, 2117 }; 2118 2119 static const CostTblEntry SSE42CostTblNoPairWise[] = { 2120 {ISD::FMINNUM, MVT::v2f64, 3}, 2121 {ISD::FMINNUM, MVT::v4f32, 3}, 2122 {ISD::SMIN, MVT::v2i64, 7}, // The data reported by the IACA is "6.8" 2123 {ISD::UMIN, MVT::v2i64, 9}, // 
The data reported by the IACA is "8.6" 2124 {ISD::SMIN, MVT::v4i32, 1}, // The data reported by the IACA is "1.5" 2125 {ISD::UMIN, MVT::v4i32, 2}, // The data reported by the IACA is "1.8" 2126 {ISD::SMIN, MVT::v8i16, 1}, // The data reported by the IACA is "1.5" 2127 {ISD::UMIN, MVT::v8i16, 2}, // The data reported by the IACA is "1.8" 2128 }; 2129 2130 static const CostTblEntry AVX1CostTblNoPairWise[] = { 2131 {ISD::FMINNUM, MVT::v4f32, 1}, 2132 {ISD::FMINNUM, MVT::v4f64, 1}, 2133 {ISD::FMINNUM, MVT::v8f32, 1}, 2134 {ISD::SMIN, MVT::v2i64, 3}, 2135 {ISD::UMIN, MVT::v2i64, 3}, 2136 {ISD::SMIN, MVT::v4i32, 1}, 2137 {ISD::UMIN, MVT::v4i32, 1}, 2138 {ISD::SMIN, MVT::v8i16, 1}, 2139 {ISD::UMIN, MVT::v8i16, 1}, 2140 {ISD::SMIN, MVT::v8i32, 2}, 2141 {ISD::UMIN, MVT::v8i32, 2}, 2142 }; 2143 2144 static const CostTblEntry AVX2CostTblNoPairWise[] = { 2145 {ISD::SMIN, MVT::v4i64, 1}, 2146 {ISD::UMIN, MVT::v4i64, 1}, 2147 {ISD::SMIN, MVT::v8i32, 1}, 2148 {ISD::UMIN, MVT::v8i32, 1}, 2149 {ISD::SMIN, MVT::v16i16, 1}, 2150 {ISD::UMIN, MVT::v16i16, 1}, 2151 {ISD::SMIN, MVT::v32i8, 1}, 2152 {ISD::UMIN, MVT::v32i8, 1}, 2153 }; 2154 2155 static const CostTblEntry AVX512CostTblNoPairWise[] = { 2156 {ISD::FMINNUM, MVT::v8f64, 1}, 2157 {ISD::FMINNUM, MVT::v16f32, 2}, 2158 {ISD::SMIN, MVT::v8i64, 1}, 2159 {ISD::UMIN, MVT::v8i64, 1}, 2160 {ISD::SMIN, MVT::v16i32, 1}, 2161 {ISD::UMIN, MVT::v16i32, 1}, 2162 }; 2163 2164 if (IsPairwise) { 2165 if (ST->hasAVX512()) 2166 if (const auto *Entry = CostTableLookup(AVX512CostTblPairWise, ISD, MTy)) 2167 return LT.first * Entry->Cost; 2168 2169 if (ST->hasAVX2()) 2170 if (const auto *Entry = CostTableLookup(AVX2CostTblPairWise, ISD, MTy)) 2171 return LT.first * Entry->Cost; 2172 2173 if (ST->hasAVX()) 2174 if (const auto *Entry = CostTableLookup(AVX1CostTblPairWise, ISD, MTy)) 2175 return LT.first * Entry->Cost; 2176 2177 if (ST->hasSSE42()) 2178 if (const auto *Entry = CostTableLookup(SSE42CostTblPairWise, ISD, MTy)) 2179 return LT.first * Entry->Cost; 2180 } else { 2181 if (ST->hasAVX512()) 2182 if (const auto *Entry = 2183 CostTableLookup(AVX512CostTblNoPairWise, ISD, MTy)) 2184 return LT.first * Entry->Cost; 2185 2186 if (ST->hasAVX2()) 2187 if (const auto *Entry = CostTableLookup(AVX2CostTblNoPairWise, ISD, MTy)) 2188 return LT.first * Entry->Cost; 2189 2190 if (ST->hasAVX()) 2191 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy)) 2192 return LT.first * Entry->Cost; 2193 2194 if (ST->hasSSE42()) 2195 if (const auto *Entry = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy)) 2196 return LT.first * Entry->Cost; 2197 } 2198 2199 return BaseT::getMinMaxReductionCost(ValTy, CondTy, IsPairwise, IsUnsigned); 2200 } 2201 2202 /// Calculate the cost of materializing a 64-bit value. This helper 2203 /// method might only calculate a fraction of a larger immediate. Therefore it 2204 /// is valid to return a cost of ZERO. 2205 int X86TTIImpl::getIntImmCost(int64_t Val) { 2206 if (Val == 0) 2207 return TTI::TCC_Free; 2208 2209 if (isInt<32>(Val)) 2210 return TTI::TCC_Basic; 2211 2212 return 2 * TTI::TCC_Basic; 2213 } 2214 2215 int X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) { 2216 assert(Ty->isIntegerTy()); 2217 2218 unsigned BitSize = Ty->getPrimitiveSizeInBits(); 2219 if (BitSize == 0) 2220 return ~0U; 2221 2222 // Never hoist constants larger than 128bit, because this might lead to 2223 // incorrect code generation or assertions in codegen. 
2224 // Fixme: Create a cost model for types larger than i128 once the codegen 2225 // issues have been fixed. 2226 if (BitSize > 128) 2227 return TTI::TCC_Free; 2228 2229 if (Imm == 0) 2230 return TTI::TCC_Free; 2231 2232 // Sign-extend all constants to a multiple of 64-bit. 2233 APInt ImmVal = Imm; 2234 if (BitSize & 0x3f) 2235 ImmVal = Imm.sext((BitSize + 63) & ~0x3fU); 2236 2237 // Split the constant into 64-bit chunks and calculate the cost for each 2238 // chunk. 2239 int Cost = 0; 2240 for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) { 2241 APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64); 2242 int64_t Val = Tmp.getSExtValue(); 2243 Cost += getIntImmCost(Val); 2244 } 2245 // We need at least one instruction to materialize the constant. 2246 return std::max(1, Cost); 2247 } 2248 2249 int X86TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm, 2250 Type *Ty) { 2251 assert(Ty->isIntegerTy()); 2252 2253 unsigned BitSize = Ty->getPrimitiveSizeInBits(); 2254 // There is no cost model for constants with a bit size of 0. Return TCC_Free 2255 // here, so that constant hoisting will ignore this constant. 2256 if (BitSize == 0) 2257 return TTI::TCC_Free; 2258 2259 unsigned ImmIdx = ~0U; 2260 switch (Opcode) { 2261 default: 2262 return TTI::TCC_Free; 2263 case Instruction::GetElementPtr: 2264 // Always hoist the base address of a GetElementPtr. This prevents the 2265 // creation of new constants for every base constant that gets constant 2266 // folded with the offset. 2267 if (Idx == 0) 2268 return 2 * TTI::TCC_Basic; 2269 return TTI::TCC_Free; 2270 case Instruction::Store: 2271 ImmIdx = 0; 2272 break; 2273 case Instruction::ICmp: 2274 // This is an imperfect hack to prevent constant hoisting of 2275 // compares that might be trying to check if a 64-bit value fits in 2276 // 32-bits. The backend can optimize these cases using a right shift by 32. 2277 // Ideally we would check the compare predicate here. There also other 2278 // similar immediates the backend can use shifts for. 2279 if (Idx == 1 && Imm.getBitWidth() == 64) { 2280 uint64_t ImmVal = Imm.getZExtValue(); 2281 if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff) 2282 return TTI::TCC_Free; 2283 } 2284 ImmIdx = 1; 2285 break; 2286 case Instruction::And: 2287 // We support 64-bit ANDs with immediates with 32-bits of leading zeroes 2288 // by using a 32-bit operation with implicit zero extension. Detect such 2289 // immediates here as the normal path expects bit 31 to be sign extended. 2290 if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue())) 2291 return TTI::TCC_Free; 2292 LLVM_FALLTHROUGH; 2293 case Instruction::Add: 2294 case Instruction::Sub: 2295 case Instruction::Mul: 2296 case Instruction::UDiv: 2297 case Instruction::SDiv: 2298 case Instruction::URem: 2299 case Instruction::SRem: 2300 case Instruction::Or: 2301 case Instruction::Xor: 2302 ImmIdx = 1; 2303 break; 2304 // Always return TCC_Free for the shift value of a shift instruction. 
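// An immediate shift amount can be encoded directly in the shift instruction,
// so there is nothing to gain from hoisting it.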
2305 case Instruction::Shl: 2306 case Instruction::LShr: 2307 case Instruction::AShr: 2308 if (Idx == 1) 2309 return TTI::TCC_Free; 2310 break; 2311 case Instruction::Trunc: 2312 case Instruction::ZExt: 2313 case Instruction::SExt: 2314 case Instruction::IntToPtr: 2315 case Instruction::PtrToInt: 2316 case Instruction::BitCast: 2317 case Instruction::PHI: 2318 case Instruction::Call: 2319 case Instruction::Select: 2320 case Instruction::Ret: 2321 case Instruction::Load: 2322 break; 2323 } 2324 2325 if (Idx == ImmIdx) { 2326 int NumConstants = (BitSize + 63) / 64; 2327 int Cost = X86TTIImpl::getIntImmCost(Imm, Ty); 2328 return (Cost <= NumConstants * TTI::TCC_Basic) 2329 ? static_cast<int>(TTI::TCC_Free) 2330 : Cost; 2331 } 2332 2333 return X86TTIImpl::getIntImmCost(Imm, Ty); 2334 } 2335 2336 int X86TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm, 2337 Type *Ty) { 2338 assert(Ty->isIntegerTy()); 2339 2340 unsigned BitSize = Ty->getPrimitiveSizeInBits(); 2341 // There is no cost model for constants with a bit size of 0. Return TCC_Free 2342 // here, so that constant hoisting will ignore this constant. 2343 if (BitSize == 0) 2344 return TTI::TCC_Free; 2345 2346 switch (IID) { 2347 default: 2348 return TTI::TCC_Free; 2349 case Intrinsic::sadd_with_overflow: 2350 case Intrinsic::uadd_with_overflow: 2351 case Intrinsic::ssub_with_overflow: 2352 case Intrinsic::usub_with_overflow: 2353 case Intrinsic::smul_with_overflow: 2354 case Intrinsic::umul_with_overflow: 2355 if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue())) 2356 return TTI::TCC_Free; 2357 break; 2358 case Intrinsic::experimental_stackmap: 2359 if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue()))) 2360 return TTI::TCC_Free; 2361 break; 2362 case Intrinsic::experimental_patchpoint_void: 2363 case Intrinsic::experimental_patchpoint_i64: 2364 if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue()))) 2365 return TTI::TCC_Free; 2366 break; 2367 } 2368 return X86TTIImpl::getIntImmCost(Imm, Ty); 2369 } 2370 2371 unsigned X86TTIImpl::getUserCost(const User *U, 2372 ArrayRef<const Value *> Operands) { 2373 if (isa<StoreInst>(U)) { 2374 Value *Ptr = U->getOperand(1); 2375 // Store instruction with index and scale costs 2 Uops. 2376 // Check the preceding GEP to identify non-const indices. 2377 if (auto GEP = dyn_cast<GetElementPtrInst>(Ptr)) { 2378 if (!all_of(GEP->indices(), [](Value *V) { return isa<Constant>(V); })) 2379 return TTI::TCC_Basic * 2; 2380 } 2381 return TTI::TCC_Basic; 2382 } 2383 return BaseT::getUserCost(U, Operands); 2384 } 2385 2386 // Return an average cost of Gather / Scatter instruction, maybe improved later 2387 int X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy, Value *Ptr, 2388 unsigned Alignment, unsigned AddressSpace) { 2389 2390 assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost"); 2391 unsigned VF = SrcVTy->getVectorNumElements(); 2392 2393 // Try to reduce index size from 64 bit (default for GEP) 2394 // to 32. It is essential for VF 16. If the index can't be reduced to 32, the 2395 // operation will use 16 x 64 indices which do not fit in a zmm and needs 2396 // to split. Also check that the base pointer is the same for all lanes, 2397 // and that there's at most one variable index. 
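// Illustrative example: a gather of 16 x i32 whose only variable index is a
// sext of an i32 can use 32-bit indices and stays a single 512-bit operation,
// whereas a raw 64-bit index vector would be split into two 8-wide operations
// below.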
2398 auto getIndexSizeInBits = [](Value *Ptr, const DataLayout& DL) { 2399 unsigned IndexSize = DL.getPointerSizeInBits(); 2400 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr); 2401 if (IndexSize < 64 || !GEP) 2402 return IndexSize; 2403 2404 unsigned NumOfVarIndices = 0; 2405 Value *Ptrs = GEP->getPointerOperand(); 2406 if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs)) 2407 return IndexSize; 2408 for (unsigned i = 1; i < GEP->getNumOperands(); ++i) { 2409 if (isa<Constant>(GEP->getOperand(i))) 2410 continue; 2411 Type *IndxTy = GEP->getOperand(i)->getType(); 2412 if (IndxTy->isVectorTy()) 2413 IndxTy = IndxTy->getVectorElementType(); 2414 if ((IndxTy->getPrimitiveSizeInBits() == 64 && 2415 !isa<SExtInst>(GEP->getOperand(i))) || 2416 ++NumOfVarIndices > 1) 2417 return IndexSize; // 64 2418 } 2419 return (unsigned)32; 2420 }; 2421 2422 2423 // Trying to reduce IndexSize to 32 bits for vector 16. 2424 // By default the IndexSize is equal to pointer size. 2425 unsigned IndexSize = (ST->hasAVX512() && VF >= 16) 2426 ? getIndexSizeInBits(Ptr, DL) 2427 : DL.getPointerSizeInBits(); 2428 2429 Type *IndexVTy = VectorType::get(IntegerType::get(SrcVTy->getContext(), 2430 IndexSize), VF); 2431 std::pair<int, MVT> IdxsLT = TLI->getTypeLegalizationCost(DL, IndexVTy); 2432 std::pair<int, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, SrcVTy); 2433 int SplitFactor = std::max(IdxsLT.first, SrcLT.first); 2434 if (SplitFactor > 1) { 2435 // Handle splitting of vector of pointers 2436 Type *SplitSrcTy = VectorType::get(SrcVTy->getScalarType(), VF / SplitFactor); 2437 return SplitFactor * getGSVectorCost(Opcode, SplitSrcTy, Ptr, Alignment, 2438 AddressSpace); 2439 } 2440 2441 // The gather / scatter cost is given by Intel architects. It is a rough 2442 // number since we are looking at one instruction in a time. 2443 const int GSOverhead = (Opcode == Instruction::Load) 2444 ? ST->getGatherOverhead() 2445 : ST->getScatterOverhead(); 2446 return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(), 2447 Alignment, AddressSpace); 2448 } 2449 2450 /// Return the cost of full scalarization of gather / scatter operation. 2451 /// 2452 /// Opcode - Load or Store instruction. 2453 /// SrcVTy - The type of the data vector that should be gathered or scattered. 2454 /// VariableMask - The mask is non-constant at compile time. 2455 /// Alignment - Alignment for one element. 2456 /// AddressSpace - pointer[s] address space. 2457 /// 2458 int X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy, 2459 bool VariableMask, unsigned Alignment, 2460 unsigned AddressSpace) { 2461 unsigned VF = SrcVTy->getVectorNumElements(); 2462 2463 int MaskUnpackCost = 0; 2464 if (VariableMask) { 2465 VectorType *MaskTy = 2466 VectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF); 2467 MaskUnpackCost = getScalarizationOverhead(MaskTy, false, true); 2468 int ScalarCompareCost = 2469 getCmpSelInstrCost(Instruction::ICmp, Type::getInt1Ty(SrcVTy->getContext()), 2470 nullptr); 2471 int BranchCost = getCFInstrCost(Instruction::Br); 2472 MaskUnpackCost += VF * (BranchCost + ScalarCompareCost); 2473 } 2474 2475 // The cost of the scalar loads/stores. 
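// VF scalar accesses, one per element; together with the per-element branch
// and compare above and the insert/extract cost below this models a fully
// scalarized gather/scatter.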
2476 int MemoryOpCost = VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(), 2477 Alignment, AddressSpace); 2478 2479 int InsertExtractCost = 0; 2480 if (Opcode == Instruction::Load) 2481 for (unsigned i = 0; i < VF; ++i) 2482 // Add the cost of inserting each scalar load into the vector 2483 InsertExtractCost += 2484 getVectorInstrCost(Instruction::InsertElement, SrcVTy, i); 2485 else 2486 for (unsigned i = 0; i < VF; ++i) 2487 // Add the cost of extracting each element out of the data vector 2488 InsertExtractCost += 2489 getVectorInstrCost(Instruction::ExtractElement, SrcVTy, i); 2490 2491 return MemoryOpCost + MaskUnpackCost + InsertExtractCost; 2492 } 2493 2494 /// Calculate the cost of Gather / Scatter operation 2495 int X86TTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *SrcVTy, 2496 Value *Ptr, bool VariableMask, 2497 unsigned Alignment) { 2498 assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter"); 2499 unsigned VF = SrcVTy->getVectorNumElements(); 2500 PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType()); 2501 if (!PtrTy && Ptr->getType()->isVectorTy()) 2502 PtrTy = dyn_cast<PointerType>(Ptr->getType()->getVectorElementType()); 2503 assert(PtrTy && "Unexpected type for Ptr argument"); 2504 unsigned AddressSpace = PtrTy->getAddressSpace(); 2505 2506 bool Scalarize = false; 2507 if ((Opcode == Instruction::Load && !isLegalMaskedGather(SrcVTy)) || 2508 (Opcode == Instruction::Store && !isLegalMaskedScatter(SrcVTy))) 2509 Scalarize = true; 2510 // Gather / Scatter for vector 2 is not profitable on KNL / SKX 2511 // Vector-4 of gather/scatter instruction does not exist on KNL. 2512 // We can extend it to 8 elements, but zeroing upper bits of 2513 // the mask vector will add more instructions. Right now we give the scalar 2514 // cost of vector-4 for KNL. TODO: Check, maybe the gather/scatter instruction 2515 // is better in the VariableMask case. 2516 if (ST->hasAVX512() && (VF == 2 || (VF == 4 && !ST->hasVLX()))) 2517 Scalarize = true; 2518 2519 if (Scalarize) 2520 return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment, 2521 AddressSpace); 2522 2523 return getGSVectorCost(Opcode, SrcVTy, Ptr, Alignment, AddressSpace); 2524 } 2525 2526 bool X86TTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1, 2527 TargetTransformInfo::LSRCost &C2) { 2528 // X86 specific here are "instruction number 1st priority". 2529 return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost, 2530 C1.NumIVMuls, C1.NumBaseAdds, 2531 C1.ScaleCost, C1.ImmCost, C1.SetupCost) < 2532 std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost, 2533 C2.NumIVMuls, C2.NumBaseAdds, 2534 C2.ScaleCost, C2.ImmCost, C2.SetupCost); 2535 } 2536 2537 bool X86TTIImpl::canMacroFuseCmp() { 2538 return ST->hasMacroFusion(); 2539 } 2540 2541 bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy) { 2542 // The backend can't handle a single element vector. 2543 if (isa<VectorType>(DataTy) && DataTy->getVectorNumElements() == 1) 2544 return false; 2545 Type *ScalarTy = DataTy->getScalarType(); 2546 int DataWidth = isa<PointerType>(ScalarTy) ? 
2547 DL.getPointerSizeInBits() : ScalarTy->getPrimitiveSizeInBits(); 2548 2549 return ((DataWidth == 32 || DataWidth == 64) && ST->hasAVX()) || 2550 ((DataWidth == 8 || DataWidth == 16) && ST->hasBWI()); 2551 } 2552 2553 bool X86TTIImpl::isLegalMaskedStore(Type *DataType) { 2554 return isLegalMaskedLoad(DataType); 2555 } 2556 2557 bool X86TTIImpl::isLegalMaskedGather(Type *DataTy) { 2558 // This function is called now in two cases: from the Loop Vectorizer 2559 // and from the Scalarizer. 2560 // When the Loop Vectorizer asks about legality of the feature, 2561 // the vectorization factor is not calculated yet. The Loop Vectorizer 2562 // sends a scalar type and the decision is based on the width of the 2563 // scalar element. 2564 // Later on, the cost model will estimate usage this intrinsic based on 2565 // the vector type. 2566 // The Scalarizer asks again about legality. It sends a vector type. 2567 // In this case we can reject non-power-of-2 vectors. 2568 // We also reject single element vectors as the type legalizer can't 2569 // scalarize it. 2570 if (isa<VectorType>(DataTy)) { 2571 unsigned NumElts = DataTy->getVectorNumElements(); 2572 if (NumElts == 1 || !isPowerOf2_32(NumElts)) 2573 return false; 2574 } 2575 Type *ScalarTy = DataTy->getScalarType(); 2576 int DataWidth = isa<PointerType>(ScalarTy) ? 2577 DL.getPointerSizeInBits() : ScalarTy->getPrimitiveSizeInBits(); 2578 2579 // Some CPUs have better gather performance than others. 2580 // TODO: Remove the explicit ST->hasAVX512()?, That would mean we would only 2581 // enable gather with a -march. 2582 return (DataWidth == 32 || DataWidth == 64) && 2583 (ST->hasAVX512() || (ST->hasFastGather() && ST->hasAVX2())); 2584 } 2585 2586 bool X86TTIImpl::isLegalMaskedScatter(Type *DataType) { 2587 // AVX2 doesn't support scatter 2588 if (!ST->hasAVX512()) 2589 return false; 2590 return isLegalMaskedGather(DataType); 2591 } 2592 2593 bool X86TTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) { 2594 EVT VT = TLI->getValueType(DL, DataType); 2595 return TLI->isOperationLegal(IsSigned ? ISD::SDIVREM : ISD::UDIVREM, VT); 2596 } 2597 2598 bool X86TTIImpl::isFCmpOrdCheaperThanFCmpZero(Type *Ty) { 2599 return false; 2600 } 2601 2602 bool X86TTIImpl::areInlineCompatible(const Function *Caller, 2603 const Function *Callee) const { 2604 const TargetMachine &TM = getTLI()->getTargetMachine(); 2605 2606 // Work this as a subsetting of subtarget features. 2607 const FeatureBitset &CallerBits = 2608 TM.getSubtargetImpl(*Caller)->getFeatureBits(); 2609 const FeatureBitset &CalleeBits = 2610 TM.getSubtargetImpl(*Callee)->getFeatureBits(); 2611 2612 // FIXME: This is likely too limiting as it will include subtarget features 2613 // that we might not care about for inlining, but it is conservatively 2614 // correct. 2615 return (CallerBits & CalleeBits) == CalleeBits; 2616 } 2617 2618 const X86TTIImpl::TTI::MemCmpExpansionOptions * 2619 X86TTIImpl::enableMemCmpExpansion(bool IsZeroCmp) const { 2620 // Only enable vector loads for equality comparison. 2621 // Right now the vector version is not as fast, see #33329. 
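// ThreeWayOptions (memcmp with an ordered result) therefore sticks to scalar
// loads (8/4/2/1 bytes, the 8-byte load only on 64-bit targets), while
// EqZeroOptions (equality only) may also use 32-byte AVX2 or 16-byte SSE2
// vector loads.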
2622 static const auto ThreeWayOptions = [this]() { 2623 TTI::MemCmpExpansionOptions Options; 2624 if (ST->is64Bit()) { 2625 Options.LoadSizes.push_back(8); 2626 } 2627 Options.LoadSizes.push_back(4); 2628 Options.LoadSizes.push_back(2); 2629 Options.LoadSizes.push_back(1); 2630 return Options; 2631 }(); 2632 static const auto EqZeroOptions = [this]() { 2633 TTI::MemCmpExpansionOptions Options; 2634 // TODO: enable AVX512 when the DAG is ready. 2635 // if (ST->hasAVX512()) Options.LoadSizes.push_back(64); 2636 if (ST->hasAVX2()) Options.LoadSizes.push_back(32); 2637 if (ST->hasSSE2()) Options.LoadSizes.push_back(16); 2638 if (ST->is64Bit()) { 2639 Options.LoadSizes.push_back(8); 2640 } 2641 Options.LoadSizes.push_back(4); 2642 Options.LoadSizes.push_back(2); 2643 Options.LoadSizes.push_back(1); 2644 return Options; 2645 }(); 2646 return IsZeroCmp ? &EqZeroOptions : &ThreeWayOptions; 2647 } 2648 2649 bool X86TTIImpl::enableInterleavedAccessVectorization() { 2650 // TODO: We expect this to be beneficial regardless of arch, 2651 // but there are currently some unexplained performance artifacts on Atom. 2652 // As a temporary solution, disable on Atom. 2653 return !(ST->isAtom()); 2654 } 2655 2656 // Get estimation for interleaved load/store operations for AVX2. 2657 // \p Factor is the interleaved-access factor (stride) - number of 2658 // (interleaved) elements in the group. 2659 // \p Indices contains the indices for a strided load: when the 2660 // interleaved load has gaps they indicate which elements are used. 2661 // If Indices is empty (or if the number of indices is equal to the size 2662 // of the interleaved-access as given in \p Factor) the access has no gaps. 2663 // 2664 // As opposed to AVX-512, AVX2 does not have generic shuffles that allow 2665 // computing the cost using a generic formula as a function of generic 2666 // shuffles. We therefore use a lookup table instead, filled according to 2667 // the instruction sequences that codegen currently generates. 2668 int X86TTIImpl::getInterleavedMemoryOpCostAVX2(unsigned Opcode, Type *VecTy, 2669 unsigned Factor, 2670 ArrayRef<unsigned> Indices, 2671 unsigned Alignment, 2672 unsigned AddressSpace) { 2673 2674 // We currently Support only fully-interleaved groups, with no gaps. 2675 // TODO: Support also strided loads (interleaved-groups with gaps). 2676 if (Indices.size() && Indices.size() != Factor) 2677 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices, 2678 Alignment, AddressSpace); 2679 2680 // VecTy for interleave memop is <VF*Factor x Elt>. 2681 // So, for VF=4, Interleave Factor = 3, Element type = i32 we have 2682 // VecTy = <12 x i32>. 2683 MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second; 2684 2685 // This function can be called with VecTy=<6xi128>, Factor=3, in which case 2686 // the VF=2, while v2i128 is an unsupported MVT vector type 2687 // (see MachineValueType.h::getVectorVT()). 2688 if (!LegalVT.isVector()) 2689 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices, 2690 Alignment, AddressSpace); 2691 2692 unsigned VF = VecTy->getVectorNumElements() / Factor; 2693 Type *ScalarTy = VecTy->getVectorElementType(); 2694 2695 // Calculate the number of memory operations (NumOfMemOps), required 2696 // for load/store the VecTy. 2697 unsigned VecTySize = DL.getTypeStoreSize(VecTy); 2698 unsigned LegalVTSize = LegalVT.getStoreSize(); 2699 unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize; 2700 2701 // Get the cost of one memory operation. 
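// For example, for VecTy = <12 x i32> (Factor = 3, VF = 4) on AVX2, LegalVT is
// v8i32: VecTySize = 48 bytes and LegalVTSize = 32 bytes, so NumOfMemOps = 2
// and each memory operation below is costed as a v8i32 load/store.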
2702 Type *SingleMemOpTy = VectorType::get(VecTy->getVectorElementType(), 2703 LegalVT.getVectorNumElements()); 2704 unsigned MemOpCost = 2705 getMemoryOpCost(Opcode, SingleMemOpTy, Alignment, AddressSpace); 2706 2707 VectorType *VT = VectorType::get(ScalarTy, VF); 2708 EVT ETy = TLI->getValueType(DL, VT); 2709 if (!ETy.isSimple()) 2710 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices, 2711 Alignment, AddressSpace); 2712 2713 // TODO: Complete for other data-types and strides. 2714 // Each combination of Stride, ElementTy and VF results in a different 2715 // sequence; The cost tables are therefore accessed with: 2716 // Factor (stride) and VectorType=VFxElemType. 2717 // The Cost accounts only for the shuffle sequence; 2718 // The cost of the loads/stores is accounted for separately. 2719 // 2720 static const CostTblEntry AVX2InterleavedLoadTbl[] = { 2721 { 2, MVT::v4i64, 6 }, //(load 8i64 and) deinterleave into 2 x 4i64 2722 { 2, MVT::v4f64, 6 }, //(load 8f64 and) deinterleave into 2 x 4f64 2723 2724 { 3, MVT::v2i8, 10 }, //(load 6i8 and) deinterleave into 3 x 2i8 2725 { 3, MVT::v4i8, 4 }, //(load 12i8 and) deinterleave into 3 x 4i8 2726 { 3, MVT::v8i8, 9 }, //(load 24i8 and) deinterleave into 3 x 8i8 2727 { 3, MVT::v16i8, 11}, //(load 48i8 and) deinterleave into 3 x 16i8 2728 { 3, MVT::v32i8, 13}, //(load 96i8 and) deinterleave into 3 x 32i8 2729 { 3, MVT::v8f32, 17 }, //(load 24f32 and)deinterleave into 3 x 8f32 2730 2731 { 4, MVT::v2i8, 12 }, //(load 8i8 and) deinterleave into 4 x 2i8 2732 { 4, MVT::v4i8, 4 }, //(load 16i8 and) deinterleave into 4 x 4i8 2733 { 4, MVT::v8i8, 20 }, //(load 32i8 and) deinterleave into 4 x 8i8 2734 { 4, MVT::v16i8, 39 }, //(load 64i8 and) deinterleave into 4 x 16i8 2735 { 4, MVT::v32i8, 80 }, //(load 128i8 and) deinterleave into 4 x 32i8 2736 2737 { 8, MVT::v8f32, 40 } //(load 64f32 and)deinterleave into 8 x 8f32 2738 }; 2739 2740 static const CostTblEntry AVX2InterleavedStoreTbl[] = { 2741 { 2, MVT::v4i64, 6 }, //interleave into 2 x 4i64 into 8i64 (and store) 2742 { 2, MVT::v4f64, 6 }, //interleave into 2 x 4f64 into 8f64 (and store) 2743 2744 { 3, MVT::v2i8, 7 }, //interleave 3 x 2i8 into 6i8 (and store) 2745 { 3, MVT::v4i8, 8 }, //interleave 3 x 4i8 into 12i8 (and store) 2746 { 3, MVT::v8i8, 11 }, //interleave 3 x 8i8 into 24i8 (and store) 2747 { 3, MVT::v16i8, 11 }, //interleave 3 x 16i8 into 48i8 (and store) 2748 { 3, MVT::v32i8, 13 }, //interleave 3 x 32i8 into 96i8 (and store) 2749 2750 { 4, MVT::v2i8, 12 }, //interleave 4 x 2i8 into 8i8 (and store) 2751 { 4, MVT::v4i8, 9 }, //interleave 4 x 4i8 into 16i8 (and store) 2752 { 4, MVT::v8i8, 10 }, //interleave 4 x 8i8 into 32i8 (and store) 2753 { 4, MVT::v16i8, 10 }, //interleave 4 x 16i8 into 64i8 (and store) 2754 { 4, MVT::v32i8, 12 } //interleave 4 x 32i8 into 128i8 (and store) 2755 }; 2756 2757 if (Opcode == Instruction::Load) { 2758 if (const auto *Entry = 2759 CostTableLookup(AVX2InterleavedLoadTbl, Factor, ETy.getSimpleVT())) 2760 return NumOfMemOps * MemOpCost + Entry->Cost; 2761 } else { 2762 assert(Opcode == Instruction::Store && 2763 "Expected Store Instruction at this point"); 2764 if (const auto *Entry = 2765 CostTableLookup(AVX2InterleavedStoreTbl, Factor, ETy.getSimpleVT())) 2766 return NumOfMemOps * MemOpCost + Entry->Cost; 2767 } 2768 2769 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices, 2770 Alignment, AddressSpace); 2771 } 2772 2773 // Get estimation for interleaved load/store operations and strided load. 
2774 // \p Indices contains indices for strided load.
2775 // \p Factor - the factor of interleaving.
2776 // AVX-512 provides 3-src shuffles that significantly reduce the cost.
2777 int X86TTIImpl::getInterleavedMemoryOpCostAVX512(unsigned Opcode, Type *VecTy,
2778                                                  unsigned Factor,
2779                                                  ArrayRef<unsigned> Indices,
2780                                                  unsigned Alignment,
2781                                                  unsigned AddressSpace) {
2782
2783   // VecTy for interleave memop is <VF*Factor x Elt>.
2784   // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
2785   // VecTy = <12 x i32>.
2786
2787   // Calculate the number of memory operations (NumOfMemOps) required to
2788   // load/store the VecTy.
2789   MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
2790   unsigned VecTySize = DL.getTypeStoreSize(VecTy);
2791   unsigned LegalVTSize = LegalVT.getStoreSize();
2792   unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;
2793
2794   // Get the cost of one memory operation.
2795   Type *SingleMemOpTy = VectorType::get(VecTy->getVectorElementType(),
2796                                         LegalVT.getVectorNumElements());
2797   unsigned MemOpCost =
2798       getMemoryOpCost(Opcode, SingleMemOpTy, Alignment, AddressSpace);
2799
2800   unsigned VF = VecTy->getVectorNumElements() / Factor;
2801   MVT VT = MVT::getVectorVT(MVT::getVT(VecTy->getScalarType()), VF);
2802
2803   if (Opcode == Instruction::Load) {
2804     // The tables (AVX512InterleavedLoadTbl and AVX512InterleavedStoreTbl)
2805     // contain the cost of the optimized shuffle sequence that the
2806     // X86InterleavedAccess pass will generate.
2807     // The cost of the loads and stores is computed separately from the table.
2808
2809     // X86InterleavedAccess supports only the following interleaved-access groups.
2810     static const CostTblEntry AVX512InterleavedLoadTbl[] = {
2811         {3, MVT::v16i8, 12}, //(load 48i8 and) deinterleave into 3 x 16i8
2812         {3, MVT::v32i8, 14}, //(load 96i8 and) deinterleave into 3 x 32i8
2813         {3, MVT::v64i8, 22}, //(load 192i8 and) deinterleave into 3 x 64i8
2814     };
2815
2816     if (const auto *Entry =
2817             CostTableLookup(AVX512InterleavedLoadTbl, Factor, VT))
2818       return NumOfMemOps * MemOpCost + Entry->Cost;
2819     // If an entry does not exist, fall back to the default implementation.
2820
2821     // The kind of shuffle depends on the number of loaded values.
2822     // If we load the entire data in one register, we can use a 1-src shuffle.
2823     // Otherwise, we'll merge 2 sources in each operation.
2824     TTI::ShuffleKind ShuffleKind =
2825         (NumOfMemOps > 1) ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc;
2826
2827     unsigned ShuffleCost =
2828         getShuffleCost(ShuffleKind, SingleMemOpTy, 0, nullptr);
2829
2830     unsigned NumOfLoadsInInterleaveGrp =
2831         Indices.size() ? Indices.size() : Factor;
2832     Type *ResultTy = VectorType::get(VecTy->getVectorElementType(),
2833                                      VecTy->getVectorNumElements() / Factor);
2834     unsigned NumOfResults =
2835         getTLI()->getTypeLegalizationCost(DL, ResultTy).first *
2836         NumOfLoadsInInterleaveGrp;
2837
2838     // About half of the loads may be folded into shuffles when we have only
2839     // one result. If we have more than one result, we do not fold loads at all.
2840     unsigned NumOfUnfoldedLoads =
2841         NumOfResults > 1 ? NumOfMemOps : NumOfMemOps / 2;
2842
2843     // Get the number of shuffle operations per result.
2844     unsigned NumOfShufflesPerResult =
2845         std::max((unsigned)1, (unsigned)(NumOfMemOps - 1));
2846
2847     // The SK_PermuteTwoSrc shuffle clobbers one of its source operands.
2848     // When we have more than one destination, we need additional instructions
2849     // to keep sources.
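    // For example, with 2 legalized results each built by a single two-source
    // shuffle, one extra move (2 * 1 / 2) is added below to preserve a source
    // operand.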
2850     unsigned NumOfMoves = 0;
2851     if (NumOfResults > 1 && ShuffleKind == TTI::SK_PermuteTwoSrc)
2852       NumOfMoves = NumOfResults * NumOfShufflesPerResult / 2;
2853
2854     int Cost = NumOfResults * NumOfShufflesPerResult * ShuffleCost +
2855                NumOfUnfoldedLoads * MemOpCost + NumOfMoves;
2856
2857     return Cost;
2858   }
2859
2860   // Store.
2861   assert(Opcode == Instruction::Store &&
2862          "Expected Store Instruction at this point");
2863   // X86InterleavedAccess supports only the following interleaved-access groups.
2864   static const CostTblEntry AVX512InterleavedStoreTbl[] = {
2865       {3, MVT::v16i8, 12}, // interleave 3 x 16i8 into 48i8 (and store)
2866       {3, MVT::v32i8, 14}, // interleave 3 x 32i8 into 96i8 (and store)
2867       {3, MVT::v64i8, 26}, // interleave 3 x 64i8 into 192i8 (and store)
2868
2869       {4, MVT::v8i8, 10},  // interleave 4 x 8i8 into 32i8 (and store)
2870       {4, MVT::v16i8, 11}, // interleave 4 x 16i8 into 64i8 (and store)
2871       {4, MVT::v32i8, 14}, // interleave 4 x 32i8 into 128i8 (and store)
2872       {4, MVT::v64i8, 24}  // interleave 4 x 64i8 into 256i8 (and store)
2873   };
2874
2875   if (const auto *Entry =
2876           CostTableLookup(AVX512InterleavedStoreTbl, Factor, VT))
2877     return NumOfMemOps * MemOpCost + Entry->Cost;
2878   // If an entry does not exist, fall back to the default implementation.
2879
2880   // There are no strided stores at the moment, and a store can't be folded
2881   // into a shuffle.
2882   unsigned NumOfSources = Factor; // The number of values to be merged.
2883   unsigned ShuffleCost =
2884       getShuffleCost(TTI::SK_PermuteTwoSrc, SingleMemOpTy, 0, nullptr);
2885   unsigned NumOfShufflesPerStore = NumOfSources - 1;
2886
2887   // The SK_PermuteTwoSrc shuffle clobbers one of its source operands.
2888   // We need additional instructions to keep sources.
2889   unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2;
2890   int Cost = NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) +
2891              NumOfMoves;
2892   return Cost;
2893 }
2894
2895 int X86TTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
2896                                            unsigned Factor,
2897                                            ArrayRef<unsigned> Indices,
2898                                            unsigned Alignment,
2899                                            unsigned AddressSpace) {
2900   auto isSupportedOnAVX512 = [](Type *VecTy, bool HasBW) {
2901     Type *EltTy = VecTy->getVectorElementType();
2902     if (EltTy->isFloatTy() || EltTy->isDoubleTy() || EltTy->isIntegerTy(64) ||
2903         EltTy->isIntegerTy(32) || EltTy->isPointerTy())
2904       return true;
2905     if (EltTy->isIntegerTy(16) || EltTy->isIntegerTy(8))
2906       return HasBW;
2907     return false;
2908   };
2909   if (ST->hasAVX512() && isSupportedOnAVX512(VecTy, ST->hasBWI()))
2910     return getInterleavedMemoryOpCostAVX512(Opcode, VecTy, Factor, Indices,
2911                                             Alignment, AddressSpace);
2912   if (ST->hasAVX2())
2913     return getInterleavedMemoryOpCostAVX2(Opcode, VecTy, Factor, Indices,
2914                                           Alignment, AddressSpace);
2915
2916   return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
2917                                            Alignment, AddressSpace);
2918 }
2919