//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//
/// A note about the cost model numbers used below: they correspond to some
/// "generic" X86 CPU rather than to a concrete CPU model. Usually the numbers
/// correspond to the CPU where the feature first appeared. For example, if we
/// check Subtarget.hasSSE42() in the lookups below, the cost is based on
/// Nehalem, as that was the first CPU to support that feature level and is
/// therefore likely to have the worst-case cost.
/// Some examples of other technologies/CPUs:
///   SSE 3   - Pentium4 / Athlon64
///   SSE 4.1 - Penryn
///   SSE 4.2 - Nehalem
///   AVX     - Sandy Bridge
///   AVX2    - Haswell
///   AVX-512 - Xeon Phi / Skylake
/// And some examples of instruction target-dependent costs (latency):
///                     divss   sqrtss  rsqrtss
///   AMD K7            11-16   19      3
///   Piledriver        9-24    13-15   5
///   Jaguar            14      16      2
///   Pentium II,III    18      30      2
///   Nehalem           7-14    7-18    3
///   Haswell           10-13   11      5
/// TODO: Develop and implement the target-dependent cost model and specialize
/// cost numbers for different Cost Model Targets such as throughput, code
/// size, latency and uop count.
//===----------------------------------------------------------------------===//

#include "X86TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

#define DEBUG_TYPE "x86tti"

//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  //   instructions is inefficient. Once the problem is fixed, we should
  //   call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ?
TTI::PSK_FastHardware : TTI::PSK_Software; 67 } 68 69 llvm::Optional<unsigned> X86TTIImpl::getCacheSize( 70 TargetTransformInfo::CacheLevel Level) const { 71 switch (Level) { 72 case TargetTransformInfo::CacheLevel::L1D: 73 // - Penryn 74 // - Nehalem 75 // - Westmere 76 // - Sandy Bridge 77 // - Ivy Bridge 78 // - Haswell 79 // - Broadwell 80 // - Skylake 81 // - Kabylake 82 return 32 * 1024; // 32 KByte 83 case TargetTransformInfo::CacheLevel::L2D: 84 // - Penryn 85 // - Nehalem 86 // - Westmere 87 // - Sandy Bridge 88 // - Ivy Bridge 89 // - Haswell 90 // - Broadwell 91 // - Skylake 92 // - Kabylake 93 return 256 * 1024; // 256 KByte 94 } 95 96 llvm_unreachable("Unknown TargetTransformInfo::CacheLevel"); 97 } 98 99 llvm::Optional<unsigned> X86TTIImpl::getCacheAssociativity( 100 TargetTransformInfo::CacheLevel Level) const { 101 // - Penryn 102 // - Nehalem 103 // - Westmere 104 // - Sandy Bridge 105 // - Ivy Bridge 106 // - Haswell 107 // - Broadwell 108 // - Skylake 109 // - Kabylake 110 switch (Level) { 111 case TargetTransformInfo::CacheLevel::L1D: 112 LLVM_FALLTHROUGH; 113 case TargetTransformInfo::CacheLevel::L2D: 114 return 8; 115 } 116 117 llvm_unreachable("Unknown TargetTransformInfo::CacheLevel"); 118 } 119 120 unsigned X86TTIImpl::getNumberOfRegisters(bool Vector) { 121 if (Vector && !ST->hasSSE1()) 122 return 0; 123 124 if (ST->is64Bit()) { 125 if (Vector && ST->hasAVX512()) 126 return 32; 127 return 16; 128 } 129 return 8; 130 } 131 132 unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) const { 133 unsigned PreferVectorWidth = ST->getPreferVectorWidth(); 134 if (Vector) { 135 if (ST->hasAVX512() && PreferVectorWidth >= 512) 136 return 512; 137 if (ST->hasAVX() && PreferVectorWidth >= 256) 138 return 256; 139 if (ST->hasSSE1() && PreferVectorWidth >= 128) 140 return 128; 141 return 0; 142 } 143 144 if (ST->is64Bit()) 145 return 64; 146 147 return 32; 148 } 149 150 unsigned X86TTIImpl::getLoadStoreVecRegBitWidth(unsigned) const { 151 return getRegisterBitWidth(true); 152 } 153 154 unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) { 155 // If the loop will not be vectorized, don't interleave the loop. 156 // Let regular unroll to unroll the loop, which saves the overflow 157 // check and memory check cost. 158 if (VF == 1) 159 return 1; 160 161 if (ST->isAtom()) 162 return 1; 163 164 // Sandybridge and Haswell have multiple execution ports and pipelined 165 // vector units. 166 if (ST->hasAVX()) 167 return 4; 168 169 return 2; 170 } 171 172 int X86TTIImpl::getArithmeticInstrCost( 173 unsigned Opcode, Type *Ty, 174 TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info, 175 TTI::OperandValueProperties Opd1PropInfo, 176 TTI::OperandValueProperties Opd2PropInfo, 177 ArrayRef<const Value *> Args) { 178 // Legalize the type. 179 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty); 180 181 int ISD = TLI->InstructionOpcodeToISD(Opcode); 182 assert(ISD && "Invalid opcode"); 183 184 static const CostTblEntry GLMCostTable[] = { 185 { ISD::FDIV, MVT::f32, 18 }, // divss 186 { ISD::FDIV, MVT::v4f32, 35 }, // divps 187 { ISD::FDIV, MVT::f64, 33 }, // divsd 188 { ISD::FDIV, MVT::v2f64, 65 }, // divpd 189 }; 190 191 if (ST->isGLM()) 192 if (const auto *Entry = CostTableLookup(GLMCostTable, ISD, 193 LT.second)) 194 return LT.first * Entry->Cost; 195 196 static const CostTblEntry SLMCostTable[] = { 197 { ISD::MUL, MVT::v4i32, 11 }, // pmulld 198 { ISD::MUL, MVT::v8i16, 2 }, // pmullw 199 { ISD::MUL, MVT::v16i8, 14 }, // extend/pmullw/trunc sequence. 
    { ISD::FMUL,  MVT::f64,     2 }, // mulsd
    { ISD::FMUL,  MVT::v2f64,   4 }, // mulpd
    { ISD::FMUL,  MVT::v4f32,   2 }, // mulps
    { ISD::FDIV,  MVT::f32,    17 }, // divss
    { ISD::FDIV,  MVT::v4f32,  39 }, // divps
    { ISD::FDIV,  MVT::f64,    32 }, // divsd
    { ISD::FDIV,  MVT::v2f64,  69 }, // divpd
    { ISD::FADD,  MVT::v2f64,   2 }, // addpd
    { ISD::FSUB,  MVT::v2f64,   2 }, // subpd
    // v2i64/v4i64 mul is custom lowered as a series of long
    // multiplies(3), shifts(3) and adds(2)
    // slm muldq version throughput is 2 and addq throughput 4
    // thus: 3X2 (muldq throughput) + 3X1 (shift throughput) +
    // 3X4 (addq throughput) = 17
    { ISD::MUL,   MVT::v2i64,  17 },
    // slm addq\subq throughput is 4
    { ISD::ADD,   MVT::v2i64,   4 },
    { ISD::SUB,   MVT::v2i64,   4 },
  };

  if (ST->isSLM()) {
    if (Args.size() == 2 && ISD == ISD::MUL && LT.second == MVT::v4i32) {
      // Check if the operands can be shrunk into a smaller datatype.
      bool Op1Signed = false;
      unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
      bool Op2Signed = false;
      unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);

      bool signedMode = Op1Signed | Op2Signed;
      unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);

      if (OpMinSize <= 7)
        return LT.first * 3; // pmullw/sext
      if (!signedMode && OpMinSize <= 8)
        return LT.first * 3; // pmullw/zext
      if (OpMinSize <= 15)
        return LT.first * 5; // pmullw/pmulhw/pshuf
      if (!signedMode && OpMinSize <= 16)
        return LT.first * 5; // pmullw/pmulhw/pshuf
    }

    if (const auto *Entry = CostTableLookup(SLMCostTable, ISD,
                                            LT.second)) {
      return LT.first * Entry->Cost;
    }
  }

  if (ISD == ISD::SDIV &&
      Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    // On X86, vector signed division by a power-of-two constant is
    // normally expanded to the sequence SRA + SRL + ADD + SRA.
    // The OperandValue properties may not be the same as those of the
    // previous operation; conservatively assume OP_None.
    int Cost = 2 * getArithmeticInstrCost(Instruction::AShr, Ty, Op1Info,
                                          Op2Info, TargetTransformInfo::OP_None,
                                          TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::LShr, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Add, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);

    return Cost;
  }

  static const CostTblEntry AVX512BWUniformConstCostTable[] = {
    { ISD::SHL,  MVT::v64i8,   2 }, // psllw + pand.
    { ISD::SRL,  MVT::v64i8,   2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v64i8,   4 }, // psrlw, pand, pxor, psubb.
271 272 { ISD::SDIV, MVT::v32i16, 6 }, // vpmulhw sequence 273 { ISD::UDIV, MVT::v32i16, 6 }, // vpmulhuw sequence 274 }; 275 276 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue && 277 ST->hasBWI()) { 278 if (const auto *Entry = CostTableLookup(AVX512BWUniformConstCostTable, ISD, 279 LT.second)) 280 return LT.first * Entry->Cost; 281 } 282 283 static const CostTblEntry AVX512UniformConstCostTable[] = { 284 { ISD::SRA, MVT::v2i64, 1 }, 285 { ISD::SRA, MVT::v4i64, 1 }, 286 { ISD::SRA, MVT::v8i64, 1 }, 287 288 { ISD::SDIV, MVT::v16i32, 15 }, // vpmuldq sequence 289 { ISD::UDIV, MVT::v16i32, 15 }, // vpmuludq sequence 290 }; 291 292 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue && 293 ST->hasAVX512()) { 294 if (const auto *Entry = CostTableLookup(AVX512UniformConstCostTable, ISD, 295 LT.second)) 296 return LT.first * Entry->Cost; 297 } 298 299 static const CostTblEntry AVX2UniformConstCostTable[] = { 300 { ISD::SHL, MVT::v32i8, 2 }, // psllw + pand. 301 { ISD::SRL, MVT::v32i8, 2 }, // psrlw + pand. 302 { ISD::SRA, MVT::v32i8, 4 }, // psrlw, pand, pxor, psubb. 303 304 { ISD::SRA, MVT::v4i64, 4 }, // 2 x psrad + shuffle. 305 306 { ISD::SDIV, MVT::v16i16, 6 }, // vpmulhw sequence 307 { ISD::UDIV, MVT::v16i16, 6 }, // vpmulhuw sequence 308 { ISD::SDIV, MVT::v8i32, 15 }, // vpmuldq sequence 309 { ISD::UDIV, MVT::v8i32, 15 }, // vpmuludq sequence 310 }; 311 312 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue && 313 ST->hasAVX2()) { 314 if (const auto *Entry = CostTableLookup(AVX2UniformConstCostTable, ISD, 315 LT.second)) 316 return LT.first * Entry->Cost; 317 } 318 319 static const CostTblEntry SSE2UniformConstCostTable[] = { 320 { ISD::SHL, MVT::v16i8, 2 }, // psllw + pand. 321 { ISD::SRL, MVT::v16i8, 2 }, // psrlw + pand. 322 { ISD::SRA, MVT::v16i8, 4 }, // psrlw, pand, pxor, psubb. 323 324 { ISD::SHL, MVT::v32i8, 4+2 }, // 2*(psllw + pand) + split. 325 { ISD::SRL, MVT::v32i8, 4+2 }, // 2*(psrlw + pand) + split. 326 { ISD::SRA, MVT::v32i8, 8+2 }, // 2*(psrlw, pand, pxor, psubb) + split. 327 328 { ISD::SDIV, MVT::v16i16, 12+2 }, // 2*pmulhw sequence + split. 329 { ISD::SDIV, MVT::v8i16, 6 }, // pmulhw sequence 330 { ISD::UDIV, MVT::v16i16, 12+2 }, // 2*pmulhuw sequence + split. 331 { ISD::UDIV, MVT::v8i16, 6 }, // pmulhuw sequence 332 { ISD::SDIV, MVT::v8i32, 38+2 }, // 2*pmuludq sequence + split. 333 { ISD::SDIV, MVT::v4i32, 19 }, // pmuludq sequence 334 { ISD::UDIV, MVT::v8i32, 30+2 }, // 2*pmuludq sequence + split. 335 { ISD::UDIV, MVT::v4i32, 15 }, // pmuludq sequence 336 }; 337 338 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue && 339 ST->hasSSE2()) { 340 // pmuldq sequence. 341 if (ISD == ISD::SDIV && LT.second == MVT::v8i32 && ST->hasAVX()) 342 return LT.first * 32; 343 if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41()) 344 return LT.first * 15; 345 346 // XOP has faster vXi8 shifts. 347 if ((ISD != ISD::SHL && ISD != ISD::SRL && ISD != ISD::SRA) || 348 !ST->hasXOP()) 349 if (const auto *Entry = 350 CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second)) 351 return LT.first * Entry->Cost; 352 } 353 354 static const CostTblEntry AVX2UniformCostTable[] = { 355 // Uniform splats are cheaper for the following instructions. 356 { ISD::SHL, MVT::v16i16, 1 }, // psllw. 357 { ISD::SRL, MVT::v16i16, 1 }, // psrlw. 358 { ISD::SRA, MVT::v16i16, 1 }, // psraw. 
359 }; 360 361 if (ST->hasAVX2() && 362 ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) || 363 (Op2Info == TargetTransformInfo::OK_UniformValue))) { 364 if (const auto *Entry = 365 CostTableLookup(AVX2UniformCostTable, ISD, LT.second)) 366 return LT.first * Entry->Cost; 367 } 368 369 static const CostTblEntry SSE2UniformCostTable[] = { 370 // Uniform splats are cheaper for the following instructions. 371 { ISD::SHL, MVT::v8i16, 1 }, // psllw. 372 { ISD::SHL, MVT::v4i32, 1 }, // pslld 373 { ISD::SHL, MVT::v2i64, 1 }, // psllq. 374 375 { ISD::SRL, MVT::v8i16, 1 }, // psrlw. 376 { ISD::SRL, MVT::v4i32, 1 }, // psrld. 377 { ISD::SRL, MVT::v2i64, 1 }, // psrlq. 378 379 { ISD::SRA, MVT::v8i16, 1 }, // psraw. 380 { ISD::SRA, MVT::v4i32, 1 }, // psrad. 381 }; 382 383 if (ST->hasSSE2() && 384 ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) || 385 (Op2Info == TargetTransformInfo::OK_UniformValue))) { 386 if (const auto *Entry = 387 CostTableLookup(SSE2UniformCostTable, ISD, LT.second)) 388 return LT.first * Entry->Cost; 389 } 390 391 static const CostTblEntry AVX512DQCostTable[] = { 392 { ISD::MUL, MVT::v2i64, 1 }, 393 { ISD::MUL, MVT::v4i64, 1 }, 394 { ISD::MUL, MVT::v8i64, 1 } 395 }; 396 397 // Look for AVX512DQ lowering tricks for custom cases. 398 if (ST->hasDQI()) 399 if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD, LT.second)) 400 return LT.first * Entry->Cost; 401 402 static const CostTblEntry AVX512BWCostTable[] = { 403 { ISD::SHL, MVT::v8i16, 1 }, // vpsllvw 404 { ISD::SRL, MVT::v8i16, 1 }, // vpsrlvw 405 { ISD::SRA, MVT::v8i16, 1 }, // vpsravw 406 407 { ISD::SHL, MVT::v16i16, 1 }, // vpsllvw 408 { ISD::SRL, MVT::v16i16, 1 }, // vpsrlvw 409 { ISD::SRA, MVT::v16i16, 1 }, // vpsravw 410 411 { ISD::SHL, MVT::v32i16, 1 }, // vpsllvw 412 { ISD::SRL, MVT::v32i16, 1 }, // vpsrlvw 413 { ISD::SRA, MVT::v32i16, 1 }, // vpsravw 414 415 { ISD::SHL, MVT::v64i8, 11 }, // vpblendvb sequence. 416 { ISD::SRL, MVT::v64i8, 11 }, // vpblendvb sequence. 417 { ISD::SRA, MVT::v64i8, 24 }, // vpblendvb sequence. 418 419 { ISD::MUL, MVT::v64i8, 11 }, // extend/pmullw/trunc sequence. 420 { ISD::MUL, MVT::v32i8, 4 }, // extend/pmullw/trunc sequence. 421 { ISD::MUL, MVT::v16i8, 4 }, // extend/pmullw/trunc sequence. 422 423 // Vectorizing division is a bad idea. See the SSE2 table for more comments. 424 { ISD::SDIV, MVT::v64i8, 64*20 }, 425 { ISD::SDIV, MVT::v32i16, 32*20 }, 426 { ISD::UDIV, MVT::v64i8, 64*20 }, 427 { ISD::UDIV, MVT::v32i16, 32*20 } 428 }; 429 430 // Look for AVX512BW lowering tricks for custom cases. 431 if (ST->hasBWI()) 432 if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD, LT.second)) 433 return LT.first * Entry->Cost; 434 435 static const CostTblEntry AVX512CostTable[] = { 436 { ISD::SHL, MVT::v16i32, 1 }, 437 { ISD::SRL, MVT::v16i32, 1 }, 438 { ISD::SRA, MVT::v16i32, 1 }, 439 440 { ISD::SHL, MVT::v8i64, 1 }, 441 { ISD::SRL, MVT::v8i64, 1 }, 442 443 { ISD::SRA, MVT::v2i64, 1 }, 444 { ISD::SRA, MVT::v4i64, 1 }, 445 { ISD::SRA, MVT::v8i64, 1 }, 446 447 { ISD::MUL, MVT::v32i8, 13 }, // extend/pmullw/trunc sequence. 448 { ISD::MUL, MVT::v16i8, 5 }, // extend/pmullw/trunc sequence. 
    { ISD::MUL,   MVT::v16i32,    1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,   MVT::v8i32,     1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,   MVT::v4i32,     1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,   MVT::v8i64,     8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FADD,  MVT::v8f64,     1 }, // Skylake from http://www.agner.org/
    { ISD::FSUB,  MVT::v8f64,     1 }, // Skylake from http://www.agner.org/
    { ISD::FMUL,  MVT::v8f64,     1 }, // Skylake from http://www.agner.org/

    { ISD::FADD,  MVT::v16f32,    1 }, // Skylake from http://www.agner.org/
    { ISD::FSUB,  MVT::v16f32,    1 }, // Skylake from http://www.agner.org/
    { ISD::FMUL,  MVT::v16f32,    1 }, // Skylake from http://www.agner.org/

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV,  MVT::v16i32, 16*20 },
    { ISD::SDIV,  MVT::v8i64,   8*20 },
    { ISD::UDIV,  MVT::v16i32, 16*20 },
    { ISD::UDIV,  MVT::v8i64,   8*20 }
  };

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX2ShiftCostTable[] = {
    // Shifts on v4i64/v8i32 are legal on AVX2, even though we mark them as
    // custom so we can detect the cases where the shift amount is a scalar.
    { ISD::SHL,  MVT::v4i32,  1 },
    { ISD::SRL,  MVT::v4i32,  1 },
    { ISD::SRA,  MVT::v4i32,  1 },
    { ISD::SHL,  MVT::v8i32,  1 },
    { ISD::SRL,  MVT::v8i32,  1 },
    { ISD::SRA,  MVT::v8i32,  1 },
    { ISD::SHL,  MVT::v2i64,  1 },
    { ISD::SRL,  MVT::v2i64,  1 },
    { ISD::SHL,  MVT::v4i64,  1 },
    { ISD::SRL,  MVT::v4i64,  1 },
  };

  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return LT.first;

    if (const auto *Entry = CostTableLookup(AVX2ShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry XOPShiftCostTable[] = {
    // 128bit shifts take 1cy, but right shifts require negation beforehand.
    { ISD::SHL,  MVT::v16i8,    1 },
    { ISD::SRL,  MVT::v16i8,    2 },
    { ISD::SRA,  MVT::v16i8,    2 },
    { ISD::SHL,  MVT::v8i16,    1 },
    { ISD::SRL,  MVT::v8i16,    2 },
    { ISD::SRA,  MVT::v8i16,    2 },
    { ISD::SHL,  MVT::v4i32,    1 },
    { ISD::SRL,  MVT::v4i32,    2 },
    { ISD::SRA,  MVT::v4i32,    2 },
    { ISD::SHL,  MVT::v2i64,    1 },
    { ISD::SRL,  MVT::v2i64,    2 },
    { ISD::SRA,  MVT::v2i64,    2 },
    // 256bit shifts require splitting if AVX2 didn't catch them above.
    { ISD::SHL,  MVT::v32i8,  2+2 },
    { ISD::SRL,  MVT::v32i8,  4+2 },
    { ISD::SRA,  MVT::v32i8,  4+2 },
    { ISD::SHL,  MVT::v16i16, 2+2 },
    { ISD::SRL,  MVT::v16i16, 4+2 },
    { ISD::SRA,  MVT::v16i16, 4+2 },
    { ISD::SHL,  MVT::v8i32,  2+2 },
    { ISD::SRL,  MVT::v8i32,  4+2 },
    { ISD::SRA,  MVT::v8i32,  4+2 },
    { ISD::SHL,  MVT::v4i64,  2+2 },
    { ISD::SRL,  MVT::v4i64,  4+2 },
    { ISD::SRA,  MVT::v4i64,  4+2 },
  };

  // Look for XOP lowering tricks.
  if (ST->hasXOP())
    if (const auto *Entry = CostTableLookup(XOPShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE2UniformShiftCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
537 { ISD::SHL, MVT::v16i16, 2+2 }, // 2*psllw + split. 538 { ISD::SHL, MVT::v8i32, 2+2 }, // 2*pslld + split. 539 { ISD::SHL, MVT::v4i64, 2+2 }, // 2*psllq + split. 540 541 { ISD::SRL, MVT::v16i16, 2+2 }, // 2*psrlw + split. 542 { ISD::SRL, MVT::v8i32, 2+2 }, // 2*psrld + split. 543 { ISD::SRL, MVT::v4i64, 2+2 }, // 2*psrlq + split. 544 545 { ISD::SRA, MVT::v16i16, 2+2 }, // 2*psraw + split. 546 { ISD::SRA, MVT::v8i32, 2+2 }, // 2*psrad + split. 547 { ISD::SRA, MVT::v2i64, 4 }, // 2*psrad + shuffle. 548 { ISD::SRA, MVT::v4i64, 8+2 }, // 2*(2*psrad + shuffle) + split. 549 }; 550 551 if (ST->hasSSE2() && 552 ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) || 553 (Op2Info == TargetTransformInfo::OK_UniformValue))) { 554 555 // Handle AVX2 uniform v4i64 ISD::SRA, it's not worth a table. 556 if (ISD == ISD::SRA && LT.second == MVT::v4i64 && ST->hasAVX2()) 557 return LT.first * 4; // 2*psrad + shuffle. 558 559 if (const auto *Entry = 560 CostTableLookup(SSE2UniformShiftCostTable, ISD, LT.second)) 561 return LT.first * Entry->Cost; 562 } 563 564 if (ISD == ISD::SHL && 565 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) { 566 MVT VT = LT.second; 567 // Vector shift left by non uniform constant can be lowered 568 // into vector multiply. 569 if (((VT == MVT::v8i16 || VT == MVT::v4i32) && ST->hasSSE2()) || 570 ((VT == MVT::v16i16 || VT == MVT::v8i32) && ST->hasAVX())) 571 ISD = ISD::MUL; 572 } 573 574 static const CostTblEntry AVX2CostTable[] = { 575 { ISD::SHL, MVT::v32i8, 11 }, // vpblendvb sequence. 576 { ISD::SHL, MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence. 577 578 { ISD::SRL, MVT::v32i8, 11 }, // vpblendvb sequence. 579 { ISD::SRL, MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence. 580 581 { ISD::SRA, MVT::v32i8, 24 }, // vpblendvb sequence. 582 { ISD::SRA, MVT::v16i16, 10 }, // extend/vpsravd/pack sequence. 583 { ISD::SRA, MVT::v2i64, 4 }, // srl/xor/sub sequence. 584 { ISD::SRA, MVT::v4i64, 4 }, // srl/xor/sub sequence. 585 586 { ISD::SUB, MVT::v32i8, 1 }, // psubb 587 { ISD::ADD, MVT::v32i8, 1 }, // paddb 588 { ISD::SUB, MVT::v16i16, 1 }, // psubw 589 { ISD::ADD, MVT::v16i16, 1 }, // paddw 590 { ISD::SUB, MVT::v8i32, 1 }, // psubd 591 { ISD::ADD, MVT::v8i32, 1 }, // paddd 592 { ISD::SUB, MVT::v4i64, 1 }, // psubq 593 { ISD::ADD, MVT::v4i64, 1 }, // paddq 594 595 { ISD::MUL, MVT::v32i8, 17 }, // extend/pmullw/trunc sequence. 596 { ISD::MUL, MVT::v16i8, 7 }, // extend/pmullw/trunc sequence. 
597 { ISD::MUL, MVT::v16i16, 1 }, // pmullw 598 { ISD::MUL, MVT::v8i32, 2 }, // pmulld (Haswell from agner.org) 599 { ISD::MUL, MVT::v4i64, 8 }, // 3*pmuludq/3*shift/2*add 600 601 { ISD::FADD, MVT::v4f64, 1 }, // Haswell from http://www.agner.org/ 602 { ISD::FADD, MVT::v8f32, 1 }, // Haswell from http://www.agner.org/ 603 { ISD::FSUB, MVT::v4f64, 1 }, // Haswell from http://www.agner.org/ 604 { ISD::FSUB, MVT::v8f32, 1 }, // Haswell from http://www.agner.org/ 605 { ISD::FMUL, MVT::v4f64, 1 }, // Haswell from http://www.agner.org/ 606 { ISD::FMUL, MVT::v8f32, 1 }, // Haswell from http://www.agner.org/ 607 608 { ISD::FDIV, MVT::f32, 7 }, // Haswell from http://www.agner.org/ 609 { ISD::FDIV, MVT::v4f32, 7 }, // Haswell from http://www.agner.org/ 610 { ISD::FDIV, MVT::v8f32, 14 }, // Haswell from http://www.agner.org/ 611 { ISD::FDIV, MVT::f64, 14 }, // Haswell from http://www.agner.org/ 612 { ISD::FDIV, MVT::v2f64, 14 }, // Haswell from http://www.agner.org/ 613 { ISD::FDIV, MVT::v4f64, 28 }, // Haswell from http://www.agner.org/ 614 }; 615 616 // Look for AVX2 lowering tricks for custom cases. 617 if (ST->hasAVX2()) 618 if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second)) 619 return LT.first * Entry->Cost; 620 621 static const CostTblEntry AVX1CostTable[] = { 622 // We don't have to scalarize unsupported ops. We can issue two half-sized 623 // operations and we only need to extract the upper YMM half. 624 // Two ops + 1 extract + 1 insert = 4. 625 { ISD::MUL, MVT::v16i16, 4 }, 626 { ISD::MUL, MVT::v8i32, 4 }, 627 { ISD::SUB, MVT::v32i8, 4 }, 628 { ISD::ADD, MVT::v32i8, 4 }, 629 { ISD::SUB, MVT::v16i16, 4 }, 630 { ISD::ADD, MVT::v16i16, 4 }, 631 { ISD::SUB, MVT::v8i32, 4 }, 632 { ISD::ADD, MVT::v8i32, 4 }, 633 { ISD::SUB, MVT::v4i64, 4 }, 634 { ISD::ADD, MVT::v4i64, 4 }, 635 636 // A v4i64 multiply is custom lowered as two split v2i64 vectors that then 637 // are lowered as a series of long multiplies(3), shifts(3) and adds(2) 638 // Because we believe v4i64 to be a legal type, we must also include the 639 // extract+insert in the cost table. Therefore, the cost here is 18 640 // instead of 8. 641 { ISD::MUL, MVT::v4i64, 18 }, 642 643 { ISD::MUL, MVT::v32i8, 26 }, // extend/pmullw/trunc sequence. 644 645 { ISD::FDIV, MVT::f32, 14 }, // SNB from http://www.agner.org/ 646 { ISD::FDIV, MVT::v4f32, 14 }, // SNB from http://www.agner.org/ 647 { ISD::FDIV, MVT::v8f32, 28 }, // SNB from http://www.agner.org/ 648 { ISD::FDIV, MVT::f64, 22 }, // SNB from http://www.agner.org/ 649 { ISD::FDIV, MVT::v2f64, 22 }, // SNB from http://www.agner.org/ 650 { ISD::FDIV, MVT::v4f64, 44 }, // SNB from http://www.agner.org/ 651 652 // Vectorizing division is a bad idea. See the SSE2 table for more comments. 
653 { ISD::SDIV, MVT::v32i8, 32*20 }, 654 { ISD::SDIV, MVT::v16i16, 16*20 }, 655 { ISD::SDIV, MVT::v8i32, 8*20 }, 656 { ISD::SDIV, MVT::v4i64, 4*20 }, 657 { ISD::UDIV, MVT::v32i8, 32*20 }, 658 { ISD::UDIV, MVT::v16i16, 16*20 }, 659 { ISD::UDIV, MVT::v8i32, 8*20 }, 660 { ISD::UDIV, MVT::v4i64, 4*20 }, 661 }; 662 663 if (ST->hasAVX()) 664 if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, LT.second)) 665 return LT.first * Entry->Cost; 666 667 static const CostTblEntry SSE42CostTable[] = { 668 { ISD::FADD, MVT::f64, 1 }, // Nehalem from http://www.agner.org/ 669 { ISD::FADD, MVT::f32, 1 }, // Nehalem from http://www.agner.org/ 670 { ISD::FADD, MVT::v2f64, 1 }, // Nehalem from http://www.agner.org/ 671 { ISD::FADD, MVT::v4f32, 1 }, // Nehalem from http://www.agner.org/ 672 673 { ISD::FSUB, MVT::f64, 1 }, // Nehalem from http://www.agner.org/ 674 { ISD::FSUB, MVT::f32 , 1 }, // Nehalem from http://www.agner.org/ 675 { ISD::FSUB, MVT::v2f64, 1 }, // Nehalem from http://www.agner.org/ 676 { ISD::FSUB, MVT::v4f32, 1 }, // Nehalem from http://www.agner.org/ 677 678 { ISD::FMUL, MVT::f64, 1 }, // Nehalem from http://www.agner.org/ 679 { ISD::FMUL, MVT::f32, 1 }, // Nehalem from http://www.agner.org/ 680 { ISD::FMUL, MVT::v2f64, 1 }, // Nehalem from http://www.agner.org/ 681 { ISD::FMUL, MVT::v4f32, 1 }, // Nehalem from http://www.agner.org/ 682 683 { ISD::FDIV, MVT::f32, 14 }, // Nehalem from http://www.agner.org/ 684 { ISD::FDIV, MVT::v4f32, 14 }, // Nehalem from http://www.agner.org/ 685 { ISD::FDIV, MVT::f64, 22 }, // Nehalem from http://www.agner.org/ 686 { ISD::FDIV, MVT::v2f64, 22 }, // Nehalem from http://www.agner.org/ 687 }; 688 689 if (ST->hasSSE42()) 690 if (const auto *Entry = CostTableLookup(SSE42CostTable, ISD, LT.second)) 691 return LT.first * Entry->Cost; 692 693 static const CostTblEntry SSE41CostTable[] = { 694 { ISD::SHL, MVT::v16i8, 11 }, // pblendvb sequence. 695 { ISD::SHL, MVT::v32i8, 2*11+2 }, // pblendvb sequence + split. 696 { ISD::SHL, MVT::v8i16, 14 }, // pblendvb sequence. 697 { ISD::SHL, MVT::v16i16, 2*14+2 }, // pblendvb sequence + split. 698 { ISD::SHL, MVT::v4i32, 4 }, // pslld/paddd/cvttps2dq/pmulld 699 { ISD::SHL, MVT::v8i32, 2*4+2 }, // pslld/paddd/cvttps2dq/pmulld + split 700 701 { ISD::SRL, MVT::v16i8, 12 }, // pblendvb sequence. 702 { ISD::SRL, MVT::v32i8, 2*12+2 }, // pblendvb sequence + split. 703 { ISD::SRL, MVT::v8i16, 14 }, // pblendvb sequence. 704 { ISD::SRL, MVT::v16i16, 2*14+2 }, // pblendvb sequence + split. 705 { ISD::SRL, MVT::v4i32, 11 }, // Shift each lane + blend. 706 { ISD::SRL, MVT::v8i32, 2*11+2 }, // Shift each lane + blend + split. 707 708 { ISD::SRA, MVT::v16i8, 24 }, // pblendvb sequence. 709 { ISD::SRA, MVT::v32i8, 2*24+2 }, // pblendvb sequence + split. 710 { ISD::SRA, MVT::v8i16, 14 }, // pblendvb sequence. 711 { ISD::SRA, MVT::v16i16, 2*14+2 }, // pblendvb sequence + split. 712 { ISD::SRA, MVT::v4i32, 12 }, // Shift each lane + blend. 713 { ISD::SRA, MVT::v8i32, 2*12+2 }, // Shift each lane + blend + split. 714 715 { ISD::MUL, MVT::v4i32, 2 } // pmulld (Nehalem from agner.org) 716 }; 717 718 if (ST->hasSSE41()) 719 if (const auto *Entry = CostTableLookup(SSE41CostTable, ISD, LT.second)) 720 return LT.first * Entry->Cost; 721 722 static const CostTblEntry SSE2CostTable[] = { 723 // We don't correctly identify costs of casts because they are marked as 724 // custom. 725 { ISD::SHL, MVT::v16i8, 26 }, // cmpgtb sequence. 726 { ISD::SHL, MVT::v8i16, 32 }, // cmpgtb sequence. 
    { ISD::SHL,  MVT::v4i32,   2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v2i64,     4 }, // splat+shuffle sequence.
    { ISD::SHL,  MVT::v4i64, 2*4+2 }, // splat+shuffle sequence + split.

    { ISD::SRL,  MVT::v16i8,    26 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v8i16,    32 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v4i32,    16 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v2i64,     4 }, // splat+shuffle sequence.
    { ISD::SRL,  MVT::v4i64, 2*4+2 }, // splat+shuffle sequence + split.

    { ISD::SRA,  MVT::v16i8,    54 }, // unpacked cmpgtb sequence.
    { ISD::SRA,  MVT::v8i16,    32 }, // cmpgtb sequence.
    { ISD::SRA,  MVT::v4i32,    16 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v2i64,    12 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64, 2*12+2 }, // srl/xor/sub sequence+split.

    { ISD::MUL,  MVT::v16i8,    12 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v8i16,     1 }, // pmullw
    { ISD::MUL,  MVT::v4i32,     6 }, // 3*pmuludq/4*shuffle
    { ISD::MUL,  MVT::v2i64,     8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FDIV, MVT::f32,      23 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,    39 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::f64,      38 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,    69 }, // Pentium IV from http://www.agner.org/

    // It is not a good idea to vectorize division. We have to scalarize it and
    // in the process we will often end up having to spill regular registers.
    // The overhead of division is going to dominate most kernels anyway, so
    // try hard to prevent vectorization of division - it is generally a bad
    // idea. Assume somewhat arbitrarily that we have to be able to hide
    // "20 cycles" for each lane.
    { ISD::SDIV, MVT::v16i8,  16*20 },
    { ISD::SDIV, MVT::v8i16,   8*20 },
    { ISD::SDIV, MVT::v4i32,   4*20 },
    { ISD::SDIV, MVT::v2i64,   2*20 },
    { ISD::UDIV, MVT::v16i8,  16*20 },
    { ISD::UDIV, MVT::v8i16,   8*20 },
    { ISD::UDIV, MVT::v4i32,   4*20 },
    { ISD::UDIV, MVT::v2i64,   2*20 },
  };

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE1CostTable[] = {
    { ISD::FDIV, MVT::f32,   17 }, // Pentium III from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32, 34 }, // Pentium III from http://www.agner.org/
  };

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info);
}

int X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // 64-bit packed float vectors (v2f32) are widened to type v4f32.
  // 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

  // For Broadcasts we are splatting the first element from the first input
  // register, so we only need to reference that input and all the output
  // registers are the same.
  if (Kind == TTI::SK_Broadcast)
    LT.first = 1;

  // We are going to permute multiple sources and the result will be in
  // multiple destinations. We only provide an accurate cost for splits where
  // the element type remains the same.
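  // Illustrative walk-through (not part of the cost tables; the numbers
  // assume a plain AVX2 subtarget without AVX-512BW or XOP): a single-source
  // permute of <32 x i16> legalizes to two v16i16 registers, so LT.first == 2
  // and LegalVT == v16i16. The element type is unchanged and the legal type
  // holds fewer elements, so the code below computes NumOfSrcs = 2,
  // NumOfDests = 2, NumOfShuffles = (2 - 1) * 2 = 2, and then charges
  // 2 * getShuffleCost(SK_PermuteTwoSrc, v16i16) = 2 * 7 using the AVX2
  // shuffle table further down.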
801 if (Kind == TTI::SK_PermuteSingleSrc && LT.first != 1) { 802 MVT LegalVT = LT.second; 803 if (LegalVT.isVector() && 804 LegalVT.getVectorElementType().getSizeInBits() == 805 Tp->getVectorElementType()->getPrimitiveSizeInBits() && 806 LegalVT.getVectorNumElements() < Tp->getVectorNumElements()) { 807 808 unsigned VecTySize = DL.getTypeStoreSize(Tp); 809 unsigned LegalVTSize = LegalVT.getStoreSize(); 810 // Number of source vectors after legalization: 811 unsigned NumOfSrcs = (VecTySize + LegalVTSize - 1) / LegalVTSize; 812 // Number of destination vectors after legalization: 813 unsigned NumOfDests = LT.first; 814 815 Type *SingleOpTy = VectorType::get(Tp->getVectorElementType(), 816 LegalVT.getVectorNumElements()); 817 818 unsigned NumOfShuffles = (NumOfSrcs - 1) * NumOfDests; 819 return NumOfShuffles * 820 getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy, 0, nullptr); 821 } 822 823 return BaseT::getShuffleCost(Kind, Tp, Index, SubTp); 824 } 825 826 // For 2-input shuffles, we must account for splitting the 2 inputs into many. 827 if (Kind == TTI::SK_PermuteTwoSrc && LT.first != 1) { 828 // We assume that source and destination have the same vector type. 829 int NumOfDests = LT.first; 830 int NumOfShufflesPerDest = LT.first * 2 - 1; 831 LT.first = NumOfDests * NumOfShufflesPerDest; 832 } 833 834 static const CostTblEntry AVX512VBMIShuffleTbl[] = { 835 { TTI::SK_Reverse, MVT::v64i8, 1 }, // vpermb 836 { TTI::SK_Reverse, MVT::v32i8, 1 }, // vpermb 837 838 { TTI::SK_PermuteSingleSrc, MVT::v64i8, 1 }, // vpermb 839 { TTI::SK_PermuteSingleSrc, MVT::v32i8, 1 }, // vpermb 840 841 { TTI::SK_PermuteTwoSrc, MVT::v64i8, 1 }, // vpermt2b 842 { TTI::SK_PermuteTwoSrc, MVT::v32i8, 1 }, // vpermt2b 843 { TTI::SK_PermuteTwoSrc, MVT::v16i8, 1 } // vpermt2b 844 }; 845 846 if (ST->hasVBMI()) 847 if (const auto *Entry = 848 CostTableLookup(AVX512VBMIShuffleTbl, Kind, LT.second)) 849 return LT.first * Entry->Cost; 850 851 static const CostTblEntry AVX512BWShuffleTbl[] = { 852 { TTI::SK_Broadcast, MVT::v32i16, 1 }, // vpbroadcastw 853 { TTI::SK_Broadcast, MVT::v64i8, 1 }, // vpbroadcastb 854 855 { TTI::SK_Reverse, MVT::v32i16, 1 }, // vpermw 856 { TTI::SK_Reverse, MVT::v16i16, 1 }, // vpermw 857 { TTI::SK_Reverse, MVT::v64i8, 2 }, // pshufb + vshufi64x2 858 859 { TTI::SK_PermuteSingleSrc, MVT::v32i16, 1 }, // vpermw 860 { TTI::SK_PermuteSingleSrc, MVT::v16i16, 1 }, // vpermw 861 { TTI::SK_PermuteSingleSrc, MVT::v8i16, 1 }, // vpermw 862 { TTI::SK_PermuteSingleSrc, MVT::v64i8, 8 }, // extend to v32i16 863 { TTI::SK_PermuteSingleSrc, MVT::v32i8, 3 }, // vpermw + zext/trunc 864 865 { TTI::SK_PermuteTwoSrc, MVT::v32i16, 1 }, // vpermt2w 866 { TTI::SK_PermuteTwoSrc, MVT::v16i16, 1 }, // vpermt2w 867 { TTI::SK_PermuteTwoSrc, MVT::v8i16, 1 }, // vpermt2w 868 { TTI::SK_PermuteTwoSrc, MVT::v32i8, 3 }, // zext + vpermt2w + trunc 869 { TTI::SK_PermuteTwoSrc, MVT::v64i8, 19 }, // 6 * v32i8 + 1 870 { TTI::SK_PermuteTwoSrc, MVT::v16i8, 3 } // zext + vpermt2w + trunc 871 }; 872 873 if (ST->hasBWI()) 874 if (const auto *Entry = 875 CostTableLookup(AVX512BWShuffleTbl, Kind, LT.second)) 876 return LT.first * Entry->Cost; 877 878 static const CostTblEntry AVX512ShuffleTbl[] = { 879 { TTI::SK_Broadcast, MVT::v8f64, 1 }, // vbroadcastpd 880 { TTI::SK_Broadcast, MVT::v16f32, 1 }, // vbroadcastps 881 { TTI::SK_Broadcast, MVT::v8i64, 1 }, // vpbroadcastq 882 { TTI::SK_Broadcast, MVT::v16i32, 1 }, // vpbroadcastd 883 884 { TTI::SK_Reverse, MVT::v8f64, 1 }, // vpermpd 885 { TTI::SK_Reverse, MVT::v16f32, 1 }, // vpermps 886 { 
TTI::SK_Reverse, MVT::v8i64, 1 }, // vpermq 887 { TTI::SK_Reverse, MVT::v16i32, 1 }, // vpermd 888 889 { TTI::SK_PermuteSingleSrc, MVT::v8f64, 1 }, // vpermpd 890 { TTI::SK_PermuteSingleSrc, MVT::v4f64, 1 }, // vpermpd 891 { TTI::SK_PermuteSingleSrc, MVT::v2f64, 1 }, // vpermpd 892 { TTI::SK_PermuteSingleSrc, MVT::v16f32, 1 }, // vpermps 893 { TTI::SK_PermuteSingleSrc, MVT::v8f32, 1 }, // vpermps 894 { TTI::SK_PermuteSingleSrc, MVT::v4f32, 1 }, // vpermps 895 { TTI::SK_PermuteSingleSrc, MVT::v8i64, 1 }, // vpermq 896 { TTI::SK_PermuteSingleSrc, MVT::v4i64, 1 }, // vpermq 897 { TTI::SK_PermuteSingleSrc, MVT::v2i64, 1 }, // vpermq 898 { TTI::SK_PermuteSingleSrc, MVT::v16i32, 1 }, // vpermd 899 { TTI::SK_PermuteSingleSrc, MVT::v8i32, 1 }, // vpermd 900 { TTI::SK_PermuteSingleSrc, MVT::v4i32, 1 }, // vpermd 901 { TTI::SK_PermuteSingleSrc, MVT::v16i8, 1 }, // pshufb 902 903 { TTI::SK_PermuteTwoSrc, MVT::v8f64, 1 }, // vpermt2pd 904 { TTI::SK_PermuteTwoSrc, MVT::v16f32, 1 }, // vpermt2ps 905 { TTI::SK_PermuteTwoSrc, MVT::v8i64, 1 }, // vpermt2q 906 { TTI::SK_PermuteTwoSrc, MVT::v16i32, 1 }, // vpermt2d 907 { TTI::SK_PermuteTwoSrc, MVT::v4f64, 1 }, // vpermt2pd 908 { TTI::SK_PermuteTwoSrc, MVT::v8f32, 1 }, // vpermt2ps 909 { TTI::SK_PermuteTwoSrc, MVT::v4i64, 1 }, // vpermt2q 910 { TTI::SK_PermuteTwoSrc, MVT::v8i32, 1 }, // vpermt2d 911 { TTI::SK_PermuteTwoSrc, MVT::v2f64, 1 }, // vpermt2pd 912 { TTI::SK_PermuteTwoSrc, MVT::v4f32, 1 }, // vpermt2ps 913 { TTI::SK_PermuteTwoSrc, MVT::v2i64, 1 }, // vpermt2q 914 { TTI::SK_PermuteTwoSrc, MVT::v4i32, 1 } // vpermt2d 915 }; 916 917 if (ST->hasAVX512()) 918 if (const auto *Entry = CostTableLookup(AVX512ShuffleTbl, Kind, LT.second)) 919 return LT.first * Entry->Cost; 920 921 static const CostTblEntry AVX2ShuffleTbl[] = { 922 { TTI::SK_Broadcast, MVT::v4f64, 1 }, // vbroadcastpd 923 { TTI::SK_Broadcast, MVT::v8f32, 1 }, // vbroadcastps 924 { TTI::SK_Broadcast, MVT::v4i64, 1 }, // vpbroadcastq 925 { TTI::SK_Broadcast, MVT::v8i32, 1 }, // vpbroadcastd 926 { TTI::SK_Broadcast, MVT::v16i16, 1 }, // vpbroadcastw 927 { TTI::SK_Broadcast, MVT::v32i8, 1 }, // vpbroadcastb 928 929 { TTI::SK_Reverse, MVT::v4f64, 1 }, // vpermpd 930 { TTI::SK_Reverse, MVT::v8f32, 1 }, // vpermps 931 { TTI::SK_Reverse, MVT::v4i64, 1 }, // vpermq 932 { TTI::SK_Reverse, MVT::v8i32, 1 }, // vpermd 933 { TTI::SK_Reverse, MVT::v16i16, 2 }, // vperm2i128 + pshufb 934 { TTI::SK_Reverse, MVT::v32i8, 2 }, // vperm2i128 + pshufb 935 936 { TTI::SK_Alternate, MVT::v16i16, 1 }, // vpblendw 937 { TTI::SK_Alternate, MVT::v32i8, 1 }, // vpblendvb 938 939 { TTI::SK_PermuteSingleSrc, MVT::v4f64, 1 }, // vpermpd 940 { TTI::SK_PermuteSingleSrc, MVT::v8f32, 1 }, // vpermps 941 { TTI::SK_PermuteSingleSrc, MVT::v4i64, 1 }, // vpermq 942 { TTI::SK_PermuteSingleSrc, MVT::v8i32, 1 }, // vpermd 943 { TTI::SK_PermuteSingleSrc, MVT::v16i16, 4 }, // vperm2i128 + 2*vpshufb 944 // + vpblendvb 945 { TTI::SK_PermuteSingleSrc, MVT::v32i8, 4 }, // vperm2i128 + 2*vpshufb 946 // + vpblendvb 947 948 { TTI::SK_PermuteTwoSrc, MVT::v4f64, 3 }, // 2*vpermpd + vblendpd 949 { TTI::SK_PermuteTwoSrc, MVT::v8f32, 3 }, // 2*vpermps + vblendps 950 { TTI::SK_PermuteTwoSrc, MVT::v4i64, 3 }, // 2*vpermq + vpblendd 951 { TTI::SK_PermuteTwoSrc, MVT::v8i32, 3 }, // 2*vpermd + vpblendd 952 { TTI::SK_PermuteTwoSrc, MVT::v16i16, 7 }, // 2*vperm2i128 + 4*vpshufb 953 // + vpblendvb 954 { TTI::SK_PermuteTwoSrc, MVT::v32i8, 7 }, // 2*vperm2i128 + 4*vpshufb 955 // + vpblendvb 956 }; 957 958 if (ST->hasAVX2()) 959 if (const auto *Entry = 
CostTableLookup(AVX2ShuffleTbl, Kind, LT.second)) 960 return LT.first * Entry->Cost; 961 962 static const CostTblEntry XOPShuffleTbl[] = { 963 { TTI::SK_PermuteSingleSrc, MVT::v4f64, 2 }, // vperm2f128 + vpermil2pd 964 { TTI::SK_PermuteSingleSrc, MVT::v8f32, 2 }, // vperm2f128 + vpermil2ps 965 { TTI::SK_PermuteSingleSrc, MVT::v4i64, 2 }, // vperm2f128 + vpermil2pd 966 { TTI::SK_PermuteSingleSrc, MVT::v8i32, 2 }, // vperm2f128 + vpermil2ps 967 { TTI::SK_PermuteSingleSrc, MVT::v16i16, 4 }, // vextractf128 + 2*vpperm 968 // + vinsertf128 969 { TTI::SK_PermuteSingleSrc, MVT::v32i8, 4 }, // vextractf128 + 2*vpperm 970 // + vinsertf128 971 972 { TTI::SK_PermuteTwoSrc, MVT::v16i16, 9 }, // 2*vextractf128 + 6*vpperm 973 // + vinsertf128 974 { TTI::SK_PermuteTwoSrc, MVT::v8i16, 1 }, // vpperm 975 { TTI::SK_PermuteTwoSrc, MVT::v32i8, 9 }, // 2*vextractf128 + 6*vpperm 976 // + vinsertf128 977 { TTI::SK_PermuteTwoSrc, MVT::v16i8, 1 }, // vpperm 978 }; 979 980 if (ST->hasXOP()) 981 if (const auto *Entry = CostTableLookup(XOPShuffleTbl, Kind, LT.second)) 982 return LT.first * Entry->Cost; 983 984 static const CostTblEntry AVX1ShuffleTbl[] = { 985 { TTI::SK_Broadcast, MVT::v4f64, 2 }, // vperm2f128 + vpermilpd 986 { TTI::SK_Broadcast, MVT::v8f32, 2 }, // vperm2f128 + vpermilps 987 { TTI::SK_Broadcast, MVT::v4i64, 2 }, // vperm2f128 + vpermilpd 988 { TTI::SK_Broadcast, MVT::v8i32, 2 }, // vperm2f128 + vpermilps 989 { TTI::SK_Broadcast, MVT::v16i16, 3 }, // vpshuflw + vpshufd + vinsertf128 990 { TTI::SK_Broadcast, MVT::v32i8, 2 }, // vpshufb + vinsertf128 991 992 { TTI::SK_Reverse, MVT::v4f64, 2 }, // vperm2f128 + vpermilpd 993 { TTI::SK_Reverse, MVT::v8f32, 2 }, // vperm2f128 + vpermilps 994 { TTI::SK_Reverse, MVT::v4i64, 2 }, // vperm2f128 + vpermilpd 995 { TTI::SK_Reverse, MVT::v8i32, 2 }, // vperm2f128 + vpermilps 996 { TTI::SK_Reverse, MVT::v16i16, 4 }, // vextractf128 + 2*pshufb 997 // + vinsertf128 998 { TTI::SK_Reverse, MVT::v32i8, 4 }, // vextractf128 + 2*pshufb 999 // + vinsertf128 1000 1001 { TTI::SK_Alternate, MVT::v4i64, 1 }, // vblendpd 1002 { TTI::SK_Alternate, MVT::v4f64, 1 }, // vblendpd 1003 { TTI::SK_Alternate, MVT::v8i32, 1 }, // vblendps 1004 { TTI::SK_Alternate, MVT::v8f32, 1 }, // vblendps 1005 { TTI::SK_Alternate, MVT::v16i16, 3 }, // vpand + vpandn + vpor 1006 { TTI::SK_Alternate, MVT::v32i8, 3 }, // vpand + vpandn + vpor 1007 1008 { TTI::SK_PermuteSingleSrc, MVT::v4f64, 3 }, // 2*vperm2f128 + vshufpd 1009 { TTI::SK_PermuteSingleSrc, MVT::v4i64, 3 }, // 2*vperm2f128 + vshufpd 1010 { TTI::SK_PermuteSingleSrc, MVT::v8f32, 4 }, // 2*vperm2f128 + 2*vshufps 1011 { TTI::SK_PermuteSingleSrc, MVT::v8i32, 4 }, // 2*vperm2f128 + 2*vshufps 1012 { TTI::SK_PermuteSingleSrc, MVT::v16i16, 8 }, // vextractf128 + 4*pshufb 1013 // + 2*por + vinsertf128 1014 { TTI::SK_PermuteSingleSrc, MVT::v32i8, 8 }, // vextractf128 + 4*pshufb 1015 // + 2*por + vinsertf128 1016 1017 { TTI::SK_PermuteTwoSrc, MVT::v4f64, 4 }, // 2*vperm2f128 + 2*vshufpd 1018 { TTI::SK_PermuteTwoSrc, MVT::v8f32, 4 }, // 2*vperm2f128 + 2*vshufps 1019 { TTI::SK_PermuteTwoSrc, MVT::v4i64, 4 }, // 2*vperm2f128 + 2*vshufpd 1020 { TTI::SK_PermuteTwoSrc, MVT::v8i32, 4 }, // 2*vperm2f128 + 2*vshufps 1021 { TTI::SK_PermuteTwoSrc, MVT::v16i16, 15 }, // 2*vextractf128 + 8*pshufb 1022 // + 4*por + vinsertf128 1023 { TTI::SK_PermuteTwoSrc, MVT::v32i8, 15 }, // 2*vextractf128 + 8*pshufb 1024 // + 4*por + vinsertf128 1025 }; 1026 1027 if (ST->hasAVX()) 1028 if (const auto *Entry = CostTableLookup(AVX1ShuffleTbl, Kind, LT.second)) 1029 return 
LT.first * Entry->Cost; 1030 1031 static const CostTblEntry SSE41ShuffleTbl[] = { 1032 { TTI::SK_Alternate, MVT::v2i64, 1 }, // pblendw 1033 { TTI::SK_Alternate, MVT::v2f64, 1 }, // movsd 1034 { TTI::SK_Alternate, MVT::v4i32, 1 }, // pblendw 1035 { TTI::SK_Alternate, MVT::v4f32, 1 }, // blendps 1036 { TTI::SK_Alternate, MVT::v8i16, 1 }, // pblendw 1037 { TTI::SK_Alternate, MVT::v16i8, 1 } // pblendvb 1038 }; 1039 1040 if (ST->hasSSE41()) 1041 if (const auto *Entry = CostTableLookup(SSE41ShuffleTbl, Kind, LT.second)) 1042 return LT.first * Entry->Cost; 1043 1044 static const CostTblEntry SSSE3ShuffleTbl[] = { 1045 { TTI::SK_Broadcast, MVT::v8i16, 1 }, // pshufb 1046 { TTI::SK_Broadcast, MVT::v16i8, 1 }, // pshufb 1047 1048 { TTI::SK_Reverse, MVT::v8i16, 1 }, // pshufb 1049 { TTI::SK_Reverse, MVT::v16i8, 1 }, // pshufb 1050 1051 { TTI::SK_Alternate, MVT::v8i16, 3 }, // 2*pshufb + por 1052 { TTI::SK_Alternate, MVT::v16i8, 3 }, // 2*pshufb + por 1053 1054 { TTI::SK_PermuteSingleSrc, MVT::v8i16, 1 }, // pshufb 1055 { TTI::SK_PermuteSingleSrc, MVT::v16i8, 1 }, // pshufb 1056 1057 { TTI::SK_PermuteTwoSrc, MVT::v8i16, 3 }, // 2*pshufb + por 1058 { TTI::SK_PermuteTwoSrc, MVT::v16i8, 3 }, // 2*pshufb + por 1059 }; 1060 1061 if (ST->hasSSSE3()) 1062 if (const auto *Entry = CostTableLookup(SSSE3ShuffleTbl, Kind, LT.second)) 1063 return LT.first * Entry->Cost; 1064 1065 static const CostTblEntry SSE2ShuffleTbl[] = { 1066 { TTI::SK_Broadcast, MVT::v2f64, 1 }, // shufpd 1067 { TTI::SK_Broadcast, MVT::v2i64, 1 }, // pshufd 1068 { TTI::SK_Broadcast, MVT::v4i32, 1 }, // pshufd 1069 { TTI::SK_Broadcast, MVT::v8i16, 2 }, // pshuflw + pshufd 1070 { TTI::SK_Broadcast, MVT::v16i8, 3 }, // unpck + pshuflw + pshufd 1071 1072 { TTI::SK_Reverse, MVT::v2f64, 1 }, // shufpd 1073 { TTI::SK_Reverse, MVT::v2i64, 1 }, // pshufd 1074 { TTI::SK_Reverse, MVT::v4i32, 1 }, // pshufd 1075 { TTI::SK_Reverse, MVT::v8i16, 3 }, // pshuflw + pshufhw + pshufd 1076 { TTI::SK_Reverse, MVT::v16i8, 9 }, // 2*pshuflw + 2*pshufhw 1077 // + 2*pshufd + 2*unpck + packus 1078 1079 { TTI::SK_Alternate, MVT::v2i64, 1 }, // movsd 1080 { TTI::SK_Alternate, MVT::v2f64, 1 }, // movsd 1081 { TTI::SK_Alternate, MVT::v4i32, 2 }, // 2*shufps 1082 { TTI::SK_Alternate, MVT::v8i16, 3 }, // pand + pandn + por 1083 { TTI::SK_Alternate, MVT::v16i8, 3 }, // pand + pandn + por 1084 1085 { TTI::SK_PermuteSingleSrc, MVT::v2f64, 1 }, // shufpd 1086 { TTI::SK_PermuteSingleSrc, MVT::v2i64, 1 }, // pshufd 1087 { TTI::SK_PermuteSingleSrc, MVT::v4i32, 1 }, // pshufd 1088 { TTI::SK_PermuteSingleSrc, MVT::v8i16, 5 }, // 2*pshuflw + 2*pshufhw 1089 // + pshufd/unpck 1090 { TTI::SK_PermuteSingleSrc, MVT::v16i8, 10 }, // 2*pshuflw + 2*pshufhw 1091 // + 2*pshufd + 2*unpck + 2*packus 1092 1093 { TTI::SK_PermuteTwoSrc, MVT::v2f64, 1 }, // shufpd 1094 { TTI::SK_PermuteTwoSrc, MVT::v2i64, 1 }, // shufpd 1095 { TTI::SK_PermuteTwoSrc, MVT::v4i32, 2 }, // 2*{unpck,movsd,pshufd} 1096 { TTI::SK_PermuteTwoSrc, MVT::v8i16, 8 }, // blend+permute 1097 { TTI::SK_PermuteTwoSrc, MVT::v16i8, 13 }, // blend+permute 1098 }; 1099 1100 if (ST->hasSSE2()) 1101 if (const auto *Entry = CostTableLookup(SSE2ShuffleTbl, Kind, LT.second)) 1102 return LT.first * Entry->Cost; 1103 1104 static const CostTblEntry SSE1ShuffleTbl[] = { 1105 { TTI::SK_Broadcast, MVT::v4f32, 1 }, // shufps 1106 { TTI::SK_Reverse, MVT::v4f32, 1 }, // shufps 1107 { TTI::SK_Alternate, MVT::v4f32, 2 }, // 2*shufps 1108 { TTI::SK_PermuteSingleSrc, MVT::v4f32, 1 }, // shufps 1109 { TTI::SK_PermuteTwoSrc, MVT::v4f32, 2 }, // 2*shufps 
1110 }; 1111 1112 if (ST->hasSSE1()) 1113 if (const auto *Entry = CostTableLookup(SSE1ShuffleTbl, Kind, LT.second)) 1114 return LT.first * Entry->Cost; 1115 1116 return BaseT::getShuffleCost(Kind, Tp, Index, SubTp); 1117 } 1118 1119 int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, 1120 const Instruction *I) { 1121 int ISD = TLI->InstructionOpcodeToISD(Opcode); 1122 assert(ISD && "Invalid opcode"); 1123 1124 // FIXME: Need a better design of the cost table to handle non-simple types of 1125 // potential massive combinations (elem_num x src_type x dst_type). 1126 1127 static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = { 1128 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 }, 1129 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 }, 1130 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 }, 1131 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 }, 1132 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 }, 1133 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 }, 1134 1135 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 }, 1136 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 }, 1137 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 }, 1138 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 }, 1139 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 }, 1140 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 }, 1141 1142 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 1 }, 1143 { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f32, 1 }, 1144 { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f32, 1 }, 1145 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 }, 1146 { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f64, 1 }, 1147 { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f64, 1 }, 1148 1149 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 1 }, 1150 { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f32, 1 }, 1151 { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f32, 1 }, 1152 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 }, 1153 { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f64, 1 }, 1154 { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f64, 1 }, 1155 }; 1156 1157 // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and 1158 // 256-bit wide vectors. 
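  // Illustrative sketch only (mirrors the ConvertCostTableLookup calls later
  // in this function): each conversion table is keyed on (ISD opcode,
  // destination MVT, source MVT). For example, with AVX512DQ an fptosi from
  // <2 x float> to <2 x i64> would resolve against the table above as:
  //
  //   if (const auto *Entry = ConvertCostTableLookup(AVX512DQConversionTbl,
  //                                                  ISD::FP_TO_SINT,
  //                                                  MVT::v2i64, MVT::v2f32))
  //     return Entry->Cost; // 1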
1159 1160 static const TypeConversionCostTblEntry AVX512FConversionTbl[] = { 1161 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 1 }, 1162 { ISD::FP_EXTEND, MVT::v8f64, MVT::v16f32, 3 }, 1163 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 1 }, 1164 1165 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 1 }, 1166 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 1 }, 1167 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i64, 1 }, 1168 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 1 }, 1169 1170 // v16i1 -> v16i32 - load + broadcast 1171 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1, 2 }, 1172 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1, 2 }, 1173 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 1 }, 1174 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 1 }, 1175 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 }, 1176 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 }, 1177 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 1 }, 1178 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 1 }, 1179 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i32, 1 }, 1180 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i32, 1 }, 1181 1182 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 }, 1183 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 }, 1184 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i8, 2 }, 1185 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 2 }, 1186 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 }, 1187 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 }, 1188 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 }, 1189 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 }, 1190 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 26 }, 1191 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 26 }, 1192 1193 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 }, 1194 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 }, 1195 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 2 }, 1196 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 }, 1197 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 2 }, 1198 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i8, 2 }, 1199 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 2 }, 1200 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 5 }, 1201 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 }, 1202 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 }, 1203 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 }, 1204 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 }, 1205 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 2 }, 1206 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 1 }, 1207 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 }, 1208 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 }, 1209 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 }, 1210 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 }, 1211 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 }, 1212 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 5 }, 1213 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 }, 1214 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 12 }, 1215 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 26 }, 1216 1217 { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 }, 1218 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 }, 1219 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 1 }, 1220 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f64, 2 }, 1221 { ISD::FP_TO_UINT, MVT::v8i8, MVT::v8f64, 2 }, 1222 { ISD::FP_TO_UINT, MVT::v16i32, MVT::v16f32, 1 }, 1223 { ISD::FP_TO_UINT, MVT::v16i16, MVT::v16f32, 2 }, 1224 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v16f32, 2 }, 1225 }; 1226 1227 static const TypeConversionCostTblEntry AVX2ConversionTbl[] = { 1228 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 3 }, 1229 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 3 }, 1230 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 3 }, 1231 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 3 }, 1232 { 
ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 3 }, 1233 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 3 }, 1234 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 }, 1235 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 }, 1236 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 }, 1237 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 }, 1238 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, 1239 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, 1240 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 }, 1241 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 }, 1242 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 1 }, 1243 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1 }, 1244 1245 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 2 }, 1246 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 2 }, 1247 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 2 }, 1248 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 2 }, 1249 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 2 }, 1250 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 4 }, 1251 1252 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 3 }, 1253 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 3 }, 1254 1255 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 8 }, 1256 }; 1257 1258 static const TypeConversionCostTblEntry AVXConversionTbl[] = { 1259 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 6 }, 1260 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 4 }, 1261 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 7 }, 1262 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 4 }, 1263 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 6 }, 1264 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 4 }, 1265 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 7 }, 1266 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 4 }, 1267 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 }, 1268 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 4 }, 1269 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 6 }, 1270 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, 1271 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 }, 1272 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 4 }, 1273 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 4 }, 1274 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 4 }, 1275 1276 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 4 }, 1277 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 }, 1278 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 }, 1279 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 4 }, 1280 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 4 }, 1281 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 4 }, 1282 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 9 }, 1283 1284 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 }, 1285 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i1, 3 }, 1286 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i1, 8 }, 1287 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 }, 1288 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i8, 3 }, 1289 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 8 }, 1290 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 3 }, 1291 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i16, 3 }, 1292 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 }, 1293 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 }, 1294 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 }, 1295 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 }, 1296 1297 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 7 }, 1298 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i1, 7 }, 1299 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i1, 6 }, 1300 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 2 }, 1301 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 }, 1302 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 5 }, 1303 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 }, 1304 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 }, 1305 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 }, 1306 { ISD::UINT_TO_FP, 
                         MVT::v2f64, MVT::v2i32,  6 },
    { ISD::UINT_TO_FP,   MVT::v4f32, MVT::v4i32,  6 },
    { ISD::UINT_TO_FP,   MVT::v4f64, MVT::v4i32,  6 },
    { ISD::UINT_TO_FP,   MVT::v8f32, MVT::v8i32,  9 },
    // The generic code to compute the scalar overhead is currently broken.
    // Work around this limitation by estimating the scalarization overhead
    // here. We have roughly 10 instructions per scalar element.
    // Multiply that by the vector width.
    // FIXME: remove that when PR19268 is fixed.
    { ISD::UINT_TO_FP,   MVT::v2f64, MVT::v2i64, 10 },
    { ISD::UINT_TO_FP,   MVT::v4f64, MVT::v4i64, 20 },
    { ISD::SINT_TO_FP,   MVT::v4f64, MVT::v4i64, 13 },
    { ISD::SINT_TO_FP,   MVT::v4f64, MVT::v4i64, 13 },

    { ISD::FP_TO_SINT,   MVT::v4i8,  MVT::v4f32,  1 },
    { ISD::FP_TO_SINT,   MVT::v8i8,  MVT::v8f32,  7 },
    // This node is expanded into scalarized operations, but BasicTTI is
    // overly optimistic when estimating its cost. It computes 3 per element
    // (one vector-extract, one scalar conversion and one vector-insert). The
    // problem is that the inserts form a read-modify-write chain, so latency
    // should be factored in too. Inflate the cost per element by 1.
    { ISD::FP_TO_UINT,   MVT::v8i32, MVT::v8f32, 8*4 },
    { ISD::FP_TO_UINT,   MVT::v4i32, MVT::v4f64, 4*4 },

    { ISD::FP_EXTEND,    MVT::v4f64, MVT::v4f32,  1 },
    { ISD::FP_ROUND,     MVT::v4f32, MVT::v4f64,  1 },
  };

  static const TypeConversionCostTblEntry SSE41ConversionTbl[] = {
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   2 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   2 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  2 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  2 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  2 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  2 },

    { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i8,   2 },
    { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i8,   1 },
    { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i8,   1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   2 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   2 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  2 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  4 },
    { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i16,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  2 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 4 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 4 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i16,  2 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i16,  1 },
    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i32,  1 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i32,  1 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  3 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  3 },
    { ISD::TRUNCATE,    MVT::v16i16, MVT::v16i32, 6 },

  };

  static const TypeConversionCostTblEntry SSE2ConversionTbl[] = {
    // These are somewhat magic numbers justified by looking at the output of
    // Intel's IACA, running some kernels and making sure that, when we take
    // legalization into account, the throughput will be overestimated.
1375 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 }, 1376 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 }, 1377 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 }, 1378 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 }, 1379 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 }, 1380 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 }, 1381 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 }, 1382 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 }, 1383 1384 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 }, 1385 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 }, 1386 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 }, 1387 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 }, 1388 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 }, 1389 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 8 }, 1390 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 }, 1391 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 }, 1392 1393 { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 3 }, 1394 1395 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i8, 1 }, 1396 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i8, 6 }, 1397 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 }, 1398 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 3 }, 1399 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 4 }, 1400 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 8 }, 1401 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 }, 1402 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 2 }, 1403 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 6 }, 1404 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 6 }, 1405 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 3 }, 1406 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 }, 1407 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 9 }, 1408 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 12 }, 1409 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 }, 1410 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 2 }, 1411 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, 1412 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 10 }, 1413 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 3 }, 1414 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 }, 1415 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 6 }, 1416 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 8 }, 1417 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 3 }, 1418 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 5 }, 1419 1420 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 4 }, 1421 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 2 }, 1422 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 }, 1423 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 3 }, 1424 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 3 }, 1425 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 }, 1426 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 7 }, 1427 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 }, 1428 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 10 }, 1429 }; 1430 1431 std::pair<int, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src); 1432 std::pair<int, MVT> LTDest = TLI->getTypeLegalizationCost(DL, Dst); 1433 1434 if (ST->hasSSE2() && !ST->hasAVX()) { 1435 if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD, 1436 LTDest.second, LTSrc.second)) 1437 return LTSrc.first * Entry->Cost; 1438 } 1439 1440 EVT SrcTy = TLI->getValueType(DL, Src); 1441 EVT DstTy = TLI->getValueType(DL, Dst); 1442 1443 // The function getSimpleVT only handles simple value types. 
1444 if (!SrcTy.isSimple() || !DstTy.isSimple()) 1445 return BaseT::getCastInstrCost(Opcode, Dst, Src); 1446 1447 if (ST->hasDQI()) 1448 if (const auto *Entry = ConvertCostTableLookup(AVX512DQConversionTbl, ISD, 1449 DstTy.getSimpleVT(), 1450 SrcTy.getSimpleVT())) 1451 return Entry->Cost; 1452 1453 if (ST->hasAVX512()) 1454 if (const auto *Entry = ConvertCostTableLookup(AVX512FConversionTbl, ISD, 1455 DstTy.getSimpleVT(), 1456 SrcTy.getSimpleVT())) 1457 return Entry->Cost; 1458 1459 if (ST->hasAVX2()) { 1460 if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD, 1461 DstTy.getSimpleVT(), 1462 SrcTy.getSimpleVT())) 1463 return Entry->Cost; 1464 } 1465 1466 if (ST->hasAVX()) { 1467 if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD, 1468 DstTy.getSimpleVT(), 1469 SrcTy.getSimpleVT())) 1470 return Entry->Cost; 1471 } 1472 1473 if (ST->hasSSE41()) { 1474 if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD, 1475 DstTy.getSimpleVT(), 1476 SrcTy.getSimpleVT())) 1477 return Entry->Cost; 1478 } 1479 1480 if (ST->hasSSE2()) { 1481 if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD, 1482 DstTy.getSimpleVT(), 1483 SrcTy.getSimpleVT())) 1484 return Entry->Cost; 1485 } 1486 1487 return BaseT::getCastInstrCost(Opcode, Dst, Src, I); 1488 } 1489 1490 int X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, 1491 const Instruction *I) { 1492 // Legalize the type. 1493 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy); 1494 1495 MVT MTy = LT.second; 1496 1497 int ISD = TLI->InstructionOpcodeToISD(Opcode); 1498 assert(ISD && "Invalid opcode"); 1499 1500 static const CostTblEntry SSE2CostTbl[] = { 1501 { ISD::SETCC, MVT::v2i64, 8 }, 1502 { ISD::SETCC, MVT::v4i32, 1 }, 1503 { ISD::SETCC, MVT::v8i16, 1 }, 1504 { ISD::SETCC, MVT::v16i8, 1 }, 1505 }; 1506 1507 static const CostTblEntry SSE42CostTbl[] = { 1508 { ISD::SETCC, MVT::v2f64, 1 }, 1509 { ISD::SETCC, MVT::v4f32, 1 }, 1510 { ISD::SETCC, MVT::v2i64, 1 }, 1511 }; 1512 1513 static const CostTblEntry AVX1CostTbl[] = { 1514 { ISD::SETCC, MVT::v4f64, 1 }, 1515 { ISD::SETCC, MVT::v8f32, 1 }, 1516 // AVX1 does not support 8-wide integer compare. 
1517 { ISD::SETCC, MVT::v4i64, 4 }, 1518 { ISD::SETCC, MVT::v8i32, 4 }, 1519 { ISD::SETCC, MVT::v16i16, 4 }, 1520 { ISD::SETCC, MVT::v32i8, 4 }, 1521 }; 1522 1523 static const CostTblEntry AVX2CostTbl[] = { 1524 { ISD::SETCC, MVT::v4i64, 1 }, 1525 { ISD::SETCC, MVT::v8i32, 1 }, 1526 { ISD::SETCC, MVT::v16i16, 1 }, 1527 { ISD::SETCC, MVT::v32i8, 1 }, 1528 }; 1529 1530 static const CostTblEntry AVX512CostTbl[] = { 1531 { ISD::SETCC, MVT::v8i64, 1 }, 1532 { ISD::SETCC, MVT::v16i32, 1 }, 1533 { ISD::SETCC, MVT::v8f64, 1 }, 1534 { ISD::SETCC, MVT::v16f32, 1 }, 1535 }; 1536 1537 static const CostTblEntry AVX512BWCostTbl[] = { 1538 { ISD::SETCC, MVT::v32i16, 1 }, 1539 { ISD::SETCC, MVT::v64i8, 1 }, 1540 }; 1541 1542 if (ST->hasBWI()) 1543 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy)) 1544 return LT.first * Entry->Cost; 1545 1546 if (ST->hasAVX512()) 1547 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy)) 1548 return LT.first * Entry->Cost; 1549 1550 if (ST->hasAVX2()) 1551 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy)) 1552 return LT.first * Entry->Cost; 1553 1554 if (ST->hasAVX()) 1555 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy)) 1556 return LT.first * Entry->Cost; 1557 1558 if (ST->hasSSE42()) 1559 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy)) 1560 return LT.first * Entry->Cost; 1561 1562 if (ST->hasSSE2()) 1563 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy)) 1564 return LT.first * Entry->Cost; 1565 1566 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I); 1567 } 1568 1569 unsigned X86TTIImpl::getAtomicMemIntrinsicMaxElementSize() const { return 16; } 1570 1571 int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy, 1572 ArrayRef<Type *> Tys, FastMathFlags FMF, 1573 unsigned ScalarizationCostPassed) { 1574 // Costs should match the codegen from: 1575 // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll 1576 // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll 1577 // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll 1578 // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll 1579 // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll 1580 static const CostTblEntry AVX512CDCostTbl[] = { 1581 { ISD::CTLZ, MVT::v8i64, 1 }, 1582 { ISD::CTLZ, MVT::v16i32, 1 }, 1583 { ISD::CTLZ, MVT::v32i16, 8 }, 1584 { ISD::CTLZ, MVT::v64i8, 20 }, 1585 { ISD::CTLZ, MVT::v4i64, 1 }, 1586 { ISD::CTLZ, MVT::v8i32, 1 }, 1587 { ISD::CTLZ, MVT::v16i16, 4 }, 1588 { ISD::CTLZ, MVT::v32i8, 10 }, 1589 { ISD::CTLZ, MVT::v2i64, 1 }, 1590 { ISD::CTLZ, MVT::v4i32, 1 }, 1591 { ISD::CTLZ, MVT::v8i16, 4 }, 1592 { ISD::CTLZ, MVT::v16i8, 4 }, 1593 }; 1594 static const CostTblEntry AVX512BWCostTbl[] = { 1595 { ISD::BITREVERSE, MVT::v8i64, 5 }, 1596 { ISD::BITREVERSE, MVT::v16i32, 5 }, 1597 { ISD::BITREVERSE, MVT::v32i16, 5 }, 1598 { ISD::BITREVERSE, MVT::v64i8, 5 }, 1599 { ISD::CTLZ, MVT::v8i64, 23 }, 1600 { ISD::CTLZ, MVT::v16i32, 22 }, 1601 { ISD::CTLZ, MVT::v32i16, 18 }, 1602 { ISD::CTLZ, MVT::v64i8, 17 }, 1603 { ISD::CTPOP, MVT::v8i64, 7 }, 1604 { ISD::CTPOP, MVT::v16i32, 11 }, 1605 { ISD::CTPOP, MVT::v32i16, 9 }, 1606 { ISD::CTPOP, MVT::v64i8, 6 }, 1607 { ISD::CTTZ, MVT::v8i64, 10 }, 1608 { ISD::CTTZ, MVT::v16i32, 14 }, 1609 { ISD::CTTZ, MVT::v32i16, 12 }, 1610 { ISD::CTTZ, MVT::v64i8, 9 }, 1611 }; 1612 static const CostTblEntry AVX512CostTbl[] = { 1613 { ISD::BITREVERSE, MVT::v8i64, 36 }, 1614 { ISD::BITREVERSE, MVT::v16i32, 24 }, 1615 { ISD::CTLZ, MVT::v8i64, 29 }, 1616 { ISD::CTLZ, MVT::v16i32, 35 }, 1617 { 
ISD::CTPOP, MVT::v8i64, 16 }, 1618 { ISD::CTPOP, MVT::v16i32, 24 }, 1619 { ISD::CTTZ, MVT::v8i64, 20 }, 1620 { ISD::CTTZ, MVT::v16i32, 28 }, 1621 }; 1622 static const CostTblEntry XOPCostTbl[] = { 1623 { ISD::BITREVERSE, MVT::v4i64, 4 }, 1624 { ISD::BITREVERSE, MVT::v8i32, 4 }, 1625 { ISD::BITREVERSE, MVT::v16i16, 4 }, 1626 { ISD::BITREVERSE, MVT::v32i8, 4 }, 1627 { ISD::BITREVERSE, MVT::v2i64, 1 }, 1628 { ISD::BITREVERSE, MVT::v4i32, 1 }, 1629 { ISD::BITREVERSE, MVT::v8i16, 1 }, 1630 { ISD::BITREVERSE, MVT::v16i8, 1 }, 1631 { ISD::BITREVERSE, MVT::i64, 3 }, 1632 { ISD::BITREVERSE, MVT::i32, 3 }, 1633 { ISD::BITREVERSE, MVT::i16, 3 }, 1634 { ISD::BITREVERSE, MVT::i8, 3 } 1635 }; 1636 static const CostTblEntry AVX2CostTbl[] = { 1637 { ISD::BITREVERSE, MVT::v4i64, 5 }, 1638 { ISD::BITREVERSE, MVT::v8i32, 5 }, 1639 { ISD::BITREVERSE, MVT::v16i16, 5 }, 1640 { ISD::BITREVERSE, MVT::v32i8, 5 }, 1641 { ISD::BSWAP, MVT::v4i64, 1 }, 1642 { ISD::BSWAP, MVT::v8i32, 1 }, 1643 { ISD::BSWAP, MVT::v16i16, 1 }, 1644 { ISD::CTLZ, MVT::v4i64, 23 }, 1645 { ISD::CTLZ, MVT::v8i32, 18 }, 1646 { ISD::CTLZ, MVT::v16i16, 14 }, 1647 { ISD::CTLZ, MVT::v32i8, 9 }, 1648 { ISD::CTPOP, MVT::v4i64, 7 }, 1649 { ISD::CTPOP, MVT::v8i32, 11 }, 1650 { ISD::CTPOP, MVT::v16i16, 9 }, 1651 { ISD::CTPOP, MVT::v32i8, 6 }, 1652 { ISD::CTTZ, MVT::v4i64, 10 }, 1653 { ISD::CTTZ, MVT::v8i32, 14 }, 1654 { ISD::CTTZ, MVT::v16i16, 12 }, 1655 { ISD::CTTZ, MVT::v32i8, 9 }, 1656 { ISD::FSQRT, MVT::f32, 7 }, // Haswell from http://www.agner.org/ 1657 { ISD::FSQRT, MVT::v4f32, 7 }, // Haswell from http://www.agner.org/ 1658 { ISD::FSQRT, MVT::v8f32, 14 }, // Haswell from http://www.agner.org/ 1659 { ISD::FSQRT, MVT::f64, 14 }, // Haswell from http://www.agner.org/ 1660 { ISD::FSQRT, MVT::v2f64, 14 }, // Haswell from http://www.agner.org/ 1661 { ISD::FSQRT, MVT::v4f64, 28 }, // Haswell from http://www.agner.org/ 1662 }; 1663 static const CostTblEntry AVX1CostTbl[] = { 1664 { ISD::BITREVERSE, MVT::v4i64, 12 }, // 2 x 128-bit Op + extract/insert 1665 { ISD::BITREVERSE, MVT::v8i32, 12 }, // 2 x 128-bit Op + extract/insert 1666 { ISD::BITREVERSE, MVT::v16i16, 12 }, // 2 x 128-bit Op + extract/insert 1667 { ISD::BITREVERSE, MVT::v32i8, 12 }, // 2 x 128-bit Op + extract/insert 1668 { ISD::BSWAP, MVT::v4i64, 4 }, 1669 { ISD::BSWAP, MVT::v8i32, 4 }, 1670 { ISD::BSWAP, MVT::v16i16, 4 }, 1671 { ISD::CTLZ, MVT::v4i64, 48 }, // 2 x 128-bit Op + extract/insert 1672 { ISD::CTLZ, MVT::v8i32, 38 }, // 2 x 128-bit Op + extract/insert 1673 { ISD::CTLZ, MVT::v16i16, 30 }, // 2 x 128-bit Op + extract/insert 1674 { ISD::CTLZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert 1675 { ISD::CTPOP, MVT::v4i64, 16 }, // 2 x 128-bit Op + extract/insert 1676 { ISD::CTPOP, MVT::v8i32, 24 }, // 2 x 128-bit Op + extract/insert 1677 { ISD::CTPOP, MVT::v16i16, 20 }, // 2 x 128-bit Op + extract/insert 1678 { ISD::CTPOP, MVT::v32i8, 14 }, // 2 x 128-bit Op + extract/insert 1679 { ISD::CTTZ, MVT::v4i64, 22 }, // 2 x 128-bit Op + extract/insert 1680 { ISD::CTTZ, MVT::v8i32, 30 }, // 2 x 128-bit Op + extract/insert 1681 { ISD::CTTZ, MVT::v16i16, 26 }, // 2 x 128-bit Op + extract/insert 1682 { ISD::CTTZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert 1683 { ISD::FSQRT, MVT::f32, 14 }, // SNB from http://www.agner.org/ 1684 { ISD::FSQRT, MVT::v4f32, 14 }, // SNB from http://www.agner.org/ 1685 { ISD::FSQRT, MVT::v8f32, 28 }, // SNB from http://www.agner.org/ 1686 { ISD::FSQRT, MVT::f64, 21 }, // SNB from http://www.agner.org/ 1687 { ISD::FSQRT, MVT::v2f64, 21 }, // SNB from 
http://www.agner.org/ 1688 { ISD::FSQRT, MVT::v4f64, 43 }, // SNB from http://www.agner.org/ 1689 }; 1690 static const CostTblEntry GLMCostTbl[] = { 1691 { ISD::FSQRT, MVT::f32, 19 }, // sqrtss 1692 { ISD::FSQRT, MVT::v4f32, 37 }, // sqrtps 1693 { ISD::FSQRT, MVT::f64, 34 }, // sqrtsd 1694 { ISD::FSQRT, MVT::v2f64, 67 }, // sqrtpd 1695 }; 1696 static const CostTblEntry SLMCostTbl[] = { 1697 { ISD::FSQRT, MVT::f32, 20 }, // sqrtss 1698 { ISD::FSQRT, MVT::v4f32, 40 }, // sqrtps 1699 { ISD::FSQRT, MVT::f64, 35 }, // sqrtsd 1700 { ISD::FSQRT, MVT::v2f64, 70 }, // sqrtpd 1701 }; 1702 static const CostTblEntry SSE42CostTbl[] = { 1703 { ISD::FSQRT, MVT::f32, 18 }, // Nehalem from http://www.agner.org/ 1704 { ISD::FSQRT, MVT::v4f32, 18 }, // Nehalem from http://www.agner.org/ 1705 }; 1706 static const CostTblEntry SSSE3CostTbl[] = { 1707 { ISD::BITREVERSE, MVT::v2i64, 5 }, 1708 { ISD::BITREVERSE, MVT::v4i32, 5 }, 1709 { ISD::BITREVERSE, MVT::v8i16, 5 }, 1710 { ISD::BITREVERSE, MVT::v16i8, 5 }, 1711 { ISD::BSWAP, MVT::v2i64, 1 }, 1712 { ISD::BSWAP, MVT::v4i32, 1 }, 1713 { ISD::BSWAP, MVT::v8i16, 1 }, 1714 { ISD::CTLZ, MVT::v2i64, 23 }, 1715 { ISD::CTLZ, MVT::v4i32, 18 }, 1716 { ISD::CTLZ, MVT::v8i16, 14 }, 1717 { ISD::CTLZ, MVT::v16i8, 9 }, 1718 { ISD::CTPOP, MVT::v2i64, 7 }, 1719 { ISD::CTPOP, MVT::v4i32, 11 }, 1720 { ISD::CTPOP, MVT::v8i16, 9 }, 1721 { ISD::CTPOP, MVT::v16i8, 6 }, 1722 { ISD::CTTZ, MVT::v2i64, 10 }, 1723 { ISD::CTTZ, MVT::v4i32, 14 }, 1724 { ISD::CTTZ, MVT::v8i16, 12 }, 1725 { ISD::CTTZ, MVT::v16i8, 9 } 1726 }; 1727 static const CostTblEntry SSE2CostTbl[] = { 1728 { ISD::BITREVERSE, MVT::v2i64, 29 }, 1729 { ISD::BITREVERSE, MVT::v4i32, 27 }, 1730 { ISD::BITREVERSE, MVT::v8i16, 27 }, 1731 { ISD::BITREVERSE, MVT::v16i8, 20 }, 1732 { ISD::BSWAP, MVT::v2i64, 7 }, 1733 { ISD::BSWAP, MVT::v4i32, 7 }, 1734 { ISD::BSWAP, MVT::v8i16, 7 }, 1735 { ISD::CTLZ, MVT::v2i64, 25 }, 1736 { ISD::CTLZ, MVT::v4i32, 26 }, 1737 { ISD::CTLZ, MVT::v8i16, 20 }, 1738 { ISD::CTLZ, MVT::v16i8, 17 }, 1739 { ISD::CTPOP, MVT::v2i64, 12 }, 1740 { ISD::CTPOP, MVT::v4i32, 15 }, 1741 { ISD::CTPOP, MVT::v8i16, 13 }, 1742 { ISD::CTPOP, MVT::v16i8, 10 }, 1743 { ISD::CTTZ, MVT::v2i64, 14 }, 1744 { ISD::CTTZ, MVT::v4i32, 18 }, 1745 { ISD::CTTZ, MVT::v8i16, 16 }, 1746 { ISD::CTTZ, MVT::v16i8, 13 }, 1747 { ISD::FSQRT, MVT::f64, 32 }, // Nehalem from http://www.agner.org/ 1748 { ISD::FSQRT, MVT::v2f64, 32 }, // Nehalem from http://www.agner.org/ 1749 }; 1750 static const CostTblEntry SSE1CostTbl[] = { 1751 { ISD::FSQRT, MVT::f32, 28 }, // Pentium III from http://www.agner.org/ 1752 { ISD::FSQRT, MVT::v4f32, 56 }, // Pentium III from http://www.agner.org/ 1753 }; 1754 static const CostTblEntry X64CostTbl[] = { // 64-bit targets 1755 { ISD::BITREVERSE, MVT::i64, 14 } 1756 }; 1757 static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets 1758 { ISD::BITREVERSE, MVT::i32, 14 }, 1759 { ISD::BITREVERSE, MVT::i16, 14 }, 1760 { ISD::BITREVERSE, MVT::i8, 11 } 1761 }; 1762 1763 unsigned ISD = ISD::DELETED_NODE; 1764 switch (IID) { 1765 default: 1766 break; 1767 case Intrinsic::bitreverse: 1768 ISD = ISD::BITREVERSE; 1769 break; 1770 case Intrinsic::bswap: 1771 ISD = ISD::BSWAP; 1772 break; 1773 case Intrinsic::ctlz: 1774 ISD = ISD::CTLZ; 1775 break; 1776 case Intrinsic::ctpop: 1777 ISD = ISD::CTPOP; 1778 break; 1779 case Intrinsic::cttz: 1780 ISD = ISD::CTTZ; 1781 break; 1782 case Intrinsic::sqrt: 1783 ISD = ISD::FSQRT; 1784 break; 1785 } 1786 1787 // Legalize the type. 
1788 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy); 1789 MVT MTy = LT.second; 1790 1791 // Attempt to lookup cost. 1792 if (ST->isGLM()) 1793 if (const auto *Entry = CostTableLookup(GLMCostTbl, ISD, MTy)) 1794 return LT.first * Entry->Cost; 1795 1796 if (ST->isSLM()) 1797 if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy)) 1798 return LT.first * Entry->Cost; 1799 1800 if (ST->hasCDI()) 1801 if (const auto *Entry = CostTableLookup(AVX512CDCostTbl, ISD, MTy)) 1802 return LT.first * Entry->Cost; 1803 1804 if (ST->hasBWI()) 1805 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy)) 1806 return LT.first * Entry->Cost; 1807 1808 if (ST->hasAVX512()) 1809 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy)) 1810 return LT.first * Entry->Cost; 1811 1812 if (ST->hasXOP()) 1813 if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy)) 1814 return LT.first * Entry->Cost; 1815 1816 if (ST->hasAVX2()) 1817 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy)) 1818 return LT.first * Entry->Cost; 1819 1820 if (ST->hasAVX()) 1821 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy)) 1822 return LT.first * Entry->Cost; 1823 1824 if (ST->hasSSE42()) 1825 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy)) 1826 return LT.first * Entry->Cost; 1827 1828 if (ST->hasSSSE3()) 1829 if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy)) 1830 return LT.first * Entry->Cost; 1831 1832 if (ST->hasSSE2()) 1833 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy)) 1834 return LT.first * Entry->Cost; 1835 1836 if (ST->hasSSE1()) 1837 if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy)) 1838 return LT.first * Entry->Cost; 1839 1840 if (ST->is64Bit()) 1841 if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy)) 1842 return LT.first * Entry->Cost; 1843 1844 if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy)) 1845 return LT.first * Entry->Cost; 1846 1847 return BaseT::getIntrinsicInstrCost(IID, RetTy, Tys, FMF, ScalarizationCostPassed); 1848 } 1849 1850 int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy, 1851 ArrayRef<Value *> Args, FastMathFlags FMF, unsigned VF) { 1852 return BaseT::getIntrinsicInstrCost(IID, RetTy, Args, FMF, VF); 1853 } 1854 1855 int X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) { 1856 assert(Val->isVectorTy() && "This must be a vector type"); 1857 1858 Type *ScalarType = Val->getScalarType(); 1859 1860 if (Index != -1U) { 1861 // Legalize the type. 1862 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val); 1863 1864 // This type is legalized to a scalar type. 1865 if (!LT.second.isVector()) 1866 return 0; 1867 1868 // The type may be split. Normalize the index to the new type. 1869 unsigned Width = LT.second.getVectorNumElements(); 1870 Index = Index % Width; 1871 1872 // Floating point scalars are already located in index #0. 1873 if (ScalarType->isFloatingPointTy() && Index == 0) 1874 return 0; 1875 } 1876 1877 // Add to the base cost if we know that the extracted element of a vector is 1878 // destined to be moved to and used in the integer register file. 
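  // For example, extracting a pointer element has to end up in a GPR, which
  // typically costs an extra cross-register-file move (e.g. a movq out of an
  // XMM register), hence the extra unit added below.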
1879 int RegisterFileMoveCost = 0; 1880 if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy()) 1881 RegisterFileMoveCost = 1; 1882 1883 return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost; 1884 } 1885 1886 int X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment, 1887 unsigned AddressSpace, const Instruction *I) { 1888 // Handle non-power-of-two vectors such as <3 x float> 1889 if (VectorType *VTy = dyn_cast<VectorType>(Src)) { 1890 unsigned NumElem = VTy->getVectorNumElements(); 1891 1892 // Handle a few common cases: 1893 // <3 x float> 1894 if (NumElem == 3 && VTy->getScalarSizeInBits() == 32) 1895 // Cost = 64 bit store + extract + 32 bit store. 1896 return 3; 1897 1898 // <3 x double> 1899 if (NumElem == 3 && VTy->getScalarSizeInBits() == 64) 1900 // Cost = 128 bit store + unpack + 64 bit store. 1901 return 3; 1902 1903 // Assume that all other non-power-of-two numbers are scalarized. 1904 if (!isPowerOf2_32(NumElem)) { 1905 int Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(), Alignment, 1906 AddressSpace); 1907 int SplitCost = getScalarizationOverhead(Src, Opcode == Instruction::Load, 1908 Opcode == Instruction::Store); 1909 return NumElem * Cost + SplitCost; 1910 } 1911 } 1912 1913 // Legalize the type. 1914 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src); 1915 assert((Opcode == Instruction::Load || Opcode == Instruction::Store) && 1916 "Invalid Opcode"); 1917 1918 // Each load/store unit costs 1. 1919 int Cost = LT.first * 1; 1920 1921 // This isn't exactly right. We're using slow unaligned 32-byte accesses as a 1922 // proxy for a double-pumped AVX memory interface such as on Sandybridge. 1923 if (LT.second.getStoreSize() == 32 && ST->isUnalignedMem32Slow()) 1924 Cost *= 2; 1925 1926 return Cost; 1927 } 1928 1929 int X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy, 1930 unsigned Alignment, 1931 unsigned AddressSpace) { 1932 VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy); 1933 if (!SrcVTy) 1934 // To calculate scalar take the regular cost, without mask 1935 return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace); 1936 1937 unsigned NumElem = SrcVTy->getVectorNumElements(); 1938 VectorType *MaskTy = 1939 VectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem); 1940 if ((Opcode == Instruction::Load && !isLegalMaskedLoad(SrcVTy)) || 1941 (Opcode == Instruction::Store && !isLegalMaskedStore(SrcVTy)) || 1942 !isPowerOf2_32(NumElem)) { 1943 // Scalarization 1944 int MaskSplitCost = getScalarizationOverhead(MaskTy, false, true); 1945 int ScalarCompareCost = getCmpSelInstrCost( 1946 Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr); 1947 int BranchCost = getCFInstrCost(Instruction::Br); 1948 int MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost); 1949 1950 int ValueSplitCost = getScalarizationOverhead( 1951 SrcVTy, Opcode == Instruction::Load, Opcode == Instruction::Store); 1952 int MemopCost = 1953 NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(), 1954 Alignment, AddressSpace); 1955 return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost; 1956 } 1957 1958 // Legalize the type. 1959 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy); 1960 auto VT = TLI->getValueType(DL, SrcVTy); 1961 int Cost = 0; 1962 if (VT.isSimple() && LT.second != VT.getSimpleVT() && 1963 LT.second.getVectorNumElements() == NumElem) 1964 // Promotion requires expand/truncate for data and a shuffle for mask. 
    Cost += getShuffleCost(TTI::SK_Alternate, SrcVTy, 0, nullptr) +
            getShuffleCost(TTI::SK_Alternate, MaskTy, 0, nullptr);

  else if (LT.second.getVectorNumElements() > NumElem) {
    VectorType *NewMaskTy = VectorType::get(MaskTy->getVectorElementType(),
                                            LT.second.getVectorNumElements());
    // Expanding requires filling the mask with zeroes.
    Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, 0, MaskTy);
  }
  if (!ST->hasAVX512())
    return Cost + LT.first * 4; // Each maskmov costs 4.

  // AVX-512 masked load/store is cheaper.
  return Cost + LT.first;
}

int X86TTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
                                          const SCEV *Ptr) {
  // Address computations in vectorized code with non-consecutive addresses will
  // likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  // Cost modeling of Strided Access Computation is hidden by the indexing
  // modes of X86 regardless of the stride value. We don't believe that there
  // is a difference between constant strided access in general and a constant
  // stride value which is less than or equal to 64.
  // Even in the case of a (loop-invariant) stride whose value is not known at
  // compile time, the address computation will not incur more than one extra
  // ADD instruction.
  if (Ty->isVectorTy() && SE) {
    if (!BaseT::isStridedAccess(Ptr))
      return NumVectorInstToHideOverhead;
    if (!BaseT::getConstantStrideStep(SE, Ptr))
      return 1;
  }

  return BaseT::getAddressComputationCost(Ty, SE, Ptr);
}

int X86TTIImpl::getArithmeticReductionCost(unsigned Opcode, Type *ValTy,
                                           bool IsPairwise) {

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use it as the cost.

  static const CostTblEntry SSE42CostTblPairWise[] = {
    { ISD::FADD,  MVT::v2f64,   2 },
    { ISD::FADD,  MVT::v4f32,   4 },
    { ISD::ADD,   MVT::v2i64,   2 },      // The data reported by the IACA tool is "1.6".
    { ISD::ADD,   MVT::v4i32,   3 },      // The data reported by the IACA tool is "3.5".
    { ISD::ADD,   MVT::v8i16,   5 },
  };

  static const CostTblEntry AVX1CostTblPairWise[] = {
    { ISD::FADD,  MVT::v4f32,   4 },
    { ISD::FADD,  MVT::v4f64,   5 },
    { ISD::FADD,  MVT::v8f32,   7 },
    { ISD::ADD,   MVT::v2i64,   1 },      // The data reported by the IACA tool is "1.5".
    { ISD::ADD,   MVT::v4i32,   3 },      // The data reported by the IACA tool is "3.5".
    { ISD::ADD,   MVT::v4i64,   5 },      // The data reported by the IACA tool is "4.8".
    { ISD::ADD,   MVT::v8i16,   5 },
    { ISD::ADD,   MVT::v8i32,   5 },
  };

  static const CostTblEntry SSE42CostTblNoPairWise[] = {
    { ISD::FADD,  MVT::v2f64,   2 },
    { ISD::FADD,  MVT::v4f32,   4 },
    { ISD::ADD,   MVT::v2i64,   2 },      // The data reported by the IACA tool is "1.6".
    { ISD::ADD,   MVT::v4i32,   3 },      // The data reported by the IACA tool is "3.3".
    { ISD::ADD,   MVT::v8i16,   4 },      // The data reported by the IACA tool is "4.3".
2044 }; 2045 2046 static const CostTblEntry AVX1CostTblNoPairWise[] = { 2047 { ISD::FADD, MVT::v4f32, 3 }, 2048 { ISD::FADD, MVT::v4f64, 3 }, 2049 { ISD::FADD, MVT::v8f32, 4 }, 2050 { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5". 2051 { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "2.8". 2052 { ISD::ADD, MVT::v4i64, 3 }, 2053 { ISD::ADD, MVT::v8i16, 4 }, 2054 { ISD::ADD, MVT::v8i32, 5 }, 2055 }; 2056 2057 if (IsPairwise) { 2058 if (ST->hasAVX()) 2059 if (const auto *Entry = CostTableLookup(AVX1CostTblPairWise, ISD, MTy)) 2060 return LT.first * Entry->Cost; 2061 2062 if (ST->hasSSE42()) 2063 if (const auto *Entry = CostTableLookup(SSE42CostTblPairWise, ISD, MTy)) 2064 return LT.first * Entry->Cost; 2065 } else { 2066 if (ST->hasAVX()) 2067 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy)) 2068 return LT.first * Entry->Cost; 2069 2070 if (ST->hasSSE42()) 2071 if (const auto *Entry = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy)) 2072 return LT.first * Entry->Cost; 2073 } 2074 2075 return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwise); 2076 } 2077 2078 int X86TTIImpl::getMinMaxReductionCost(Type *ValTy, Type *CondTy, 2079 bool IsPairwise, bool IsUnsigned) { 2080 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy); 2081 2082 MVT MTy = LT.second; 2083 2084 int ISD; 2085 if (ValTy->isIntOrIntVectorTy()) { 2086 ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN; 2087 } else { 2088 assert(ValTy->isFPOrFPVectorTy() && 2089 "Expected float point or integer vector type."); 2090 ISD = ISD::FMINNUM; 2091 } 2092 2093 // We use the Intel Architecture Code Analyzer(IACA) to measure the throughput 2094 // and make it as the cost. 2095 2096 static const CostTblEntry SSE42CostTblPairWise[] = { 2097 {ISD::FMINNUM, MVT::v2f64, 3}, 2098 {ISD::FMINNUM, MVT::v4f32, 2}, 2099 {ISD::SMIN, MVT::v2i64, 7}, // The data reported by the IACA is "6.8" 2100 {ISD::UMIN, MVT::v2i64, 8}, // The data reported by the IACA is "8.6" 2101 {ISD::SMIN, MVT::v4i32, 1}, // The data reported by the IACA is "1.5" 2102 {ISD::UMIN, MVT::v4i32, 2}, // The data reported by the IACA is "1.8" 2103 {ISD::SMIN, MVT::v8i16, 2}, 2104 {ISD::UMIN, MVT::v8i16, 2}, 2105 }; 2106 2107 static const CostTblEntry AVX1CostTblPairWise[] = { 2108 {ISD::FMINNUM, MVT::v4f32, 1}, 2109 {ISD::FMINNUM, MVT::v4f64, 1}, 2110 {ISD::FMINNUM, MVT::v8f32, 2}, 2111 {ISD::SMIN, MVT::v2i64, 3}, 2112 {ISD::UMIN, MVT::v2i64, 3}, 2113 {ISD::SMIN, MVT::v4i32, 1}, 2114 {ISD::UMIN, MVT::v4i32, 1}, 2115 {ISD::SMIN, MVT::v8i16, 1}, 2116 {ISD::UMIN, MVT::v8i16, 1}, 2117 {ISD::SMIN, MVT::v8i32, 3}, 2118 {ISD::UMIN, MVT::v8i32, 3}, 2119 }; 2120 2121 static const CostTblEntry AVX2CostTblPairWise[] = { 2122 {ISD::SMIN, MVT::v4i64, 2}, 2123 {ISD::UMIN, MVT::v4i64, 2}, 2124 {ISD::SMIN, MVT::v8i32, 1}, 2125 {ISD::UMIN, MVT::v8i32, 1}, 2126 {ISD::SMIN, MVT::v16i16, 1}, 2127 {ISD::UMIN, MVT::v16i16, 1}, 2128 {ISD::SMIN, MVT::v32i8, 2}, 2129 {ISD::UMIN, MVT::v32i8, 2}, 2130 }; 2131 2132 static const CostTblEntry AVX512CostTblPairWise[] = { 2133 {ISD::FMINNUM, MVT::v8f64, 1}, 2134 {ISD::FMINNUM, MVT::v16f32, 2}, 2135 {ISD::SMIN, MVT::v8i64, 2}, 2136 {ISD::UMIN, MVT::v8i64, 2}, 2137 {ISD::SMIN, MVT::v16i32, 1}, 2138 {ISD::UMIN, MVT::v16i32, 1}, 2139 }; 2140 2141 static const CostTblEntry SSE42CostTblNoPairWise[] = { 2142 {ISD::FMINNUM, MVT::v2f64, 3}, 2143 {ISD::FMINNUM, MVT::v4f32, 3}, 2144 {ISD::SMIN, MVT::v2i64, 7}, // The data reported by the IACA is "6.8" 2145 {ISD::UMIN, MVT::v2i64, 9}, // 
The data reported by the IACA is "8.6" 2146 {ISD::SMIN, MVT::v4i32, 1}, // The data reported by the IACA is "1.5" 2147 {ISD::UMIN, MVT::v4i32, 2}, // The data reported by the IACA is "1.8" 2148 {ISD::SMIN, MVT::v8i16, 1}, // The data reported by the IACA is "1.5" 2149 {ISD::UMIN, MVT::v8i16, 2}, // The data reported by the IACA is "1.8" 2150 }; 2151 2152 static const CostTblEntry AVX1CostTblNoPairWise[] = { 2153 {ISD::FMINNUM, MVT::v4f32, 1}, 2154 {ISD::FMINNUM, MVT::v4f64, 1}, 2155 {ISD::FMINNUM, MVT::v8f32, 1}, 2156 {ISD::SMIN, MVT::v2i64, 3}, 2157 {ISD::UMIN, MVT::v2i64, 3}, 2158 {ISD::SMIN, MVT::v4i32, 1}, 2159 {ISD::UMIN, MVT::v4i32, 1}, 2160 {ISD::SMIN, MVT::v8i16, 1}, 2161 {ISD::UMIN, MVT::v8i16, 1}, 2162 {ISD::SMIN, MVT::v8i32, 2}, 2163 {ISD::UMIN, MVT::v8i32, 2}, 2164 }; 2165 2166 static const CostTblEntry AVX2CostTblNoPairWise[] = { 2167 {ISD::SMIN, MVT::v4i64, 1}, 2168 {ISD::UMIN, MVT::v4i64, 1}, 2169 {ISD::SMIN, MVT::v8i32, 1}, 2170 {ISD::UMIN, MVT::v8i32, 1}, 2171 {ISD::SMIN, MVT::v16i16, 1}, 2172 {ISD::UMIN, MVT::v16i16, 1}, 2173 {ISD::SMIN, MVT::v32i8, 1}, 2174 {ISD::UMIN, MVT::v32i8, 1}, 2175 }; 2176 2177 static const CostTblEntry AVX512CostTblNoPairWise[] = { 2178 {ISD::FMINNUM, MVT::v8f64, 1}, 2179 {ISD::FMINNUM, MVT::v16f32, 2}, 2180 {ISD::SMIN, MVT::v8i64, 1}, 2181 {ISD::UMIN, MVT::v8i64, 1}, 2182 {ISD::SMIN, MVT::v16i32, 1}, 2183 {ISD::UMIN, MVT::v16i32, 1}, 2184 }; 2185 2186 if (IsPairwise) { 2187 if (ST->hasAVX512()) 2188 if (const auto *Entry = CostTableLookup(AVX512CostTblPairWise, ISD, MTy)) 2189 return LT.first * Entry->Cost; 2190 2191 if (ST->hasAVX2()) 2192 if (const auto *Entry = CostTableLookup(AVX2CostTblPairWise, ISD, MTy)) 2193 return LT.first * Entry->Cost; 2194 2195 if (ST->hasAVX()) 2196 if (const auto *Entry = CostTableLookup(AVX1CostTblPairWise, ISD, MTy)) 2197 return LT.first * Entry->Cost; 2198 2199 if (ST->hasSSE42()) 2200 if (const auto *Entry = CostTableLookup(SSE42CostTblPairWise, ISD, MTy)) 2201 return LT.first * Entry->Cost; 2202 } else { 2203 if (ST->hasAVX512()) 2204 if (const auto *Entry = 2205 CostTableLookup(AVX512CostTblNoPairWise, ISD, MTy)) 2206 return LT.first * Entry->Cost; 2207 2208 if (ST->hasAVX2()) 2209 if (const auto *Entry = CostTableLookup(AVX2CostTblNoPairWise, ISD, MTy)) 2210 return LT.first * Entry->Cost; 2211 2212 if (ST->hasAVX()) 2213 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy)) 2214 return LT.first * Entry->Cost; 2215 2216 if (ST->hasSSE42()) 2217 if (const auto *Entry = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy)) 2218 return LT.first * Entry->Cost; 2219 } 2220 2221 return BaseT::getMinMaxReductionCost(ValTy, CondTy, IsPairwise, IsUnsigned); 2222 } 2223 2224 /// \brief Calculate the cost of materializing a 64-bit value. This helper 2225 /// method might only calculate a fraction of a larger immediate. Therefore it 2226 /// is valid to return a cost of ZERO. 2227 int X86TTIImpl::getIntImmCost(int64_t Val) { 2228 if (Val == 0) 2229 return TTI::TCC_Free; 2230 2231 if (isInt<32>(Val)) 2232 return TTI::TCC_Basic; 2233 2234 return 2 * TTI::TCC_Basic; 2235 } 2236 2237 int X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) { 2238 assert(Ty->isIntegerTy()); 2239 2240 unsigned BitSize = Ty->getPrimitiveSizeInBits(); 2241 if (BitSize == 0) 2242 return ~0U; 2243 2244 // Never hoist constants larger than 128bit, because this might lead to 2245 // incorrect code generation or assertions in codegen. 
  // FIXME: Create a cost model for types larger than i128 once the codegen
  // issues have been fixed.
  if (BitSize > 128)
    return TTI::TCC_Free;

  if (Imm == 0)
    return TTI::TCC_Free;

  // Sign-extend all constants to a multiple of 64 bits.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1, Cost);
}

int X86TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::ICmp:
    // This is an imperfect hack to prevent constant hoisting of
    // compares that might be trying to check if a 64-bit value fits in
    // 32 bits. The backend can optimize these cases using a right shift by 32.
    // Ideally we would check the compare predicate here. There are also other
    // similar immediates that the backend can use shifts for.
    if (Idx == 1 && Imm.getBitWidth() == 64) {
      uint64_t ImmVal = Imm.getZExtValue();
      if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff)
        return TTI::TCC_Free;
    }
    ImmIdx = 1;
    break;
  case Instruction::And:
    // We support 64-bit ANDs with immediates that have 32 bits of leading
    // zeroes by using a 32-bit operation with implicit zero extension. Detect
    // such immediates here as the normal path expects bit 31 to be sign
    // extended.
    if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
      return TTI::TCC_Free;
    LLVM_FALLTHROUGH;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::Or:
  case Instruction::Xor:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
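  // (An x86 shift amount is encoded directly in the instruction as an 8-bit
  // immediate, so hoisting it into a register never pays off.)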
2327 case Instruction::Shl: 2328 case Instruction::LShr: 2329 case Instruction::AShr: 2330 if (Idx == 1) 2331 return TTI::TCC_Free; 2332 break; 2333 case Instruction::Trunc: 2334 case Instruction::ZExt: 2335 case Instruction::SExt: 2336 case Instruction::IntToPtr: 2337 case Instruction::PtrToInt: 2338 case Instruction::BitCast: 2339 case Instruction::PHI: 2340 case Instruction::Call: 2341 case Instruction::Select: 2342 case Instruction::Ret: 2343 case Instruction::Load: 2344 break; 2345 } 2346 2347 if (Idx == ImmIdx) { 2348 int NumConstants = (BitSize + 63) / 64; 2349 int Cost = X86TTIImpl::getIntImmCost(Imm, Ty); 2350 return (Cost <= NumConstants * TTI::TCC_Basic) 2351 ? static_cast<int>(TTI::TCC_Free) 2352 : Cost; 2353 } 2354 2355 return X86TTIImpl::getIntImmCost(Imm, Ty); 2356 } 2357 2358 int X86TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm, 2359 Type *Ty) { 2360 assert(Ty->isIntegerTy()); 2361 2362 unsigned BitSize = Ty->getPrimitiveSizeInBits(); 2363 // There is no cost model for constants with a bit size of 0. Return TCC_Free 2364 // here, so that constant hoisting will ignore this constant. 2365 if (BitSize == 0) 2366 return TTI::TCC_Free; 2367 2368 switch (IID) { 2369 default: 2370 return TTI::TCC_Free; 2371 case Intrinsic::sadd_with_overflow: 2372 case Intrinsic::uadd_with_overflow: 2373 case Intrinsic::ssub_with_overflow: 2374 case Intrinsic::usub_with_overflow: 2375 case Intrinsic::smul_with_overflow: 2376 case Intrinsic::umul_with_overflow: 2377 if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue())) 2378 return TTI::TCC_Free; 2379 break; 2380 case Intrinsic::experimental_stackmap: 2381 if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue()))) 2382 return TTI::TCC_Free; 2383 break; 2384 case Intrinsic::experimental_patchpoint_void: 2385 case Intrinsic::experimental_patchpoint_i64: 2386 if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue()))) 2387 return TTI::TCC_Free; 2388 break; 2389 } 2390 return X86TTIImpl::getIntImmCost(Imm, Ty); 2391 } 2392 2393 unsigned X86TTIImpl::getUserCost(const User *U, 2394 ArrayRef<const Value *> Operands) { 2395 if (isa<StoreInst>(U)) { 2396 Value *Ptr = U->getOperand(1); 2397 // Store instruction with index and scale costs 2 Uops. 2398 // Check the preceding GEP to identify non-const indices. 2399 if (auto GEP = dyn_cast<GetElementPtrInst>(Ptr)) { 2400 if (!all_of(GEP->indices(), [](Value *V) { return isa<Constant>(V); })) 2401 return TTI::TCC_Basic * 2; 2402 } 2403 return TTI::TCC_Basic; 2404 } 2405 return BaseT::getUserCost(U, Operands); 2406 } 2407 2408 // Return an average cost of Gather / Scatter instruction, maybe improved later 2409 int X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy, Value *Ptr, 2410 unsigned Alignment, unsigned AddressSpace) { 2411 2412 assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost"); 2413 unsigned VF = SrcVTy->getVectorNumElements(); 2414 2415 // Try to reduce index size from 64 bit (default for GEP) 2416 // to 32. It is essential for VF 16. If the index can't be reduced to 32, the 2417 // operation will use 16 x 64 indices which do not fit in a zmm and needs 2418 // to split. Also check that the base pointer is the same for all lanes, 2419 // and that there's at most one variable index. 
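  // For example, a VF=16 gather of i32 elements with 64-bit indices would
  // need a v16i64 index vector spanning two zmm registers and thus a split,
  // whereas 32-bit indices fit into a single v16i32 register.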
2420 auto getIndexSizeInBits = [](Value *Ptr, const DataLayout& DL) { 2421 unsigned IndexSize = DL.getPointerSizeInBits(); 2422 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr); 2423 if (IndexSize < 64 || !GEP) 2424 return IndexSize; 2425 2426 unsigned NumOfVarIndices = 0; 2427 Value *Ptrs = GEP->getPointerOperand(); 2428 if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs)) 2429 return IndexSize; 2430 for (unsigned i = 1; i < GEP->getNumOperands(); ++i) { 2431 if (isa<Constant>(GEP->getOperand(i))) 2432 continue; 2433 Type *IndxTy = GEP->getOperand(i)->getType(); 2434 if (IndxTy->isVectorTy()) 2435 IndxTy = IndxTy->getVectorElementType(); 2436 if ((IndxTy->getPrimitiveSizeInBits() == 64 && 2437 !isa<SExtInst>(GEP->getOperand(i))) || 2438 ++NumOfVarIndices > 1) 2439 return IndexSize; // 64 2440 } 2441 return (unsigned)32; 2442 }; 2443 2444 2445 // Trying to reduce IndexSize to 32 bits for vector 16. 2446 // By default the IndexSize is equal to pointer size. 2447 unsigned IndexSize = (ST->hasAVX512() && VF >= 16) 2448 ? getIndexSizeInBits(Ptr, DL) 2449 : DL.getPointerSizeInBits(); 2450 2451 Type *IndexVTy = VectorType::get(IntegerType::get(SrcVTy->getContext(), 2452 IndexSize), VF); 2453 std::pair<int, MVT> IdxsLT = TLI->getTypeLegalizationCost(DL, IndexVTy); 2454 std::pair<int, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, SrcVTy); 2455 int SplitFactor = std::max(IdxsLT.first, SrcLT.first); 2456 if (SplitFactor > 1) { 2457 // Handle splitting of vector of pointers 2458 Type *SplitSrcTy = VectorType::get(SrcVTy->getScalarType(), VF / SplitFactor); 2459 return SplitFactor * getGSVectorCost(Opcode, SplitSrcTy, Ptr, Alignment, 2460 AddressSpace); 2461 } 2462 2463 // The gather / scatter cost is given by Intel architects. It is a rough 2464 // number since we are looking at one instruction in a time. 2465 const int GSOverhead = (Opcode == Instruction::Load) 2466 ? ST->getGatherOverhead() 2467 : ST->getScatterOverhead(); 2468 return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(), 2469 Alignment, AddressSpace); 2470 } 2471 2472 /// Return the cost of full scalarization of gather / scatter operation. 2473 /// 2474 /// Opcode - Load or Store instruction. 2475 /// SrcVTy - The type of the data vector that should be gathered or scattered. 2476 /// VariableMask - The mask is non-constant at compile time. 2477 /// Alignment - Alignment for one element. 2478 /// AddressSpace - pointer[s] address space. 2479 /// 2480 int X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy, 2481 bool VariableMask, unsigned Alignment, 2482 unsigned AddressSpace) { 2483 unsigned VF = SrcVTy->getVectorNumElements(); 2484 2485 int MaskUnpackCost = 0; 2486 if (VariableMask) { 2487 VectorType *MaskTy = 2488 VectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF); 2489 MaskUnpackCost = getScalarizationOverhead(MaskTy, false, true); 2490 int ScalarCompareCost = 2491 getCmpSelInstrCost(Instruction::ICmp, Type::getInt1Ty(SrcVTy->getContext()), 2492 nullptr); 2493 int BranchCost = getCFInstrCost(Instruction::Br); 2494 MaskUnpackCost += VF * (BranchCost + ScalarCompareCost); 2495 } 2496 2497 // The cost of the scalar loads/stores. 
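  // The total scalarized cost below is the sum of these memory operations,
  // the mask unpacking (when the mask is variable) and the per-element
  // inserts/extracts.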
2498 int MemoryOpCost = VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(), 2499 Alignment, AddressSpace); 2500 2501 int InsertExtractCost = 0; 2502 if (Opcode == Instruction::Load) 2503 for (unsigned i = 0; i < VF; ++i) 2504 // Add the cost of inserting each scalar load into the vector 2505 InsertExtractCost += 2506 getVectorInstrCost(Instruction::InsertElement, SrcVTy, i); 2507 else 2508 for (unsigned i = 0; i < VF; ++i) 2509 // Add the cost of extracting each element out of the data vector 2510 InsertExtractCost += 2511 getVectorInstrCost(Instruction::ExtractElement, SrcVTy, i); 2512 2513 return MemoryOpCost + MaskUnpackCost + InsertExtractCost; 2514 } 2515 2516 /// Calculate the cost of Gather / Scatter operation 2517 int X86TTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *SrcVTy, 2518 Value *Ptr, bool VariableMask, 2519 unsigned Alignment) { 2520 assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter"); 2521 unsigned VF = SrcVTy->getVectorNumElements(); 2522 PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType()); 2523 if (!PtrTy && Ptr->getType()->isVectorTy()) 2524 PtrTy = dyn_cast<PointerType>(Ptr->getType()->getVectorElementType()); 2525 assert(PtrTy && "Unexpected type for Ptr argument"); 2526 unsigned AddressSpace = PtrTy->getAddressSpace(); 2527 2528 bool Scalarize = false; 2529 if ((Opcode == Instruction::Load && !isLegalMaskedGather(SrcVTy)) || 2530 (Opcode == Instruction::Store && !isLegalMaskedScatter(SrcVTy))) 2531 Scalarize = true; 2532 // Gather / Scatter for vector 2 is not profitable on KNL / SKX 2533 // Vector-4 of gather/scatter instruction does not exist on KNL. 2534 // We can extend it to 8 elements, but zeroing upper bits of 2535 // the mask vector will add more instructions. Right now we give the scalar 2536 // cost of vector-4 for KNL. TODO: Check, maybe the gather/scatter instruction 2537 // is better in the VariableMask case. 2538 if (ST->hasAVX512() && (VF == 2 || (VF == 4 && !ST->hasVLX()))) 2539 Scalarize = true; 2540 2541 if (Scalarize) 2542 return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment, 2543 AddressSpace); 2544 2545 return getGSVectorCost(Opcode, SrcVTy, Ptr, Alignment, AddressSpace); 2546 } 2547 2548 bool X86TTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1, 2549 TargetTransformInfo::LSRCost &C2) { 2550 // X86 specific here are "instruction number 1st priority". 2551 return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost, 2552 C1.NumIVMuls, C1.NumBaseAdds, 2553 C1.ScaleCost, C1.ImmCost, C1.SetupCost) < 2554 std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost, 2555 C2.NumIVMuls, C2.NumBaseAdds, 2556 C2.ScaleCost, C2.ImmCost, C2.SetupCost); 2557 } 2558 2559 bool X86TTIImpl::canMacroFuseCmp() { 2560 return ST->hasMacroFusion(); 2561 } 2562 2563 bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy) { 2564 // The backend can't handle a single element vector. 2565 if (isa<VectorType>(DataTy) && DataTy->getVectorNumElements() == 1) 2566 return false; 2567 Type *ScalarTy = DataTy->getScalarType(); 2568 int DataWidth = isa<PointerType>(ScalarTy) ? 
      DL.getPointerSizeInBits() : ScalarTy->getPrimitiveSizeInBits();

  return ((DataWidth == 32 || DataWidth == 64) && ST->hasAVX()) ||
         ((DataWidth == 8 || DataWidth == 16) && ST->hasBWI());
}

bool X86TTIImpl::isLegalMaskedStore(Type *DataType) {
  return isLegalMaskedLoad(DataType);
}

bool X86TTIImpl::isLegalMaskedGather(Type *DataTy) {
  // This function is currently called in two cases: from the Loop Vectorizer
  // and from the Scalarizer.
  // When the Loop Vectorizer asks about legality of the feature,
  // the vectorization factor is not calculated yet. The Loop Vectorizer
  // sends a scalar type and the decision is based on the width of the
  // scalar element.
  // Later on, the cost model will estimate usage of this intrinsic based on
  // the vector type.
  // The Scalarizer asks again about legality. It sends a vector type.
  // In this case we can reject non-power-of-2 vectors.
  // We also reject single element vectors as the type legalizer can't
  // scalarize them.
  if (isa<VectorType>(DataTy)) {
    unsigned NumElts = DataTy->getVectorNumElements();
    if (NumElts == 1 || !isPowerOf2_32(NumElts))
      return false;
  }
  Type *ScalarTy = DataTy->getScalarType();
  int DataWidth = isa<PointerType>(ScalarTy) ?
      DL.getPointerSizeInBits() : ScalarTy->getPrimitiveSizeInBits();

  // Some CPUs have better gather performance than others.
  // TODO: Remove the explicit ST->hasAVX512()? That would mean we would only
  // enable gather with a -march.
  return (DataWidth == 32 || DataWidth == 64) &&
         (ST->hasAVX512() || (ST->hasFastGather() && ST->hasAVX2()));
}

bool X86TTIImpl::isLegalMaskedScatter(Type *DataType) {
  // AVX2 doesn't support scatter.
  if (!ST->hasAVX512())
    return false;
  return isLegalMaskedGather(DataType);
}

bool X86TTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
  EVT VT = TLI->getValueType(DL, DataType);
  return TLI->isOperationLegal(IsSigned ? ISD::SDIVREM : ISD::UDIVREM, VT);
}

bool X86TTIImpl::isFCmpOrdCheaperThanFCmpZero(Type *Ty) {
  return false;
}

bool X86TTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();

  // Work this as a subsetting of subtarget features.
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // FIXME: This is likely too limiting as it will include subtarget features
  // that we might not care about for inlining, but it is conservatively
  // correct.
  return (CallerBits & CalleeBits) == CalleeBits;
}

const X86TTIImpl::TTI::MemCmpExpansionOptions *
X86TTIImpl::enableMemCmpExpansion(bool IsZeroCmp) const {
  // Only enable vector loads for equality comparison.
  // Right now the vector version is not as fast, see #33329.
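  // For example, on a 64-bit target with AVX2 the equality expansion below
  // tries loads of 32, 16, 8, 4, 2 and 1 bytes, while the three-way expansion
  // only uses the scalar sizes 8, 4, 2 and 1.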
2644 static const auto ThreeWayOptions = [this]() { 2645 TTI::MemCmpExpansionOptions Options; 2646 if (ST->is64Bit()) { 2647 Options.LoadSizes.push_back(8); 2648 } 2649 Options.LoadSizes.push_back(4); 2650 Options.LoadSizes.push_back(2); 2651 Options.LoadSizes.push_back(1); 2652 return Options; 2653 }(); 2654 static const auto EqZeroOptions = [this]() { 2655 TTI::MemCmpExpansionOptions Options; 2656 // TODO: enable AVX512 when the DAG is ready. 2657 // if (ST->hasAVX512()) Options.LoadSizes.push_back(64); 2658 if (ST->hasAVX2()) Options.LoadSizes.push_back(32); 2659 if (ST->hasSSE2()) Options.LoadSizes.push_back(16); 2660 if (ST->is64Bit()) { 2661 Options.LoadSizes.push_back(8); 2662 } 2663 Options.LoadSizes.push_back(4); 2664 Options.LoadSizes.push_back(2); 2665 Options.LoadSizes.push_back(1); 2666 return Options; 2667 }(); 2668 return IsZeroCmp ? &EqZeroOptions : &ThreeWayOptions; 2669 } 2670 2671 bool X86TTIImpl::enableInterleavedAccessVectorization() { 2672 // TODO: We expect this to be beneficial regardless of arch, 2673 // but there are currently some unexplained performance artifacts on Atom. 2674 // As a temporary solution, disable on Atom. 2675 return !(ST->isAtom()); 2676 } 2677 2678 // Get estimation for interleaved load/store operations for AVX2. 2679 // \p Factor is the interleaved-access factor (stride) - number of 2680 // (interleaved) elements in the group. 2681 // \p Indices contains the indices for a strided load: when the 2682 // interleaved load has gaps they indicate which elements are used. 2683 // If Indices is empty (or if the number of indices is equal to the size 2684 // of the interleaved-access as given in \p Factor) the access has no gaps. 2685 // 2686 // As opposed to AVX-512, AVX2 does not have generic shuffles that allow 2687 // computing the cost using a generic formula as a function of generic 2688 // shuffles. We therefore use a lookup table instead, filled according to 2689 // the instruction sequences that codegen currently generates. 2690 int X86TTIImpl::getInterleavedMemoryOpCostAVX2(unsigned Opcode, Type *VecTy, 2691 unsigned Factor, 2692 ArrayRef<unsigned> Indices, 2693 unsigned Alignment, 2694 unsigned AddressSpace) { 2695 2696 // We currently Support only fully-interleaved groups, with no gaps. 2697 // TODO: Support also strided loads (interleaved-groups with gaps). 2698 if (Indices.size() && Indices.size() != Factor) 2699 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices, 2700 Alignment, AddressSpace); 2701 2702 // VecTy for interleave memop is <VF*Factor x Elt>. 2703 // So, for VF=4, Interleave Factor = 3, Element type = i32 we have 2704 // VecTy = <12 x i32>. 2705 MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second; 2706 2707 // This function can be called with VecTy=<6xi128>, Factor=3, in which case 2708 // the VF=2, while v2i128 is an unsupported MVT vector type 2709 // (see MachineValueType.h::getVectorVT()). 2710 if (!LegalVT.isVector()) 2711 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices, 2712 Alignment, AddressSpace); 2713 2714 unsigned VF = VecTy->getVectorNumElements() / Factor; 2715 Type *ScalarTy = VecTy->getVectorElementType(); 2716 2717 // Calculate the number of memory operations (NumOfMemOps), required 2718 // for load/store the VecTy. 2719 unsigned VecTySize = DL.getTypeStoreSize(VecTy); 2720 unsigned LegalVTSize = LegalVT.getStoreSize(); 2721 unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize; 2722 2723 // Get the cost of one memory operation. 
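  // For example, for VecTy = <12 x i32> with a 256-bit legal vector type
  // (v8i32), VecTySize is 48 bytes and LegalVTSize is 32, so NumOfMemOps is 2
  // and each memory operation is costed as an <8 x i32> load/store.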
2724 Type *SingleMemOpTy = VectorType::get(VecTy->getVectorElementType(), 2725 LegalVT.getVectorNumElements()); 2726 unsigned MemOpCost = 2727 getMemoryOpCost(Opcode, SingleMemOpTy, Alignment, AddressSpace); 2728 2729 VectorType *VT = VectorType::get(ScalarTy, VF); 2730 EVT ETy = TLI->getValueType(DL, VT); 2731 if (!ETy.isSimple()) 2732 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices, 2733 Alignment, AddressSpace); 2734 2735 // TODO: Complete for other data-types and strides. 2736 // Each combination of Stride, ElementTy and VF results in a different 2737 // sequence; The cost tables are therefore accessed with: 2738 // Factor (stride) and VectorType=VFxElemType. 2739 // The Cost accounts only for the shuffle sequence; 2740 // The cost of the loads/stores is accounted for separately. 2741 // 2742 static const CostTblEntry AVX2InterleavedLoadTbl[] = { 2743 { 2, MVT::v4i64, 6 }, //(load 8i64 and) deinterleave into 2 x 4i64 2744 { 2, MVT::v4f64, 6 }, //(load 8f64 and) deinterleave into 2 x 4f64 2745 2746 { 3, MVT::v2i8, 10 }, //(load 6i8 and) deinterleave into 3 x 2i8 2747 { 3, MVT::v4i8, 4 }, //(load 12i8 and) deinterleave into 3 x 4i8 2748 { 3, MVT::v8i8, 9 }, //(load 24i8 and) deinterleave into 3 x 8i8 2749 { 3, MVT::v16i8, 11}, //(load 48i8 and) deinterleave into 3 x 16i8 2750 { 3, MVT::v32i8, 13}, //(load 96i8 and) deinterleave into 3 x 32i8 2751 { 3, MVT::v8f32, 17 }, //(load 24f32 and)deinterleave into 3 x 8f32 2752 2753 { 4, MVT::v2i8, 12 }, //(load 8i8 and) deinterleave into 4 x 2i8 2754 { 4, MVT::v4i8, 4 }, //(load 16i8 and) deinterleave into 4 x 4i8 2755 { 4, MVT::v8i8, 20 }, //(load 32i8 and) deinterleave into 4 x 8i8 2756 { 4, MVT::v16i8, 39 }, //(load 64i8 and) deinterleave into 4 x 16i8 2757 { 4, MVT::v32i8, 80 }, //(load 128i8 and) deinterleave into 4 x 32i8 2758 2759 { 8, MVT::v8f32, 40 } //(load 64f32 and)deinterleave into 8 x 8f32 2760 }; 2761 2762 static const CostTblEntry AVX2InterleavedStoreTbl[] = { 2763 { 2, MVT::v4i64, 6 }, //interleave into 2 x 4i64 into 8i64 (and store) 2764 { 2, MVT::v4f64, 6 }, //interleave into 2 x 4f64 into 8f64 (and store) 2765 2766 { 3, MVT::v2i8, 7 }, //interleave 3 x 2i8 into 6i8 (and store) 2767 { 3, MVT::v4i8, 8 }, //interleave 3 x 4i8 into 12i8 (and store) 2768 { 3, MVT::v8i8, 11 }, //interleave 3 x 8i8 into 24i8 (and store) 2769 { 3, MVT::v16i8, 11 }, //interleave 3 x 16i8 into 48i8 (and store) 2770 { 3, MVT::v32i8, 13 }, //interleave 3 x 32i8 into 96i8 (and store) 2771 2772 { 4, MVT::v2i8, 12 }, //interleave 4 x 2i8 into 8i8 (and store) 2773 { 4, MVT::v4i8, 9 }, //interleave 4 x 4i8 into 16i8 (and store) 2774 { 4, MVT::v8i8, 10 }, //interleave 4 x 8i8 into 32i8 (and store) 2775 { 4, MVT::v16i8, 10 }, //interleave 4 x 16i8 into 64i8 (and store) 2776 { 4, MVT::v32i8, 12 } //interleave 4 x 32i8 into 128i8 (and store) 2777 }; 2778 2779 if (Opcode == Instruction::Load) { 2780 if (const auto *Entry = 2781 CostTableLookup(AVX2InterleavedLoadTbl, Factor, ETy.getSimpleVT())) 2782 return NumOfMemOps * MemOpCost + Entry->Cost; 2783 } else { 2784 assert(Opcode == Instruction::Store && 2785 "Expected Store Instruction at this point"); 2786 if (const auto *Entry = 2787 CostTableLookup(AVX2InterleavedStoreTbl, Factor, ETy.getSimpleVT())) 2788 return NumOfMemOps * MemOpCost + Entry->Cost; 2789 } 2790 2791 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices, 2792 Alignment, AddressSpace); 2793 } 2794 2795 // Get estimation for interleaved load/store operations and strided load. 
// \p Indices contains the indices for a strided load.
// \p Factor - the factor of interleaving.
// AVX-512 provides 3-src shuffles that significantly reduce the cost.
int X86TTIImpl::getInterleavedMemoryOpCostAVX512(unsigned Opcode, Type *VecTy,
                                                 unsigned Factor,
                                                 ArrayRef<unsigned> Indices,
                                                 unsigned Alignment,
                                                 unsigned AddressSpace) {

  // VecTy for interleave memop is <VF*Factor x Elt>.
  // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
  // VecTy = <12 x i32>.

  // Calculate the number of memory operations (NumOfMemOps) required
  // to load/store the VecTy.
  MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
  unsigned VecTySize = DL.getTypeStoreSize(VecTy);
  unsigned LegalVTSize = LegalVT.getStoreSize();
  unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;

  // Get the cost of one memory operation.
  Type *SingleMemOpTy = VectorType::get(VecTy->getVectorElementType(),
                                        LegalVT.getVectorNumElements());
  unsigned MemOpCost =
      getMemoryOpCost(Opcode, SingleMemOpTy, Alignment, AddressSpace);

  unsigned VF = VecTy->getVectorNumElements() / Factor;
  MVT VT = MVT::getVectorVT(MVT::getVT(VecTy->getScalarType()), VF);

  if (Opcode == Instruction::Load) {
    // The tables (AVX512InterleavedLoadTbl and AVX512InterleavedStoreTbl)
    // contain the cost of the optimized shuffle sequence that the
    // X86InterleavedAccess pass will generate.
    // The cost of loads and stores is computed separately from the table.

    // X86InterleavedAccess supports only the following interleaved-access
    // groups.
    static const CostTblEntry AVX512InterleavedLoadTbl[] = {
        {3, MVT::v16i8, 12}, //(load 48i8 and) deinterleave into 3 x 16i8
        {3, MVT::v32i8, 14}, //(load 96i8 and) deinterleave into 3 x 32i8
        {3, MVT::v64i8, 22}, //(load 192i8 and) deinterleave into 3 x 64i8
    };

    if (const auto *Entry =
            CostTableLookup(AVX512InterleavedLoadTbl, Factor, VT))
      return NumOfMemOps * MemOpCost + Entry->Cost;
    // If an entry does not exist, fall back to the default implementation.

    // The kind of shuffle depends on the number of loaded values.
    // If we load the entire data in one register, we can use a 1-src shuffle.
    // Otherwise, we'll merge 2 sources in each operation.
    TTI::ShuffleKind ShuffleKind =
        (NumOfMemOps > 1) ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc;

    unsigned ShuffleCost =
        getShuffleCost(ShuffleKind, SingleMemOpTy, 0, nullptr);

    unsigned NumOfLoadsInInterleaveGrp =
        Indices.size() ? Indices.size() : Factor;
    Type *ResultTy = VectorType::get(VecTy->getVectorElementType(),
                                     VecTy->getVectorNumElements() / Factor);
    unsigned NumOfResults =
        getTLI()->getTypeLegalizationCost(DL, ResultTy).first *
        NumOfLoadsInInterleaveGrp;

    // About half of the loads may be folded into shuffles when we have only
    // one result. If we have more than one result, we do not fold loads at
    // all.
    unsigned NumOfUnfoldedLoads =
        NumOfResults > 1 ? NumOfMemOps : NumOfMemOps / 2;

    // Get the number of shuffle operations per result.
    unsigned NumOfShufflesPerResult =
        std::max((unsigned)1, (unsigned)(NumOfMemOps - 1));

    // The SK_PermuteTwoSrc shuffle clobbers one of its source operands.
    // When we have more than one destination, we need additional instructions
    // to keep the sources.
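    // The resulting cost of the load case is then:
    //   NumOfResults * NumOfShufflesPerResult * ShuffleCost
    //   + NumOfUnfoldedLoads * MemOpCost + NumOfMoves.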
    unsigned NumOfMoves = 0;
    if (NumOfResults > 1 && ShuffleKind == TTI::SK_PermuteTwoSrc)
      NumOfMoves = NumOfResults * NumOfShufflesPerResult / 2;

    int Cost = NumOfResults * NumOfShufflesPerResult * ShuffleCost +
               NumOfUnfoldedLoads * MemOpCost + NumOfMoves;

    return Cost;
  }

  // Store.
  assert(Opcode == Instruction::Store &&
         "Expected Store Instruction at this point");
  // X86InterleavedAccess supports only the following interleaved-access
  // groups.
  static const CostTblEntry AVX512InterleavedStoreTbl[] = {
      {3, MVT::v16i8, 12}, // interleave 3 x 16i8 into 48i8 (and store)
      {3, MVT::v32i8, 14}, // interleave 3 x 32i8 into 96i8 (and store)
      {3, MVT::v64i8, 26}, // interleave 3 x 64i8 into 192i8 (and store)

      {4, MVT::v8i8, 10},  // interleave 4 x 8i8 into 32i8 (and store)
      {4, MVT::v16i8, 11}, // interleave 4 x 16i8 into 64i8 (and store)
      {4, MVT::v32i8, 14}, // interleave 4 x 32i8 into 128i8 (and store)
      {4, MVT::v64i8, 24}  // interleave 4 x 64i8 into 256i8 (and store)
  };

  if (const auto *Entry =
          CostTableLookup(AVX512InterleavedStoreTbl, Factor, VT))
    return NumOfMemOps * MemOpCost + Entry->Cost;
  // If an entry does not exist, fall back to the default implementation.

  // There are no strided stores at the moment, and a store can't be folded
  // into a shuffle.
  unsigned NumOfSources = Factor; // The number of values to be merged.
  unsigned ShuffleCost =
      getShuffleCost(TTI::SK_PermuteTwoSrc, SingleMemOpTy, 0, nullptr);
  unsigned NumOfShufflesPerStore = NumOfSources - 1;

  // The SK_PermuteTwoSrc shuffle clobbers one of its source operands.
  // We need additional instructions to keep the sources.
  unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2;
  int Cost = NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) +
             NumOfMoves;
  return Cost;
}

int X86TTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                           unsigned Factor,
                                           ArrayRef<unsigned> Indices,
                                           unsigned Alignment,
                                           unsigned AddressSpace) {
  auto isSupportedOnAVX512 = [](Type *VecTy, bool HasBW) {
    Type *EltTy = VecTy->getVectorElementType();
    if (EltTy->isFloatTy() || EltTy->isDoubleTy() || EltTy->isIntegerTy(64) ||
        EltTy->isIntegerTy(32) || EltTy->isPointerTy())
      return true;
    if (EltTy->isIntegerTy(16) || EltTy->isIntegerTy(8))
      return HasBW;
    return false;
  };
  if (ST->hasAVX512() && isSupportedOnAVX512(VecTy, ST->hasBWI()))
    return getInterleavedMemoryOpCostAVX512(Opcode, VecTy, Factor, Indices,
                                            Alignment, AddressSpace);
  if (ST->hasAVX2())
    return getInterleavedMemoryOpCostAVX2(Opcode, VecTy, Factor, Indices,
                                          Alignment, AddressSpace);

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace);
}