//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//
/// A note about the cost model numbers used below: they correspond to some
/// "generic" X86 CPU rather than a concrete CPU model. Usually the numbers
/// correspond to the CPU where the feature first appeared. For example, if we
/// do Subtarget.hasSSE42() in the lookups below, the cost is based on Nehalem,
/// as that was the first CPU to support that feature level and thus most
/// likely has the worst-case cost.
/// Some examples of other technologies/CPUs:
///   SSE 3   - Pentium4 / Athlon64
///   SSE 4.1 - Penryn
///   SSE 4.2 - Nehalem
///   AVX     - Sandy Bridge
///   AVX2    - Haswell
///   AVX-512 - Xeon Phi / Skylake
/// And some examples of instruction target-dependent costs (latency):
///                   divss     sqrtss    rsqrtss
///   AMD K7          11-16     19        3
///   Piledriver      9-24      13-15     5
///   Jaguar          14        16        2
///   Pentium II,III  18        30        2
///   Nehalem         7-14      7-18      3
///   Haswell         10-13     11        5
/// TODO: Develop and implement the target-dependent cost model and specialize
/// cost numbers for different cost model targets such as throughput, code
/// size, latency and uop count.
//===----------------------------------------------------------------------===//

#include "X86TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

#define DEBUG_TYPE "x86tti"

//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  //   instructions is inefficient. Once the problem is fixed, we should
  //   call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
}

llvm::Optional<unsigned> X86TTIImpl::getCacheSize(
    TargetTransformInfo::CacheLevel Level) const {
  switch (Level) {
  case TargetTransformInfo::CacheLevel::L1D:
    //   - Penryn
    //   - Nehalem
    //   - Westmere
    //   - Sandy Bridge
    //   - Ivy Bridge
    //   - Haswell
    //   - Broadwell
    //   - Skylake
    //   - Kabylake
    return 32 * 1024;  //  32 KByte
  case TargetTransformInfo::CacheLevel::L2D:
    //   - Penryn
    //   - Nehalem
    //   - Westmere
    //   - Sandy Bridge
    //   - Ivy Bridge
    //   - Haswell
    //   - Broadwell
    //   - Skylake
    //   - Kabylake
    return 256 * 1024; // 256 KByte
  }

  llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
}

llvm::Optional<unsigned> X86TTIImpl::getCacheAssociativity(
    TargetTransformInfo::CacheLevel Level) const {
  //   - Penryn
  //   - Nehalem
  //   - Westmere
  //   - Sandy Bridge
  //   - Ivy Bridge
  //   - Haswell
  //   - Broadwell
  //   - Skylake
  //   - Kabylake
  switch (Level) {
  case TargetTransformInfo::CacheLevel::L1D:
    LLVM_FALLTHROUGH;
  case TargetTransformInfo::CacheLevel::L2D:
    return 8;
  }

  llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
}

unsigned X86TTIImpl::getNumberOfRegisters(bool Vector) {
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit()) {
    if (Vector && ST->hasAVX512())
      return 32;
    return 16;
  }
  return 8;
}

unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) const {
  unsigned PreferVectorWidth = ST->getPreferVectorWidth();
  if (Vector) {
    if (ST->hasAVX512() && PreferVectorWidth >= 512)
      return 512;
    if (ST->hasAVX() && PreferVectorWidth >= 256)
      return 256;
    if (ST->hasSSE1() && PreferVectorWidth >= 128)
      return 128;
    return 0;
  }

  if (ST->is64Bit())
    return 64;

  return 32;
}

unsigned X86TTIImpl::getLoadStoreVecRegBitWidth(unsigned) const {
  return getRegisterBitWidth(true);
}

unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // If the loop will not be vectorized, don't interleave the loop.
  // Let the regular unroller handle it instead; that saves the overflow
  // check and memory check cost.
  if (VF == 1)
    return 1;

  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}

int X86TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty,
    TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
    TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo,
    ArrayRef<const Value *> Args) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry SLMCostTable[] = {
    { ISD::MUL,  MVT::v4i32, 11 }, // pmulld
    { ISD::MUL,  MVT::v8i16,  2 }, // pmullw
    { ISD::MUL,  MVT::v16i8, 14 }, // extend/pmullw/trunc sequence.
    { ISD::FMUL, MVT::f64,    2 }, // mulsd
    { ISD::FMUL, MVT::v2f64,  4 }, // mulpd
    { ISD::FMUL, MVT::v4f32,  2 }, // mulps
    { ISD::FDIV, MVT::f32,   17 }, // divss
    { ISD::FDIV, MVT::v4f32, 39 }, // divps
    { ISD::FDIV, MVT::f64,   32 }, // divsd
    { ISD::FDIV, MVT::v2f64, 69 }, // divpd
    { ISD::FADD, MVT::v2f64,  2 }, // addpd
    { ISD::FSUB, MVT::v2f64,  2 }, // subpd
    // v2i64/v4i64 mul is custom lowered as a series of long
    // multiplies(3), shifts(3) and adds(2).
    // slm muldq throughput is 2 and addq throughput is 4,
    // thus: 3X2 (muldq throughput) + 3X1 (shift throughput) +
    // 2X4 (addq throughput) = 17
    { ISD::MUL,  MVT::v2i64, 17 },
    // slm addq\subq throughput is 4
    { ISD::ADD,  MVT::v2i64,  4 },
    { ISD::SUB,  MVT::v2i64,  4 },
  };

  if (ST->isSLM()) {
    if (Args.size() == 2 && ISD == ISD::MUL && LT.second == MVT::v4i32) {
      // Check if the operands can be shrunk into a smaller datatype.
      bool Op1Signed = false;
      unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
      bool Op2Signed = false;
      unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);

      bool signedMode = Op1Signed | Op2Signed;
      unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);

      if (OpMinSize <= 7)
        return LT.first * 3; // pmullw/sext
      if (!signedMode && OpMinSize <= 8)
        return LT.first * 3; // pmullw/zext
      if (OpMinSize <= 15)
        return LT.first * 5; // pmullw/pmulhw/pshuf
      if (!signedMode && OpMinSize <= 16)
        return LT.first * 5; // pmullw/pmulhw/pshuf
    }
    if (const auto *Entry = CostTableLookup(SLMCostTable, ISD,
                                            LT.second)) {
      return LT.first * Entry->Cost;
    }
  }

  if (ISD == ISD::SDIV &&
      Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    // On X86, vector signed division by a power-of-two constant is
    // normally expanded to the sequence SRA + SRL + ADD + SRA.
    // The OperandValue properties may not be the same as those of the
    // previous operation; conservatively assume OP_None.
    int Cost = 2 * getArithmeticInstrCost(Instruction::AShr, Ty, Op1Info,
                                          Op2Info, TargetTransformInfo::OP_None,
                                          TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::LShr, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Add, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);

    return Cost;
  }

  static const CostTblEntry AVX512BWUniformConstCostTable[] = {
    { ISD::SHL,  MVT::v64i8,   2 }, // psllw + pand.
    { ISD::SRL,  MVT::v64i8,   2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v64i8,   4 }, // psrlw, pand, pxor, psubb.
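    // Note (editorial, not from the original source): the SDIV/UDIV entries
    // that follow model division by a uniform constant, which is lowered to a
    // multiply-high based sequence (vpmulhw/vpmulhuw for i16 elements,
    // vpmuldq/vpmuludq for i32 elements) rather than to a hardware divide.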
258 259 { ISD::SDIV, MVT::v32i16, 6 }, // vpmulhw sequence 260 { ISD::UDIV, MVT::v32i16, 6 }, // vpmulhuw sequence 261 }; 262 263 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue && 264 ST->hasBWI()) { 265 if (const auto *Entry = CostTableLookup(AVX512BWUniformConstCostTable, ISD, 266 LT.second)) 267 return LT.first * Entry->Cost; 268 } 269 270 static const CostTblEntry AVX512UniformConstCostTable[] = { 271 { ISD::SRA, MVT::v2i64, 1 }, 272 { ISD::SRA, MVT::v4i64, 1 }, 273 { ISD::SRA, MVT::v8i64, 1 }, 274 275 { ISD::SDIV, MVT::v16i32, 15 }, // vpmuldq sequence 276 { ISD::UDIV, MVT::v16i32, 15 }, // vpmuludq sequence 277 }; 278 279 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue && 280 ST->hasAVX512()) { 281 if (const auto *Entry = CostTableLookup(AVX512UniformConstCostTable, ISD, 282 LT.second)) 283 return LT.first * Entry->Cost; 284 } 285 286 static const CostTblEntry AVX2UniformConstCostTable[] = { 287 { ISD::SHL, MVT::v32i8, 2 }, // psllw + pand. 288 { ISD::SRL, MVT::v32i8, 2 }, // psrlw + pand. 289 { ISD::SRA, MVT::v32i8, 4 }, // psrlw, pand, pxor, psubb. 290 291 { ISD::SRA, MVT::v4i64, 4 }, // 2 x psrad + shuffle. 292 293 { ISD::SDIV, MVT::v16i16, 6 }, // vpmulhw sequence 294 { ISD::UDIV, MVT::v16i16, 6 }, // vpmulhuw sequence 295 { ISD::SDIV, MVT::v8i32, 15 }, // vpmuldq sequence 296 { ISD::UDIV, MVT::v8i32, 15 }, // vpmuludq sequence 297 }; 298 299 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue && 300 ST->hasAVX2()) { 301 if (const auto *Entry = CostTableLookup(AVX2UniformConstCostTable, ISD, 302 LT.second)) 303 return LT.first * Entry->Cost; 304 } 305 306 static const CostTblEntry SSE2UniformConstCostTable[] = { 307 { ISD::SHL, MVT::v16i8, 2 }, // psllw + pand. 308 { ISD::SRL, MVT::v16i8, 2 }, // psrlw + pand. 309 { ISD::SRA, MVT::v16i8, 4 }, // psrlw, pand, pxor, psubb. 310 311 { ISD::SHL, MVT::v32i8, 4+2 }, // 2*(psllw + pand) + split. 312 { ISD::SRL, MVT::v32i8, 4+2 }, // 2*(psrlw + pand) + split. 313 { ISD::SRA, MVT::v32i8, 8+2 }, // 2*(psrlw, pand, pxor, psubb) + split. 314 315 { ISD::SDIV, MVT::v16i16, 12+2 }, // 2*pmulhw sequence + split. 316 { ISD::SDIV, MVT::v8i16, 6 }, // pmulhw sequence 317 { ISD::UDIV, MVT::v16i16, 12+2 }, // 2*pmulhuw sequence + split. 318 { ISD::UDIV, MVT::v8i16, 6 }, // pmulhuw sequence 319 { ISD::SDIV, MVT::v8i32, 38+2 }, // 2*pmuludq sequence + split. 320 { ISD::SDIV, MVT::v4i32, 19 }, // pmuludq sequence 321 { ISD::UDIV, MVT::v8i32, 30+2 }, // 2*pmuludq sequence + split. 322 { ISD::UDIV, MVT::v4i32, 15 }, // pmuludq sequence 323 }; 324 325 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue && 326 ST->hasSSE2()) { 327 // pmuldq sequence. 328 if (ISD == ISD::SDIV && LT.second == MVT::v8i32 && ST->hasAVX()) 329 return LT.first * 32; 330 if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41()) 331 return LT.first * 15; 332 333 // XOP has faster vXi8 shifts. 334 if ((ISD != ISD::SHL && ISD != ISD::SRL && ISD != ISD::SRA) || 335 !ST->hasXOP()) 336 if (const auto *Entry = 337 CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second)) 338 return LT.first * Entry->Cost; 339 } 340 341 static const CostTblEntry AVX2UniformCostTable[] = { 342 // Uniform splats are cheaper for the following instructions. 343 { ISD::SHL, MVT::v16i16, 1 }, // psllw. 344 { ISD::SRL, MVT::v16i16, 1 }, // psrlw. 345 { ISD::SRA, MVT::v16i16, 1 }, // psraw. 
346 }; 347 348 if (ST->hasAVX2() && 349 ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) || 350 (Op2Info == TargetTransformInfo::OK_UniformValue))) { 351 if (const auto *Entry = 352 CostTableLookup(AVX2UniformCostTable, ISD, LT.second)) 353 return LT.first * Entry->Cost; 354 } 355 356 static const CostTblEntry SSE2UniformCostTable[] = { 357 // Uniform splats are cheaper for the following instructions. 358 { ISD::SHL, MVT::v8i16, 1 }, // psllw. 359 { ISD::SHL, MVT::v4i32, 1 }, // pslld 360 { ISD::SHL, MVT::v2i64, 1 }, // psllq. 361 362 { ISD::SRL, MVT::v8i16, 1 }, // psrlw. 363 { ISD::SRL, MVT::v4i32, 1 }, // psrld. 364 { ISD::SRL, MVT::v2i64, 1 }, // psrlq. 365 366 { ISD::SRA, MVT::v8i16, 1 }, // psraw. 367 { ISD::SRA, MVT::v4i32, 1 }, // psrad. 368 }; 369 370 if (ST->hasSSE2() && 371 ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) || 372 (Op2Info == TargetTransformInfo::OK_UniformValue))) { 373 if (const auto *Entry = 374 CostTableLookup(SSE2UniformCostTable, ISD, LT.second)) 375 return LT.first * Entry->Cost; 376 } 377 378 static const CostTblEntry AVX512DQCostTable[] = { 379 { ISD::MUL, MVT::v2i64, 1 }, 380 { ISD::MUL, MVT::v4i64, 1 }, 381 { ISD::MUL, MVT::v8i64, 1 } 382 }; 383 384 // Look for AVX512DQ lowering tricks for custom cases. 385 if (ST->hasDQI()) 386 if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD, LT.second)) 387 return LT.first * Entry->Cost; 388 389 static const CostTblEntry AVX512BWCostTable[] = { 390 { ISD::SHL, MVT::v8i16, 1 }, // vpsllvw 391 { ISD::SRL, MVT::v8i16, 1 }, // vpsrlvw 392 { ISD::SRA, MVT::v8i16, 1 }, // vpsravw 393 394 { ISD::SHL, MVT::v16i16, 1 }, // vpsllvw 395 { ISD::SRL, MVT::v16i16, 1 }, // vpsrlvw 396 { ISD::SRA, MVT::v16i16, 1 }, // vpsravw 397 398 { ISD::SHL, MVT::v32i16, 1 }, // vpsllvw 399 { ISD::SRL, MVT::v32i16, 1 }, // vpsrlvw 400 { ISD::SRA, MVT::v32i16, 1 }, // vpsravw 401 402 { ISD::SHL, MVT::v64i8, 11 }, // vpblendvb sequence. 403 { ISD::SRL, MVT::v64i8, 11 }, // vpblendvb sequence. 404 { ISD::SRA, MVT::v64i8, 24 }, // vpblendvb sequence. 405 406 { ISD::MUL, MVT::v64i8, 11 }, // extend/pmullw/trunc sequence. 407 { ISD::MUL, MVT::v32i8, 4 }, // extend/pmullw/trunc sequence. 408 { ISD::MUL, MVT::v16i8, 4 }, // extend/pmullw/trunc sequence. 409 410 // Vectorizing division is a bad idea. See the SSE2 table for more comments. 411 { ISD::SDIV, MVT::v64i8, 64*20 }, 412 { ISD::SDIV, MVT::v32i16, 32*20 }, 413 { ISD::UDIV, MVT::v64i8, 64*20 }, 414 { ISD::UDIV, MVT::v32i16, 32*20 } 415 }; 416 417 // Look for AVX512BW lowering tricks for custom cases. 418 if (ST->hasBWI()) 419 if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD, LT.second)) 420 return LT.first * Entry->Cost; 421 422 static const CostTblEntry AVX512CostTable[] = { 423 { ISD::SHL, MVT::v16i32, 1 }, 424 { ISD::SRL, MVT::v16i32, 1 }, 425 { ISD::SRA, MVT::v16i32, 1 }, 426 427 { ISD::SHL, MVT::v8i64, 1 }, 428 { ISD::SRL, MVT::v8i64, 1 }, 429 430 { ISD::SRA, MVT::v2i64, 1 }, 431 { ISD::SRA, MVT::v4i64, 1 }, 432 { ISD::SRA, MVT::v8i64, 1 }, 433 434 { ISD::MUL, MVT::v32i8, 13 }, // extend/pmullw/trunc sequence. 435 { ISD::MUL, MVT::v16i8, 5 }, // extend/pmullw/trunc sequence. 
    { ISD::MUL,  MVT::v16i32,    1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,  MVT::v8i32,     1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,  MVT::v4i32,     1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,  MVT::v8i64,     8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FADD, MVT::v8f64,     1 }, // Skylake from http://www.agner.org/
    { ISD::FSUB, MVT::v8f64,     1 }, // Skylake from http://www.agner.org/
    { ISD::FMUL, MVT::v8f64,     1 }, // Skylake from http://www.agner.org/

    { ISD::FADD, MVT::v16f32,    1 }, // Skylake from http://www.agner.org/
    { ISD::FSUB, MVT::v16f32,    1 }, // Skylake from http://www.agner.org/
    { ISD::FMUL, MVT::v16f32,    1 }, // Skylake from http://www.agner.org/

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV, MVT::v16i32, 16*20 },
    { ISD::SDIV, MVT::v8i64,   8*20 },
    { ISD::UDIV, MVT::v16i32, 16*20 },
    { ISD::UDIV, MVT::v8i64,   8*20 }
  };

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX2ShiftCostTable[] = {
    // Shifts on v4i64/v8i32 are legal on AVX2 even though we declare them as
    // custom so that we can detect the cases where the shift amount is a
    // uniform scalar.
    { ISD::SHL, MVT::v4i32, 1 },
    { ISD::SRL, MVT::v4i32, 1 },
    { ISD::SRA, MVT::v4i32, 1 },
    { ISD::SHL, MVT::v8i32, 1 },
    { ISD::SRL, MVT::v8i32, 1 },
    { ISD::SRA, MVT::v8i32, 1 },
    { ISD::SHL, MVT::v2i64, 1 },
    { ISD::SRL, MVT::v2i64, 1 },
    { ISD::SHL, MVT::v4i64, 1 },
    { ISD::SRL, MVT::v4i64, 1 },
  };

  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return LT.first;

    if (const auto *Entry = CostTableLookup(AVX2ShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry XOPShiftCostTable[] = {
    // 128bit shifts take 1cy, but right shifts require negation beforehand.
    { ISD::SHL, MVT::v16i8,    1 },
    { ISD::SRL, MVT::v16i8,    2 },
    { ISD::SRA, MVT::v16i8,    2 },
    { ISD::SHL, MVT::v8i16,    1 },
    { ISD::SRL, MVT::v8i16,    2 },
    { ISD::SRA, MVT::v8i16,    2 },
    { ISD::SHL, MVT::v4i32,    1 },
    { ISD::SRL, MVT::v4i32,    2 },
    { ISD::SRA, MVT::v4i32,    2 },
    { ISD::SHL, MVT::v2i64,    1 },
    { ISD::SRL, MVT::v2i64,    2 },
    { ISD::SRA, MVT::v2i64,    2 },
    // 256bit shifts require splitting if AVX2 didn't catch them above.
    { ISD::SHL, MVT::v32i8,  2+2 },
    { ISD::SRL, MVT::v32i8,  4+2 },
    { ISD::SRA, MVT::v32i8,  4+2 },
    { ISD::SHL, MVT::v16i16, 2+2 },
    { ISD::SRL, MVT::v16i16, 4+2 },
    { ISD::SRA, MVT::v16i16, 4+2 },
    { ISD::SHL, MVT::v8i32,  2+2 },
    { ISD::SRL, MVT::v8i32,  4+2 },
    { ISD::SRA, MVT::v8i32,  4+2 },
    { ISD::SHL, MVT::v4i64,  2+2 },
    { ISD::SRL, MVT::v4i64,  4+2 },
    { ISD::SRA, MVT::v4i64,  4+2 },
  };

  // Look for XOP lowering tricks.
  if (ST->hasXOP())
    if (const auto *Entry = CostTableLookup(XOPShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE2UniformShiftCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
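    // Illustrative note (editorial, not from the original source): a "uniform
    // splat" shift is one where every lane is shifted by the same amount, e.g.
    //   %r = shl <4 x i32> %x, <i32 5, i32 5, i32 5, i32 5>
    // which can use the single-count pslld form instead of a general
    // per-lane variable shift.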
524 { ISD::SHL, MVT::v16i16, 2+2 }, // 2*psllw + split. 525 { ISD::SHL, MVT::v8i32, 2+2 }, // 2*pslld + split. 526 { ISD::SHL, MVT::v4i64, 2+2 }, // 2*psllq + split. 527 528 { ISD::SRL, MVT::v16i16, 2+2 }, // 2*psrlw + split. 529 { ISD::SRL, MVT::v8i32, 2+2 }, // 2*psrld + split. 530 { ISD::SRL, MVT::v4i64, 2+2 }, // 2*psrlq + split. 531 532 { ISD::SRA, MVT::v16i16, 2+2 }, // 2*psraw + split. 533 { ISD::SRA, MVT::v8i32, 2+2 }, // 2*psrad + split. 534 { ISD::SRA, MVT::v2i64, 4 }, // 2*psrad + shuffle. 535 { ISD::SRA, MVT::v4i64, 8+2 }, // 2*(2*psrad + shuffle) + split. 536 }; 537 538 if (ST->hasSSE2() && 539 ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) || 540 (Op2Info == TargetTransformInfo::OK_UniformValue))) { 541 542 // Handle AVX2 uniform v4i64 ISD::SRA, it's not worth a table. 543 if (ISD == ISD::SRA && LT.second == MVT::v4i64 && ST->hasAVX2()) 544 return LT.first * 4; // 2*psrad + shuffle. 545 546 if (const auto *Entry = 547 CostTableLookup(SSE2UniformShiftCostTable, ISD, LT.second)) 548 return LT.first * Entry->Cost; 549 } 550 551 if (ISD == ISD::SHL && 552 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) { 553 MVT VT = LT.second; 554 // Vector shift left by non uniform constant can be lowered 555 // into vector multiply. 556 if (((VT == MVT::v8i16 || VT == MVT::v4i32) && ST->hasSSE2()) || 557 ((VT == MVT::v16i16 || VT == MVT::v8i32) && ST->hasAVX())) 558 ISD = ISD::MUL; 559 } 560 561 static const CostTblEntry AVX2CostTable[] = { 562 { ISD::SHL, MVT::v32i8, 11 }, // vpblendvb sequence. 563 { ISD::SHL, MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence. 564 565 { ISD::SRL, MVT::v32i8, 11 }, // vpblendvb sequence. 566 { ISD::SRL, MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence. 567 568 { ISD::SRA, MVT::v32i8, 24 }, // vpblendvb sequence. 569 { ISD::SRA, MVT::v16i16, 10 }, // extend/vpsravd/pack sequence. 570 { ISD::SRA, MVT::v2i64, 4 }, // srl/xor/sub sequence. 571 { ISD::SRA, MVT::v4i64, 4 }, // srl/xor/sub sequence. 572 573 { ISD::SUB, MVT::v32i8, 1 }, // psubb 574 { ISD::ADD, MVT::v32i8, 1 }, // paddb 575 { ISD::SUB, MVT::v16i16, 1 }, // psubw 576 { ISD::ADD, MVT::v16i16, 1 }, // paddw 577 { ISD::SUB, MVT::v8i32, 1 }, // psubd 578 { ISD::ADD, MVT::v8i32, 1 }, // paddd 579 { ISD::SUB, MVT::v4i64, 1 }, // psubq 580 { ISD::ADD, MVT::v4i64, 1 }, // paddq 581 582 { ISD::MUL, MVT::v32i8, 17 }, // extend/pmullw/trunc sequence. 583 { ISD::MUL, MVT::v16i8, 7 }, // extend/pmullw/trunc sequence. 
584 { ISD::MUL, MVT::v16i16, 1 }, // pmullw 585 { ISD::MUL, MVT::v8i32, 2 }, // pmulld (Haswell from agner.org) 586 { ISD::MUL, MVT::v4i64, 8 }, // 3*pmuludq/3*shift/2*add 587 588 { ISD::FADD, MVT::v4f64, 1 }, // Haswell from http://www.agner.org/ 589 { ISD::FADD, MVT::v8f32, 1 }, // Haswell from http://www.agner.org/ 590 { ISD::FSUB, MVT::v4f64, 1 }, // Haswell from http://www.agner.org/ 591 { ISD::FSUB, MVT::v8f32, 1 }, // Haswell from http://www.agner.org/ 592 { ISD::FMUL, MVT::v4f64, 1 }, // Haswell from http://www.agner.org/ 593 { ISD::FMUL, MVT::v8f32, 1 }, // Haswell from http://www.agner.org/ 594 595 { ISD::FDIV, MVT::f32, 7 }, // Haswell from http://www.agner.org/ 596 { ISD::FDIV, MVT::v4f32, 7 }, // Haswell from http://www.agner.org/ 597 { ISD::FDIV, MVT::v8f32, 14 }, // Haswell from http://www.agner.org/ 598 { ISD::FDIV, MVT::f64, 14 }, // Haswell from http://www.agner.org/ 599 { ISD::FDIV, MVT::v2f64, 14 }, // Haswell from http://www.agner.org/ 600 { ISD::FDIV, MVT::v4f64, 28 }, // Haswell from http://www.agner.org/ 601 }; 602 603 // Look for AVX2 lowering tricks for custom cases. 604 if (ST->hasAVX2()) 605 if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second)) 606 return LT.first * Entry->Cost; 607 608 static const CostTblEntry AVX1CostTable[] = { 609 // We don't have to scalarize unsupported ops. We can issue two half-sized 610 // operations and we only need to extract the upper YMM half. 611 // Two ops + 1 extract + 1 insert = 4. 612 { ISD::MUL, MVT::v16i16, 4 }, 613 { ISD::MUL, MVT::v8i32, 4 }, 614 { ISD::SUB, MVT::v32i8, 4 }, 615 { ISD::ADD, MVT::v32i8, 4 }, 616 { ISD::SUB, MVT::v16i16, 4 }, 617 { ISD::ADD, MVT::v16i16, 4 }, 618 { ISD::SUB, MVT::v8i32, 4 }, 619 { ISD::ADD, MVT::v8i32, 4 }, 620 { ISD::SUB, MVT::v4i64, 4 }, 621 { ISD::ADD, MVT::v4i64, 4 }, 622 623 // A v4i64 multiply is custom lowered as two split v2i64 vectors that then 624 // are lowered as a series of long multiplies(3), shifts(3) and adds(2) 625 // Because we believe v4i64 to be a legal type, we must also include the 626 // extract+insert in the cost table. Therefore, the cost here is 18 627 // instead of 8. 628 { ISD::MUL, MVT::v4i64, 18 }, 629 630 { ISD::MUL, MVT::v32i8, 26 }, // extend/pmullw/trunc sequence. 631 632 { ISD::FDIV, MVT::f32, 14 }, // SNB from http://www.agner.org/ 633 { ISD::FDIV, MVT::v4f32, 14 }, // SNB from http://www.agner.org/ 634 { ISD::FDIV, MVT::v8f32, 28 }, // SNB from http://www.agner.org/ 635 { ISD::FDIV, MVT::f64, 22 }, // SNB from http://www.agner.org/ 636 { ISD::FDIV, MVT::v2f64, 22 }, // SNB from http://www.agner.org/ 637 { ISD::FDIV, MVT::v4f64, 44 }, // SNB from http://www.agner.org/ 638 639 // Vectorizing division is a bad idea. See the SSE2 table for more comments. 
640 { ISD::SDIV, MVT::v32i8, 32*20 }, 641 { ISD::SDIV, MVT::v16i16, 16*20 }, 642 { ISD::SDIV, MVT::v8i32, 8*20 }, 643 { ISD::SDIV, MVT::v4i64, 4*20 }, 644 { ISD::UDIV, MVT::v32i8, 32*20 }, 645 { ISD::UDIV, MVT::v16i16, 16*20 }, 646 { ISD::UDIV, MVT::v8i32, 8*20 }, 647 { ISD::UDIV, MVT::v4i64, 4*20 }, 648 }; 649 650 if (ST->hasAVX()) 651 if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, LT.second)) 652 return LT.first * Entry->Cost; 653 654 static const CostTblEntry SSE42CostTable[] = { 655 { ISD::FADD, MVT::f64, 1 }, // Nehalem from http://www.agner.org/ 656 { ISD::FADD, MVT::f32, 1 }, // Nehalem from http://www.agner.org/ 657 { ISD::FADD, MVT::v2f64, 1 }, // Nehalem from http://www.agner.org/ 658 { ISD::FADD, MVT::v4f32, 1 }, // Nehalem from http://www.agner.org/ 659 660 { ISD::FSUB, MVT::f64, 1 }, // Nehalem from http://www.agner.org/ 661 { ISD::FSUB, MVT::f32 , 1 }, // Nehalem from http://www.agner.org/ 662 { ISD::FSUB, MVT::v2f64, 1 }, // Nehalem from http://www.agner.org/ 663 { ISD::FSUB, MVT::v4f32, 1 }, // Nehalem from http://www.agner.org/ 664 665 { ISD::FMUL, MVT::f64, 1 }, // Nehalem from http://www.agner.org/ 666 { ISD::FMUL, MVT::f32, 1 }, // Nehalem from http://www.agner.org/ 667 { ISD::FMUL, MVT::v2f64, 1 }, // Nehalem from http://www.agner.org/ 668 { ISD::FMUL, MVT::v4f32, 1 }, // Nehalem from http://www.agner.org/ 669 670 { ISD::FDIV, MVT::f32, 14 }, // Nehalem from http://www.agner.org/ 671 { ISD::FDIV, MVT::v4f32, 14 }, // Nehalem from http://www.agner.org/ 672 { ISD::FDIV, MVT::f64, 22 }, // Nehalem from http://www.agner.org/ 673 { ISD::FDIV, MVT::v2f64, 22 }, // Nehalem from http://www.agner.org/ 674 }; 675 676 if (ST->hasSSE42()) 677 if (const auto *Entry = CostTableLookup(SSE42CostTable, ISD, LT.second)) 678 return LT.first * Entry->Cost; 679 680 static const CostTblEntry SSE41CostTable[] = { 681 { ISD::SHL, MVT::v16i8, 11 }, // pblendvb sequence. 682 { ISD::SHL, MVT::v32i8, 2*11+2 }, // pblendvb sequence + split. 683 { ISD::SHL, MVT::v8i16, 14 }, // pblendvb sequence. 684 { ISD::SHL, MVT::v16i16, 2*14+2 }, // pblendvb sequence + split. 685 { ISD::SHL, MVT::v4i32, 4 }, // pslld/paddd/cvttps2dq/pmulld 686 { ISD::SHL, MVT::v8i32, 2*4+2 }, // pslld/paddd/cvttps2dq/pmulld + split 687 688 { ISD::SRL, MVT::v16i8, 12 }, // pblendvb sequence. 689 { ISD::SRL, MVT::v32i8, 2*12+2 }, // pblendvb sequence + split. 690 { ISD::SRL, MVT::v8i16, 14 }, // pblendvb sequence. 691 { ISD::SRL, MVT::v16i16, 2*14+2 }, // pblendvb sequence + split. 692 { ISD::SRL, MVT::v4i32, 11 }, // Shift each lane + blend. 693 { ISD::SRL, MVT::v8i32, 2*11+2 }, // Shift each lane + blend + split. 694 695 { ISD::SRA, MVT::v16i8, 24 }, // pblendvb sequence. 696 { ISD::SRA, MVT::v32i8, 2*24+2 }, // pblendvb sequence + split. 697 { ISD::SRA, MVT::v8i16, 14 }, // pblendvb sequence. 698 { ISD::SRA, MVT::v16i16, 2*14+2 }, // pblendvb sequence + split. 699 { ISD::SRA, MVT::v4i32, 12 }, // Shift each lane + blend. 700 { ISD::SRA, MVT::v8i32, 2*12+2 }, // Shift each lane + blend + split. 701 702 { ISD::MUL, MVT::v4i32, 2 } // pmulld (Nehalem from agner.org) 703 }; 704 705 if (ST->hasSSE41()) 706 if (const auto *Entry = CostTableLookup(SSE41CostTable, ISD, LT.second)) 707 return LT.first * Entry->Cost; 708 709 static const CostTblEntry SSE2CostTable[] = { 710 // We don't correctly identify costs of casts because they are marked as 711 // custom. 712 { ISD::SHL, MVT::v16i8, 26 }, // cmpgtb sequence. 713 { ISD::SHL, MVT::v8i16, 32 }, // cmpgtb sequence. 
    { ISD::SHL,  MVT::v4i32,    2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v2i64,      4 }, // splat+shuffle sequence.
    { ISD::SHL,  MVT::v4i64,  2*4+2 }, // splat+shuffle sequence + split.

    { ISD::SRL,  MVT::v16i8,     26 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v8i16,     32 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v4i32,     16 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v2i64,      4 }, // splat+shuffle sequence.
    { ISD::SRL,  MVT::v4i64,  2*4+2 }, // splat+shuffle sequence + split.

    { ISD::SRA,  MVT::v16i8,     54 }, // unpacked cmpgtb sequence.
    { ISD::SRA,  MVT::v8i16,     32 }, // cmpgtb sequence.
    { ISD::SRA,  MVT::v4i32,     16 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v2i64,     12 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64, 2*12+2 }, // srl/xor/sub sequence+split.

    { ISD::MUL,  MVT::v16i8,     12 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v8i16,      1 }, // pmullw
    { ISD::MUL,  MVT::v4i32,      6 }, // 3*pmuludq/4*shuffle
    { ISD::MUL,  MVT::v2i64,      8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FDIV, MVT::f32,       23 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,     39 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::f64,       38 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,     69 }, // Pentium IV from http://www.agner.org/

    // It is not a good idea to vectorize division. We have to scalarize it and
    // in the process we will often end up having to spill regular registers.
    // The overhead of division is going to dominate most kernels anyway, so
    // try hard to prevent vectorization of division - it is generally a bad
    // idea. Assume somewhat arbitrarily that we have to be able to hide
    // "20 cycles" for each lane.
    { ISD::SDIV, MVT::v16i8,  16*20 },
    { ISD::SDIV, MVT::v8i16,   8*20 },
    { ISD::SDIV, MVT::v4i32,   4*20 },
    { ISD::SDIV, MVT::v2i64,   2*20 },
    { ISD::UDIV, MVT::v16i8,  16*20 },
    { ISD::UDIV, MVT::v8i16,   8*20 },
    { ISD::UDIV, MVT::v4i32,   4*20 },
    { ISD::UDIV, MVT::v2i64,   2*20 },
  };

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE1CostTable[] = {
    { ISD::FDIV, MVT::f32,   17 }, // Pentium III from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32, 34 }, // Pentium III from http://www.agner.org/
  };

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info);
}

int X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // 64-bit packed float vectors (v2f32) are widened to type v4f32.
  // 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

  // For broadcasts we are splatting the first element from the first input
  // register, so we only need to reference that input; all the output
  // registers are the same.
  if (Kind == TTI::SK_Broadcast)
    LT.first = 1;

  // We are going to permute multiple sources and the result will be in
  // multiple destinations. We provide an accurate cost only for splits where
  // the element type remains the same.
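  // Worked example (editorial, not from the original source): a single-source
  // permute of <32 x i8> on an SSE2-only target legalizes to two v16i8
  // registers, so LT.first == 2. In the block below that gives NumOfSrcs == 2
  // and NumOfDests == 2, i.e. (2 - 1) * 2 == 2 two-source v16i8 shuffles.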
788 if (Kind == TTI::SK_PermuteSingleSrc && LT.first != 1) { 789 MVT LegalVT = LT.second; 790 if (LegalVT.isVector() && 791 LegalVT.getVectorElementType().getSizeInBits() == 792 Tp->getVectorElementType()->getPrimitiveSizeInBits() && 793 LegalVT.getVectorNumElements() < Tp->getVectorNumElements()) { 794 795 unsigned VecTySize = DL.getTypeStoreSize(Tp); 796 unsigned LegalVTSize = LegalVT.getStoreSize(); 797 // Number of source vectors after legalization: 798 unsigned NumOfSrcs = (VecTySize + LegalVTSize - 1) / LegalVTSize; 799 // Number of destination vectors after legalization: 800 unsigned NumOfDests = LT.first; 801 802 Type *SingleOpTy = VectorType::get(Tp->getVectorElementType(), 803 LegalVT.getVectorNumElements()); 804 805 unsigned NumOfShuffles = (NumOfSrcs - 1) * NumOfDests; 806 return NumOfShuffles * 807 getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy, 0, nullptr); 808 } 809 810 return BaseT::getShuffleCost(Kind, Tp, Index, SubTp); 811 } 812 813 // For 2-input shuffles, we must account for splitting the 2 inputs into many. 814 if (Kind == TTI::SK_PermuteTwoSrc && LT.first != 1) { 815 // We assume that source and destination have the same vector type. 816 int NumOfDests = LT.first; 817 int NumOfShufflesPerDest = LT.first * 2 - 1; 818 LT.first = NumOfDests * NumOfShufflesPerDest; 819 } 820 821 static const CostTblEntry AVX512VBMIShuffleTbl[] = { 822 { TTI::SK_Reverse, MVT::v64i8, 1 }, // vpermb 823 { TTI::SK_Reverse, MVT::v32i8, 1 }, // vpermb 824 825 { TTI::SK_PermuteSingleSrc, MVT::v64i8, 1 }, // vpermb 826 { TTI::SK_PermuteSingleSrc, MVT::v32i8, 1 }, // vpermb 827 828 { TTI::SK_PermuteTwoSrc, MVT::v64i8, 1 }, // vpermt2b 829 { TTI::SK_PermuteTwoSrc, MVT::v32i8, 1 }, // vpermt2b 830 { TTI::SK_PermuteTwoSrc, MVT::v16i8, 1 } // vpermt2b 831 }; 832 833 if (ST->hasVBMI()) 834 if (const auto *Entry = 835 CostTableLookup(AVX512VBMIShuffleTbl, Kind, LT.second)) 836 return LT.first * Entry->Cost; 837 838 static const CostTblEntry AVX512BWShuffleTbl[] = { 839 { TTI::SK_Broadcast, MVT::v32i16, 1 }, // vpbroadcastw 840 { TTI::SK_Broadcast, MVT::v64i8, 1 }, // vpbroadcastb 841 842 { TTI::SK_Reverse, MVT::v32i16, 1 }, // vpermw 843 { TTI::SK_Reverse, MVT::v16i16, 1 }, // vpermw 844 { TTI::SK_Reverse, MVT::v64i8, 2 }, // pshufb + vshufi64x2 845 846 { TTI::SK_PermuteSingleSrc, MVT::v32i16, 1 }, // vpermw 847 { TTI::SK_PermuteSingleSrc, MVT::v16i16, 1 }, // vpermw 848 { TTI::SK_PermuteSingleSrc, MVT::v8i16, 1 }, // vpermw 849 { TTI::SK_PermuteSingleSrc, MVT::v64i8, 8 }, // extend to v32i16 850 { TTI::SK_PermuteSingleSrc, MVT::v32i8, 3 }, // vpermw + zext/trunc 851 852 { TTI::SK_PermuteTwoSrc, MVT::v32i16, 1 }, // vpermt2w 853 { TTI::SK_PermuteTwoSrc, MVT::v16i16, 1 }, // vpermt2w 854 { TTI::SK_PermuteTwoSrc, MVT::v8i16, 1 }, // vpermt2w 855 { TTI::SK_PermuteTwoSrc, MVT::v32i8, 3 }, // zext + vpermt2w + trunc 856 { TTI::SK_PermuteTwoSrc, MVT::v64i8, 19 }, // 6 * v32i8 + 1 857 { TTI::SK_PermuteTwoSrc, MVT::v16i8, 3 } // zext + vpermt2w + trunc 858 }; 859 860 if (ST->hasBWI()) 861 if (const auto *Entry = 862 CostTableLookup(AVX512BWShuffleTbl, Kind, LT.second)) 863 return LT.first * Entry->Cost; 864 865 static const CostTblEntry AVX512ShuffleTbl[] = { 866 { TTI::SK_Broadcast, MVT::v8f64, 1 }, // vbroadcastpd 867 { TTI::SK_Broadcast, MVT::v16f32, 1 }, // vbroadcastps 868 { TTI::SK_Broadcast, MVT::v8i64, 1 }, // vpbroadcastq 869 { TTI::SK_Broadcast, MVT::v16i32, 1 }, // vpbroadcastd 870 871 { TTI::SK_Reverse, MVT::v8f64, 1 }, // vpermpd 872 { TTI::SK_Reverse, MVT::v16f32, 1 }, // vpermps 873 { 
TTI::SK_Reverse, MVT::v8i64, 1 }, // vpermq 874 { TTI::SK_Reverse, MVT::v16i32, 1 }, // vpermd 875 876 { TTI::SK_PermuteSingleSrc, MVT::v8f64, 1 }, // vpermpd 877 { TTI::SK_PermuteSingleSrc, MVT::v4f64, 1 }, // vpermpd 878 { TTI::SK_PermuteSingleSrc, MVT::v2f64, 1 }, // vpermpd 879 { TTI::SK_PermuteSingleSrc, MVT::v16f32, 1 }, // vpermps 880 { TTI::SK_PermuteSingleSrc, MVT::v8f32, 1 }, // vpermps 881 { TTI::SK_PermuteSingleSrc, MVT::v4f32, 1 }, // vpermps 882 { TTI::SK_PermuteSingleSrc, MVT::v8i64, 1 }, // vpermq 883 { TTI::SK_PermuteSingleSrc, MVT::v4i64, 1 }, // vpermq 884 { TTI::SK_PermuteSingleSrc, MVT::v2i64, 1 }, // vpermq 885 { TTI::SK_PermuteSingleSrc, MVT::v16i32, 1 }, // vpermd 886 { TTI::SK_PermuteSingleSrc, MVT::v8i32, 1 }, // vpermd 887 { TTI::SK_PermuteSingleSrc, MVT::v4i32, 1 }, // vpermd 888 { TTI::SK_PermuteSingleSrc, MVT::v16i8, 1 }, // pshufb 889 890 { TTI::SK_PermuteTwoSrc, MVT::v8f64, 1 }, // vpermt2pd 891 { TTI::SK_PermuteTwoSrc, MVT::v16f32, 1 }, // vpermt2ps 892 { TTI::SK_PermuteTwoSrc, MVT::v8i64, 1 }, // vpermt2q 893 { TTI::SK_PermuteTwoSrc, MVT::v16i32, 1 }, // vpermt2d 894 { TTI::SK_PermuteTwoSrc, MVT::v4f64, 1 }, // vpermt2pd 895 { TTI::SK_PermuteTwoSrc, MVT::v8f32, 1 }, // vpermt2ps 896 { TTI::SK_PermuteTwoSrc, MVT::v4i64, 1 }, // vpermt2q 897 { TTI::SK_PermuteTwoSrc, MVT::v8i32, 1 }, // vpermt2d 898 { TTI::SK_PermuteTwoSrc, MVT::v2f64, 1 }, // vpermt2pd 899 { TTI::SK_PermuteTwoSrc, MVT::v4f32, 1 }, // vpermt2ps 900 { TTI::SK_PermuteTwoSrc, MVT::v2i64, 1 }, // vpermt2q 901 { TTI::SK_PermuteTwoSrc, MVT::v4i32, 1 } // vpermt2d 902 }; 903 904 if (ST->hasAVX512()) 905 if (const auto *Entry = CostTableLookup(AVX512ShuffleTbl, Kind, LT.second)) 906 return LT.first * Entry->Cost; 907 908 static const CostTblEntry AVX2ShuffleTbl[] = { 909 { TTI::SK_Broadcast, MVT::v4f64, 1 }, // vbroadcastpd 910 { TTI::SK_Broadcast, MVT::v8f32, 1 }, // vbroadcastps 911 { TTI::SK_Broadcast, MVT::v4i64, 1 }, // vpbroadcastq 912 { TTI::SK_Broadcast, MVT::v8i32, 1 }, // vpbroadcastd 913 { TTI::SK_Broadcast, MVT::v16i16, 1 }, // vpbroadcastw 914 { TTI::SK_Broadcast, MVT::v32i8, 1 }, // vpbroadcastb 915 916 { TTI::SK_Reverse, MVT::v4f64, 1 }, // vpermpd 917 { TTI::SK_Reverse, MVT::v8f32, 1 }, // vpermps 918 { TTI::SK_Reverse, MVT::v4i64, 1 }, // vpermq 919 { TTI::SK_Reverse, MVT::v8i32, 1 }, // vpermd 920 { TTI::SK_Reverse, MVT::v16i16, 2 }, // vperm2i128 + pshufb 921 { TTI::SK_Reverse, MVT::v32i8, 2 }, // vperm2i128 + pshufb 922 923 { TTI::SK_Alternate, MVT::v16i16, 1 }, // vpblendw 924 { TTI::SK_Alternate, MVT::v32i8, 1 }, // vpblendvb 925 926 { TTI::SK_PermuteSingleSrc, MVT::v4f64, 1 }, // vpermpd 927 { TTI::SK_PermuteSingleSrc, MVT::v8f32, 1 }, // vpermps 928 { TTI::SK_PermuteSingleSrc, MVT::v4i64, 1 }, // vpermq 929 { TTI::SK_PermuteSingleSrc, MVT::v8i32, 1 }, // vpermd 930 { TTI::SK_PermuteSingleSrc, MVT::v16i16, 4 }, // vperm2i128 + 2*vpshufb 931 // + vpblendvb 932 { TTI::SK_PermuteSingleSrc, MVT::v32i8, 4 }, // vperm2i128 + 2*vpshufb 933 // + vpblendvb 934 935 { TTI::SK_PermuteTwoSrc, MVT::v4f64, 3 }, // 2*vpermpd + vblendpd 936 { TTI::SK_PermuteTwoSrc, MVT::v8f32, 3 }, // 2*vpermps + vblendps 937 { TTI::SK_PermuteTwoSrc, MVT::v4i64, 3 }, // 2*vpermq + vpblendd 938 { TTI::SK_PermuteTwoSrc, MVT::v8i32, 3 }, // 2*vpermd + vpblendd 939 { TTI::SK_PermuteTwoSrc, MVT::v16i16, 7 }, // 2*vperm2i128 + 4*vpshufb 940 // + vpblendvb 941 { TTI::SK_PermuteTwoSrc, MVT::v32i8, 7 }, // 2*vperm2i128 + 4*vpshufb 942 // + vpblendvb 943 }; 944 945 if (ST->hasAVX2()) 946 if (const auto *Entry = 
CostTableLookup(AVX2ShuffleTbl, Kind, LT.second)) 947 return LT.first * Entry->Cost; 948 949 static const CostTblEntry XOPShuffleTbl[] = { 950 { TTI::SK_PermuteSingleSrc, MVT::v4f64, 2 }, // vperm2f128 + vpermil2pd 951 { TTI::SK_PermuteSingleSrc, MVT::v8f32, 2 }, // vperm2f128 + vpermil2ps 952 { TTI::SK_PermuteSingleSrc, MVT::v4i64, 2 }, // vperm2f128 + vpermil2pd 953 { TTI::SK_PermuteSingleSrc, MVT::v8i32, 2 }, // vperm2f128 + vpermil2ps 954 { TTI::SK_PermuteSingleSrc, MVT::v16i16, 4 }, // vextractf128 + 2*vpperm 955 // + vinsertf128 956 { TTI::SK_PermuteSingleSrc, MVT::v32i8, 4 }, // vextractf128 + 2*vpperm 957 // + vinsertf128 958 959 { TTI::SK_PermuteTwoSrc, MVT::v16i16, 9 }, // 2*vextractf128 + 6*vpperm 960 // + vinsertf128 961 { TTI::SK_PermuteTwoSrc, MVT::v8i16, 1 }, // vpperm 962 { TTI::SK_PermuteTwoSrc, MVT::v32i8, 9 }, // 2*vextractf128 + 6*vpperm 963 // + vinsertf128 964 { TTI::SK_PermuteTwoSrc, MVT::v16i8, 1 }, // vpperm 965 }; 966 967 if (ST->hasXOP()) 968 if (const auto *Entry = CostTableLookup(XOPShuffleTbl, Kind, LT.second)) 969 return LT.first * Entry->Cost; 970 971 static const CostTblEntry AVX1ShuffleTbl[] = { 972 { TTI::SK_Broadcast, MVT::v4f64, 2 }, // vperm2f128 + vpermilpd 973 { TTI::SK_Broadcast, MVT::v8f32, 2 }, // vperm2f128 + vpermilps 974 { TTI::SK_Broadcast, MVT::v4i64, 2 }, // vperm2f128 + vpermilpd 975 { TTI::SK_Broadcast, MVT::v8i32, 2 }, // vperm2f128 + vpermilps 976 { TTI::SK_Broadcast, MVT::v16i16, 3 }, // vpshuflw + vpshufd + vinsertf128 977 { TTI::SK_Broadcast, MVT::v32i8, 2 }, // vpshufb + vinsertf128 978 979 { TTI::SK_Reverse, MVT::v4f64, 2 }, // vperm2f128 + vpermilpd 980 { TTI::SK_Reverse, MVT::v8f32, 2 }, // vperm2f128 + vpermilps 981 { TTI::SK_Reverse, MVT::v4i64, 2 }, // vperm2f128 + vpermilpd 982 { TTI::SK_Reverse, MVT::v8i32, 2 }, // vperm2f128 + vpermilps 983 { TTI::SK_Reverse, MVT::v16i16, 4 }, // vextractf128 + 2*pshufb 984 // + vinsertf128 985 { TTI::SK_Reverse, MVT::v32i8, 4 }, // vextractf128 + 2*pshufb 986 // + vinsertf128 987 988 { TTI::SK_Alternate, MVT::v4i64, 1 }, // vblendpd 989 { TTI::SK_Alternate, MVT::v4f64, 1 }, // vblendpd 990 { TTI::SK_Alternate, MVT::v8i32, 1 }, // vblendps 991 { TTI::SK_Alternate, MVT::v8f32, 1 }, // vblendps 992 { TTI::SK_Alternate, MVT::v16i16, 3 }, // vpand + vpandn + vpor 993 { TTI::SK_Alternate, MVT::v32i8, 3 }, // vpand + vpandn + vpor 994 995 { TTI::SK_PermuteSingleSrc, MVT::v4f64, 3 }, // 2*vperm2f128 + vshufpd 996 { TTI::SK_PermuteSingleSrc, MVT::v4i64, 3 }, // 2*vperm2f128 + vshufpd 997 { TTI::SK_PermuteSingleSrc, MVT::v8f32, 4 }, // 2*vperm2f128 + 2*vshufps 998 { TTI::SK_PermuteSingleSrc, MVT::v8i32, 4 }, // 2*vperm2f128 + 2*vshufps 999 { TTI::SK_PermuteSingleSrc, MVT::v16i16, 8 }, // vextractf128 + 4*pshufb 1000 // + 2*por + vinsertf128 1001 { TTI::SK_PermuteSingleSrc, MVT::v32i8, 8 }, // vextractf128 + 4*pshufb 1002 // + 2*por + vinsertf128 1003 1004 { TTI::SK_PermuteTwoSrc, MVT::v4f64, 4 }, // 2*vperm2f128 + 2*vshufpd 1005 { TTI::SK_PermuteTwoSrc, MVT::v8f32, 4 }, // 2*vperm2f128 + 2*vshufps 1006 { TTI::SK_PermuteTwoSrc, MVT::v4i64, 4 }, // 2*vperm2f128 + 2*vshufpd 1007 { TTI::SK_PermuteTwoSrc, MVT::v8i32, 4 }, // 2*vperm2f128 + 2*vshufps 1008 { TTI::SK_PermuteTwoSrc, MVT::v16i16, 15 }, // 2*vextractf128 + 8*pshufb 1009 // + 4*por + vinsertf128 1010 { TTI::SK_PermuteTwoSrc, MVT::v32i8, 15 }, // 2*vextractf128 + 8*pshufb 1011 // + 4*por + vinsertf128 1012 }; 1013 1014 if (ST->hasAVX()) 1015 if (const auto *Entry = CostTableLookup(AVX1ShuffleTbl, Kind, LT.second)) 1016 return LT.first * 
Entry->Cost; 1017 1018 static const CostTblEntry SSE41ShuffleTbl[] = { 1019 { TTI::SK_Alternate, MVT::v2i64, 1 }, // pblendw 1020 { TTI::SK_Alternate, MVT::v2f64, 1 }, // movsd 1021 { TTI::SK_Alternate, MVT::v4i32, 1 }, // pblendw 1022 { TTI::SK_Alternate, MVT::v4f32, 1 }, // blendps 1023 { TTI::SK_Alternate, MVT::v8i16, 1 }, // pblendw 1024 { TTI::SK_Alternate, MVT::v16i8, 1 } // pblendvb 1025 }; 1026 1027 if (ST->hasSSE41()) 1028 if (const auto *Entry = CostTableLookup(SSE41ShuffleTbl, Kind, LT.second)) 1029 return LT.first * Entry->Cost; 1030 1031 static const CostTblEntry SSSE3ShuffleTbl[] = { 1032 { TTI::SK_Broadcast, MVT::v8i16, 1 }, // pshufb 1033 { TTI::SK_Broadcast, MVT::v16i8, 1 }, // pshufb 1034 1035 { TTI::SK_Reverse, MVT::v8i16, 1 }, // pshufb 1036 { TTI::SK_Reverse, MVT::v16i8, 1 }, // pshufb 1037 1038 { TTI::SK_Alternate, MVT::v8i16, 3 }, // 2*pshufb + por 1039 { TTI::SK_Alternate, MVT::v16i8, 3 }, // 2*pshufb + por 1040 1041 { TTI::SK_PermuteSingleSrc, MVT::v8i16, 1 }, // pshufb 1042 { TTI::SK_PermuteSingleSrc, MVT::v16i8, 1 }, // pshufb 1043 1044 { TTI::SK_PermuteTwoSrc, MVT::v8i16, 3 }, // 2*pshufb + por 1045 { TTI::SK_PermuteTwoSrc, MVT::v16i8, 3 }, // 2*pshufb + por 1046 }; 1047 1048 if (ST->hasSSSE3()) 1049 if (const auto *Entry = CostTableLookup(SSSE3ShuffleTbl, Kind, LT.second)) 1050 return LT.first * Entry->Cost; 1051 1052 static const CostTblEntry SSE2ShuffleTbl[] = { 1053 { TTI::SK_Broadcast, MVT::v2f64, 1 }, // shufpd 1054 { TTI::SK_Broadcast, MVT::v2i64, 1 }, // pshufd 1055 { TTI::SK_Broadcast, MVT::v4i32, 1 }, // pshufd 1056 { TTI::SK_Broadcast, MVT::v8i16, 2 }, // pshuflw + pshufd 1057 { TTI::SK_Broadcast, MVT::v16i8, 3 }, // unpck + pshuflw + pshufd 1058 1059 { TTI::SK_Reverse, MVT::v2f64, 1 }, // shufpd 1060 { TTI::SK_Reverse, MVT::v2i64, 1 }, // pshufd 1061 { TTI::SK_Reverse, MVT::v4i32, 1 }, // pshufd 1062 { TTI::SK_Reverse, MVT::v8i16, 3 }, // pshuflw + pshufhw + pshufd 1063 { TTI::SK_Reverse, MVT::v16i8, 9 }, // 2*pshuflw + 2*pshufhw 1064 // + 2*pshufd + 2*unpck + packus 1065 1066 { TTI::SK_Alternate, MVT::v2i64, 1 }, // movsd 1067 { TTI::SK_Alternate, MVT::v2f64, 1 }, // movsd 1068 { TTI::SK_Alternate, MVT::v4i32, 2 }, // 2*shufps 1069 { TTI::SK_Alternate, MVT::v8i16, 3 }, // pand + pandn + por 1070 { TTI::SK_Alternate, MVT::v16i8, 3 }, // pand + pandn + por 1071 1072 { TTI::SK_PermuteSingleSrc, MVT::v2f64, 1 }, // shufpd 1073 { TTI::SK_PermuteSingleSrc, MVT::v2i64, 1 }, // pshufd 1074 { TTI::SK_PermuteSingleSrc, MVT::v4i32, 1 }, // pshufd 1075 { TTI::SK_PermuteSingleSrc, MVT::v8i16, 5 }, // 2*pshuflw + 2*pshufhw 1076 // + pshufd/unpck 1077 { TTI::SK_PermuteSingleSrc, MVT::v16i8, 10 }, // 2*pshuflw + 2*pshufhw 1078 // + 2*pshufd + 2*unpck + 2*packus 1079 1080 { TTI::SK_PermuteTwoSrc, MVT::v2f64, 1 }, // shufpd 1081 { TTI::SK_PermuteTwoSrc, MVT::v2i64, 1 }, // shufpd 1082 { TTI::SK_PermuteTwoSrc, MVT::v4i32, 2 }, // 2*{unpck,movsd,pshufd} 1083 { TTI::SK_PermuteTwoSrc, MVT::v8i16, 8 }, // blend+permute 1084 { TTI::SK_PermuteTwoSrc, MVT::v16i8, 13 }, // blend+permute 1085 }; 1086 1087 if (ST->hasSSE2()) 1088 if (const auto *Entry = CostTableLookup(SSE2ShuffleTbl, Kind, LT.second)) 1089 return LT.first * Entry->Cost; 1090 1091 static const CostTblEntry SSE1ShuffleTbl[] = { 1092 { TTI::SK_Broadcast, MVT::v4f32, 1 }, // shufps 1093 { TTI::SK_Reverse, MVT::v4f32, 1 }, // shufps 1094 { TTI::SK_Alternate, MVT::v4f32, 2 }, // 2*shufps 1095 { TTI::SK_PermuteSingleSrc, MVT::v4f32, 1 }, // shufps 1096 { TTI::SK_PermuteTwoSrc, MVT::v4f32, 2 }, // 2*shufps 1097 }; 
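  // Worked example (editorial, not from the original source): reversing a
  // <8 x float> vector on an SSE1-only target legalizes to two v4f32 halves
  // (LT.first == 2), so the lookup below yields a cost of 2 * 1 shufps.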
1098 1099 if (ST->hasSSE1()) 1100 if (const auto *Entry = CostTableLookup(SSE1ShuffleTbl, Kind, LT.second)) 1101 return LT.first * Entry->Cost; 1102 1103 return BaseT::getShuffleCost(Kind, Tp, Index, SubTp); 1104 } 1105 1106 int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, 1107 const Instruction *I) { 1108 int ISD = TLI->InstructionOpcodeToISD(Opcode); 1109 assert(ISD && "Invalid opcode"); 1110 1111 // FIXME: Need a better design of the cost table to handle non-simple types of 1112 // potential massive combinations (elem_num x src_type x dst_type). 1113 1114 static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = { 1115 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 }, 1116 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 }, 1117 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 }, 1118 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 }, 1119 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 }, 1120 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 }, 1121 1122 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 }, 1123 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 }, 1124 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 }, 1125 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 }, 1126 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 }, 1127 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 }, 1128 1129 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 1 }, 1130 { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f32, 1 }, 1131 { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f32, 1 }, 1132 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 }, 1133 { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f64, 1 }, 1134 { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f64, 1 }, 1135 1136 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 1 }, 1137 { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f32, 1 }, 1138 { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f32, 1 }, 1139 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 }, 1140 { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f64, 1 }, 1141 { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f64, 1 }, 1142 }; 1143 1144 // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and 1145 // 256-bit wide vectors. 
1146 1147 static const TypeConversionCostTblEntry AVX512FConversionTbl[] = { 1148 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 1 }, 1149 { ISD::FP_EXTEND, MVT::v8f64, MVT::v16f32, 3 }, 1150 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 1 }, 1151 1152 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 1 }, 1153 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 1 }, 1154 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i64, 1 }, 1155 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 1 }, 1156 1157 // v16i1 -> v16i32 - load + broadcast 1158 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1, 2 }, 1159 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1, 2 }, 1160 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 1 }, 1161 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 1 }, 1162 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 }, 1163 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 }, 1164 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 1 }, 1165 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 1 }, 1166 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i32, 1 }, 1167 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i32, 1 }, 1168 1169 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 }, 1170 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 }, 1171 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i8, 2 }, 1172 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 2 }, 1173 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 }, 1174 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 }, 1175 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 }, 1176 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 }, 1177 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 26 }, 1178 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 26 }, 1179 1180 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 }, 1181 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 }, 1182 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 2 }, 1183 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 }, 1184 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 2 }, 1185 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i8, 2 }, 1186 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 2 }, 1187 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 5 }, 1188 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 }, 1189 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 }, 1190 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 }, 1191 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 }, 1192 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 2 }, 1193 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 1 }, 1194 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 }, 1195 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 }, 1196 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 }, 1197 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 }, 1198 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 }, 1199 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 5 }, 1200 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 }, 1201 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 12 }, 1202 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 26 }, 1203 1204 { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 }, 1205 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 }, 1206 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 1 }, 1207 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f64, 2 }, 1208 { ISD::FP_TO_UINT, MVT::v8i8, MVT::v8f64, 2 }, 1209 { ISD::FP_TO_UINT, MVT::v16i32, MVT::v16f32, 1 }, 1210 { ISD::FP_TO_UINT, MVT::v16i16, MVT::v16f32, 2 }, 1211 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v16f32, 2 }, 1212 }; 1213 1214 static const TypeConversionCostTblEntry AVX2ConversionTbl[] = { 1215 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 3 }, 1216 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 3 }, 1217 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 3 }, 1218 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 3 }, 1219 { 
ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 3 }, 1220 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 3 }, 1221 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 }, 1222 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 }, 1223 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 }, 1224 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 }, 1225 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, 1226 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, 1227 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 }, 1228 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 }, 1229 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 1 }, 1230 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1 }, 1231 1232 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 2 }, 1233 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 2 }, 1234 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 2 }, 1235 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 2 }, 1236 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 2 }, 1237 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 4 }, 1238 1239 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 3 }, 1240 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 3 }, 1241 1242 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 8 }, 1243 }; 1244 1245 static const TypeConversionCostTblEntry AVXConversionTbl[] = { 1246 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 6 }, 1247 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 4 }, 1248 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 7 }, 1249 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 4 }, 1250 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 6 }, 1251 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 4 }, 1252 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 7 }, 1253 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 4 }, 1254 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 }, 1255 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 4 }, 1256 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 6 }, 1257 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, 1258 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 }, 1259 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 4 }, 1260 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 4 }, 1261 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 4 }, 1262 1263 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 4 }, 1264 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 }, 1265 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 }, 1266 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 4 }, 1267 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 4 }, 1268 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 4 }, 1269 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 9 }, 1270 1271 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 }, 1272 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i1, 3 }, 1273 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i1, 8 }, 1274 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 }, 1275 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i8, 3 }, 1276 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 8 }, 1277 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 3 }, 1278 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i16, 3 }, 1279 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 }, 1280 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 }, 1281 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 }, 1282 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 }, 1283 1284 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 7 }, 1285 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i1, 7 }, 1286 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i1, 6 }, 1287 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 2 }, 1288 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 }, 1289 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 5 }, 1290 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 }, 1291 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 }, 1292 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 }, 1293 { ISD::UINT_TO_FP, 
MVT::v2f64, MVT::v2i32,  6 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i32,  6 },
    { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i32,  6 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i32,  9 },
    // The generic code to compute the scalar overhead is currently broken.
    // Work around this limitation by estimating the scalarization overhead
    // here. We have roughly 10 instructions per scalar element.
    // Multiply that by the vector width.
    // FIXME: remove that when PR19268 is fixed.
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i64, 10 },
    { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i64, 20 },
    { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i64, 13 },

    { ISD::FP_TO_SINT,  MVT::v4i8,  MVT::v4f32, 1 },
    { ISD::FP_TO_SINT,  MVT::v8i8,  MVT::v8f32, 7 },
    // This node is expanded into scalarized operations but BasicTTI is overly
    // optimistic when estimating its cost. It computes 3 per element (one
    // vector-extract, one scalar conversion and one vector-insert). The
    // problem is that the inserts form a read-modify-write chain so latency
    // should be factored in too. Inflate the cost per element by 1.
    { ISD::FP_TO_UINT,  MVT::v8i32, MVT::v8f32, 8*4 },
    { ISD::FP_TO_UINT,  MVT::v4i32, MVT::v4f64, 4*4 },

    { ISD::FP_EXTEND,   MVT::v4f64, MVT::v4f32, 1 },
    { ISD::FP_ROUND,    MVT::v4f32, MVT::v4f64, 1 },
  };

  static const TypeConversionCostTblEntry SSE41ConversionTbl[] = {
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   2 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   2 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  2 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  2 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  2 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  2 },

    { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i8,   2 },
    { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i8,   1 },
    { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i8,   1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   2 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   2 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  2 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  4 },
    { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i16,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  2 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 4 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 4 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i16,  2 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i16,  1 },
    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i32,  1 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i32,  1 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  3 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  3 },
    { ISD::TRUNCATE,    MVT::v16i16, MVT::v16i32, 6 },
  };

  static const TypeConversionCostTblEntry SSE2ConversionTbl[] = {
    // These are somewhat magic numbers justified by looking at the output of
    // Intel's IACA, running some kernels and making sure when we take
    // legalization into account the throughput will be overestimated.
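    // Editorial note (not from the original source): the N*10 entries below
    // for int-to-double conversions appear to mirror the "roughly 10
    // instructions per scalar element" scalarization estimate used in the AVX
    // table above, e.g. v16i8 -> v2f64 is costed as 16*10.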
1362 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 }, 1363 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 }, 1364 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 }, 1365 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 }, 1366 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 }, 1367 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 }, 1368 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 }, 1369 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 }, 1370 1371 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 }, 1372 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 }, 1373 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 }, 1374 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 }, 1375 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 }, 1376 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 8 }, 1377 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 }, 1378 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 }, 1379 1380 { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 3 }, 1381 1382 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i8, 1 }, 1383 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i8, 6 }, 1384 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 }, 1385 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 3 }, 1386 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 4 }, 1387 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 8 }, 1388 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 }, 1389 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 2 }, 1390 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 6 }, 1391 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 6 }, 1392 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 3 }, 1393 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 }, 1394 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 9 }, 1395 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 12 }, 1396 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 }, 1397 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 2 }, 1398 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, 1399 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 10 }, 1400 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 3 }, 1401 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 }, 1402 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 6 }, 1403 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 8 }, 1404 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 3 }, 1405 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 5 }, 1406 1407 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 4 }, 1408 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 2 }, 1409 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 }, 1410 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 3 }, 1411 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 3 }, 1412 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 }, 1413 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 7 }, 1414 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 }, 1415 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 10 }, 1416 }; 1417 1418 std::pair<int, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src); 1419 std::pair<int, MVT> LTDest = TLI->getTypeLegalizationCost(DL, Dst); 1420 1421 if (ST->hasSSE2() && !ST->hasAVX()) { 1422 if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD, 1423 LTDest.second, LTSrc.second)) 1424 return LTSrc.first * Entry->Cost; 1425 } 1426 1427 EVT SrcTy = TLI->getValueType(DL, Src); 1428 EVT DstTy = TLI->getValueType(DL, Dst); 1429 1430 // The function getSimpleVT only handles simple value types. 
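  // (Illustrative note, not from the original comments: a vector type with an
  // unusual element count, e.g. <3 x i32>, has no simple MVT, so such casts
  // skip the tables and use the generic estimate below.)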
1431 if (!SrcTy.isSimple() || !DstTy.isSimple()) 1432 return BaseT::getCastInstrCost(Opcode, Dst, Src); 1433 1434 if (ST->hasDQI()) 1435 if (const auto *Entry = ConvertCostTableLookup(AVX512DQConversionTbl, ISD, 1436 DstTy.getSimpleVT(), 1437 SrcTy.getSimpleVT())) 1438 return Entry->Cost; 1439 1440 if (ST->hasAVX512()) 1441 if (const auto *Entry = ConvertCostTableLookup(AVX512FConversionTbl, ISD, 1442 DstTy.getSimpleVT(), 1443 SrcTy.getSimpleVT())) 1444 return Entry->Cost; 1445 1446 if (ST->hasAVX2()) { 1447 if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD, 1448 DstTy.getSimpleVT(), 1449 SrcTy.getSimpleVT())) 1450 return Entry->Cost; 1451 } 1452 1453 if (ST->hasAVX()) { 1454 if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD, 1455 DstTy.getSimpleVT(), 1456 SrcTy.getSimpleVT())) 1457 return Entry->Cost; 1458 } 1459 1460 if (ST->hasSSE41()) { 1461 if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD, 1462 DstTy.getSimpleVT(), 1463 SrcTy.getSimpleVT())) 1464 return Entry->Cost; 1465 } 1466 1467 if (ST->hasSSE2()) { 1468 if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD, 1469 DstTy.getSimpleVT(), 1470 SrcTy.getSimpleVT())) 1471 return Entry->Cost; 1472 } 1473 1474 return BaseT::getCastInstrCost(Opcode, Dst, Src, I); 1475 } 1476 1477 int X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, 1478 const Instruction *I) { 1479 // Legalize the type. 1480 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy); 1481 1482 MVT MTy = LT.second; 1483 1484 int ISD = TLI->InstructionOpcodeToISD(Opcode); 1485 assert(ISD && "Invalid opcode"); 1486 1487 static const CostTblEntry SSE2CostTbl[] = { 1488 { ISD::SETCC, MVT::v2i64, 8 }, 1489 { ISD::SETCC, MVT::v4i32, 1 }, 1490 { ISD::SETCC, MVT::v8i16, 1 }, 1491 { ISD::SETCC, MVT::v16i8, 1 }, 1492 }; 1493 1494 static const CostTblEntry SSE42CostTbl[] = { 1495 { ISD::SETCC, MVT::v2f64, 1 }, 1496 { ISD::SETCC, MVT::v4f32, 1 }, 1497 { ISD::SETCC, MVT::v2i64, 1 }, 1498 }; 1499 1500 static const CostTblEntry AVX1CostTbl[] = { 1501 { ISD::SETCC, MVT::v4f64, 1 }, 1502 { ISD::SETCC, MVT::v8f32, 1 }, 1503 // AVX1 does not support 8-wide integer compare. 
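    // (Illustrative note, not measured data: such compares are presumably
    // split into two 128-bit compares plus the extract/insert needed to
    // rebuild the 256-bit result, which is what the cost of 4 reflects.)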
1504 { ISD::SETCC, MVT::v4i64, 4 }, 1505 { ISD::SETCC, MVT::v8i32, 4 }, 1506 { ISD::SETCC, MVT::v16i16, 4 }, 1507 { ISD::SETCC, MVT::v32i8, 4 }, 1508 }; 1509 1510 static const CostTblEntry AVX2CostTbl[] = { 1511 { ISD::SETCC, MVT::v4i64, 1 }, 1512 { ISD::SETCC, MVT::v8i32, 1 }, 1513 { ISD::SETCC, MVT::v16i16, 1 }, 1514 { ISD::SETCC, MVT::v32i8, 1 }, 1515 }; 1516 1517 static const CostTblEntry AVX512CostTbl[] = { 1518 { ISD::SETCC, MVT::v8i64, 1 }, 1519 { ISD::SETCC, MVT::v16i32, 1 }, 1520 { ISD::SETCC, MVT::v8f64, 1 }, 1521 { ISD::SETCC, MVT::v16f32, 1 }, 1522 }; 1523 1524 if (ST->hasAVX512()) 1525 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy)) 1526 return LT.first * Entry->Cost; 1527 1528 if (ST->hasAVX2()) 1529 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy)) 1530 return LT.first * Entry->Cost; 1531 1532 if (ST->hasAVX()) 1533 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy)) 1534 return LT.first * Entry->Cost; 1535 1536 if (ST->hasSSE42()) 1537 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy)) 1538 return LT.first * Entry->Cost; 1539 1540 if (ST->hasSSE2()) 1541 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy)) 1542 return LT.first * Entry->Cost; 1543 1544 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I); 1545 } 1546 1547 unsigned X86TTIImpl::getAtomicMemIntrinsicMaxElementSize() const { return 16; } 1548 1549 int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy, 1550 ArrayRef<Type *> Tys, FastMathFlags FMF, 1551 unsigned ScalarizationCostPassed) { 1552 // Costs should match the codegen from: 1553 // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll 1554 // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll 1555 // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll 1556 // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll 1557 // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll 1558 static const CostTblEntry AVX512CDCostTbl[] = { 1559 { ISD::CTLZ, MVT::v8i64, 1 }, 1560 { ISD::CTLZ, MVT::v16i32, 1 }, 1561 { ISD::CTLZ, MVT::v32i16, 8 }, 1562 { ISD::CTLZ, MVT::v64i8, 20 }, 1563 { ISD::CTLZ, MVT::v4i64, 1 }, 1564 { ISD::CTLZ, MVT::v8i32, 1 }, 1565 { ISD::CTLZ, MVT::v16i16, 4 }, 1566 { ISD::CTLZ, MVT::v32i8, 10 }, 1567 { ISD::CTLZ, MVT::v2i64, 1 }, 1568 { ISD::CTLZ, MVT::v4i32, 1 }, 1569 { ISD::CTLZ, MVT::v8i16, 4 }, 1570 { ISD::CTLZ, MVT::v16i8, 4 }, 1571 }; 1572 static const CostTblEntry AVX512BWCostTbl[] = { 1573 { ISD::BITREVERSE, MVT::v8i64, 5 }, 1574 { ISD::BITREVERSE, MVT::v16i32, 5 }, 1575 { ISD::BITREVERSE, MVT::v32i16, 5 }, 1576 { ISD::BITREVERSE, MVT::v64i8, 5 }, 1577 { ISD::CTLZ, MVT::v8i64, 23 }, 1578 { ISD::CTLZ, MVT::v16i32, 22 }, 1579 { ISD::CTLZ, MVT::v32i16, 18 }, 1580 { ISD::CTLZ, MVT::v64i8, 17 }, 1581 { ISD::CTPOP, MVT::v8i64, 7 }, 1582 { ISD::CTPOP, MVT::v16i32, 11 }, 1583 { ISD::CTPOP, MVT::v32i16, 9 }, 1584 { ISD::CTPOP, MVT::v64i8, 6 }, 1585 { ISD::CTTZ, MVT::v8i64, 10 }, 1586 { ISD::CTTZ, MVT::v16i32, 14 }, 1587 { ISD::CTTZ, MVT::v32i16, 12 }, 1588 { ISD::CTTZ, MVT::v64i8, 9 }, 1589 }; 1590 static const CostTblEntry AVX512CostTbl[] = { 1591 { ISD::BITREVERSE, MVT::v8i64, 36 }, 1592 { ISD::BITREVERSE, MVT::v16i32, 24 }, 1593 { ISD::CTLZ, MVT::v8i64, 29 }, 1594 { ISD::CTLZ, MVT::v16i32, 35 }, 1595 { ISD::CTPOP, MVT::v8i64, 16 }, 1596 { ISD::CTPOP, MVT::v16i32, 24 }, 1597 { ISD::CTTZ, MVT::v8i64, 20 }, 1598 { ISD::CTTZ, MVT::v16i32, 28 }, 1599 }; 1600 static const CostTblEntry XOPCostTbl[] = { 1601 { ISD::BITREVERSE, MVT::v4i64, 4 }, 1602 { ISD::BITREVERSE, MVT::v8i32, 4 }, 1603 
{ ISD::BITREVERSE, MVT::v16i16, 4 }, 1604 { ISD::BITREVERSE, MVT::v32i8, 4 }, 1605 { ISD::BITREVERSE, MVT::v2i64, 1 }, 1606 { ISD::BITREVERSE, MVT::v4i32, 1 }, 1607 { ISD::BITREVERSE, MVT::v8i16, 1 }, 1608 { ISD::BITREVERSE, MVT::v16i8, 1 }, 1609 { ISD::BITREVERSE, MVT::i64, 3 }, 1610 { ISD::BITREVERSE, MVT::i32, 3 }, 1611 { ISD::BITREVERSE, MVT::i16, 3 }, 1612 { ISD::BITREVERSE, MVT::i8, 3 } 1613 }; 1614 static const CostTblEntry AVX2CostTbl[] = { 1615 { ISD::BITREVERSE, MVT::v4i64, 5 }, 1616 { ISD::BITREVERSE, MVT::v8i32, 5 }, 1617 { ISD::BITREVERSE, MVT::v16i16, 5 }, 1618 { ISD::BITREVERSE, MVT::v32i8, 5 }, 1619 { ISD::BSWAP, MVT::v4i64, 1 }, 1620 { ISD::BSWAP, MVT::v8i32, 1 }, 1621 { ISD::BSWAP, MVT::v16i16, 1 }, 1622 { ISD::CTLZ, MVT::v4i64, 23 }, 1623 { ISD::CTLZ, MVT::v8i32, 18 }, 1624 { ISD::CTLZ, MVT::v16i16, 14 }, 1625 { ISD::CTLZ, MVT::v32i8, 9 }, 1626 { ISD::CTPOP, MVT::v4i64, 7 }, 1627 { ISD::CTPOP, MVT::v8i32, 11 }, 1628 { ISD::CTPOP, MVT::v16i16, 9 }, 1629 { ISD::CTPOP, MVT::v32i8, 6 }, 1630 { ISD::CTTZ, MVT::v4i64, 10 }, 1631 { ISD::CTTZ, MVT::v8i32, 14 }, 1632 { ISD::CTTZ, MVT::v16i16, 12 }, 1633 { ISD::CTTZ, MVT::v32i8, 9 }, 1634 { ISD::FSQRT, MVT::f32, 7 }, // Haswell from http://www.agner.org/ 1635 { ISD::FSQRT, MVT::v4f32, 7 }, // Haswell from http://www.agner.org/ 1636 { ISD::FSQRT, MVT::v8f32, 14 }, // Haswell from http://www.agner.org/ 1637 { ISD::FSQRT, MVT::f64, 14 }, // Haswell from http://www.agner.org/ 1638 { ISD::FSQRT, MVT::v2f64, 14 }, // Haswell from http://www.agner.org/ 1639 { ISD::FSQRT, MVT::v4f64, 28 }, // Haswell from http://www.agner.org/ 1640 }; 1641 static const CostTblEntry AVX1CostTbl[] = { 1642 { ISD::BITREVERSE, MVT::v4i64, 12 }, // 2 x 128-bit Op + extract/insert 1643 { ISD::BITREVERSE, MVT::v8i32, 12 }, // 2 x 128-bit Op + extract/insert 1644 { ISD::BITREVERSE, MVT::v16i16, 12 }, // 2 x 128-bit Op + extract/insert 1645 { ISD::BITREVERSE, MVT::v32i8, 12 }, // 2 x 128-bit Op + extract/insert 1646 { ISD::BSWAP, MVT::v4i64, 4 }, 1647 { ISD::BSWAP, MVT::v8i32, 4 }, 1648 { ISD::BSWAP, MVT::v16i16, 4 }, 1649 { ISD::CTLZ, MVT::v4i64, 48 }, // 2 x 128-bit Op + extract/insert 1650 { ISD::CTLZ, MVT::v8i32, 38 }, // 2 x 128-bit Op + extract/insert 1651 { ISD::CTLZ, MVT::v16i16, 30 }, // 2 x 128-bit Op + extract/insert 1652 { ISD::CTLZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert 1653 { ISD::CTPOP, MVT::v4i64, 16 }, // 2 x 128-bit Op + extract/insert 1654 { ISD::CTPOP, MVT::v8i32, 24 }, // 2 x 128-bit Op + extract/insert 1655 { ISD::CTPOP, MVT::v16i16, 20 }, // 2 x 128-bit Op + extract/insert 1656 { ISD::CTPOP, MVT::v32i8, 14 }, // 2 x 128-bit Op + extract/insert 1657 { ISD::CTTZ, MVT::v4i64, 22 }, // 2 x 128-bit Op + extract/insert 1658 { ISD::CTTZ, MVT::v8i32, 30 }, // 2 x 128-bit Op + extract/insert 1659 { ISD::CTTZ, MVT::v16i16, 26 }, // 2 x 128-bit Op + extract/insert 1660 { ISD::CTTZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert 1661 { ISD::FSQRT, MVT::f32, 14 }, // SNB from http://www.agner.org/ 1662 { ISD::FSQRT, MVT::v4f32, 14 }, // SNB from http://www.agner.org/ 1663 { ISD::FSQRT, MVT::v8f32, 28 }, // SNB from http://www.agner.org/ 1664 { ISD::FSQRT, MVT::f64, 21 }, // SNB from http://www.agner.org/ 1665 { ISD::FSQRT, MVT::v2f64, 21 }, // SNB from http://www.agner.org/ 1666 { ISD::FSQRT, MVT::v4f64, 43 }, // SNB from http://www.agner.org/ 1667 }; 1668 static const CostTblEntry SSE42CostTbl[] = { 1669 { ISD::FSQRT, MVT::f32, 18 }, // Nehalem from http://www.agner.org/ 1670 { ISD::FSQRT, MVT::v4f32, 18 }, // Nehalem from 
http://www.agner.org/ 1671 }; 1672 static const CostTblEntry SSSE3CostTbl[] = { 1673 { ISD::BITREVERSE, MVT::v2i64, 5 }, 1674 { ISD::BITREVERSE, MVT::v4i32, 5 }, 1675 { ISD::BITREVERSE, MVT::v8i16, 5 }, 1676 { ISD::BITREVERSE, MVT::v16i8, 5 }, 1677 { ISD::BSWAP, MVT::v2i64, 1 }, 1678 { ISD::BSWAP, MVT::v4i32, 1 }, 1679 { ISD::BSWAP, MVT::v8i16, 1 }, 1680 { ISD::CTLZ, MVT::v2i64, 23 }, 1681 { ISD::CTLZ, MVT::v4i32, 18 }, 1682 { ISD::CTLZ, MVT::v8i16, 14 }, 1683 { ISD::CTLZ, MVT::v16i8, 9 }, 1684 { ISD::CTPOP, MVT::v2i64, 7 }, 1685 { ISD::CTPOP, MVT::v4i32, 11 }, 1686 { ISD::CTPOP, MVT::v8i16, 9 }, 1687 { ISD::CTPOP, MVT::v16i8, 6 }, 1688 { ISD::CTTZ, MVT::v2i64, 10 }, 1689 { ISD::CTTZ, MVT::v4i32, 14 }, 1690 { ISD::CTTZ, MVT::v8i16, 12 }, 1691 { ISD::CTTZ, MVT::v16i8, 9 } 1692 }; 1693 static const CostTblEntry SSE2CostTbl[] = { 1694 { ISD::BITREVERSE, MVT::v2i64, 29 }, 1695 { ISD::BITREVERSE, MVT::v4i32, 27 }, 1696 { ISD::BITREVERSE, MVT::v8i16, 27 }, 1697 { ISD::BITREVERSE, MVT::v16i8, 20 }, 1698 { ISD::BSWAP, MVT::v2i64, 7 }, 1699 { ISD::BSWAP, MVT::v4i32, 7 }, 1700 { ISD::BSWAP, MVT::v8i16, 7 }, 1701 { ISD::CTLZ, MVT::v2i64, 25 }, 1702 { ISD::CTLZ, MVT::v4i32, 26 }, 1703 { ISD::CTLZ, MVT::v8i16, 20 }, 1704 { ISD::CTLZ, MVT::v16i8, 17 }, 1705 { ISD::CTPOP, MVT::v2i64, 12 }, 1706 { ISD::CTPOP, MVT::v4i32, 15 }, 1707 { ISD::CTPOP, MVT::v8i16, 13 }, 1708 { ISD::CTPOP, MVT::v16i8, 10 }, 1709 { ISD::CTTZ, MVT::v2i64, 14 }, 1710 { ISD::CTTZ, MVT::v4i32, 18 }, 1711 { ISD::CTTZ, MVT::v8i16, 16 }, 1712 { ISD::CTTZ, MVT::v16i8, 13 }, 1713 { ISD::FSQRT, MVT::f64, 32 }, // Nehalem from http://www.agner.org/ 1714 { ISD::FSQRT, MVT::v2f64, 32 }, // Nehalem from http://www.agner.org/ 1715 }; 1716 static const CostTblEntry SSE1CostTbl[] = { 1717 { ISD::FSQRT, MVT::f32, 28 }, // Pentium III from http://www.agner.org/ 1718 { ISD::FSQRT, MVT::v4f32, 56 }, // Pentium III from http://www.agner.org/ 1719 }; 1720 static const CostTblEntry X64CostTbl[] = { // 64-bit targets 1721 { ISD::BITREVERSE, MVT::i64, 14 } 1722 }; 1723 static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets 1724 { ISD::BITREVERSE, MVT::i32, 14 }, 1725 { ISD::BITREVERSE, MVT::i16, 14 }, 1726 { ISD::BITREVERSE, MVT::i8, 11 } 1727 }; 1728 1729 unsigned ISD = ISD::DELETED_NODE; 1730 switch (IID) { 1731 default: 1732 break; 1733 case Intrinsic::bitreverse: 1734 ISD = ISD::BITREVERSE; 1735 break; 1736 case Intrinsic::bswap: 1737 ISD = ISD::BSWAP; 1738 break; 1739 case Intrinsic::ctlz: 1740 ISD = ISD::CTLZ; 1741 break; 1742 case Intrinsic::ctpop: 1743 ISD = ISD::CTPOP; 1744 break; 1745 case Intrinsic::cttz: 1746 ISD = ISD::CTTZ; 1747 break; 1748 case Intrinsic::sqrt: 1749 ISD = ISD::FSQRT; 1750 break; 1751 } 1752 1753 // Legalize the type. 1754 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy); 1755 MVT MTy = LT.second; 1756 1757 // Attempt to lookup cost. 
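  // Illustrative example (not from the original comments): a
  // call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %x) legalizes to MVT::v16i8
  // with LT.first == 1, so an SSSE3 target returns the SSSE3 table entry of
  // 6, while a plain SSE2 target returns 10; a <32 x i8> ctpop on SSE2
  // legalizes to two v16i8 halves and the same entry is scaled to 20.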
1758 if (ST->hasCDI()) 1759 if (const auto *Entry = CostTableLookup(AVX512CDCostTbl, ISD, MTy)) 1760 return LT.first * Entry->Cost; 1761 1762 if (ST->hasBWI()) 1763 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy)) 1764 return LT.first * Entry->Cost; 1765 1766 if (ST->hasAVX512()) 1767 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy)) 1768 return LT.first * Entry->Cost; 1769 1770 if (ST->hasXOP()) 1771 if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy)) 1772 return LT.first * Entry->Cost; 1773 1774 if (ST->hasAVX2()) 1775 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy)) 1776 return LT.first * Entry->Cost; 1777 1778 if (ST->hasAVX()) 1779 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy)) 1780 return LT.first * Entry->Cost; 1781 1782 if (ST->hasSSE42()) 1783 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy)) 1784 return LT.first * Entry->Cost; 1785 1786 if (ST->hasSSSE3()) 1787 if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy)) 1788 return LT.first * Entry->Cost; 1789 1790 if (ST->hasSSE2()) 1791 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy)) 1792 return LT.first * Entry->Cost; 1793 1794 if (ST->hasSSE1()) 1795 if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy)) 1796 return LT.first * Entry->Cost; 1797 1798 if (ST->is64Bit()) 1799 if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy)) 1800 return LT.first * Entry->Cost; 1801 1802 if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy)) 1803 return LT.first * Entry->Cost; 1804 1805 return BaseT::getIntrinsicInstrCost(IID, RetTy, Tys, FMF, ScalarizationCostPassed); 1806 } 1807 1808 int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy, 1809 ArrayRef<Value *> Args, FastMathFlags FMF, unsigned VF) { 1810 return BaseT::getIntrinsicInstrCost(IID, RetTy, Args, FMF, VF); 1811 } 1812 1813 int X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) { 1814 assert(Val->isVectorTy() && "This must be a vector type"); 1815 1816 Type *ScalarType = Val->getScalarType(); 1817 1818 if (Index != -1U) { 1819 // Legalize the type. 1820 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val); 1821 1822 // This type is legalized to a scalar type. 1823 if (!LT.second.isVector()) 1824 return 0; 1825 1826 // The type may be split. Normalize the index to the new type. 1827 unsigned Width = LT.second.getVectorNumElements(); 1828 Index = Index % Width; 1829 1830 // Floating point scalars are already located in index #0. 1831 if (ScalarType->isFloatingPointTy() && Index == 0) 1832 return 0; 1833 } 1834 1835 // Add to the base cost if we know that the extracted element of a vector is 1836 // destined to be moved to and used in the integer register file. 1837 int RegisterFileMoveCost = 0; 1838 if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy()) 1839 RegisterFileMoveCost = 1; 1840 1841 return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost; 1842 } 1843 1844 int X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment, 1845 unsigned AddressSpace, const Instruction *I) { 1846 // Handle non-power-of-two vectors such as <3 x float> 1847 if (VectorType *VTy = dyn_cast<VectorType>(Src)) { 1848 unsigned NumElem = VTy->getVectorNumElements(); 1849 1850 // Handle a few common cases: 1851 // <3 x float> 1852 if (NumElem == 3 && VTy->getScalarSizeInBits() == 32) 1853 // Cost = 64 bit store + extract + 32 bit store. 
1854 return 3; 1855 1856 // <3 x double> 1857 if (NumElem == 3 && VTy->getScalarSizeInBits() == 64) 1858 // Cost = 128 bit store + unpack + 64 bit store. 1859 return 3; 1860 1861 // Assume that all other non-power-of-two numbers are scalarized. 1862 if (!isPowerOf2_32(NumElem)) { 1863 int Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(), Alignment, 1864 AddressSpace); 1865 int SplitCost = getScalarizationOverhead(Src, Opcode == Instruction::Load, 1866 Opcode == Instruction::Store); 1867 return NumElem * Cost + SplitCost; 1868 } 1869 } 1870 1871 // Legalize the type. 1872 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src); 1873 assert((Opcode == Instruction::Load || Opcode == Instruction::Store) && 1874 "Invalid Opcode"); 1875 1876 // Each load/store unit costs 1. 1877 int Cost = LT.first * 1; 1878 1879 // This isn't exactly right. We're using slow unaligned 32-byte accesses as a 1880 // proxy for a double-pumped AVX memory interface such as on Sandybridge. 1881 if (LT.second.getStoreSize() == 32 && ST->isUnalignedMem32Slow()) 1882 Cost *= 2; 1883 1884 return Cost; 1885 } 1886 1887 int X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy, 1888 unsigned Alignment, 1889 unsigned AddressSpace) { 1890 VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy); 1891 if (!SrcVTy) 1892 // To calculate scalar take the regular cost, without mask 1893 return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace); 1894 1895 unsigned NumElem = SrcVTy->getVectorNumElements(); 1896 VectorType *MaskTy = 1897 VectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem); 1898 if ((Opcode == Instruction::Load && !isLegalMaskedLoad(SrcVTy)) || 1899 (Opcode == Instruction::Store && !isLegalMaskedStore(SrcVTy)) || 1900 !isPowerOf2_32(NumElem)) { 1901 // Scalarization 1902 int MaskSplitCost = getScalarizationOverhead(MaskTy, false, true); 1903 int ScalarCompareCost = getCmpSelInstrCost( 1904 Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr); 1905 int BranchCost = getCFInstrCost(Instruction::Br); 1906 int MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost); 1907 1908 int ValueSplitCost = getScalarizationOverhead( 1909 SrcVTy, Opcode == Instruction::Load, Opcode == Instruction::Store); 1910 int MemopCost = 1911 NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(), 1912 Alignment, AddressSpace); 1913 return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost; 1914 } 1915 1916 // Legalize the type. 1917 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy); 1918 auto VT = TLI->getValueType(DL, SrcVTy); 1919 int Cost = 0; 1920 if (VT.isSimple() && LT.second != VT.getSimpleVT() && 1921 LT.second.getVectorNumElements() == NumElem) 1922 // Promotion requires expand/truncate for data and a shuffle for mask. 
      Cost += getShuffleCost(TTI::SK_Alternate, SrcVTy, 0, nullptr) +
              getShuffleCost(TTI::SK_Alternate, MaskTy, 0, nullptr);

  else if (LT.second.getVectorNumElements() > NumElem) {
    VectorType *NewMaskTy = VectorType::get(MaskTy->getVectorElementType(),
                                            LT.second.getVectorNumElements());
    // Expanding requires filling the mask with zeroes.
    Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, 0, MaskTy);
  }
  if (!ST->hasAVX512())
    return Cost + LT.first * 4; // Each maskmov costs 4.

  // AVX-512 masked load/store is cheaper.
  return Cost + LT.first;
}

int X86TTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
                                          const SCEV *Ptr) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  // Cost modeling of strided access computation is hidden by the indexing
  // modes of X86 regardless of the stride value. We don't believe that there
  // is a difference between constant strided access in general and a constant
  // stride value which is less than or equal to 64.
  // Even in the case of a (loop-invariant) stride whose value is not known at
  // compile time, the address computation will not incur more than one extra
  // ADD instruction.
  if (Ty->isVectorTy() && SE) {
    if (!BaseT::isStridedAccess(Ptr))
      return NumVectorInstToHideOverhead;
    if (!BaseT::getConstantStrideStep(SE, Ptr))
      return 1;
  }

  return BaseT::getAddressComputationCost(Ty, SE, Ptr);
}

int X86TTIImpl::getArithmeticReductionCost(unsigned Opcode, Type *ValTy,
                                           bool IsPairwise) {

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use that as the cost.

  static const CostTblEntry SSE42CostTblPairWise[] = {
    { ISD::FADD,  MVT::v2f64,   2 },
    { ISD::FADD,  MVT::v4f32,   4 },
    { ISD::ADD,   MVT::v2i64,   2 },  // The data reported by the IACA tool is "1.6".
    { ISD::ADD,   MVT::v4i32,   3 },  // The data reported by the IACA tool is "3.5".
    { ISD::ADD,   MVT::v8i16,   5 },
  };

  static const CostTblEntry AVX1CostTblPairWise[] = {
    { ISD::FADD,  MVT::v4f32,   4 },
    { ISD::FADD,  MVT::v4f64,   5 },
    { ISD::FADD,  MVT::v8f32,   7 },
    { ISD::ADD,   MVT::v2i64,   1 },  // The data reported by the IACA tool is "1.5".
    { ISD::ADD,   MVT::v4i32,   3 },  // The data reported by the IACA tool is "3.5".
    { ISD::ADD,   MVT::v4i64,   5 },  // The data reported by the IACA tool is "4.8".
    { ISD::ADD,   MVT::v8i16,   5 },
    { ISD::ADD,   MVT::v8i32,   5 },
  };

  static const CostTblEntry SSE42CostTblNoPairWise[] = {
    { ISD::FADD,  MVT::v2f64,   2 },
    { ISD::FADD,  MVT::v4f32,   4 },
    { ISD::ADD,   MVT::v2i64,   2 },  // The data reported by the IACA tool is "1.6".
    { ISD::ADD,   MVT::v4i32,   3 },  // The data reported by the IACA tool is "3.3".
    { ISD::ADD,   MVT::v8i16,   4 },  // The data reported by the IACA tool is "4.3".
2002 }; 2003 2004 static const CostTblEntry AVX1CostTblNoPairWise[] = { 2005 { ISD::FADD, MVT::v4f32, 3 }, 2006 { ISD::FADD, MVT::v4f64, 3 }, 2007 { ISD::FADD, MVT::v8f32, 4 }, 2008 { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5". 2009 { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "2.8". 2010 { ISD::ADD, MVT::v4i64, 3 }, 2011 { ISD::ADD, MVT::v8i16, 4 }, 2012 { ISD::ADD, MVT::v8i32, 5 }, 2013 }; 2014 2015 if (IsPairwise) { 2016 if (ST->hasAVX()) 2017 if (const auto *Entry = CostTableLookup(AVX1CostTblPairWise, ISD, MTy)) 2018 return LT.first * Entry->Cost; 2019 2020 if (ST->hasSSE42()) 2021 if (const auto *Entry = CostTableLookup(SSE42CostTblPairWise, ISD, MTy)) 2022 return LT.first * Entry->Cost; 2023 } else { 2024 if (ST->hasAVX()) 2025 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy)) 2026 return LT.first * Entry->Cost; 2027 2028 if (ST->hasSSE42()) 2029 if (const auto *Entry = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy)) 2030 return LT.first * Entry->Cost; 2031 } 2032 2033 return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwise); 2034 } 2035 2036 int X86TTIImpl::getMinMaxReductionCost(Type *ValTy, Type *CondTy, 2037 bool IsPairwise, bool IsUnsigned) { 2038 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy); 2039 2040 MVT MTy = LT.second; 2041 2042 int ISD; 2043 if (ValTy->isIntOrIntVectorTy()) { 2044 ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN; 2045 } else { 2046 assert(ValTy->isFPOrFPVectorTy() && 2047 "Expected float point or integer vector type."); 2048 ISD = ISD::FMINNUM; 2049 } 2050 2051 // We use the Intel Architecture Code Analyzer(IACA) to measure the throughput 2052 // and make it as the cost. 2053 2054 static const CostTblEntry SSE42CostTblPairWise[] = { 2055 {ISD::FMINNUM, MVT::v2f64, 3}, 2056 {ISD::FMINNUM, MVT::v4f32, 2}, 2057 {ISD::SMIN, MVT::v2i64, 7}, // The data reported by the IACA is "6.8" 2058 {ISD::UMIN, MVT::v2i64, 8}, // The data reported by the IACA is "8.6" 2059 {ISD::SMIN, MVT::v4i32, 1}, // The data reported by the IACA is "1.5" 2060 {ISD::UMIN, MVT::v4i32, 2}, // The data reported by the IACA is "1.8" 2061 {ISD::SMIN, MVT::v8i16, 2}, 2062 {ISD::UMIN, MVT::v8i16, 2}, 2063 }; 2064 2065 static const CostTblEntry AVX1CostTblPairWise[] = { 2066 {ISD::FMINNUM, MVT::v4f32, 1}, 2067 {ISD::FMINNUM, MVT::v4f64, 1}, 2068 {ISD::FMINNUM, MVT::v8f32, 2}, 2069 {ISD::SMIN, MVT::v2i64, 3}, 2070 {ISD::UMIN, MVT::v2i64, 3}, 2071 {ISD::SMIN, MVT::v4i32, 1}, 2072 {ISD::UMIN, MVT::v4i32, 1}, 2073 {ISD::SMIN, MVT::v8i16, 1}, 2074 {ISD::UMIN, MVT::v8i16, 1}, 2075 {ISD::SMIN, MVT::v8i32, 3}, 2076 {ISD::UMIN, MVT::v8i32, 3}, 2077 }; 2078 2079 static const CostTblEntry AVX2CostTblPairWise[] = { 2080 {ISD::SMIN, MVT::v4i64, 2}, 2081 {ISD::UMIN, MVT::v4i64, 2}, 2082 {ISD::SMIN, MVT::v8i32, 1}, 2083 {ISD::UMIN, MVT::v8i32, 1}, 2084 {ISD::SMIN, MVT::v16i16, 1}, 2085 {ISD::UMIN, MVT::v16i16, 1}, 2086 {ISD::SMIN, MVT::v32i8, 2}, 2087 {ISD::UMIN, MVT::v32i8, 2}, 2088 }; 2089 2090 static const CostTblEntry AVX512CostTblPairWise[] = { 2091 {ISD::FMINNUM, MVT::v8f64, 1}, 2092 {ISD::FMINNUM, MVT::v16f32, 2}, 2093 {ISD::SMIN, MVT::v8i64, 2}, 2094 {ISD::UMIN, MVT::v8i64, 2}, 2095 {ISD::SMIN, MVT::v16i32, 1}, 2096 {ISD::UMIN, MVT::v16i32, 1}, 2097 }; 2098 2099 static const CostTblEntry SSE42CostTblNoPairWise[] = { 2100 {ISD::FMINNUM, MVT::v2f64, 3}, 2101 {ISD::FMINNUM, MVT::v4f32, 3}, 2102 {ISD::SMIN, MVT::v2i64, 7}, // The data reported by the IACA is "6.8" 2103 {ISD::UMIN, MVT::v2i64, 9}, // 
The data reported by the IACA is "8.6" 2104 {ISD::SMIN, MVT::v4i32, 1}, // The data reported by the IACA is "1.5" 2105 {ISD::UMIN, MVT::v4i32, 2}, // The data reported by the IACA is "1.8" 2106 {ISD::SMIN, MVT::v8i16, 1}, // The data reported by the IACA is "1.5" 2107 {ISD::UMIN, MVT::v8i16, 2}, // The data reported by the IACA is "1.8" 2108 }; 2109 2110 static const CostTblEntry AVX1CostTblNoPairWise[] = { 2111 {ISD::FMINNUM, MVT::v4f32, 1}, 2112 {ISD::FMINNUM, MVT::v4f64, 1}, 2113 {ISD::FMINNUM, MVT::v8f32, 1}, 2114 {ISD::SMIN, MVT::v2i64, 3}, 2115 {ISD::UMIN, MVT::v2i64, 3}, 2116 {ISD::SMIN, MVT::v4i32, 1}, 2117 {ISD::UMIN, MVT::v4i32, 1}, 2118 {ISD::SMIN, MVT::v8i16, 1}, 2119 {ISD::UMIN, MVT::v8i16, 1}, 2120 {ISD::SMIN, MVT::v8i32, 2}, 2121 {ISD::UMIN, MVT::v8i32, 2}, 2122 }; 2123 2124 static const CostTblEntry AVX2CostTblNoPairWise[] = { 2125 {ISD::SMIN, MVT::v4i64, 1}, 2126 {ISD::UMIN, MVT::v4i64, 1}, 2127 {ISD::SMIN, MVT::v8i32, 1}, 2128 {ISD::UMIN, MVT::v8i32, 1}, 2129 {ISD::SMIN, MVT::v16i16, 1}, 2130 {ISD::UMIN, MVT::v16i16, 1}, 2131 {ISD::SMIN, MVT::v32i8, 1}, 2132 {ISD::UMIN, MVT::v32i8, 1}, 2133 }; 2134 2135 static const CostTblEntry AVX512CostTblNoPairWise[] = { 2136 {ISD::FMINNUM, MVT::v8f64, 1}, 2137 {ISD::FMINNUM, MVT::v16f32, 2}, 2138 {ISD::SMIN, MVT::v8i64, 1}, 2139 {ISD::UMIN, MVT::v8i64, 1}, 2140 {ISD::SMIN, MVT::v16i32, 1}, 2141 {ISD::UMIN, MVT::v16i32, 1}, 2142 }; 2143 2144 if (IsPairwise) { 2145 if (ST->hasAVX512()) 2146 if (const auto *Entry = CostTableLookup(AVX512CostTblPairWise, ISD, MTy)) 2147 return LT.first * Entry->Cost; 2148 2149 if (ST->hasAVX2()) 2150 if (const auto *Entry = CostTableLookup(AVX2CostTblPairWise, ISD, MTy)) 2151 return LT.first * Entry->Cost; 2152 2153 if (ST->hasAVX()) 2154 if (const auto *Entry = CostTableLookup(AVX1CostTblPairWise, ISD, MTy)) 2155 return LT.first * Entry->Cost; 2156 2157 if (ST->hasSSE42()) 2158 if (const auto *Entry = CostTableLookup(SSE42CostTblPairWise, ISD, MTy)) 2159 return LT.first * Entry->Cost; 2160 } else { 2161 if (ST->hasAVX512()) 2162 if (const auto *Entry = 2163 CostTableLookup(AVX512CostTblNoPairWise, ISD, MTy)) 2164 return LT.first * Entry->Cost; 2165 2166 if (ST->hasAVX2()) 2167 if (const auto *Entry = CostTableLookup(AVX2CostTblNoPairWise, ISD, MTy)) 2168 return LT.first * Entry->Cost; 2169 2170 if (ST->hasAVX()) 2171 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy)) 2172 return LT.first * Entry->Cost; 2173 2174 if (ST->hasSSE42()) 2175 if (const auto *Entry = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy)) 2176 return LT.first * Entry->Cost; 2177 } 2178 2179 return BaseT::getMinMaxReductionCost(ValTy, CondTy, IsPairwise, IsUnsigned); 2180 } 2181 2182 /// \brief Calculate the cost of materializing a 64-bit value. This helper 2183 /// method might only calculate a fraction of a larger immediate. Therefore it 2184 /// is valid to return a cost of ZERO. 2185 int X86TTIImpl::getIntImmCost(int64_t Val) { 2186 if (Val == 0) 2187 return TTI::TCC_Free; 2188 2189 if (isInt<32>(Val)) 2190 return TTI::TCC_Basic; 2191 2192 return 2 * TTI::TCC_Basic; 2193 } 2194 2195 int X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) { 2196 assert(Ty->isIntegerTy()); 2197 2198 unsigned BitSize = Ty->getPrimitiveSizeInBits(); 2199 if (BitSize == 0) 2200 return ~0U; 2201 2202 // Never hoist constants larger than 128bit, because this might lead to 2203 // incorrect code generation or assertions in codegen. 
2204 // Fixme: Create a cost model for types larger than i128 once the codegen 2205 // issues have been fixed. 2206 if (BitSize > 128) 2207 return TTI::TCC_Free; 2208 2209 if (Imm == 0) 2210 return TTI::TCC_Free; 2211 2212 // Sign-extend all constants to a multiple of 64-bit. 2213 APInt ImmVal = Imm; 2214 if (BitSize & 0x3f) 2215 ImmVal = Imm.sext((BitSize + 63) & ~0x3fU); 2216 2217 // Split the constant into 64-bit chunks and calculate the cost for each 2218 // chunk. 2219 int Cost = 0; 2220 for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) { 2221 APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64); 2222 int64_t Val = Tmp.getSExtValue(); 2223 Cost += getIntImmCost(Val); 2224 } 2225 // We need at least one instruction to materialize the constant. 2226 return std::max(1, Cost); 2227 } 2228 2229 int X86TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm, 2230 Type *Ty) { 2231 assert(Ty->isIntegerTy()); 2232 2233 unsigned BitSize = Ty->getPrimitiveSizeInBits(); 2234 // There is no cost model for constants with a bit size of 0. Return TCC_Free 2235 // here, so that constant hoisting will ignore this constant. 2236 if (BitSize == 0) 2237 return TTI::TCC_Free; 2238 2239 unsigned ImmIdx = ~0U; 2240 switch (Opcode) { 2241 default: 2242 return TTI::TCC_Free; 2243 case Instruction::GetElementPtr: 2244 // Always hoist the base address of a GetElementPtr. This prevents the 2245 // creation of new constants for every base constant that gets constant 2246 // folded with the offset. 2247 if (Idx == 0) 2248 return 2 * TTI::TCC_Basic; 2249 return TTI::TCC_Free; 2250 case Instruction::Store: 2251 ImmIdx = 0; 2252 break; 2253 case Instruction::ICmp: 2254 // This is an imperfect hack to prevent constant hoisting of 2255 // compares that might be trying to check if a 64-bit value fits in 2256 // 32-bits. The backend can optimize these cases using a right shift by 32. 2257 // Ideally we would check the compare predicate here. There also other 2258 // similar immediates the backend can use shifts for. 2259 if (Idx == 1 && Imm.getBitWidth() == 64) { 2260 uint64_t ImmVal = Imm.getZExtValue(); 2261 if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff) 2262 return TTI::TCC_Free; 2263 } 2264 ImmIdx = 1; 2265 break; 2266 case Instruction::And: 2267 // We support 64-bit ANDs with immediates with 32-bits of leading zeroes 2268 // by using a 32-bit operation with implicit zero extension. Detect such 2269 // immediates here as the normal path expects bit 31 to be sign extended. 2270 if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue())) 2271 return TTI::TCC_Free; 2272 LLVM_FALLTHROUGH; 2273 case Instruction::Add: 2274 case Instruction::Sub: 2275 case Instruction::Mul: 2276 case Instruction::UDiv: 2277 case Instruction::SDiv: 2278 case Instruction::URem: 2279 case Instruction::SRem: 2280 case Instruction::Or: 2281 case Instruction::Xor: 2282 ImmIdx = 1; 2283 break; 2284 // Always return TCC_Free for the shift value of a shift instruction. 
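  // (Illustrative: the immediate in e.g. 'shl i64 %x, 7' is encoded directly
  // in the x86 shift instruction, so hoisting it into a register would only
  // add cost.)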
2285 case Instruction::Shl: 2286 case Instruction::LShr: 2287 case Instruction::AShr: 2288 if (Idx == 1) 2289 return TTI::TCC_Free; 2290 break; 2291 case Instruction::Trunc: 2292 case Instruction::ZExt: 2293 case Instruction::SExt: 2294 case Instruction::IntToPtr: 2295 case Instruction::PtrToInt: 2296 case Instruction::BitCast: 2297 case Instruction::PHI: 2298 case Instruction::Call: 2299 case Instruction::Select: 2300 case Instruction::Ret: 2301 case Instruction::Load: 2302 break; 2303 } 2304 2305 if (Idx == ImmIdx) { 2306 int NumConstants = (BitSize + 63) / 64; 2307 int Cost = X86TTIImpl::getIntImmCost(Imm, Ty); 2308 return (Cost <= NumConstants * TTI::TCC_Basic) 2309 ? static_cast<int>(TTI::TCC_Free) 2310 : Cost; 2311 } 2312 2313 return X86TTIImpl::getIntImmCost(Imm, Ty); 2314 } 2315 2316 int X86TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm, 2317 Type *Ty) { 2318 assert(Ty->isIntegerTy()); 2319 2320 unsigned BitSize = Ty->getPrimitiveSizeInBits(); 2321 // There is no cost model for constants with a bit size of 0. Return TCC_Free 2322 // here, so that constant hoisting will ignore this constant. 2323 if (BitSize == 0) 2324 return TTI::TCC_Free; 2325 2326 switch (IID) { 2327 default: 2328 return TTI::TCC_Free; 2329 case Intrinsic::sadd_with_overflow: 2330 case Intrinsic::uadd_with_overflow: 2331 case Intrinsic::ssub_with_overflow: 2332 case Intrinsic::usub_with_overflow: 2333 case Intrinsic::smul_with_overflow: 2334 case Intrinsic::umul_with_overflow: 2335 if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue())) 2336 return TTI::TCC_Free; 2337 break; 2338 case Intrinsic::experimental_stackmap: 2339 if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue()))) 2340 return TTI::TCC_Free; 2341 break; 2342 case Intrinsic::experimental_patchpoint_void: 2343 case Intrinsic::experimental_patchpoint_i64: 2344 if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue()))) 2345 return TTI::TCC_Free; 2346 break; 2347 } 2348 return X86TTIImpl::getIntImmCost(Imm, Ty); 2349 } 2350 2351 unsigned X86TTIImpl::getUserCost(const User *U, 2352 ArrayRef<const Value *> Operands) { 2353 if (isa<StoreInst>(U)) { 2354 Value *Ptr = U->getOperand(1); 2355 // Store instruction with index and scale costs 2 Uops. 2356 // Check the preceding GEP to identify non-const indices. 2357 if (auto GEP = dyn_cast<GetElementPtrInst>(Ptr)) { 2358 if (!all_of(GEP->indices(), [](Value *V) { return isa<Constant>(V); })) 2359 return TTI::TCC_Basic * 2; 2360 } 2361 return TTI::TCC_Basic; 2362 } 2363 return BaseT::getUserCost(U, Operands); 2364 } 2365 2366 // Return an average cost of Gather / Scatter instruction, maybe improved later 2367 int X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy, Value *Ptr, 2368 unsigned Alignment, unsigned AddressSpace) { 2369 2370 assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost"); 2371 unsigned VF = SrcVTy->getVectorNumElements(); 2372 2373 // Try to reduce index size from 64 bit (default for GEP) 2374 // to 32. It is essential for VF 16. If the index can't be reduced to 32, the 2375 // operation will use 16 x 64 indices which do not fit in a zmm and needs 2376 // to split. Also check that the base pointer is the same for all lanes, 2377 // and that there's at most one variable index. 
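  // Illustrative example (assumed shapes, not from the original comments):
  // for a gather of <16 x float> whose GEP has a common scalar base and a
  // single variable index that is sign-extended from i32, the index vector
  // can be treated as <16 x i32> and fits in one zmm; with genuine 64-bit
  // indices it would legalize to two registers and the cost below doubles
  // via the SplitFactor recursion.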
2378 auto getIndexSizeInBits = [](Value *Ptr, const DataLayout& DL) { 2379 unsigned IndexSize = DL.getPointerSizeInBits(); 2380 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr); 2381 if (IndexSize < 64 || !GEP) 2382 return IndexSize; 2383 2384 unsigned NumOfVarIndices = 0; 2385 Value *Ptrs = GEP->getPointerOperand(); 2386 if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs)) 2387 return IndexSize; 2388 for (unsigned i = 1; i < GEP->getNumOperands(); ++i) { 2389 if (isa<Constant>(GEP->getOperand(i))) 2390 continue; 2391 Type *IndxTy = GEP->getOperand(i)->getType(); 2392 if (IndxTy->isVectorTy()) 2393 IndxTy = IndxTy->getVectorElementType(); 2394 if ((IndxTy->getPrimitiveSizeInBits() == 64 && 2395 !isa<SExtInst>(GEP->getOperand(i))) || 2396 ++NumOfVarIndices > 1) 2397 return IndexSize; // 64 2398 } 2399 return (unsigned)32; 2400 }; 2401 2402 2403 // Trying to reduce IndexSize to 32 bits for vector 16. 2404 // By default the IndexSize is equal to pointer size. 2405 unsigned IndexSize = (ST->hasAVX512() && VF >= 16) 2406 ? getIndexSizeInBits(Ptr, DL) 2407 : DL.getPointerSizeInBits(); 2408 2409 Type *IndexVTy = VectorType::get(IntegerType::get(SrcVTy->getContext(), 2410 IndexSize), VF); 2411 std::pair<int, MVT> IdxsLT = TLI->getTypeLegalizationCost(DL, IndexVTy); 2412 std::pair<int, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, SrcVTy); 2413 int SplitFactor = std::max(IdxsLT.first, SrcLT.first); 2414 if (SplitFactor > 1) { 2415 // Handle splitting of vector of pointers 2416 Type *SplitSrcTy = VectorType::get(SrcVTy->getScalarType(), VF / SplitFactor); 2417 return SplitFactor * getGSVectorCost(Opcode, SplitSrcTy, Ptr, Alignment, 2418 AddressSpace); 2419 } 2420 2421 // The gather / scatter cost is given by Intel architects. It is a rough 2422 // number since we are looking at one instruction in a time. 2423 const int GSOverhead = (Opcode == Instruction::Load) 2424 ? ST->getGatherOverhead() 2425 : ST->getScatterOverhead(); 2426 return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(), 2427 Alignment, AddressSpace); 2428 } 2429 2430 /// Return the cost of full scalarization of gather / scatter operation. 2431 /// 2432 /// Opcode - Load or Store instruction. 2433 /// SrcVTy - The type of the data vector that should be gathered or scattered. 2434 /// VariableMask - The mask is non-constant at compile time. 2435 /// Alignment - Alignment for one element. 2436 /// AddressSpace - pointer[s] address space. 2437 /// 2438 int X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy, 2439 bool VariableMask, unsigned Alignment, 2440 unsigned AddressSpace) { 2441 unsigned VF = SrcVTy->getVectorNumElements(); 2442 2443 int MaskUnpackCost = 0; 2444 if (VariableMask) { 2445 VectorType *MaskTy = 2446 VectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF); 2447 MaskUnpackCost = getScalarizationOverhead(MaskTy, false, true); 2448 int ScalarCompareCost = 2449 getCmpSelInstrCost(Instruction::ICmp, Type::getInt1Ty(SrcVTy->getContext()), 2450 nullptr); 2451 int BranchCost = getCFInstrCost(Instruction::Br); 2452 MaskUnpackCost += VF * (BranchCost + ScalarCompareCost); 2453 } 2454 2455 // The cost of the scalar loads/stores. 
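  // (Illustrative, assumed VF: for VF == 4 with a variable mask the total is
  // 4 scalar memory ops below, plus the 4 compare-and-branch pairs and the
  // mask extraction accounted for above, plus the per-element insert/extract
  // cost added afterwards.)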
2456 int MemoryOpCost = VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(), 2457 Alignment, AddressSpace); 2458 2459 int InsertExtractCost = 0; 2460 if (Opcode == Instruction::Load) 2461 for (unsigned i = 0; i < VF; ++i) 2462 // Add the cost of inserting each scalar load into the vector 2463 InsertExtractCost += 2464 getVectorInstrCost(Instruction::InsertElement, SrcVTy, i); 2465 else 2466 for (unsigned i = 0; i < VF; ++i) 2467 // Add the cost of extracting each element out of the data vector 2468 InsertExtractCost += 2469 getVectorInstrCost(Instruction::ExtractElement, SrcVTy, i); 2470 2471 return MemoryOpCost + MaskUnpackCost + InsertExtractCost; 2472 } 2473 2474 /// Calculate the cost of Gather / Scatter operation 2475 int X86TTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *SrcVTy, 2476 Value *Ptr, bool VariableMask, 2477 unsigned Alignment) { 2478 assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter"); 2479 unsigned VF = SrcVTy->getVectorNumElements(); 2480 PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType()); 2481 if (!PtrTy && Ptr->getType()->isVectorTy()) 2482 PtrTy = dyn_cast<PointerType>(Ptr->getType()->getVectorElementType()); 2483 assert(PtrTy && "Unexpected type for Ptr argument"); 2484 unsigned AddressSpace = PtrTy->getAddressSpace(); 2485 2486 bool Scalarize = false; 2487 if ((Opcode == Instruction::Load && !isLegalMaskedGather(SrcVTy)) || 2488 (Opcode == Instruction::Store && !isLegalMaskedScatter(SrcVTy))) 2489 Scalarize = true; 2490 // Gather / Scatter for vector 2 is not profitable on KNL / SKX 2491 // Vector-4 of gather/scatter instruction does not exist on KNL. 2492 // We can extend it to 8 elements, but zeroing upper bits of 2493 // the mask vector will add more instructions. Right now we give the scalar 2494 // cost of vector-4 for KNL. TODO: Check, maybe the gather/scatter instruction 2495 // is better in the VariableMask case. 2496 if (ST->hasAVX512() && (VF == 2 || (VF == 4 && !ST->hasVLX()))) 2497 Scalarize = true; 2498 2499 if (Scalarize) 2500 return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment, 2501 AddressSpace); 2502 2503 return getGSVectorCost(Opcode, SrcVTy, Ptr, Alignment, AddressSpace); 2504 } 2505 2506 bool X86TTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1, 2507 TargetTransformInfo::LSRCost &C2) { 2508 // X86 specific here are "instruction number 1st priority". 2509 return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost, 2510 C1.NumIVMuls, C1.NumBaseAdds, 2511 C1.ScaleCost, C1.ImmCost, C1.SetupCost) < 2512 std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost, 2513 C2.NumIVMuls, C2.NumBaseAdds, 2514 C2.ScaleCost, C2.ImmCost, C2.SetupCost); 2515 } 2516 2517 bool X86TTIImpl::canMacroFuseCmp() { 2518 return ST->hasMacroFusion(); 2519 } 2520 2521 bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy) { 2522 // The backend can't handle a single element vector. 2523 if (isa<VectorType>(DataTy) && DataTy->getVectorNumElements() == 1) 2524 return false; 2525 Type *ScalarTy = DataTy->getScalarType(); 2526 int DataWidth = isa<PointerType>(ScalarTy) ? 
      DL.getPointerSizeInBits() : ScalarTy->getPrimitiveSizeInBits();

  return ((DataWidth == 32 || DataWidth == 64) && ST->hasAVX()) ||
         ((DataWidth == 8 || DataWidth == 16) && ST->hasBWI());
}

bool X86TTIImpl::isLegalMaskedStore(Type *DataType) {
  return isLegalMaskedLoad(DataType);
}

bool X86TTIImpl::isLegalMaskedGather(Type *DataTy) {
  // This function is currently called in two cases: from the Loop Vectorizer
  // and from the Scalarizer.
  // When the Loop Vectorizer asks about legality of the feature,
  // the vectorization factor is not calculated yet. The Loop Vectorizer
  // sends a scalar type and the decision is based on the width of the
  // scalar element.
  // Later on, the cost model will estimate the usage of this intrinsic based
  // on the vector type.
  // The Scalarizer asks again about legality. It sends a vector type.
  // In this case we can reject non-power-of-2 vectors.
  // We also reject single element vectors as the type legalizer can't
  // scalarize them.
  if (isa<VectorType>(DataTy)) {
    unsigned NumElts = DataTy->getVectorNumElements();
    if (NumElts == 1 || !isPowerOf2_32(NumElts))
      return false;
  }
  Type *ScalarTy = DataTy->getScalarType();
  int DataWidth = isa<PointerType>(ScalarTy) ?
      DL.getPointerSizeInBits() : ScalarTy->getPrimitiveSizeInBits();

  // Some CPUs have better gather performance than others.
  // TODO: Remove the explicit ST->hasAVX512()? That would mean we would only
  // enable gathers when compiling with a specific -march.
  return (DataWidth == 32 || DataWidth == 64) &&
         (ST->hasAVX512() || (ST->hasFastGather() && ST->hasAVX2()));
}

bool X86TTIImpl::isLegalMaskedScatter(Type *DataType) {
  // AVX2 doesn't support scatter.
  if (!ST->hasAVX512())
    return false;
  return isLegalMaskedGather(DataType);
}

bool X86TTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
  EVT VT = TLI->getValueType(DL, DataType);
  return TLI->isOperationLegal(IsSigned ? ISD::SDIVREM : ISD::UDIVREM, VT);
}

bool X86TTIImpl::isFCmpOrdCheaperThanFCmpZero(Type *Ty) {
  return false;
}

bool X86TTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();

  // Treat this as a subset check on subtarget features.
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // FIXME: This is likely too limiting as it will include subtarget features
  // that we might not care about for inlining, but it is conservatively
  // correct.
  return (CallerBits & CalleeBits) == CalleeBits;
}

const X86TTIImpl::TTI::MemCmpExpansionOptions *
X86TTIImpl::enableMemCmpExpansion(bool IsZeroCmp) const {
  // Only enable vector loads for equality comparison.
  // Right now the vector version is not as fast, see #33329.
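  // Illustrative example (not from the original comments): on a 64-bit SSE2
  // target the equality expansion below allows 16-, 8-, 4-, 2- and 1-byte
  // loads, while the three-way (ordered) expansion stays at 8/4/2/1.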
2602 static const auto ThreeWayOptions = [this]() { 2603 TTI::MemCmpExpansionOptions Options; 2604 if (ST->is64Bit()) { 2605 Options.LoadSizes.push_back(8); 2606 } 2607 Options.LoadSizes.push_back(4); 2608 Options.LoadSizes.push_back(2); 2609 Options.LoadSizes.push_back(1); 2610 return Options; 2611 }(); 2612 static const auto EqZeroOptions = [this]() { 2613 TTI::MemCmpExpansionOptions Options; 2614 // TODO: enable AVX512 when the DAG is ready. 2615 // if (ST->hasAVX512()) Options.LoadSizes.push_back(64); 2616 if (ST->hasAVX2()) Options.LoadSizes.push_back(32); 2617 if (ST->hasSSE2()) Options.LoadSizes.push_back(16); 2618 if (ST->is64Bit()) { 2619 Options.LoadSizes.push_back(8); 2620 } 2621 Options.LoadSizes.push_back(4); 2622 Options.LoadSizes.push_back(2); 2623 Options.LoadSizes.push_back(1); 2624 return Options; 2625 }(); 2626 return IsZeroCmp ? &EqZeroOptions : &ThreeWayOptions; 2627 } 2628 2629 bool X86TTIImpl::enableInterleavedAccessVectorization() { 2630 // TODO: We expect this to be beneficial regardless of arch, 2631 // but there are currently some unexplained performance artifacts on Atom. 2632 // As a temporary solution, disable on Atom. 2633 return !(ST->isAtom()); 2634 } 2635 2636 // Get estimation for interleaved load/store operations for AVX2. 2637 // \p Factor is the interleaved-access factor (stride) - number of 2638 // (interleaved) elements in the group. 2639 // \p Indices contains the indices for a strided load: when the 2640 // interleaved load has gaps they indicate which elements are used. 2641 // If Indices is empty (or if the number of indices is equal to the size 2642 // of the interleaved-access as given in \p Factor) the access has no gaps. 2643 // 2644 // As opposed to AVX-512, AVX2 does not have generic shuffles that allow 2645 // computing the cost using a generic formula as a function of generic 2646 // shuffles. We therefore use a lookup table instead, filled according to 2647 // the instruction sequences that codegen currently generates. 2648 int X86TTIImpl::getInterleavedMemoryOpCostAVX2(unsigned Opcode, Type *VecTy, 2649 unsigned Factor, 2650 ArrayRef<unsigned> Indices, 2651 unsigned Alignment, 2652 unsigned AddressSpace) { 2653 2654 // We currently Support only fully-interleaved groups, with no gaps. 2655 // TODO: Support also strided loads (interleaved-groups with gaps). 2656 if (Indices.size() && Indices.size() != Factor) 2657 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices, 2658 Alignment, AddressSpace); 2659 2660 // VecTy for interleave memop is <VF*Factor x Elt>. 2661 // So, for VF=4, Interleave Factor = 3, Element type = i32 we have 2662 // VecTy = <12 x i32>. 2663 MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second; 2664 2665 // This function can be called with VecTy=<6xi128>, Factor=3, in which case 2666 // the VF=2, while v2i128 is an unsupported MVT vector type 2667 // (see MachineValueType.h::getVectorVT()). 2668 if (!LegalVT.isVector()) 2669 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices, 2670 Alignment, AddressSpace); 2671 2672 unsigned VF = VecTy->getVectorNumElements() / Factor; 2673 Type *ScalarTy = VecTy->getVectorElementType(); 2674 2675 // Calculate the number of memory operations (NumOfMemOps), required 2676 // for load/store the VecTy. 2677 unsigned VecTySize = DL.getTypeStoreSize(VecTy); 2678 unsigned LegalVTSize = LegalVT.getStoreSize(); 2679 unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize; 2680 2681 // Get the cost of one memory operation. 
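  // Illustrative walk-through (assumed shapes, not from the original
  // comments): loading <24 x float> with Factor == 3 on AVX2 gives VF == 8,
  // LegalVT == v8f32 and NumOfMemOps == 3, so the {3, v8f32, 17} entry in
  // AVX2InterleavedLoadTbl below is added on top of three 32-byte loads.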
2682 Type *SingleMemOpTy = VectorType::get(VecTy->getVectorElementType(), 2683 LegalVT.getVectorNumElements()); 2684 unsigned MemOpCost = 2685 getMemoryOpCost(Opcode, SingleMemOpTy, Alignment, AddressSpace); 2686 2687 VectorType *VT = VectorType::get(ScalarTy, VF); 2688 EVT ETy = TLI->getValueType(DL, VT); 2689 if (!ETy.isSimple()) 2690 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices, 2691 Alignment, AddressSpace); 2692 2693 // TODO: Complete for other data-types and strides. 2694 // Each combination of Stride, ElementTy and VF results in a different 2695 // sequence; The cost tables are therefore accessed with: 2696 // Factor (stride) and VectorType=VFxElemType. 2697 // The Cost accounts only for the shuffle sequence; 2698 // The cost of the loads/stores is accounted for separately. 2699 // 2700 static const CostTblEntry AVX2InterleavedLoadTbl[] = { 2701 { 2, MVT::v4i64, 6 }, //(load 8i64 and) deinterleave into 2 x 4i64 2702 { 2, MVT::v4f64, 6 }, //(load 8f64 and) deinterleave into 2 x 4f64 2703 2704 { 3, MVT::v2i8, 10 }, //(load 6i8 and) deinterleave into 3 x 2i8 2705 { 3, MVT::v4i8, 4 }, //(load 12i8 and) deinterleave into 3 x 4i8 2706 { 3, MVT::v8i8, 9 }, //(load 24i8 and) deinterleave into 3 x 8i8 2707 { 3, MVT::v16i8, 11}, //(load 48i8 and) deinterleave into 3 x 16i8 2708 { 3, MVT::v32i8, 13}, //(load 96i8 and) deinterleave into 3 x 32i8 2709 { 3, MVT::v8f32, 17 }, //(load 24f32 and)deinterleave into 3 x 8f32 2710 2711 { 4, MVT::v2i8, 12 }, //(load 8i8 and) deinterleave into 4 x 2i8 2712 { 4, MVT::v4i8, 4 }, //(load 16i8 and) deinterleave into 4 x 4i8 2713 { 4, MVT::v8i8, 20 }, //(load 32i8 and) deinterleave into 4 x 8i8 2714 { 4, MVT::v16i8, 39 }, //(load 64i8 and) deinterleave into 4 x 16i8 2715 { 4, MVT::v32i8, 80 }, //(load 128i8 and) deinterleave into 4 x 32i8 2716 2717 { 8, MVT::v8f32, 40 } //(load 64f32 and)deinterleave into 8 x 8f32 2718 }; 2719 2720 static const CostTblEntry AVX2InterleavedStoreTbl[] = { 2721 { 2, MVT::v4i64, 6 }, //interleave into 2 x 4i64 into 8i64 (and store) 2722 { 2, MVT::v4f64, 6 }, //interleave into 2 x 4f64 into 8f64 (and store) 2723 2724 { 3, MVT::v2i8, 7 }, //interleave 3 x 2i8 into 6i8 (and store) 2725 { 3, MVT::v4i8, 8 }, //interleave 3 x 4i8 into 12i8 (and store) 2726 { 3, MVT::v8i8, 11 }, //interleave 3 x 8i8 into 24i8 (and store) 2727 { 3, MVT::v16i8, 11 }, //interleave 3 x 16i8 into 48i8 (and store) 2728 { 3, MVT::v32i8, 13 }, //interleave 3 x 32i8 into 96i8 (and store) 2729 2730 { 4, MVT::v2i8, 12 }, //interleave 4 x 2i8 into 8i8 (and store) 2731 { 4, MVT::v4i8, 9 }, //interleave 4 x 4i8 into 16i8 (and store) 2732 { 4, MVT::v8i8, 10 }, //interleave 4 x 8i8 into 32i8 (and store) 2733 { 4, MVT::v16i8, 10 }, //interleave 4 x 16i8 into 64i8 (and store) 2734 { 4, MVT::v32i8, 12 } //interleave 4 x 32i8 into 128i8 (and store) 2735 }; 2736 2737 if (Opcode == Instruction::Load) { 2738 if (const auto *Entry = 2739 CostTableLookup(AVX2InterleavedLoadTbl, Factor, ETy.getSimpleVT())) 2740 return NumOfMemOps * MemOpCost + Entry->Cost; 2741 } else { 2742 assert(Opcode == Instruction::Store && 2743 "Expected Store Instruction at this point"); 2744 if (const auto *Entry = 2745 CostTableLookup(AVX2InterleavedStoreTbl, Factor, ETy.getSimpleVT())) 2746 return NumOfMemOps * MemOpCost + Entry->Cost; 2747 } 2748 2749 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices, 2750 Alignment, AddressSpace); 2751 } 2752 2753 // Get estimation for interleaved load/store operations and strided load. 
// \p Indices contains indices for a strided load.
// \p Factor - the factor of interleaving.
// AVX-512 provides 3-src shuffles that significantly reduce the cost.
int X86TTIImpl::getInterleavedMemoryOpCostAVX512(unsigned Opcode, Type *VecTy,
                                                 unsigned Factor,
                                                 ArrayRef<unsigned> Indices,
                                                 unsigned Alignment,
                                                 unsigned AddressSpace) {

  // VecTy for an interleaved memop is <VF*Factor x Elt>.
  // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
  // VecTy = <12 x i32>.

  // Calculate the number of memory operations (NumOfMemOps) required
  // to load/store the VecTy.
  MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
  unsigned VecTySize = DL.getTypeStoreSize(VecTy);
  unsigned LegalVTSize = LegalVT.getStoreSize();
  unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;

  // Get the cost of one memory operation.
  Type *SingleMemOpTy = VectorType::get(VecTy->getVectorElementType(),
                                        LegalVT.getVectorNumElements());
  unsigned MemOpCost =
      getMemoryOpCost(Opcode, SingleMemOpTy, Alignment, AddressSpace);

  unsigned VF = VecTy->getVectorNumElements() / Factor;
  MVT VT = MVT::getVectorVT(MVT::getVT(VecTy->getScalarType()), VF);

  if (Opcode == Instruction::Load) {
    // The tables (AVX512InterleavedLoadTbl and AVX512InterleavedStoreTbl)
    // contain the cost of the optimized shuffle sequence that the
    // X86InterleavedAccess pass will generate.
    // The cost of loads and stores is computed separately from the tables.

    // X86InterleavedAccess supports only the following interleaved-access
    // groups.
    static const CostTblEntry AVX512InterleavedLoadTbl[] = {
        {3, MVT::v16i8, 12}, //(load 48i8 and) deinterleave into 3 x 16i8
        {3, MVT::v32i8, 14}, //(load 96i8 and) deinterleave into 3 x 32i8
        {3, MVT::v64i8, 22}, //(load 192i8 and) deinterleave into 3 x 64i8
    };

    if (const auto *Entry =
            CostTableLookup(AVX512InterleavedLoadTbl, Factor, VT))
      return NumOfMemOps * MemOpCost + Entry->Cost;
    // If an entry does not exist, fall back to the default implementation.

    // The kind of shuffle depends on the number of loaded values.
    // If we load the entire data in one register, we can use a 1-src shuffle.
    // Otherwise, we'll merge 2 sources in each operation.
    TTI::ShuffleKind ShuffleKind =
        (NumOfMemOps > 1) ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc;

    unsigned ShuffleCost =
        getShuffleCost(ShuffleKind, SingleMemOpTy, 0, nullptr);

    unsigned NumOfLoadsInInterleaveGrp =
        Indices.size() ? Indices.size() : Factor;
    Type *ResultTy = VectorType::get(VecTy->getVectorElementType(),
                                     VecTy->getVectorNumElements() / Factor);
    unsigned NumOfResults =
        getTLI()->getTypeLegalizationCost(DL, ResultTy).first *
        NumOfLoadsInInterleaveGrp;

    // About half of the loads may be folded into shuffles when we have only
    // one result. If we have more than one result, we do not fold loads at
    // all.
    unsigned NumOfUnfoldedLoads =
        NumOfResults > 1 ? NumOfMemOps : NumOfMemOps / 2;

    // Get the number of shuffle operations per result.
    unsigned NumOfShufflesPerResult =
        std::max((unsigned)1, (unsigned)(NumOfMemOps - 1));

    // An SK_PermuteTwoSrc shuffle clobbers one of its source operands.
    // When we have more than one destination, we need additional instructions
    // to keep the sources around.
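    // Illustrative walk-through (assumed values, not from the original
    // comments): with NumOfResults == 2 and NumOfMemOps == 4 we get 3
    // shuffles per result, all 4 loads stay unfolded, and the move count
    // computed below becomes 2 * 3 / 2 == 3.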
    unsigned NumOfMoves = 0;
    if (NumOfResults > 1 && ShuffleKind == TTI::SK_PermuteTwoSrc)
      NumOfMoves = NumOfResults * NumOfShufflesPerResult / 2;

    int Cost = NumOfResults * NumOfShufflesPerResult * ShuffleCost +
               NumOfUnfoldedLoads * MemOpCost + NumOfMoves;

    return Cost;
  }

  // Store.
  assert(Opcode == Instruction::Store &&
         "Expected Store Instruction at this point");
  // X86InterleavedAccess supports only the following interleaved-access
  // groups.
  static const CostTblEntry AVX512InterleavedStoreTbl[] = {
      {3, MVT::v16i8, 12}, // interleave 3 x 16i8 into 48i8 (and store)
      {3, MVT::v32i8, 14}, // interleave 3 x 32i8 into 96i8 (and store)
      {3, MVT::v64i8, 26}, // interleave 3 x 64i8 into 192i8 (and store)

      {4, MVT::v8i8, 10},  // interleave 4 x 8i8 into 32i8 (and store)
      {4, MVT::v16i8, 11}, // interleave 4 x 16i8 into 64i8 (and store)
      {4, MVT::v32i8, 14}, // interleave 4 x 32i8 into 128i8 (and store)
      {4, MVT::v64i8, 24}  // interleave 4 x 64i8 into 256i8 (and store)
  };

  if (const auto *Entry =
          CostTableLookup(AVX512InterleavedStoreTbl, Factor, VT))
    return NumOfMemOps * MemOpCost + Entry->Cost;
  // If an entry does not exist, fall back to the default implementation.

  // There are no strided stores at the moment, and a store can't be folded
  // into a shuffle.
  unsigned NumOfSources = Factor; // The number of values to be merged.
  unsigned ShuffleCost =
      getShuffleCost(TTI::SK_PermuteTwoSrc, SingleMemOpTy, 0, nullptr);
  unsigned NumOfShufflesPerStore = NumOfSources - 1;

  // An SK_PermuteTwoSrc shuffle clobbers one of its source operands.
  // We need additional instructions to keep the sources around.
  unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2;
  int Cost = NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) +
             NumOfMoves;
  return Cost;
}

int X86TTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                           unsigned Factor,
                                           ArrayRef<unsigned> Indices,
                                           unsigned Alignment,
                                           unsigned AddressSpace) {
  auto isSupportedOnAVX512 = [](Type *VecTy, bool HasBW) {
    Type *EltTy = VecTy->getVectorElementType();
    if (EltTy->isFloatTy() || EltTy->isDoubleTy() || EltTy->isIntegerTy(64) ||
        EltTy->isIntegerTy(32) || EltTy->isPointerTy())
      return true;
    if (EltTy->isIntegerTy(16) || EltTy->isIntegerTy(8))
      return HasBW;
    return false;
  };
  if (ST->hasAVX512() && isSupportedOnAVX512(VecTy, ST->hasBWI()))
    return getInterleavedMemoryOpCostAVX512(Opcode, VecTy, Factor, Indices,
                                            Alignment, AddressSpace);
  if (ST->hasAVX2())
    return getInterleavedMemoryOpCostAVX2(Opcode, VecTy, Factor, Indices,
                                          Alignment, AddressSpace);

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace);
}