//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//
/// A note about the cost model numbers used below: the numbers correspond to
/// some "generic" X86 CPU rather than to a concrete CPU model. Usually the
/// numbers correspond to the CPU where the feature first appeared. For
/// example, if we do Subtarget.hasSSE42() in the lookups below, the cost is
/// based on Nehalem as that was the first CPU to support that feature level
/// and thus most likely has the worst-case cost.
/// Some examples of other technologies/CPUs:
///   SSE 3   - Pentium4 / Athlon64
///   SSE 4.1 - Penryn
///   SSE 4.2 - Nehalem
///   AVX     - Sandy Bridge
///   AVX2    - Haswell
///   AVX-512 - Xeon Phi / Skylake
/// And some examples of instruction target dependent costs (latency):
///                    divss    sqrtss    rsqrtss
///   AMD K7           11-16    19        3
///   Piledriver       9-24     13-15     5
///   Jaguar           14       16        2
///   Pentium II,III   18       30        2
///   Nehalem          7-14     7-18      3
///   Haswell          10-13    11        5
/// TODO: Develop and implement the target dependent cost model and
/// specialize cost numbers for different Cost Model Targets such as
/// throughput, code size, latency and uop count.
//===----------------------------------------------------------------------===//

#include "X86TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

#define DEBUG_TYPE "x86tti"

//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  //   instructions is inefficient. Once the problem is fixed, we should
  //   call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
}

llvm::Optional<unsigned> X86TTIImpl::getCacheSize(
    TargetTransformInfo::CacheLevel Level) const {
  switch (Level) {
  case TargetTransformInfo::CacheLevel::L1D:
    //   - Penryn
    //   - Nehalem
    //   - Westmere
    //   - Sandy Bridge
    //   - Ivy Bridge
    //   - Haswell
    //   - Broadwell
    //   - Skylake
    //   - Kabylake
    return 32 * 1024;  //  32 KByte
  case TargetTransformInfo::CacheLevel::L2D:
    //   - Penryn
    //   - Nehalem
    //   - Westmere
    //   - Sandy Bridge
    //   - Ivy Bridge
    //   - Haswell
    //   - Broadwell
    //   - Skylake
    //   - Kabylake
    return 256 * 1024; // 256 KByte
  }

  llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
}

llvm::Optional<unsigned> X86TTIImpl::getCacheAssociativity(
    TargetTransformInfo::CacheLevel Level) const {
  //   - Penryn
  //   - Nehalem
  //   - Westmere
  //   - Sandy Bridge
  //   - Ivy Bridge
  //   - Haswell
  //   - Broadwell
  //   - Skylake
  //   - Kabylake
  switch (Level) {
  case TargetTransformInfo::CacheLevel::L1D:
    LLVM_FALLTHROUGH;
  case TargetTransformInfo::CacheLevel::L2D:
    return 8;
  }

  llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
}

unsigned X86TTIImpl::getNumberOfRegisters(bool Vector) {
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit()) {
    if (Vector && ST->hasAVX512())
      return 32;
    return 16;
  }
  return 8;
}

unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) const {
  unsigned PreferVectorWidth = ST->getPreferVectorWidth();
  if (Vector) {
    if (ST->hasAVX512() && PreferVectorWidth >= 512)
      return 512;
    if (ST->hasAVX() && PreferVectorWidth >= 256)
      return 256;
    if (ST->hasSSE1() && PreferVectorWidth >= 128)
      return 128;
    return 0;
  }

  if (ST->is64Bit())
    return 64;

  return 32;
}

unsigned X86TTIImpl::getLoadStoreVecRegBitWidth(unsigned) const {
  return getRegisterBitWidth(true);
}

unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // If the loop will not be vectorized, don't interleave the loop.
  // Let the regular unroller handle it instead, which saves the overflow
  // check and memory check cost.
  if (VF == 1)
    return 1;

  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}

int X86TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty,
    TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
    TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo,
    ArrayRef<const Value *> Args) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
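  // Note: LT.first is the type-legalization cost factor (e.g. the number of
  // legal-width pieces a too-wide vector is split into) and LT.second is the
  // legalized MVT. The tables below are keyed on LT.second, and the matched
  // per-instruction cost is scaled by LT.first. For example (illustrative),
  // a v8i32 add on plain SSE2 legalizes to two v4i32 adds, so LT = {2, v4i32}.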

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry GLMCostTable[] = {
    { ISD::FDIV,  MVT::f32,   18 }, // divss
    { ISD::FDIV,  MVT::v4f32, 35 }, // divps
    { ISD::FDIV,  MVT::f64,   33 }, // divsd
    { ISD::FDIV,  MVT::v2f64, 65 }, // divpd
  };

  if (ST->isGLM())
    if (const auto *Entry = CostTableLookup(GLMCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SLMCostTable[] = {
    { ISD::MUL,   MVT::v4i32, 11 }, // pmulld
    { ISD::MUL,   MVT::v8i16, 2  }, // pmullw
    { ISD::MUL,   MVT::v16i8, 14 }, // extend/pmullw/trunc sequence.
    { ISD::FMUL,  MVT::f64,   2  }, // mulsd
    { ISD::FMUL,  MVT::v2f64, 4  }, // mulpd
    { ISD::FMUL,  MVT::v4f32, 2  }, // mulps
    { ISD::FDIV,  MVT::f32,   17 }, // divss
    { ISD::FDIV,  MVT::v4f32, 39 }, // divps
    { ISD::FDIV,  MVT::f64,   32 }, // divsd
    { ISD::FDIV,  MVT::v2f64, 69 }, // divpd
    { ISD::FADD,  MVT::v2f64, 2  }, // addpd
    { ISD::FSUB,  MVT::v2f64, 2  }, // subpd
    // v2i64/v4i64 mul is custom lowered as a series of long:
    // multiplies(3), shifts(3) and adds(2)
    // slm muldq version throughput is 2 and addq throughput 4
    // thus: 3X2 (muldq throughput) + 3X1 (shift throughput) +
    // 2X4 (addq throughput) = 17
    { ISD::MUL,   MVT::v2i64, 17 },
    // slm addq\subq throughput is 4
    { ISD::ADD,   MVT::v2i64, 4  },
    { ISD::SUB,   MVT::v2i64, 4  },
  };

  if (ST->isSLM()) {
    if (Args.size() == 2 && ISD == ISD::MUL && LT.second == MVT::v4i32) {
      // Check if the operands can be shrunk into a smaller datatype.
      bool Op1Signed = false;
      unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
      bool Op2Signed = false;
      unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);

      bool signedMode = Op1Signed | Op2Signed;
      unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);

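      // For example (illustrative): if both v4i32 operands are zero-extended
      // from i8, OpMinSize is 8 and signedMode is false, so the multiply can
      // be costed as a 16-bit pmullw/zext sequence instead of the slower
      // pmulld.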
      if (OpMinSize <= 7)
        return LT.first * 3; // pmullw/sext
      if (!signedMode && OpMinSize <= 8)
        return LT.first * 3; // pmullw/zext
      if (OpMinSize <= 15)
        return LT.first * 5; // pmullw/pmulhw/pshuf
      if (!signedMode && OpMinSize <= 16)
        return LT.first * 5; // pmullw/pmulhw/pshuf
    }

    if (const auto *Entry = CostTableLookup(SLMCostTable, ISD,
                                            LT.second)) {
      return LT.first * Entry->Cost;
    }
  }

  if ((ISD == ISD::SDIV || ISD == ISD::SREM || ISD == ISD::UDIV ||
       ISD == ISD::UREM) &&
      (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    if (ISD == ISD::SDIV || ISD == ISD::SREM) {
      // On X86, vector signed division by a power-of-two constant is
      // normally expanded to the sequence SRA + SRL + ADD + SRA.
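      // For example (illustrative, dividing i32 elements by 2^k):
      //   %sign = ashr %x, 31          ; all-ones in negative lanes
      //   %bias = lshr %sign, 32 - k   ; 2^k - 1 in negative lanes, else 0
      //   %tmp  = add  %x, %bias       ; round towards zero
      //   %res  = ashr %tmp, k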
      // The OperandValue properties may not be the same as that of the
      // previous operation; conservatively assume OP_None.
      int Cost =
          2 * getArithmeticInstrCost(Instruction::AShr, Ty, Op1Info, Op2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      Cost += getArithmeticInstrCost(Instruction::LShr, Ty, Op1Info, Op2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      Cost += getArithmeticInstrCost(Instruction::Add, Ty, Op1Info, Op2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);

      if (ISD == ISD::SREM) {
        // For SREM: (X % C) is the equivalent of (X - (X/C)*C)
        Cost += getArithmeticInstrCost(Instruction::Mul, Ty, Op1Info, Op2Info);
        Cost += getArithmeticInstrCost(Instruction::Sub, Ty, Op1Info, Op2Info);
      }

      return Cost;
    }

    // Vector unsigned division/remainder will be simplified to shifts/masks.
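    // For example (illustrative): X udiv 16 becomes X lshr 4, and X urem 16
    // becomes X and 15, so we cost them as the shift/mask below.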
    if (ISD == ISD::UDIV)
      return getArithmeticInstrCost(Instruction::LShr, Ty, Op1Info, Op2Info,
                                    TargetTransformInfo::OP_None,
                                    TargetTransformInfo::OP_None);

    if (ISD == ISD::UREM)
      return getArithmeticInstrCost(Instruction::And, Ty, Op1Info, Op2Info,
                                    TargetTransformInfo::OP_None,
                                    TargetTransformInfo::OP_None);
  }

  static const CostTblEntry AVX512BWUniformConstCostTable[] = {
    { ISD::SHL,  MVT::v64i8,   2 }, // psllw + pand.
    { ISD::SRL,  MVT::v64i8,   2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v64i8,   4 }, // psrlw, pand, pxor, psubb.
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasBWI()) {
    if (const auto *Entry = CostTableLookup(AVX512BWUniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512UniformConstCostTable[] = {
    { ISD::SRA,  MVT::v2i64,   1 },
    { ISD::SRA,  MVT::v4i64,   1 },
    { ISD::SRA,  MVT::v8i64,   1 },
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX512()) {
    if (const auto *Entry = CostTableLookup(AVX512UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX2UniformConstCostTable[] = {
    { ISD::SHL,  MVT::v32i8,   2 }, // psllw + pand.
    { ISD::SRL,  MVT::v32i8,   2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v32i8,   4 }, // psrlw, pand, pxor, psubb.

    { ISD::SRA,  MVT::v4i64,   4 }, // 2 x psrad + shuffle.
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX2()) {
    if (const auto *Entry = CostTableLookup(AVX2UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformConstCostTable[] = {
    { ISD::SHL,  MVT::v16i8,     2 }, // psllw + pand.
    { ISD::SRL,  MVT::v16i8,     2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v16i8,     4 }, // psrlw, pand, pxor, psubb.

    { ISD::SHL,  MVT::v32i8,   4+2 }, // 2*(psllw + pand) + split.
    { ISD::SRL,  MVT::v32i8,   4+2 }, // 2*(psrlw + pand) + split.
    { ISD::SRA,  MVT::v32i8,   8+2 }, // 2*(psrlw, pand, pxor, psubb) + split.
  };

  // XOP has faster vXi8 shifts.
  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2() && !ST->hasXOP()) {
    if (const auto *Entry =
            CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512BWConstCostTable[] = {
    { ISD::SDIV, MVT::v64i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::SREM, MVT::v64i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v64i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::UREM, MVT::v64i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v32i16,  6 }, // vpmulhw sequence
    { ISD::SREM, MVT::v32i16,  8 }, // vpmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i16,  6 }, // vpmulhuw sequence
    { ISD::UREM, MVT::v32i16,  8 }, // vpmulhuw+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasBWI()) {
    if (const auto *Entry =
            CostTableLookup(AVX512BWConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512ConstCostTable[] = {
    { ISD::SDIV, MVT::v16i32, 15 }, // vpmuldq sequence
    { ISD::SREM, MVT::v16i32, 17 }, // vpmuldq+mul+sub sequence
    { ISD::UDIV, MVT::v16i32, 15 }, // vpmuludq sequence
    { ISD::UREM, MVT::v16i32, 17 }, // vpmuludq+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasAVX512()) {
    if (const auto *Entry =
            CostTableLookup(AVX512ConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX2ConstCostTable[] = {
    { ISD::SDIV, MVT::v32i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::SREM, MVT::v32i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::UREM, MVT::v32i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v16i16,  6 }, // vpmulhw sequence
    { ISD::SREM, MVT::v16i16,  8 }, // vpmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v16i16,  6 }, // vpmulhuw sequence
    { ISD::UREM, MVT::v16i16,  8 }, // vpmulhuw+mul+sub sequence
    { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
    { ISD::SREM, MVT::v8i32,  19 }, // vpmuldq+mul+sub sequence
    { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
    { ISD::UREM, MVT::v8i32,  19 }, // vpmuludq+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasAVX2()) {
    if (const auto *Entry = CostTableLookup(AVX2ConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2ConstCostTable[] = {
    { ISD::SDIV, MVT::v32i8,  28+2 }, // 4*ext+4*pmulhw sequence + split.
    { ISD::SREM, MVT::v32i8,  32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
    { ISD::SDIV, MVT::v16i8,  14 },   // 2*ext+2*pmulhw sequence
    { ISD::SREM, MVT::v16i8,  16 },   // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i8,  28+2 }, // 4*ext+4*pmulhw sequence + split.
    { ISD::UREM, MVT::v32i8,  32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
    { ISD::UDIV, MVT::v16i8,  14 },   // 2*ext+2*pmulhw sequence
    { ISD::UREM, MVT::v16i8,  16 },   // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v16i16, 12+2 }, // 2*pmulhw sequence + split.
    { ISD::SREM, MVT::v16i16, 16+2 }, // 2*pmulhw+mul+sub sequence + split.
    { ISD::SDIV, MVT::v8i16,   6 },   // pmulhw sequence
    { ISD::SREM, MVT::v8i16,   8 },   // pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v16i16, 12+2 }, // 2*pmulhuw sequence + split.
    { ISD::UREM, MVT::v16i16, 16+2 }, // 2*pmulhuw+mul+sub sequence + split.
    { ISD::UDIV, MVT::v8i16,   6 },   // pmulhuw sequence
    { ISD::UREM, MVT::v8i16,   8 },   // pmulhuw+mul+sub sequence
    { ISD::SDIV, MVT::v8i32,  38+2 }, // 2*pmuludq sequence + split.
    { ISD::SREM, MVT::v8i32,  48+2 }, // 2*pmuludq+mul+sub sequence + split.
    { ISD::SDIV, MVT::v4i32,  19 },   // pmuludq sequence
    { ISD::SREM, MVT::v4i32,  24 },   // pmuludq+mul+sub sequence
    { ISD::UDIV, MVT::v8i32,  30+2 }, // 2*pmuludq sequence + split.
    { ISD::UREM, MVT::v8i32,  40+2 }, // 2*pmuludq+mul+sub sequence + split.
    { ISD::UDIV, MVT::v4i32,  15 },   // pmuludq sequence
    { ISD::UREM, MVT::v4i32,  20 },   // pmuludq+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasSSE2()) {
    // pmuldq sequence.
    if (ISD == ISD::SDIV && LT.second == MVT::v8i32 && ST->hasAVX())
      return LT.first * 32;
    if (ISD == ISD::SREM && LT.second == MVT::v8i32 && ST->hasAVX())
      return LT.first * 38;
    if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 15;
    if (ISD == ISD::SREM && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 20;

    if (const auto *Entry = CostTableLookup(SSE2ConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX2UniformCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i16, 1 }, // psllw.
    { ISD::SRL,  MVT::v16i16, 1 }, // psrlw.
    { ISD::SRA,  MVT::v16i16, 1 }, // psraw.
  };

  if (ST->hasAVX2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(AVX2UniformCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v8i16,  1 }, // psllw.
    { ISD::SHL,  MVT::v4i32,  1 }, // pslld
    { ISD::SHL,  MVT::v2i64,  1 }, // psllq.

    { ISD::SRL,  MVT::v8i16,  1 }, // psrlw.
    { ISD::SRL,  MVT::v4i32,  1 }, // psrld.
    { ISD::SRL,  MVT::v2i64,  1 }, // psrlq.

    { ISD::SRA,  MVT::v8i16,  1 }, // psraw.
    { ISD::SRA,  MVT::v4i32,  1 }, // psrad.
  };

  if (ST->hasSSE2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(SSE2UniformCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512DQCostTable[] = {
    { ISD::MUL,  MVT::v2i64, 1 },
    { ISD::MUL,  MVT::v4i64, 1 },
    { ISD::MUL,  MVT::v8i64, 1 }
  };

  // Look for AVX512DQ lowering tricks for custom cases.
  if (ST->hasDQI())
    if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512BWCostTable[] = {
    { ISD::SHL,   MVT::v8i16,   1 }, // vpsllvw
    { ISD::SRL,   MVT::v8i16,   1 }, // vpsrlvw
    { ISD::SRA,   MVT::v8i16,   1 }, // vpsravw

    { ISD::SHL,   MVT::v16i16,  1 }, // vpsllvw
    { ISD::SRL,   MVT::v16i16,  1 }, // vpsrlvw
    { ISD::SRA,   MVT::v16i16,  1 }, // vpsravw

    { ISD::SHL,   MVT::v32i16,  1 }, // vpsllvw
    { ISD::SRL,   MVT::v32i16,  1 }, // vpsrlvw
    { ISD::SRA,   MVT::v32i16,  1 }, // vpsravw

    { ISD::SHL,   MVT::v64i8,  11 }, // vpblendvb sequence.
    { ISD::SRL,   MVT::v64i8,  11 }, // vpblendvb sequence.
    { ISD::SRA,   MVT::v64i8,  24 }, // vpblendvb sequence.

    { ISD::MUL,   MVT::v64i8,  11 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,   MVT::v32i8,   4 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,   MVT::v16i8,   4 }, // extend/pmullw/trunc sequence.
  };

  // Look for AVX512BW lowering tricks for custom cases.
  if (ST->hasBWI())
    if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512CostTable[] = {
    { ISD::SHL,     MVT::v16i32,  1 },
    { ISD::SRL,     MVT::v16i32,  1 },
    { ISD::SRA,     MVT::v16i32,  1 },

    { ISD::SHL,     MVT::v8i64,   1 },
    { ISD::SRL,     MVT::v8i64,   1 },

    { ISD::SRA,     MVT::v2i64,   1 },
    { ISD::SRA,     MVT::v4i64,   1 },
    { ISD::SRA,     MVT::v8i64,   1 },

    { ISD::MUL,     MVT::v32i8,  13 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,     MVT::v16i8,   5 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,     MVT::v16i32,  1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,     MVT::v8i32,   1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,     MVT::v4i32,   1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,     MVT::v8i64,   8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FADD,    MVT::v8f64,   1 }, // Skylake from http://www.agner.org/
    { ISD::FSUB,    MVT::v8f64,   1 }, // Skylake from http://www.agner.org/
    { ISD::FMUL,    MVT::v8f64,   1 }, // Skylake from http://www.agner.org/

    { ISD::FADD,    MVT::v16f32,  1 }, // Skylake from http://www.agner.org/
    { ISD::FSUB,    MVT::v16f32,  1 }, // Skylake from http://www.agner.org/
    { ISD::FMUL,    MVT::v16f32,  1 }, // Skylake from http://www.agner.org/
  };

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX2ShiftCostTable[] = {
    // Shifts on v4i64/v8i32 are legal on AVX2 even though we mark them as
    // custom in order to detect the cases where the shift amount is a scalar
    // one.
    { ISD::SHL,     MVT::v4i32,  1 },
    { ISD::SRL,     MVT::v4i32,  1 },
    { ISD::SRA,     MVT::v4i32,  1 },
    { ISD::SHL,     MVT::v8i32,  1 },
    { ISD::SRL,     MVT::v8i32,  1 },
    { ISD::SRA,     MVT::v8i32,  1 },
    { ISD::SHL,     MVT::v2i64,  1 },
    { ISD::SRL,     MVT::v2i64,  1 },
    { ISD::SHL,     MVT::v4i64,  1 },
    { ISD::SRL,     MVT::v4i64,  1 },
  };

  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return getArithmeticInstrCost(Instruction::Mul, Ty, Op1Info, Op2Info,
                                    TargetTransformInfo::OP_None,
                                    TargetTransformInfo::OP_None);

    if (const auto *Entry = CostTableLookup(AVX2ShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry XOPShiftCostTable[] = {
    // 128bit shifts take 1 cycle, but right shifts require negation
    // beforehand.
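    // (Illustrative: XOP's per-element variable shifts shift right for
    // negative counts, so a right shift is done by negating the shift
    // amounts first, adding one extra instruction.)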
    { ISD::SHL,     MVT::v16i8,    1 },
    { ISD::SRL,     MVT::v16i8,    2 },
    { ISD::SRA,     MVT::v16i8,    2 },
    { ISD::SHL,     MVT::v8i16,    1 },
    { ISD::SRL,     MVT::v8i16,    2 },
    { ISD::SRA,     MVT::v8i16,    2 },
    { ISD::SHL,     MVT::v4i32,    1 },
    { ISD::SRL,     MVT::v4i32,    2 },
    { ISD::SRA,     MVT::v4i32,    2 },
    { ISD::SHL,     MVT::v2i64,    1 },
    { ISD::SRL,     MVT::v2i64,    2 },
    { ISD::SRA,     MVT::v2i64,    2 },
    // 256bit shifts require splitting if AVX2 didn't catch them above.
    { ISD::SHL,     MVT::v32i8,  2+2 },
    { ISD::SRL,     MVT::v32i8,  4+2 },
    { ISD::SRA,     MVT::v32i8,  4+2 },
    { ISD::SHL,     MVT::v16i16, 2+2 },
    { ISD::SRL,     MVT::v16i16, 4+2 },
    { ISD::SRA,     MVT::v16i16, 4+2 },
    { ISD::SHL,     MVT::v8i32,  2+2 },
    { ISD::SRL,     MVT::v8i32,  4+2 },
    { ISD::SRA,     MVT::v8i32,  4+2 },
    { ISD::SHL,     MVT::v4i64,  2+2 },
    { ISD::SRL,     MVT::v4i64,  4+2 },
    { ISD::SRA,     MVT::v4i64,  4+2 },
  };

  // Look for XOP lowering tricks.
  if (ST->hasXOP())
    if (const auto *Entry = CostTableLookup(XOPShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE2UniformShiftCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i16, 2+2 }, // 2*psllw + split.
    { ISD::SHL,  MVT::v8i32,  2+2 }, // 2*pslld + split.
    { ISD::SHL,  MVT::v4i64,  2+2 }, // 2*psllq + split.

    { ISD::SRL,  MVT::v16i16, 2+2 }, // 2*psrlw + split.
    { ISD::SRL,  MVT::v8i32,  2+2 }, // 2*psrld + split.
    { ISD::SRL,  MVT::v4i64,  2+2 }, // 2*psrlq + split.

    { ISD::SRA,  MVT::v16i16, 2+2 }, // 2*psraw + split.
    { ISD::SRA,  MVT::v8i32,  2+2 }, // 2*psrad + split.
    { ISD::SRA,  MVT::v2i64,    4 }, // 2*psrad + shuffle.
    { ISD::SRA,  MVT::v4i64,  8+2 }, // 2*(2*psrad + shuffle) + split.
  };

  if (ST->hasSSE2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {

    // Handle AVX2 uniform v4i64 ISD::SRA, it's not worth a table.
    if (ISD == ISD::SRA && LT.second == MVT::v4i64 && ST->hasAVX2())
      return LT.first * 4; // 2*psrad + shuffle.

    if (const auto *Entry =
            CostTableLookup(SSE2UniformShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  if (ISD == ISD::SHL &&
      Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
    MVT VT = LT.second;
    // A vector shift left by a non-uniform constant can be lowered into a
    // vector multiply.
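    // For example (illustrative): shl <4 x i32> %x, <1, 2, 3, 4> can be
    // costed as mul <4 x i32> %x, <2, 4, 8, 16>.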
    if (((VT == MVT::v8i16 || VT == MVT::v4i32) && ST->hasSSE2()) ||
        ((VT == MVT::v16i16 || VT == MVT::v8i32) && ST->hasAVX()))
      ISD = ISD::MUL;
  }

  static const CostTblEntry AVX2CostTable[] = {
    { ISD::SHL,  MVT::v32i8,  11 }, // vpblendvb sequence.
    { ISD::SHL,  MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRL,  MVT::v32i8,  11 }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRA,  MVT::v32i8,  24 }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v16i16, 10 }, // extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v2i64,   4 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,   4 }, // srl/xor/sub sequence.

    { ISD::SUB,  MVT::v32i8,   1 }, // psubb
    { ISD::ADD,  MVT::v32i8,   1 }, // paddb
    { ISD::SUB,  MVT::v16i16,  1 }, // psubw
    { ISD::ADD,  MVT::v16i16,  1 }, // paddw
    { ISD::SUB,  MVT::v8i32,   1 }, // psubd
    { ISD::ADD,  MVT::v8i32,   1 }, // paddd
    { ISD::SUB,  MVT::v4i64,   1 }, // psubq
    { ISD::ADD,  MVT::v4i64,   1 }, // paddq

    { ISD::MUL,  MVT::v32i8,  17 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i8,   7 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i16,  1 }, // pmullw
    { ISD::MUL,  MVT::v8i32,   2 }, // pmulld (Haswell from agner.org)
    { ISD::MUL,  MVT::v4i64,   8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FADD, MVT::v4f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FADD, MVT::v8f32,   1 }, // Haswell from http://www.agner.org/
    { ISD::FSUB, MVT::v4f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FSUB, MVT::v8f32,   1 }, // Haswell from http://www.agner.org/
    { ISD::FMUL, MVT::v4f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FMUL, MVT::v8f32,   1 }, // Haswell from http://www.agner.org/

    { ISD::FDIV, MVT::f32,     7 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,   7 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v8f32,  14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::f64,    14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,  14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v4f64,  28 }, // Haswell from http://www.agner.org/
  };

  // Look for AVX2 lowering tricks for custom cases.
  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL,     MVT::v16i16,  4 },
    { ISD::MUL,     MVT::v8i32,   4 },
    { ISD::SUB,     MVT::v32i8,   4 },
    { ISD::ADD,     MVT::v32i8,   4 },
    { ISD::SUB,     MVT::v16i16,  4 },
    { ISD::ADD,     MVT::v16i16,  4 },
    { ISD::SUB,     MVT::v8i32,   4 },
    { ISD::ADD,     MVT::v8i32,   4 },
    { ISD::SUB,     MVT::v4i64,   4 },
    { ISD::ADD,     MVT::v4i64,   4 },

    // A v4i64 multiply is custom lowered as two split v2i64 vectors that then
    // are lowered as a series of long multiplies(3), shifts(3) and adds(2)
    // Because we believe v4i64 to be a legal type, we must also include the
    // extract+insert in the cost table. Therefore, the cost here is 18
    // instead of 8.
    { ISD::MUL,     MVT::v4i64,  18 },

    { ISD::MUL,     MVT::v32i8,  26 }, // extend/pmullw/trunc sequence.

    { ISD::FDIV,    MVT::f32,    14 }, // SNB from http://www.agner.org/
    { ISD::FDIV,    MVT::v4f32,  14 }, // SNB from http://www.agner.org/
    { ISD::FDIV,    MVT::v8f32,  28 }, // SNB from http://www.agner.org/
    { ISD::FDIV,    MVT::f64,    22 }, // SNB from http://www.agner.org/
    { ISD::FDIV,    MVT::v2f64,  22 }, // SNB from http://www.agner.org/
    { ISD::FDIV,    MVT::v4f64,  44 }, // SNB from http://www.agner.org/
  };

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE42CostTable[] = {
    { ISD::FADD, MVT::f64,     1 }, // Nehalem from http://www.agner.org/
    { ISD::FADD, MVT::f32,     1 }, // Nehalem from http://www.agner.org/
    { ISD::FADD, MVT::v2f64,   1 }, // Nehalem from http://www.agner.org/
    { ISD::FADD, MVT::v4f32,   1 }, // Nehalem from http://www.agner.org/

    { ISD::FSUB, MVT::f64,     1 }, // Nehalem from http://www.agner.org/
    { ISD::FSUB, MVT::f32,     1 }, // Nehalem from http://www.agner.org/
    { ISD::FSUB, MVT::v2f64,   1 }, // Nehalem from http://www.agner.org/
    { ISD::FSUB, MVT::v4f32,   1 }, // Nehalem from http://www.agner.org/

    { ISD::FMUL, MVT::f64,     1 }, // Nehalem from http://www.agner.org/
    { ISD::FMUL, MVT::f32,     1 }, // Nehalem from http://www.agner.org/
    { ISD::FMUL, MVT::v2f64,   1 }, // Nehalem from http://www.agner.org/
    { ISD::FMUL, MVT::v4f32,   1 }, // Nehalem from http://www.agner.org/

    { ISD::FDIV, MVT::f32,    14 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,  14 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::f64,    22 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,  22 }, // Nehalem from http://www.agner.org/
  };

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE41CostTable[] = {
    { ISD::SHL,  MVT::v16i8,      11 }, // pblendvb sequence.
    { ISD::SHL,  MVT::v32i8,  2*11+2 }, // pblendvb sequence + split.
    { ISD::SHL,  MVT::v8i16,      14 }, // pblendvb sequence.
    { ISD::SHL,  MVT::v16i16, 2*14+2 }, // pblendvb sequence + split.
    { ISD::SHL,  MVT::v4i32,       4 }, // pslld/paddd/cvttps2dq/pmulld
    { ISD::SHL,  MVT::v8i32,   2*4+2 }, // pslld/paddd/cvttps2dq/pmulld + split

    { ISD::SRL,  MVT::v16i8,      12 }, // pblendvb sequence.
    { ISD::SRL,  MVT::v32i8,  2*12+2 }, // pblendvb sequence + split.
    { ISD::SRL,  MVT::v8i16,      14 }, // pblendvb sequence.
    { ISD::SRL,  MVT::v16i16, 2*14+2 }, // pblendvb sequence + split.
    { ISD::SRL,  MVT::v4i32,      11 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v8i32,  2*11+2 }, // Shift each lane + blend + split.

    { ISD::SRA,  MVT::v16i8,      24 }, // pblendvb sequence.
    { ISD::SRA,  MVT::v32i8,  2*24+2 }, // pblendvb sequence + split.
    { ISD::SRA,  MVT::v8i16,      14 }, // pblendvb sequence.
    { ISD::SRA,  MVT::v16i16, 2*14+2 }, // pblendvb sequence + split.
    { ISD::SRA,  MVT::v4i32,      12 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v8i32,  2*12+2 }, // Shift each lane + blend + split.

    { ISD::MUL,  MVT::v4i32,       2 }  // pmulld (Nehalem from agner.org)
  };

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    { ISD::SHL,  MVT::v16i8,     26 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v8i16,     32 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v4i32,    2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v2i64,      4 }, // splat+shuffle sequence.
    { ISD::SHL,  MVT::v4i64,  2*4+2 }, // splat+shuffle sequence + split.

    { ISD::SRL,  MVT::v16i8,     26 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v8i16,     32 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v4i32,     16 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v2i64,      4 }, // splat+shuffle sequence.
    { ISD::SRL,  MVT::v4i64,  2*4+2 }, // splat+shuffle sequence + split.

    { ISD::SRA,  MVT::v16i8,     54 }, // unpacked cmpgtb sequence.
    { ISD::SRA,  MVT::v8i16,     32 }, // cmpgtb sequence.
    { ISD::SRA,  MVT::v4i32,     16 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v2i64,     12 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64, 2*12+2 }, // srl/xor/sub sequence+split.

    { ISD::MUL,  MVT::v16i8,     12 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v8i16,      1 }, // pmullw
    { ISD::MUL,  MVT::v4i32,      6 }, // 3*pmuludq/4*shuffle
    { ISD::MUL,  MVT::v2i64,      8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FDIV, MVT::f32,       23 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,     39 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::f64,       38 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,     69 }, // Pentium IV from http://www.agner.org/
  };

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE1CostTable[] = {
    { ISD::FDIV, MVT::f32,   17 }, // Pentium III from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32, 34 }, // Pentium III from http://www.agner.org/
  };

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  // It is not a good idea to vectorize division. We have to scalarize it and
  // in the process we will often end up having to spill regular
  // registers. The overhead of division is going to dominate most kernels
  // anyway, so try hard to prevent vectorization of division - it is
  // generally a bad idea. Assume somewhat arbitrarily that we have to be able
  // to hide "20 cycles" for each lane.
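  // For example (illustrative): a v4i32 sdiv with non-constant operands is
  // costed as 20 * LT.first * 4 * ScalarCost below, which makes almost any
  // vectorized division look unprofitable next to four scalar divides.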
  if (LT.second.isVector() && (ISD == ISD::SDIV || ISD == ISD::SREM ||
                               ISD == ISD::UDIV || ISD == ISD::UREM)) {
    int ScalarCost = getArithmeticInstrCost(
        Opcode, Ty->getScalarType(), Op1Info, Op2Info,
        TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
    return 20 * LT.first * LT.second.getVectorNumElements() * ScalarCost;
  }

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info);
}

int X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // 64-bit packed float vectors (v2f32) are widened to type v4f32.
  // 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

  // Treat Transpose as 2-op shuffles - there's no difference in lowering.
  if (Kind == TTI::SK_Transpose)
    Kind = TTI::SK_PermuteTwoSrc;

  // For Broadcasts we are splatting the first element from the first input
  // register, so we only need to reference that input and all the output
  // registers are the same.
  if (Kind == TTI::SK_Broadcast)
    LT.first = 1;

  // We are going to permute multiple sources and the result will be in
  // multiple destinations. We provide an accurate cost only for splits where
  // the element type remains the same.
  if (Kind == TTI::SK_PermuteSingleSrc && LT.first != 1) {
    MVT LegalVT = LT.second;
    if (LegalVT.isVector() &&
        LegalVT.getVectorElementType().getSizeInBits() ==
            Tp->getVectorElementType()->getPrimitiveSizeInBits() &&
        LegalVT.getVectorNumElements() < Tp->getVectorNumElements()) {

      unsigned VecTySize = DL.getTypeStoreSize(Tp);
      unsigned LegalVTSize = LegalVT.getStoreSize();
      // Number of source vectors after legalization:
      unsigned NumOfSrcs = (VecTySize + LegalVTSize - 1) / LegalVTSize;
      // Number of destination vectors after legalization:
      unsigned NumOfDests = LT.first;

      Type *SingleOpTy = VectorType::get(Tp->getVectorElementType(),
                                         LegalVT.getVectorNumElements());

      unsigned NumOfShuffles = (NumOfSrcs - 1) * NumOfDests;
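      // For example (illustrative): a v64i8 single-source permute on AVX2
      // legalizes to two v32i8 registers (NumOfSrcs = NumOfDests = 2), so we
      // cost it as (2 - 1) * 2 = 2 two-source v32i8 shuffles.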
      return NumOfShuffles *
             getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy, 0, nullptr);
    }

    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }

  // For 2-input shuffles, we must account for splitting the 2 inputs into
  // many.
  if (Kind == TTI::SK_PermuteTwoSrc && LT.first != 1) {
    // We assume that source and destination have the same vector type.
    int NumOfDests = LT.first;
    int NumOfShufflesPerDest = LT.first * 2 - 1;
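    // For example (illustrative): with LT.first == 2, each of the 2
    // destination registers may draw from any of the 4 source halves, so we
    // charge 2 * (2 * 2 - 1) = 6 shuffles in total.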
    LT.first = NumOfDests * NumOfShufflesPerDest;
  }

  static const CostTblEntry AVX512VBMIShuffleTbl[] = {
    { TTI::SK_Reverse,          MVT::v64i8,  1 }, // vpermb
    { TTI::SK_Reverse,          MVT::v32i8,  1 }, // vpermb

    { TTI::SK_PermuteSingleSrc, MVT::v64i8,  1 }, // vpermb
    { TTI::SK_PermuteSingleSrc, MVT::v32i8,  1 }, // vpermb

    { TTI::SK_PermuteTwoSrc,    MVT::v64i8,  1 }, // vpermt2b
    { TTI::SK_PermuteTwoSrc,    MVT::v32i8,  1 }, // vpermt2b
    { TTI::SK_PermuteTwoSrc,    MVT::v16i8,  1 }  // vpermt2b
  };

  if (ST->hasVBMI())
    if (const auto *Entry =
            CostTableLookup(AVX512VBMIShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512BWShuffleTbl[] = {
    { TTI::SK_Broadcast,        MVT::v32i16, 1 }, // vpbroadcastw
    { TTI::SK_Broadcast,        MVT::v64i8,  1 }, // vpbroadcastb

    { TTI::SK_Reverse,          MVT::v32i16, 1 }, // vpermw
    { TTI::SK_Reverse,          MVT::v16i16, 1 }, // vpermw
    { TTI::SK_Reverse,          MVT::v64i8,  2 }, // pshufb + vshufi64x2

    { TTI::SK_PermuteSingleSrc, MVT::v32i16, 1 }, // vpermw
    { TTI::SK_PermuteSingleSrc, MVT::v16i16, 1 }, // vpermw
    { TTI::SK_PermuteSingleSrc, MVT::v8i16,  1 }, // vpermw
    { TTI::SK_PermuteSingleSrc, MVT::v64i8,  8 }, // extend to v32i16
    { TTI::SK_PermuteSingleSrc, MVT::v32i8,  3 }, // vpermw + zext/trunc

    { TTI::SK_PermuteTwoSrc,    MVT::v32i16, 1 }, // vpermt2w
    { TTI::SK_PermuteTwoSrc,    MVT::v16i16, 1 }, // vpermt2w
    { TTI::SK_PermuteTwoSrc,    MVT::v8i16,  1 }, // vpermt2w
    { TTI::SK_PermuteTwoSrc,    MVT::v32i8,  3 }, // zext + vpermt2w + trunc
    { TTI::SK_PermuteTwoSrc,    MVT::v64i8, 19 }, // 6 * v32i8 + 1
    { TTI::SK_PermuteTwoSrc,    MVT::v16i8,  3 }  // zext + vpermt2w + trunc
  };

  if (ST->hasBWI())
    if (const auto *Entry =
            CostTableLookup(AVX512BWShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512ShuffleTbl[] = {
    { TTI::SK_Broadcast,        MVT::v8f64,  1 }, // vbroadcastpd
    { TTI::SK_Broadcast,        MVT::v16f32, 1 }, // vbroadcastps
    { TTI::SK_Broadcast,        MVT::v8i64,  1 }, // vpbroadcastq
    { TTI::SK_Broadcast,        MVT::v16i32, 1 }, // vpbroadcastd

    { TTI::SK_Reverse,          MVT::v8f64,  1 }, // vpermpd
    { TTI::SK_Reverse,          MVT::v16f32, 1 }, // vpermps
    { TTI::SK_Reverse,          MVT::v8i64,  1 }, // vpermq
    { TTI::SK_Reverse,          MVT::v16i32, 1 }, // vpermd

    { TTI::SK_PermuteSingleSrc, MVT::v8f64,  1 }, // vpermpd
    { TTI::SK_PermuteSingleSrc, MVT::v4f64,  1 }, // vpermpd
    { TTI::SK_PermuteSingleSrc, MVT::v2f64,  1 }, // vpermpd
    { TTI::SK_PermuteSingleSrc, MVT::v16f32, 1 }, // vpermps
    { TTI::SK_PermuteSingleSrc, MVT::v8f32,  1 }, // vpermps
    { TTI::SK_PermuteSingleSrc, MVT::v4f32,  1 }, // vpermps
    { TTI::SK_PermuteSingleSrc, MVT::v8i64,  1 }, // vpermq
    { TTI::SK_PermuteSingleSrc, MVT::v4i64,  1 }, // vpermq
    { TTI::SK_PermuteSingleSrc, MVT::v2i64,  1 }, // vpermq
    { TTI::SK_PermuteSingleSrc, MVT::v16i32, 1 }, // vpermd
    { TTI::SK_PermuteSingleSrc, MVT::v8i32,  1 }, // vpermd
    { TTI::SK_PermuteSingleSrc, MVT::v4i32,  1 }, // vpermd
    { TTI::SK_PermuteSingleSrc, MVT::v16i8,  1 }, // pshufb

    { TTI::SK_PermuteTwoSrc,    MVT::v8f64,  1 }, // vpermt2pd
    { TTI::SK_PermuteTwoSrc,    MVT::v16f32, 1 }, // vpermt2ps
    { TTI::SK_PermuteTwoSrc,    MVT::v8i64,  1 }, // vpermt2q
    { TTI::SK_PermuteTwoSrc,    MVT::v16i32, 1 }, // vpermt2d
    { TTI::SK_PermuteTwoSrc,    MVT::v4f64,  1 }, // vpermt2pd
    { TTI::SK_PermuteTwoSrc,    MVT::v8f32,  1 }, // vpermt2ps
    { TTI::SK_PermuteTwoSrc,    MVT::v4i64,  1 }, // vpermt2q
    { TTI::SK_PermuteTwoSrc,    MVT::v8i32,  1 }, // vpermt2d
    { TTI::SK_PermuteTwoSrc,    MVT::v2f64,  1 }, // vpermt2pd
    { TTI::SK_PermuteTwoSrc,    MVT::v4f32,  1 }, // vpermt2ps
    { TTI::SK_PermuteTwoSrc,    MVT::v2i64,  1 }, // vpermt2q
    { TTI::SK_PermuteTwoSrc,    MVT::v4i32,  1 }  // vpermt2d
  };

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX2ShuffleTbl[] = {
    { TTI::SK_Broadcast, MVT::v4f64,  1 }, // vbroadcastpd
    { TTI::SK_Broadcast, MVT::v8f32,  1 }, // vbroadcastps
    { TTI::SK_Broadcast, MVT::v4i64,  1 }, // vpbroadcastq
    { TTI::SK_Broadcast, MVT::v8i32,  1 }, // vpbroadcastd
    { TTI::SK_Broadcast, MVT::v16i16, 1 }, // vpbroadcastw
    { TTI::SK_Broadcast, MVT::v32i8,  1 }, // vpbroadcastb

    { TTI::SK_Reverse, MVT::v4f64,  1 }, // vpermpd
    { TTI::SK_Reverse, MVT::v8f32,  1 }, // vpermps
    { TTI::SK_Reverse, MVT::v4i64,  1 }, // vpermq
    { TTI::SK_Reverse, MVT::v8i32,  1 }, // vpermd
    { TTI::SK_Reverse, MVT::v16i16, 2 }, // vperm2i128 + pshufb
    { TTI::SK_Reverse, MVT::v32i8,  2 }, // vperm2i128 + pshufb

    { TTI::SK_Select, MVT::v16i16, 1 }, // vpblendvb
    { TTI::SK_Select, MVT::v32i8,  1 }, // vpblendvb

    { TTI::SK_PermuteSingleSrc, MVT::v4f64,  1 }, // vpermpd
    { TTI::SK_PermuteSingleSrc, MVT::v8f32,  1 }, // vpermps
    { TTI::SK_PermuteSingleSrc, MVT::v4i64,  1 }, // vpermq
    { TTI::SK_PermuteSingleSrc, MVT::v8i32,  1 }, // vpermd
    { TTI::SK_PermuteSingleSrc, MVT::v16i16, 4 }, // vperm2i128 + 2*vpshufb
                                                  // + vpblendvb
    { TTI::SK_PermuteSingleSrc, MVT::v32i8,  4 }, // vperm2i128 + 2*vpshufb
                                                  // + vpblendvb

    { TTI::SK_PermuteTwoSrc, MVT::v4f64,  3 }, // 2*vpermpd + vblendpd
    { TTI::SK_PermuteTwoSrc, MVT::v8f32,  3 }, // 2*vpermps + vblendps
    { TTI::SK_PermuteTwoSrc, MVT::v4i64,  3 }, // 2*vpermq + vpblendd
    { TTI::SK_PermuteTwoSrc, MVT::v8i32,  3 }, // 2*vpermd + vpblendd
    { TTI::SK_PermuteTwoSrc, MVT::v16i16, 7 }, // 2*vperm2i128 + 4*vpshufb
                                               // + vpblendvb
    { TTI::SK_PermuteTwoSrc, MVT::v32i8,  7 }, // 2*vperm2i128 + 4*vpshufb
                                               // + vpblendvb
  };

  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry XOPShuffleTbl[] = {
    { TTI::SK_PermuteSingleSrc, MVT::v4f64,  2 }, // vperm2f128 + vpermil2pd
    { TTI::SK_PermuteSingleSrc, MVT::v8f32,  2 }, // vperm2f128 + vpermil2ps
    { TTI::SK_PermuteSingleSrc, MVT::v4i64,  2 }, // vperm2f128 + vpermil2pd
    { TTI::SK_PermuteSingleSrc, MVT::v8i32,  2 }, // vperm2f128 + vpermil2ps
    { TTI::SK_PermuteSingleSrc, MVT::v16i16, 4 }, // vextractf128 + 2*vpperm
                                                  // + vinsertf128
    { TTI::SK_PermuteSingleSrc, MVT::v32i8,  4 }, // vextractf128 + 2*vpperm
                                                  // + vinsertf128

    { TTI::SK_PermuteTwoSrc,    MVT::v16i16, 9 }, // 2*vextractf128 + 6*vpperm
                                                  // + vinsertf128
    { TTI::SK_PermuteTwoSrc,    MVT::v8i16,  1 }, // vpperm
    { TTI::SK_PermuteTwoSrc,    MVT::v32i8,  9 }, // 2*vextractf128 + 6*vpperm
                                                  // + vinsertf128
    { TTI::SK_PermuteTwoSrc,    MVT::v16i8,  1 }, // vpperm
  };

  if (ST->hasXOP())
    if (const auto *Entry = CostTableLookup(XOPShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX1ShuffleTbl[] = {
    { TTI::SK_Broadcast, MVT::v4f64,  2 }, // vperm2f128 + vpermilpd
    { TTI::SK_Broadcast, MVT::v8f32,  2 }, // vperm2f128 + vpermilps
    { TTI::SK_Broadcast, MVT::v4i64,  2 }, // vperm2f128 + vpermilpd
    { TTI::SK_Broadcast, MVT::v8i32,  2 }, // vperm2f128 + vpermilps
    { TTI::SK_Broadcast, MVT::v16i16, 3 }, // vpshuflw + vpshufd + vinsertf128
    { TTI::SK_Broadcast, MVT::v32i8,  2 }, // vpshufb + vinsertf128

    { TTI::SK_Reverse, MVT::v4f64,  2 }, // vperm2f128 + vpermilpd
    { TTI::SK_Reverse, MVT::v8f32,  2 }, // vperm2f128 + vpermilps
    { TTI::SK_Reverse, MVT::v4i64,  2 }, // vperm2f128 + vpermilpd
    { TTI::SK_Reverse, MVT::v8i32,  2 }, // vperm2f128 + vpermilps
    { TTI::SK_Reverse, MVT::v16i16, 4 }, // vextractf128 + 2*pshufb
                                         // + vinsertf128
    { TTI::SK_Reverse, MVT::v32i8,  4 }, // vextractf128 + 2*pshufb
                                         // + vinsertf128

    { TTI::SK_Select, MVT::v4i64,  1 }, // vblendpd
    { TTI::SK_Select, MVT::v4f64,  1 }, // vblendpd
    { TTI::SK_Select, MVT::v8i32,  1 }, // vblendps
    { TTI::SK_Select, MVT::v8f32,  1 }, // vblendps
    { TTI::SK_Select, MVT::v16i16, 3 }, // vpand + vpandn + vpor
    { TTI::SK_Select, MVT::v32i8,  3 }, // vpand + vpandn + vpor

    { TTI::SK_PermuteSingleSrc, MVT::v4f64,  2 }, // vperm2f128 + vshufpd
    { TTI::SK_PermuteSingleSrc, MVT::v4i64,  2 }, // vperm2f128 + vshufpd
    { TTI::SK_PermuteSingleSrc, MVT::v8f32,  4 }, // 2*vperm2f128 + 2*vshufps
    { TTI::SK_PermuteSingleSrc, MVT::v8i32,  4 }, // 2*vperm2f128 + 2*vshufps
    { TTI::SK_PermuteSingleSrc, MVT::v16i16, 8 }, // vextractf128 + 4*pshufb
                                                  // + 2*por + vinsertf128
    { TTI::SK_PermuteSingleSrc, MVT::v32i8,  8 }, // vextractf128 + 4*pshufb
                                                  // + 2*por + vinsertf128

    { TTI::SK_PermuteTwoSrc, MVT::v4f64,   3 }, // 2*vperm2f128 + vshufpd
    { TTI::SK_PermuteTwoSrc, MVT::v4i64,   3 }, // 2*vperm2f128 + vshufpd
    { TTI::SK_PermuteTwoSrc, MVT::v8f32,   4 }, // 2*vperm2f128 + 2*vshufps
    { TTI::SK_PermuteTwoSrc, MVT::v8i32,   4 }, // 2*vperm2f128 + 2*vshufps
    { TTI::SK_PermuteTwoSrc, MVT::v16i16, 15 }, // 2*vextractf128 + 8*pshufb
                                                // + 4*por + vinsertf128
    { TTI::SK_PermuteTwoSrc, MVT::v32i8,  15 }, // 2*vextractf128 + 8*pshufb
                                                // + 4*por + vinsertf128
  };

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE41ShuffleTbl[] = {
    { TTI::SK_Select, MVT::v2i64,  1 }, // pblendw
    { TTI::SK_Select, MVT::v2f64,  1 }, // movsd
    { TTI::SK_Select, MVT::v4i32,  1 }, // pblendw
    { TTI::SK_Select, MVT::v4f32,  1 }, // blendps
    { TTI::SK_Select, MVT::v8i16,  1 }, // pblendw
    { TTI::SK_Select, MVT::v16i8,  1 }  // pblendvb
  };

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSSE3ShuffleTbl[] = {
    { TTI::SK_Broadcast, MVT::v8i16, 1 }, // pshufb
    { TTI::SK_Broadcast, MVT::v16i8, 1 }, // pshufb

    { TTI::SK_Reverse, MVT::v8i16, 1 }, // pshufb
    { TTI::SK_Reverse, MVT::v16i8, 1 }, // pshufb

    { TTI::SK_Select, MVT::v8i16, 3 }, // 2*pshufb + por
    { TTI::SK_Select, MVT::v16i8, 3 }, // 2*pshufb + por

    { TTI::SK_PermuteSingleSrc, MVT::v8i16, 1 }, // pshufb
    { TTI::SK_PermuteSingleSrc, MVT::v16i8, 1 }, // pshufb

    { TTI::SK_PermuteTwoSrc, MVT::v8i16, 3 }, // 2*pshufb + por
    { TTI::SK_PermuteTwoSrc, MVT::v16i8, 3 }, // 2*pshufb + por
  };

  if (ST->hasSSSE3())
    if (const auto *Entry = CostTableLookup(SSSE3ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE2ShuffleTbl[] = {
    { TTI::SK_Broadcast, MVT::v2f64, 1 }, // shufpd
    { TTI::SK_Broadcast, MVT::v2i64, 1 }, // pshufd
    { TTI::SK_Broadcast, MVT::v4i32, 1 }, // pshufd
    { TTI::SK_Broadcast, MVT::v8i16, 2 }, // pshuflw + pshufd
    { TTI::SK_Broadcast, MVT::v16i8, 3 }, // unpck + pshuflw + pshufd

    { TTI::SK_Reverse, MVT::v2f64, 1 }, // shufpd
    { TTI::SK_Reverse, MVT::v2i64, 1 }, // pshufd
    { TTI::SK_Reverse, MVT::v4i32, 1 }, // pshufd
    { TTI::SK_Reverse, MVT::v8i16, 3 }, // pshuflw + pshufhw + pshufd
    { TTI::SK_Reverse, MVT::v16i8, 9 }, // 2*pshuflw + 2*pshufhw
                                        // + 2*pshufd + 2*unpck + packus

    { TTI::SK_Select, MVT::v2i64, 1 }, // movsd
    { TTI::SK_Select, MVT::v2f64, 1 }, // movsd
    { TTI::SK_Select, MVT::v4i32, 2 }, // 2*shufps
    { TTI::SK_Select, MVT::v8i16, 3 }, // pand + pandn + por
    { TTI::SK_Select, MVT::v16i8, 3 }, // pand + pandn + por

    { TTI::SK_PermuteSingleSrc, MVT::v2f64,  1 }, // shufpd
    { TTI::SK_PermuteSingleSrc, MVT::v2i64,  1 }, // pshufd
    { TTI::SK_PermuteSingleSrc, MVT::v4i32,  1 }, // pshufd
    { TTI::SK_PermuteSingleSrc, MVT::v8i16,  5 }, // 2*pshuflw + 2*pshufhw
                                                  // + pshufd/unpck
    { TTI::SK_PermuteSingleSrc, MVT::v16i8, 10 }, // 2*pshuflw + 2*pshufhw
                                                  // + 2*pshufd + 2*unpck
                                                  // + 2*packus

    { TTI::SK_PermuteTwoSrc, MVT::v2f64,  1 }, // shufpd
    { TTI::SK_PermuteTwoSrc, MVT::v2i64,  1 }, // shufpd
    { TTI::SK_PermuteTwoSrc, MVT::v4i32,  2 }, // 2*{unpck,movsd,pshufd}
    { TTI::SK_PermuteTwoSrc, MVT::v8i16,  8 }, // blend+permute
    { TTI::SK_PermuteTwoSrc, MVT::v16i8, 13 }, // blend+permute
  };

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE1ShuffleTbl[] = {
    { TTI::SK_Broadcast,        MVT::v4f32, 1 }, // shufps
    { TTI::SK_Reverse,          MVT::v4f32, 1 }, // shufps
    { TTI::SK_Select,           MVT::v4f32, 2 }, // 2*shufps
    { TTI::SK_PermuteSingleSrc, MVT::v4f32, 1 }, // shufps
    { TTI::SK_PermuteTwoSrc,    MVT::v4f32, 2 }, // 2*shufps
  };

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}

int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                 const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // FIXME: Need a better design of the cost table to handle non-simple types
  // with potentially massive numbers of combinations
  // (elem_num x src_type x dst_type).

  static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = {
    { ISD::SINT_TO_FP,  MVT::v2f32,  MVT::v2i64,  1 },
    { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i64,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  1 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i64,  1 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i64,  1 },

    { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i64,  1 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  1 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i64,  1 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  1 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i64,  1 },
    { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i64,  1 },

    { ISD::FP_TO_SINT,  MVT::v2i64,  MVT::v2f32,  1 },
    { ISD::FP_TO_SINT,  MVT::v4i64,  MVT::v4f32,  1 },
    { ISD::FP_TO_SINT,  MVT::v8i64,  MVT::v8f32,  1 },
    { ISD::FP_TO_SINT,  MVT::v2i64,  MVT::v2f64,  1 },
    { ISD::FP_TO_SINT,  MVT::v4i64,  MVT::v4f64,  1 },
    { ISD::FP_TO_SINT,  MVT::v8i64,  MVT::v8f64,  1 },

    { ISD::FP_TO_UINT,  MVT::v2i64,  MVT::v2f32,  1 },
    { ISD::FP_TO_UINT,  MVT::v4i64,  MVT::v4f32,  1 },
    { ISD::FP_TO_UINT,  MVT::v8i64,  MVT::v8f32,  1 },
    { ISD::FP_TO_UINT,  MVT::v2i64,  MVT::v2f64,  1 },
    { ISD::FP_TO_UINT,  MVT::v4i64,  MVT::v4f64,  1 },
    { ISD::FP_TO_UINT,  MVT::v8i64,  MVT::v8f64,  1 },
  };

  // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and
  // 256-bit wide vectors.

  static const TypeConversionCostTblEntry AVX512FConversionTbl[] = {
    { ISD::FP_EXTEND, MVT::v8f64,   MVT::v8f32,  1 },
    { ISD::FP_EXTEND, MVT::v8f64,   MVT::v16f32, 3 },
    { ISD::FP_ROUND,  MVT::v8f32,   MVT::v8f64,  1 },

    { ISD::TRUNCATE,  MVT::v16i8,   MVT::v16i32, 1 },
    { ISD::TRUNCATE,  MVT::v16i16,  MVT::v16i32, 1 },
    { ISD::TRUNCATE,  MVT::v8i16,   MVT::v8i64,  1 },
    { ISD::TRUNCATE,  MVT::v8i32,   MVT::v8i64,  1 },

    // v16i1 -> v16i32 - load + broadcast
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1,  2 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i32,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i32,  1 },

    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i1,   4 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i1,  3 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i8,   2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i8,  2 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i16,  2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  1 },

    { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i1,   4 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i1,  3 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i8,  2 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i16,  5 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i32,  2 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i32,  1 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  1 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  1 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  1 },
    { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  1 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i32, 1 },
    { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i64,  5 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i64, 26 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  5 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  5 },
    { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i64,  5 },

    { ISD::UINT_TO_FP,  MVT::f64,    MVT::i64,    1 },

    { ISD::FP_TO_UINT,  MVT::v2i32,  MVT::v2f32,  1 },
    { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f32,  1 },
    { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f64,  1 },
    { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f32,  1 },
    { ISD::FP_TO_UINT,  MVT::v8i16,  MVT::v8f64,  2 },
    { ISD::FP_TO_UINT,  MVT::v8i8,   MVT::v8f64,  2 },
    { ISD::FP_TO_UINT,  MVT::v16i32, MVT::v16f32, 1 },
    { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v16f32, 2 },
    { ISD::FP_TO_UINT,  MVT::v16i8,  MVT::v16f32, 2 },
  };

  static const TypeConversionCostTblEntry AVX2ConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  2 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  2 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  4 },

    { ISD::FP_EXTEND,   MVT::v8f64,  MVT::v8f32,  3 },
    { ISD::FP_ROUND,    MVT::v8f32,  MVT::v8f64,  3 },

    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  8 },
  };
MVT::v8f32, MVT::v8i16, 5 }, 1385 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 6 }, 1386 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 6 }, 1387 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 6 }, 1388 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 9 }, 1389 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 }, 1390 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 6 }, 1391 // The generic code to compute the scalar overhead is currently broken. 1392 // Workaround this limitation by estimating the scalarization overhead 1393 // here. We have roughly 10 instructions per scalar element. 1394 // Multiply that by the vector width. 1395 // FIXME: remove that when PR19268 is fixed. 1396 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 13 }, 1397 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 13 }, 1398 1399 { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 1 }, 1400 { ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f32, 7 }, 1401 // This node is expanded into scalarized operations but BasicTTI is overly 1402 // optimistic estimating its cost. It computes 3 per element (one 1403 // vector-extract, one scalar conversion and one vector-insert). The 1404 // problem is that the inserts form a read-modify-write chain so latency 1405 // should be factored in too. Inflating the cost per element by 1. 1406 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 8*4 }, 1407 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 4*4 }, 1408 1409 { ISD::FP_EXTEND, MVT::v4f64, MVT::v4f32, 1 }, 1410 { ISD::FP_ROUND, MVT::v4f32, MVT::v4f64, 1 }, 1411 }; 1412 1413 static const TypeConversionCostTblEntry SSE41ConversionTbl[] = { 1414 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 2 }, 1415 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 2 }, 1416 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 2 }, 1417 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 2 }, 1418 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 2 }, 1419 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 2 }, 1420 1421 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i8, 1 }, 1422 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i8, 2 }, 1423 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 1 }, 1424 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 1 }, 1425 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 }, 1426 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 }, 1427 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 2 }, 1428 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 2 }, 1429 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 }, 1430 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 }, 1431 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 4 }, 1432 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 4 }, 1433 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 }, 1434 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 }, 1435 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 2 }, 1436 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 2 }, 1437 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 4 }, 1438 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 4 }, 1439 1440 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 2 }, 1441 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 1 }, 1442 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 1 }, 1443 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 1 }, 1444 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 3 }, 1445 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 3 }, 1446 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 6 }, 1447 1448 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 4 }, 1449 }; 1450 1451 static const TypeConversionCostTblEntry SSE2ConversionTbl[] = { 1452 // These are somewhat magic numbers justified by looking at the output of 1453 // Intel's IACA, running some kernels and making sure when we take 1454 // legalization into account the throughput will be 
overestimated. 1455 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 }, 1456 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 }, 1457 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 }, 1458 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 }, 1459 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 }, 1460 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 }, 1461 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 }, 1462 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 }, 1463 1464 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 }, 1465 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 }, 1466 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 }, 1467 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 }, 1468 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 }, 1469 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 8 }, 1470 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 6 }, 1471 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 }, 1472 1473 { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 3 }, 1474 1475 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 6 }, 1476 1477 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i8, 1 }, 1478 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i8, 6 }, 1479 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 }, 1480 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 3 }, 1481 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 4 }, 1482 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 8 }, 1483 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 }, 1484 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 2 }, 1485 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 6 }, 1486 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 6 }, 1487 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 3 }, 1488 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 }, 1489 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 9 }, 1490 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 12 }, 1491 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 }, 1492 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 2 }, 1493 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, 1494 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 10 }, 1495 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 3 }, 1496 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 }, 1497 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 6 }, 1498 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 8 }, 1499 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 3 }, 1500 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 5 }, 1501 1502 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 4 }, 1503 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 2 }, 1504 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 }, 1505 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 3 }, 1506 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 3 }, 1507 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 }, 1508 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 7 }, 1509 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 }, 1510 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 10 }, 1511 }; 1512 1513 std::pair<int, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src); 1514 std::pair<int, MVT> LTDest = TLI->getTypeLegalizationCost(DL, Dst); 1515 1516 if (ST->hasSSE2() && !ST->hasAVX()) { 1517 if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD, 1518 LTDest.second, LTSrc.second)) 1519 return LTSrc.first * Entry->Cost; 1520 } 1521 1522 EVT SrcTy = TLI->getValueType(DL, Src); 1523 EVT DstTy = TLI->getValueType(DL, Dst); 1524 1525 // The function getSimpleVT only handles simple value types. 
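  // Illustrative example: on an AVX2 target, a sext of <8 x i16> to
  // <8 x i32> matches the AVX2ConversionTbl entry via the lookups below and
  // costs 1 (a single vpmovsxwd); with only AVX1 the AVXConversionTbl entry
  // charges 4, since the 256-bit extend is emulated with 128-bit halves.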
  if (ST->hasDQI())
    if (const auto *Entry = ConvertCostTableLookup(AVX512DQConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;

  if (ST->hasAVX512())
    if (const auto *Entry = ConvertCostTableLookup(AVX512FConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;

  if (ST->hasAVX2()) {
    if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  if (ST->hasAVX()) {
    if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  if (ST->hasSSE41()) {
    if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  if (ST->hasSSE2()) {
    if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  return BaseT::getCastInstrCost(Opcode, Dst, Src, I);
}

int X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                   const Instruction *I) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry SSE2CostTbl[] = {
    { ISD::SETCC, MVT::v2i64, 8 },
    { ISD::SETCC, MVT::v4i32, 1 },
    { ISD::SETCC, MVT::v8i16, 1 },
    { ISD::SETCC, MVT::v16i8, 1 },
  };

  static const CostTblEntry SSE42CostTbl[] = {
    { ISD::SETCC, MVT::v2f64, 1 },
    { ISD::SETCC, MVT::v4f32, 1 },
    { ISD::SETCC, MVT::v2i64, 1 },
  };

  static const CostTblEntry AVX1CostTbl[] = {
    { ISD::SETCC, MVT::v4f64, 1 },
    { ISD::SETCC, MVT::v8f32, 1 },
    // AVX1 does not support 8-wide integer compare.
    { ISD::SETCC, MVT::v4i64, 4 },
    { ISD::SETCC, MVT::v8i32, 4 },
    { ISD::SETCC, MVT::v16i16, 4 },
    { ISD::SETCC, MVT::v32i8, 4 },
  };

  static const CostTblEntry AVX2CostTbl[] = {
    { ISD::SETCC, MVT::v4i64, 1 },
    { ISD::SETCC, MVT::v8i32, 1 },
    { ISD::SETCC, MVT::v16i16, 1 },
    { ISD::SETCC, MVT::v32i8, 1 },
  };

  static const CostTblEntry AVX512CostTbl[] = {
    { ISD::SETCC, MVT::v8i64, 1 },
    { ISD::SETCC, MVT::v16i32, 1 },
    { ISD::SETCC, MVT::v8f64, 1 },
    { ISD::SETCC, MVT::v16f32, 1 },
  };

  static const CostTblEntry AVX512BWCostTbl[] = {
    { ISD::SETCC, MVT::v32i16, 1 },
    { ISD::SETCC, MVT::v64i8, 1 },
  };

  if (ST->hasBWI())
    if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;
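  // Worked example (illustrative): a SETCC on <16 x i32> with only AVX1
  // legalizes to two v8i32 halves (LT.first == 2) at a table cost of 4 each,
  // so the lookups above would return 8.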
  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
}

unsigned X86TTIImpl::getAtomicMemIntrinsicMaxElementSize() const { return 16; }

int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                      ArrayRef<Type *> Tys, FastMathFlags FMF,
                                      unsigned ScalarizationCostPassed) {
  // Costs should match the codegen from:
  // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll
  // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll
  // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll
  // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll
  // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll
  static const CostTblEntry AVX512CDCostTbl[] = {
    { ISD::CTLZ, MVT::v8i64, 1 },
    { ISD::CTLZ, MVT::v16i32, 1 },
    { ISD::CTLZ, MVT::v32i16, 8 },
    { ISD::CTLZ, MVT::v64i8, 20 },
    { ISD::CTLZ, MVT::v4i64, 1 },
    { ISD::CTLZ, MVT::v8i32, 1 },
    { ISD::CTLZ, MVT::v16i16, 4 },
    { ISD::CTLZ, MVT::v32i8, 10 },
    { ISD::CTLZ, MVT::v2i64, 1 },
    { ISD::CTLZ, MVT::v4i32, 1 },
    { ISD::CTLZ, MVT::v8i16, 4 },
    { ISD::CTLZ, MVT::v16i8, 4 },
  };
  static const CostTblEntry AVX512BWCostTbl[] = {
    { ISD::BITREVERSE, MVT::v8i64, 5 },
    { ISD::BITREVERSE, MVT::v16i32, 5 },
    { ISD::BITREVERSE, MVT::v32i16, 5 },
    { ISD::BITREVERSE, MVT::v64i8, 5 },
    { ISD::CTLZ, MVT::v8i64, 23 },
    { ISD::CTLZ, MVT::v16i32, 22 },
    { ISD::CTLZ, MVT::v32i16, 18 },
    { ISD::CTLZ, MVT::v64i8, 17 },
    { ISD::CTPOP, MVT::v8i64, 7 },
    { ISD::CTPOP, MVT::v16i32, 11 },
    { ISD::CTPOP, MVT::v32i16, 9 },
    { ISD::CTPOP, MVT::v64i8, 6 },
    { ISD::CTTZ, MVT::v8i64, 10 },
    { ISD::CTTZ, MVT::v16i32, 14 },
    { ISD::CTTZ, MVT::v32i16, 12 },
    { ISD::CTTZ, MVT::v64i8, 9 },
  };
  static const CostTblEntry AVX512CostTbl[] = {
    { ISD::BITREVERSE, MVT::v8i64, 36 },
    { ISD::BITREVERSE, MVT::v16i32, 24 },
    { ISD::CTLZ, MVT::v8i64, 29 },
    { ISD::CTLZ, MVT::v16i32, 35 },
    { ISD::CTPOP, MVT::v8i64, 16 },
    { ISD::CTPOP, MVT::v16i32, 24 },
    { ISD::CTTZ, MVT::v8i64, 20 },
    { ISD::CTTZ, MVT::v16i32, 28 },
  };
  static const CostTblEntry XOPCostTbl[] = {
    { ISD::BITREVERSE, MVT::v4i64, 4 },
    { ISD::BITREVERSE, MVT::v8i32, 4 },
    { ISD::BITREVERSE, MVT::v16i16, 4 },
    { ISD::BITREVERSE, MVT::v32i8, 4 },
    { ISD::BITREVERSE, MVT::v2i64, 1 },
    { ISD::BITREVERSE, MVT::v4i32, 1 },
    { ISD::BITREVERSE, MVT::v8i16, 1 },
    { ISD::BITREVERSE, MVT::v16i8, 1 },
    { ISD::BITREVERSE, MVT::i64, 3 },
    { ISD::BITREVERSE, MVT::i32, 3 },
    { ISD::BITREVERSE, MVT::i16, 3 },
    { ISD::BITREVERSE, MVT::i8, 3 }
  };
  static const CostTblEntry AVX2CostTbl[] = {
    { ISD::BITREVERSE, MVT::v4i64, 5 },
    { ISD::BITREVERSE, MVT::v8i32, 5 },
    { ISD::BITREVERSE, MVT::v16i16, 5 },
    { ISD::BITREVERSE, MVT::v32i8, 5 },
    { ISD::BSWAP, MVT::v4i64, 1 },
    { ISD::BSWAP, MVT::v8i32, 1 },
    { ISD::BSWAP, MVT::v16i16, 1 },
    { ISD::CTLZ, MVT::v4i64, 23 },
    { ISD::CTLZ, MVT::v8i32, 18 },
    { ISD::CTLZ, MVT::v16i16, 14 },
    { ISD::CTLZ, MVT::v32i8, 9 },
    { ISD::CTPOP, MVT::v4i64, 7 },
    { ISD::CTPOP, MVT::v8i32, 11 },
    { ISD::CTPOP, MVT::v16i16, 9 },
    { ISD::CTPOP, MVT::v32i8, 6 },
    { ISD::CTTZ, MVT::v4i64, 10 },
    { ISD::CTTZ, MVT::v8i32, 14 },
    { ISD::CTTZ, MVT::v16i16, 12 },
    { ISD::CTTZ, MVT::v32i8, 9 },
    { ISD::FSQRT, MVT::f32, 7 },    // Haswell from http://www.agner.org/
    { ISD::FSQRT, MVT::v4f32, 7 },  // Haswell from http://www.agner.org/
    { ISD::FSQRT, MVT::v8f32, 14 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT, MVT::f64, 14 },   // Haswell from http://www.agner.org/
    { ISD::FSQRT, MVT::v2f64, 14 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT, MVT::v4f64, 28 }, // Haswell from http://www.agner.org/
  };
  static const CostTblEntry AVX1CostTbl[] = {
    { ISD::BITREVERSE, MVT::v4i64, 12 },  // 2 x 128-bit Op + extract/insert
    { ISD::BITREVERSE, MVT::v8i32, 12 },  // 2 x 128-bit Op + extract/insert
    { ISD::BITREVERSE, MVT::v16i16, 12 }, // 2 x 128-bit Op + extract/insert
    { ISD::BITREVERSE, MVT::v32i8, 12 },  // 2 x 128-bit Op + extract/insert
    { ISD::BSWAP, MVT::v4i64, 4 },
    { ISD::BSWAP, MVT::v8i32, 4 },
    { ISD::BSWAP, MVT::v16i16, 4 },
    { ISD::CTLZ, MVT::v4i64, 48 },  // 2 x 128-bit Op + extract/insert
    { ISD::CTLZ, MVT::v8i32, 38 },  // 2 x 128-bit Op + extract/insert
    { ISD::CTLZ, MVT::v16i16, 30 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTLZ, MVT::v32i8, 20 },  // 2 x 128-bit Op + extract/insert
    { ISD::CTPOP, MVT::v4i64, 16 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTPOP, MVT::v8i32, 24 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTPOP, MVT::v16i16, 20 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTPOP, MVT::v32i8, 14 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTTZ, MVT::v4i64, 22 },  // 2 x 128-bit Op + extract/insert
    { ISD::CTTZ, MVT::v8i32, 30 },  // 2 x 128-bit Op + extract/insert
    { ISD::CTTZ, MVT::v16i16, 26 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTTZ, MVT::v32i8, 20 },  // 2 x 128-bit Op + extract/insert
    { ISD::FSQRT, MVT::f32, 14 },   // SNB from http://www.agner.org/
    { ISD::FSQRT, MVT::v4f32, 14 }, // SNB from http://www.agner.org/
    { ISD::FSQRT, MVT::v8f32, 28 }, // SNB from http://www.agner.org/
    { ISD::FSQRT, MVT::f64, 21 },   // SNB from http://www.agner.org/
    { ISD::FSQRT, MVT::v2f64, 21 }, // SNB from http://www.agner.org/
    { ISD::FSQRT, MVT::v4f64, 43 }, // SNB from http://www.agner.org/
  };
  static const CostTblEntry GLMCostTbl[] = {
    { ISD::FSQRT, MVT::f32, 19 },   // sqrtss
    { ISD::FSQRT, MVT::v4f32, 37 }, // sqrtps
    { ISD::FSQRT, MVT::f64, 34 },   // sqrtsd
    { ISD::FSQRT, MVT::v2f64, 67 }, // sqrtpd
  };
  static const CostTblEntry SLMCostTbl[] = {
    { ISD::FSQRT, MVT::f32, 20 },   // sqrtss
    { ISD::FSQRT, MVT::v4f32, 40 }, // sqrtps
    { ISD::FSQRT, MVT::f64, 35 },   // sqrtsd
    { ISD::FSQRT, MVT::v2f64, 70 }, // sqrtpd
  };
  static const CostTblEntry SSE42CostTbl[] = {
    { ISD::FSQRT, MVT::f32, 18 },   // Nehalem from http://www.agner.org/
    { ISD::FSQRT, MVT::v4f32, 18 }, // Nehalem from http://www.agner.org/
  };
  static const CostTblEntry SSSE3CostTbl[] = {
    { ISD::BITREVERSE, MVT::v2i64, 5 },
    { ISD::BITREVERSE, MVT::v4i32, 5 },
    { ISD::BITREVERSE, MVT::v8i16, 5 },
    { ISD::BITREVERSE, MVT::v16i8, 5 },
    { ISD::BSWAP, MVT::v2i64, 1 },
    { ISD::BSWAP, MVT::v4i32, 1 },
    { ISD::BSWAP, MVT::v8i16, 1 },
    { ISD::CTLZ, MVT::v2i64, 23 },
    { ISD::CTLZ, MVT::v4i32, 18 },
    { ISD::CTLZ, MVT::v8i16, 14 },
    { ISD::CTLZ, MVT::v16i8, 9 },
    { ISD::CTPOP, MVT::v2i64, 7 },
    { ISD::CTPOP, MVT::v4i32, 11 },
    { ISD::CTPOP, MVT::v8i16, 9 },
    { ISD::CTPOP, MVT::v16i8, 6 },
    { ISD::CTTZ, MVT::v2i64, 10 },
    { ISD::CTTZ, MVT::v4i32, 14 },
    { ISD::CTTZ, MVT::v8i16, 12 },
    { ISD::CTTZ, MVT::v16i8, 9 }
  };
  static const CostTblEntry SSE2CostTbl[] = {
    { ISD::BITREVERSE, MVT::v2i64, 29 },
    { ISD::BITREVERSE, MVT::v4i32, 27 },
    { ISD::BITREVERSE, MVT::v8i16, 27 },
    { ISD::BITREVERSE, MVT::v16i8, 20 },
    { ISD::BSWAP, MVT::v2i64, 7 },
    { ISD::BSWAP, MVT::v4i32, 7 },
    { ISD::BSWAP, MVT::v8i16, 7 },
    { ISD::CTLZ, MVT::v2i64, 25 },
    { ISD::CTLZ, MVT::v4i32, 26 },
    { ISD::CTLZ, MVT::v8i16, 20 },
    { ISD::CTLZ, MVT::v16i8, 17 },
    { ISD::CTPOP, MVT::v2i64, 12 },
    { ISD::CTPOP, MVT::v4i32, 15 },
    { ISD::CTPOP, MVT::v8i16, 13 },
    { ISD::CTPOP, MVT::v16i8, 10 },
    { ISD::CTTZ, MVT::v2i64, 14 },
    { ISD::CTTZ, MVT::v4i32, 18 },
    { ISD::CTTZ, MVT::v8i16, 16 },
    { ISD::CTTZ, MVT::v16i8, 13 },
    { ISD::FSQRT, MVT::f64, 32 },   // Nehalem from http://www.agner.org/
    { ISD::FSQRT, MVT::v2f64, 32 }, // Nehalem from http://www.agner.org/
  };
  static const CostTblEntry SSE1CostTbl[] = {
    { ISD::FSQRT, MVT::f32, 28 },   // Pentium III from http://www.agner.org/
    { ISD::FSQRT, MVT::v4f32, 56 }, // Pentium III from http://www.agner.org/
  };
  static const CostTblEntry X64CostTbl[] = { // 64-bit targets
    { ISD::BITREVERSE, MVT::i64, 14 }
  };
  static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
    { ISD::BITREVERSE, MVT::i32, 14 },
    { ISD::BITREVERSE, MVT::i16, 14 },
    { ISD::BITREVERSE, MVT::i8, 11 }
  };

  unsigned ISD = ISD::DELETED_NODE;
  switch (IID) {
  default:
    break;
  case Intrinsic::bitreverse:
    ISD = ISD::BITREVERSE;
    break;
  case Intrinsic::bswap:
    ISD = ISD::BSWAP;
    break;
  case Intrinsic::ctlz:
    ISD = ISD::CTLZ;
    break;
  case Intrinsic::ctpop:
    ISD = ISD::CTPOP;
    break;
  case Intrinsic::cttz:
    ISD = ISD::CTTZ;
    break;
  case Intrinsic::sqrt:
    ISD = ISD::FSQRT;
    break;
  }
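  // Worked example (illustrative): ctpop on <8 x i32> with AVX2 legalizes to
  // a single v8i32 (LT.first == 1 below) and hits the AVX2 CTPOP entry,
  // giving a cost of 11; with only SSSE3 the same call splits into two v4i32
  // halves (LT.first == 2) at 11 each, for a total of 22.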
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);
  MVT MTy = LT.second;

  // Attempt to lookup cost.
  if (ST->isGLM())
    if (const auto *Entry = CostTableLookup(GLMCostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->isSLM())
    if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasCDI())
    if (const auto *Entry = CostTableLookup(AVX512CDCostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasBWI())
    if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasXOP())
    if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSSE3())
    if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->is64Bit())
    if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
    return LT.first * Entry->Cost;

  return BaseT::getIntrinsicInstrCost(IID, RetTy, Tys, FMF,
                                      ScalarizationCostPassed);
}

int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                      ArrayRef<Value *> Args, FastMathFlags FMF,
                                      unsigned VF) {
  return BaseT::getIntrinsicInstrCost(IID, RetTy, Args, FMF, VF);
}

int X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  Type *ScalarType = Val->getScalarType();

  if (Index != -1U) {
    // Legalize the type.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // Floating point scalars are already located in index #0.
    if (ScalarType->isFloatingPointTy() && Index == 0)
      return 0;
  }

  // Add to the base cost if we know that the extracted element of a vector is
  // destined to be moved to and used in the integer register file.
  int RegisterFileMoveCost = 0;
  if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy())
    RegisterFileMoveCost = 1;

  return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost;
}

int X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                unsigned AddressSpace, const Instruction *I) {
  // Handle non-power-of-two vectors such as <3 x float>.
  if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
    unsigned NumElem = VTy->getVectorNumElements();

    // Handle a few common cases:
    // <3 x float>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
      // Cost = 64 bit store + extract + 32 bit store.
      return 3;

    // <3 x double>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
      // Cost = 128 bit store + unpack + 64 bit store.
      return 3;

    // Assume that all other non-power-of-two numbers are scalarized.
    if (!isPowerOf2_32(NumElem)) {
      int Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(),
                                        Alignment, AddressSpace);
      int SplitCost = getScalarizationOverhead(Src, Opcode == Instruction::Load,
                                               Opcode == Instruction::Store);
      return NumElem * Cost + SplitCost;
    }
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  // Each load/store unit costs 1.
  int Cost = LT.first * 1;

  // This isn't exactly right. We're using slow unaligned 32-byte accesses as a
  // proxy for a double-pumped AVX memory interface such as on Sandybridge.
  if (LT.second.getStoreSize() == 32 && ST->isUnalignedMem32Slow())
    Cost *= 2;
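  // Illustrative: a load of <8 x float> on a Sandybridge-class AVX target
  // with slow unaligned 32-byte accesses is costed at 2 here, while the same
  // load on a CPU with fast 32-byte accesses stays at 1.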
  return Cost;
}

int X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
                                      unsigned Alignment,
                                      unsigned AddressSpace) {
  VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy);
  if (!SrcVTy)
    // For the scalar case, take the regular memory operation cost, without
    // the mask.
    return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace);

  unsigned NumElem = SrcVTy->getVectorNumElements();
  VectorType *MaskTy =
      VectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem);
  if ((Opcode == Instruction::Load && !isLegalMaskedLoad(SrcVTy)) ||
      (Opcode == Instruction::Store && !isLegalMaskedStore(SrcVTy)) ||
      !isPowerOf2_32(NumElem)) {
    // Scalarization.
    int MaskSplitCost = getScalarizationOverhead(MaskTy, false, true);
    int ScalarCompareCost = getCmpSelInstrCost(
        Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr);
    int BranchCost = getCFInstrCost(Instruction::Br);
    int MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);

    int ValueSplitCost = getScalarizationOverhead(
        SrcVTy, Opcode == Instruction::Load, Opcode == Instruction::Store);
    int MemopCost =
        NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                         Alignment, AddressSpace);
    return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
  auto VT = TLI->getValueType(DL, SrcVTy);
  int Cost = 0;
  if (VT.isSimple() && LT.second != VT.getSimpleVT() &&
      LT.second.getVectorNumElements() == NumElem)
    // Promotion requires an expand/truncate for the data and a shuffle for
    // the mask.
    Cost += getShuffleCost(TTI::SK_Select, SrcVTy, 0, nullptr) +
            getShuffleCost(TTI::SK_Select, MaskTy, 0, nullptr);

  else if (LT.second.getVectorNumElements() > NumElem) {
    VectorType *NewMaskTy = VectorType::get(MaskTy->getVectorElementType(),
                                            LT.second.getVectorNumElements());
    // Expanding requires filling the mask with zeroes.
    Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, 0, MaskTy);
  }
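  // Worked example (illustrative): a legal masked store of <4 x i32> on
  // plain AVX needs no promotion or expansion, so with LT.first == 1 the
  // total below is just the flat per-maskmov estimate of 4.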
  if (!ST->hasAVX512())
    return Cost + LT.first * 4; // Each maskmov costs 4.

  // AVX-512 masked load/store is cheaper.
  return Cost + LT.first;
}

int X86TTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
                                          const SCEV *Ptr) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where
  // the computation can more often be merged into the index mode. The
  // resulting extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  // Cost modeling of strided access computation is hidden by the indexing
  // modes of X86 regardless of the stride value. We don't believe that there
  // is a difference between a constant strided access in general and one
  // whose stride value is less than or equal to 64.
  // Even in the case of a (loop invariant) stride whose value is not known at
  // compile time, the address computation will not incur more than one extra
  // ADD instruction.
  if (Ty->isVectorTy() && SE) {
    if (!BaseT::isStridedAccess(Ptr))
      return NumVectorInstToHideOverhead;
    if (!BaseT::getConstantStrideStep(SE, Ptr))
      return 1;
  }

  return BaseT::getAddressComputationCost(Ty, SE, Ptr);
}

int X86TTIImpl::getArithmeticReductionCost(unsigned Opcode, Type *ValTy,
                                           bool IsPairwise) {

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use that as the cost.

  static const CostTblEntry SSE42CostTblPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD, MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD, MVT::v8i16, 5 },
  };

  static const CostTblEntry AVX1CostTblPairWise[] = {
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::FADD, MVT::v4f64, 5 },
    { ISD::FADD, MVT::v8f32, 7 },
    { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD, MVT::v4i64, 5 }, // The data reported by the IACA tool is "4.8".
    { ISD::ADD, MVT::v8i16, 5 },
    { ISD::ADD, MVT::v8i32, 5 },
  };

  static const CostTblEntry SSE42CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD, MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3".
    { ISD::ADD, MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3".
  };

  static const CostTblEntry AVX1CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v4f32, 3 },
    { ISD::FADD, MVT::v4f64, 3 },
    { ISD::FADD, MVT::v8f32, 4 },
    { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "2.8".
    { ISD::ADD, MVT::v4i64, 3 },
    { ISD::ADD, MVT::v8i16, 4 },
    { ISD::ADD, MVT::v8i32, 5 },
  };

  if (IsPairwise) {
    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTblPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE42())
      if (const auto *Entry = CostTableLookup(SSE42CostTblPairWise, ISD, MTy))
        return LT.first * Entry->Cost;
  } else {
    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE42())
      if (const auto *Entry = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy))
        return LT.first * Entry->Cost;
  }
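  // Worked example (illustrative): a non-pairwise fadd reduction of
  // <8 x float> on AVX hits the v8f32 entry above with cost 4; with only
  // SSE4.2 the type splits into two v4f32 halves (LT.first == 2) at 4 each.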
  return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwise);
}

int X86TTIImpl::getMinMaxReductionCost(Type *ValTy, Type *CondTy,
                                       bool IsPairwise, bool IsUnsigned) {
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD;
  if (ValTy->isIntOrIntVectorTy()) {
    ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
  } else {
    assert(ValTy->isFPOrFPVectorTy() &&
           "Expected floating point or integer vector type.");
    ISD = ISD::FMINNUM;
  }

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use that as the cost.

  static const CostTblEntry SSE42CostTblPairWise[] = {
    {ISD::FMINNUM, MVT::v2f64, 3},
    {ISD::FMINNUM, MVT::v4f32, 2},
    {ISD::SMIN, MVT::v2i64, 7}, // The data reported by the IACA is "6.8"
    {ISD::UMIN, MVT::v2i64, 8}, // The data reported by the IACA is "8.6"
    {ISD::SMIN, MVT::v4i32, 1}, // The data reported by the IACA is "1.5"
    {ISD::UMIN, MVT::v4i32, 2}, // The data reported by the IACA is "1.8"
    {ISD::SMIN, MVT::v8i16, 2},
    {ISD::UMIN, MVT::v8i16, 2},
  };

  static const CostTblEntry AVX1CostTblPairWise[] = {
    {ISD::FMINNUM, MVT::v4f32, 1},
    {ISD::FMINNUM, MVT::v4f64, 1},
    {ISD::FMINNUM, MVT::v8f32, 2},
    {ISD::SMIN, MVT::v2i64, 3},
    {ISD::UMIN, MVT::v2i64, 3},
    {ISD::SMIN, MVT::v4i32, 1},
    {ISD::UMIN, MVT::v4i32, 1},
    {ISD::SMIN, MVT::v8i16, 1},
    {ISD::UMIN, MVT::v8i16, 1},
    {ISD::SMIN, MVT::v8i32, 3},
    {ISD::UMIN, MVT::v8i32, 3},
  };

  static const CostTblEntry AVX2CostTblPairWise[] = {
    {ISD::SMIN, MVT::v4i64, 2},
    {ISD::UMIN, MVT::v4i64, 2},
    {ISD::SMIN, MVT::v8i32, 1},
    {ISD::UMIN, MVT::v8i32, 1},
    {ISD::SMIN, MVT::v16i16, 1},
    {ISD::UMIN, MVT::v16i16, 1},
    {ISD::SMIN, MVT::v32i8, 2},
    {ISD::UMIN, MVT::v32i8, 2},
  };

  static const CostTblEntry AVX512CostTblPairWise[] = {
    {ISD::FMINNUM, MVT::v8f64, 1},
    {ISD::FMINNUM, MVT::v16f32, 2},
    {ISD::SMIN, MVT::v8i64, 2},
    {ISD::UMIN, MVT::v8i64, 2},
    {ISD::SMIN, MVT::v16i32, 1},
    {ISD::UMIN, MVT::v16i32, 1},
  };

  static const CostTblEntry SSE42CostTblNoPairWise[] = {
    {ISD::FMINNUM, MVT::v2f64, 3},
    {ISD::FMINNUM, MVT::v4f32, 3},
    {ISD::SMIN, MVT::v2i64, 7}, // The data reported by the IACA is "6.8"
    {ISD::UMIN, MVT::v2i64, 9}, // The data reported by the IACA is "8.6"
    {ISD::SMIN, MVT::v4i32, 1}, // The data reported by the IACA is "1.5"
    {ISD::UMIN, MVT::v4i32, 2}, // The data reported by the IACA is "1.8"
    {ISD::SMIN, MVT::v8i16, 1}, // The data reported by the IACA is "1.5"
    {ISD::UMIN, MVT::v8i16, 2}, // The data reported by the IACA is "1.8"
  };

  static const CostTblEntry AVX1CostTblNoPairWise[] = {
    {ISD::FMINNUM, MVT::v4f32, 1},
    {ISD::FMINNUM, MVT::v4f64, 1},
    {ISD::FMINNUM, MVT::v8f32, 1},
    {ISD::SMIN, MVT::v2i64, 3},
    {ISD::UMIN, MVT::v2i64, 3},
    {ISD::SMIN, MVT::v4i32, 1},
    {ISD::UMIN, MVT::v4i32, 1},
    {ISD::SMIN, MVT::v8i16, 1},
    {ISD::UMIN, MVT::v8i16, 1},
    {ISD::SMIN, MVT::v8i32, 2},
    {ISD::UMIN, MVT::v8i32, 2},
  };

  static const CostTblEntry AVX2CostTblNoPairWise[] = {
    {ISD::SMIN, MVT::v4i64, 1},
    {ISD::UMIN, MVT::v4i64, 1},
    {ISD::SMIN, MVT::v8i32, 1},
    {ISD::UMIN, MVT::v8i32, 1},
    {ISD::SMIN, MVT::v16i16, 1},
    {ISD::UMIN, MVT::v16i16, 1},
    {ISD::SMIN, MVT::v32i8, 1},
    {ISD::UMIN, MVT::v32i8, 1},
  };

  static const CostTblEntry AVX512CostTblNoPairWise[] = {
    {ISD::FMINNUM, MVT::v8f64, 1},
    {ISD::FMINNUM, MVT::v16f32, 2},
    {ISD::SMIN, MVT::v8i64, 1},
    {ISD::UMIN, MVT::v8i64, 1},
    {ISD::SMIN, MVT::v16i32, 1},
    {ISD::UMIN, MVT::v16i32, 1},
  };

  if (IsPairwise) {
    if (ST->hasAVX512())
      if (const auto *Entry = CostTableLookup(AVX512CostTblPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasAVX2())
      if (const auto *Entry = CostTableLookup(AVX2CostTblPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTblPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE42())
      if (const auto *Entry = CostTableLookup(SSE42CostTblPairWise, ISD, MTy))
        return LT.first * Entry->Cost;
  } else {
    if (ST->hasAVX512())
      if (const auto *Entry =
              CostTableLookup(AVX512CostTblNoPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasAVX2())
      if (const auto *Entry = CostTableLookup(AVX2CostTblNoPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE42())
      if (const auto *Entry = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy))
        return LT.first * Entry->Cost;
  }

  return BaseT::getMinMaxReductionCost(ValTy, CondTy, IsPairwise, IsUnsigned);
}

/// Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
int X86TTIImpl::getIntImmCost(int64_t Val) {
  if (Val == 0)
    return TTI::TCC_Free;

  if (isInt<32>(Val))
    return TTI::TCC_Basic;

  return 2 * TTI::TCC_Basic;
}
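// Illustrative examples for the helper above: 0 is free, a value such as
// 0x7fffffff fits in 32 bits and costs TCC_Basic, and a full 64-bit value
// such as 0x100000000 needs a movabs-style materialization and costs
// 2 * TCC_Basic.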
int X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Never hoist constants larger than 128 bits, because this might lead to
  // incorrect code generation or assertions in codegen.
  // FIXME: Create a cost model for types larger than i128 once the codegen
  // issues have been fixed.
  if (BitSize > 128)
    return TTI::TCC_Free;

  if (Imm == 0)
    return TTI::TCC_Free;

  // Sign-extend all constants to a multiple of 64-bit.
  APInt ImmVal = Imm;
  if (BitSize % 64 != 0)
    ImmVal = Imm.sext(alignTo(BitSize, 64));

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
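  // Worked example (illustrative): an i128 constant of value 1 splits into
  // the chunks {1, 0}; the low chunk costs TCC_Basic and the high chunk is
  // free, so the loop accumulates a cost of 1.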
  // We need at least one instruction to materialize the constant.
  return std::max(1, Cost);
}

int X86TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return
  // TCC_Free here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::ICmp:
    // This is an imperfect hack to prevent constant hoisting of
    // compares that might be trying to check if a 64-bit value fits in
    // 32-bits. The backend can optimize these cases using a right shift by 32.
    // Ideally we would check the compare predicate here. There are also other
    // similar immediates the backend can use shifts for.
    if (Idx == 1 && Imm.getBitWidth() == 64) {
      uint64_t ImmVal = Imm.getZExtValue();
      if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff)
        return TTI::TCC_Free;
    }
    ImmIdx = 1;
    break;
  case Instruction::And:
    // We support 64-bit ANDs with immediates with 32-bits of leading zeroes
    // by using a 32-bit operation with implicit zero extension. Detect such
    // immediates here as the normal path expects bit 31 to be sign extended.
    if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
      return TTI::TCC_Free;
    ImmIdx = 1;
    break;
  case Instruction::Add:
  case Instruction::Sub:
    // For add/sub, we can use the opposite instruction for INT32_MIN.
    if (Idx == 1 && Imm.getBitWidth() == 64 && Imm.getZExtValue() == 0x80000000)
      return TTI::TCC_Free;
    ImmIdx = 1;
    break;
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
    // Division by constant is typically expanded later into a different
    // instruction sequence. This completely changes the constants.
    // Report them as "free" to stop ConstantHoist from marking them as opaque.
    return TTI::TCC_Free;
  case Instruction::Mul:
  case Instruction::Or:
  case Instruction::Xor:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    int NumConstants = divideCeil(BitSize, 64);
    int Cost = X86TTIImpl::getIntImmCost(Imm, Ty);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<int>(TTI::TCC_Free)
               : Cost;
  }

  return X86TTIImpl::getIntImmCost(Imm, Ty);
}

int X86TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return
  // TCC_Free here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return X86TTIImpl::getIntImmCost(Imm, Ty);
}

unsigned X86TTIImpl::getUserCost(const User *U,
                                 ArrayRef<const Value *> Operands) {
  if (isa<StoreInst>(U)) {
    Value *Ptr = U->getOperand(1);
    // A store instruction with index and scale costs 2 Uops.
    // Check the preceding GEP to identify non-constant indices.
    if (auto GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
      if (!all_of(GEP->indices(), [](Value *V) { return isa<Constant>(V); }))
        return TTI::TCC_Basic * 2;
    }
    return TTI::TCC_Basic;
  }
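  // Illustrative (store case above): a store through a GEP with a
  // loop-variant index, e.g. p[i] = v, needs index+scale addressing and is
  // charged 2 * TCC_Basic, while a store to p[3] stays at TCC_Basic.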
  return BaseT::getUserCost(U, Operands);
}

// Return an average cost of a Gather / Scatter instruction; may be improved
// later.
int X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy, Value *Ptr,
                                unsigned Alignment, unsigned AddressSpace) {

  assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost");
  unsigned VF = SrcVTy->getVectorNumElements();

  // Try to reduce the index size from 64 bit (the default for GEP) to 32.
  // This is essential for VF 16. If the index can't be reduced to 32, the
  // operation will use 16 x 64 indices which do not fit in a zmm and need
  // to be split. Also check that the base pointer is the same for all lanes,
  // and that there's at most one variable index.
  auto getIndexSizeInBits = [](Value *Ptr, const DataLayout &DL) {
    unsigned IndexSize = DL.getPointerSizeInBits();
    GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
    if (IndexSize < 64 || !GEP)
      return IndexSize;

    unsigned NumOfVarIndices = 0;
    Value *Ptrs = GEP->getPointerOperand();
    if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs))
      return IndexSize;
    for (unsigned i = 1; i < GEP->getNumOperands(); ++i) {
      if (isa<Constant>(GEP->getOperand(i)))
        continue;
      Type *IndxTy = GEP->getOperand(i)->getType();
      if (IndxTy->isVectorTy())
        IndxTy = IndxTy->getVectorElementType();
      if ((IndxTy->getPrimitiveSizeInBits() == 64 &&
           !isa<SExtInst>(GEP->getOperand(i))) ||
          ++NumOfVarIndices > 1)
        return IndexSize; // 64
    }
    return (unsigned)32;
  };

  // Try to reduce IndexSize to 32 bits for vectors of 16 elements.
  // By default the IndexSize is equal to the pointer size.
  unsigned IndexSize = (ST->hasAVX512() && VF >= 16)
                           ? getIndexSizeInBits(Ptr, DL)
                           : DL.getPointerSizeInBits();

  Type *IndexVTy = VectorType::get(
      IntegerType::get(SrcVTy->getContext(), IndexSize), VF);
  std::pair<int, MVT> IdxsLT = TLI->getTypeLegalizationCost(DL, IndexVTy);
  std::pair<int, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, SrcVTy);
  int SplitFactor = std::max(IdxsLT.first, SrcLT.first);
  if (SplitFactor > 1) {
    // Handle splitting of vector of pointers.
    Type *SplitSrcTy =
        VectorType::get(SrcVTy->getScalarType(), VF / SplitFactor);
    return SplitFactor * getGSVectorCost(Opcode, SplitSrcTy, Ptr, Alignment,
                                         AddressSpace);
  }
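  // Worked example (illustrative): a <16 x float> gather on AVX-512 whose
  // GEP has a single variable index that is a sign-extend of a narrower
  // value gets IndexSize == 32, so the v16i32 indices and the v16f32 data
  // each fit in one zmm (SplitFactor == 1) and no recursive split is needed.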
  // The gather / scatter cost is given by Intel architects. It is a rough
  // number since we are looking at one instruction at a time.
  const int GSOverhead = (Opcode == Instruction::Load)
                             ? ST->getGatherOverhead()
                             : ST->getScatterOverhead();
  return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                           Alignment, AddressSpace);
}

/// Return the cost of full scalarization of a gather / scatter operation.
///
/// Opcode - Load or Store instruction.
/// SrcVTy - The type of the data vector that should be gathered or scattered.
/// VariableMask - The mask is non-constant at compile time.
/// Alignment - Alignment for one element.
/// AddressSpace - pointer[s] address space.
///
int X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy,
                                bool VariableMask, unsigned Alignment,
                                unsigned AddressSpace) {
  unsigned VF = SrcVTy->getVectorNumElements();

  int MaskUnpackCost = 0;
  if (VariableMask) {
    VectorType *MaskTy =
        VectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF);
    MaskUnpackCost = getScalarizationOverhead(MaskTy, false, true);
    int ScalarCompareCost = getCmpSelInstrCost(
        Instruction::ICmp, Type::getInt1Ty(SrcVTy->getContext()), nullptr);
    int BranchCost = getCFInstrCost(Instruction::Br);
    MaskUnpackCost += VF * (BranchCost + ScalarCompareCost);
  }

  // The cost of the scalar loads/stores.
  int MemoryOpCost = VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                          Alignment, AddressSpace);

  int InsertExtractCost = 0;
  if (Opcode == Instruction::Load)
    for (unsigned i = 0; i < VF; ++i)
      // Add the cost of inserting each scalar load into the vector.
      InsertExtractCost +=
          getVectorInstrCost(Instruction::InsertElement, SrcVTy, i);
  else
    for (unsigned i = 0; i < VF; ++i)
      // Add the cost of extracting each element out of the data vector.
      InsertExtractCost +=
          getVectorInstrCost(Instruction::ExtractElement, SrcVTy, i);

  return MemoryOpCost + MaskUnpackCost + InsertExtractCost;
}

/// Calculate the cost of a Gather / Scatter operation.
int X86TTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *SrcVTy,
                                       Value *Ptr, bool VariableMask,
                                       unsigned Alignment) {
  assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter");
  unsigned VF = SrcVTy->getVectorNumElements();
  PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
  if (!PtrTy && Ptr->getType()->isVectorTy())
    PtrTy = dyn_cast<PointerType>(Ptr->getType()->getVectorElementType());
  assert(PtrTy && "Unexpected type for Ptr argument");
  unsigned AddressSpace = PtrTy->getAddressSpace();

  bool Scalarize = false;
  if ((Opcode == Instruction::Load && !isLegalMaskedGather(SrcVTy)) ||
      (Opcode == Instruction::Store && !isLegalMaskedScatter(SrcVTy)))
    Scalarize = true;
  // Gather / Scatter for vectors of 2 elements is not profitable on KNL /
  // SKX. A 4-element gather/scatter instruction does not exist on KNL. We
  // could extend it to 8 elements, but zeroing the upper bits of the mask
  // vector will add more instructions. Right now we give the scalar cost of
  // a 4-element vector on KNL. TODO: Check, maybe the gather/scatter
  // instruction is better in the VariableMask case.
  if (ST->hasAVX512() && (VF == 2 || (VF == 4 && !ST->hasVLX())))
    Scalarize = true;
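  // Illustrative: on KNL (AVX-512 without VLX) a <4 x float> gather is
  // costed via full scalarization below, while on SKX (which has VLX) the
  // same gather takes the vector path.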
  if (Scalarize)
    return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment,
                           AddressSpace);

  return getGSVectorCost(Opcode, SrcVTy, Ptr, Alignment, AddressSpace);
}

bool X86TTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                               TargetTransformInfo::LSRCost &C2) {
  // X86-specific here: the instruction count has first priority.
  return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost,
                  C1.NumIVMuls, C1.NumBaseAdds,
                  C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
         std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost,
                  C2.NumIVMuls, C2.NumBaseAdds,
                  C2.ScaleCost, C2.ImmCost, C2.SetupCost);
}

bool X86TTIImpl::canMacroFuseCmp() {
  return ST->hasMacroFusion();
}

bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy) {
  // The backend can't handle a single element vector.
  if (isa<VectorType>(DataTy) && DataTy->getVectorNumElements() == 1)
    return false;
  Type *ScalarTy = DataTy->getScalarType();
  int DataWidth = isa<PointerType>(ScalarTy)
                      ? DL.getPointerSizeInBits()
                      : ScalarTy->getPrimitiveSizeInBits();

  return ((DataWidth == 32 || DataWidth == 64) && ST->hasAVX()) ||
         ((DataWidth == 8 || DataWidth == 16) && ST->hasBWI());
}

bool X86TTIImpl::isLegalMaskedStore(Type *DataType) {
  return isLegalMaskedLoad(DataType);
}

bool X86TTIImpl::isLegalMaskedGather(Type *DataTy) {
  // This function is currently called in two cases: from the Loop Vectorizer
  // and from the Scalarizer.
  // When the Loop Vectorizer asks about the legality of the feature, the
  // vectorization factor is not calculated yet. The Loop Vectorizer sends a
  // scalar type and the decision is based on the width of the scalar element.
  // Later on, the cost model will estimate the usage of this intrinsic based
  // on the vector type.
  // The Scalarizer asks again about legality. It sends a vector type.
  // In this case we can reject non-power-of-2 vectors.
  // We also reject single element vectors as the type legalizer can't
  // scalarize them.
  if (isa<VectorType>(DataTy)) {
    unsigned NumElts = DataTy->getVectorNumElements();
    if (NumElts == 1 || !isPowerOf2_32(NumElts))
      return false;
  }
  Type *ScalarTy = DataTy->getScalarType();
  int DataWidth = isa<PointerType>(ScalarTy)
                      ? DL.getPointerSizeInBits()
                      : ScalarTy->getPrimitiveSizeInBits();

  // Some CPUs have better gather performance than others.
  // TODO: Remove the explicit ST->hasAVX512()? That would mean we would only
  // enable gather with a -march.
  return (DataWidth == 32 || DataWidth == 64) &&
         (ST->hasAVX512() || (ST->hasFastGather() && ST->hasAVX2()));
}

bool X86TTIImpl::isLegalMaskedScatter(Type *DataType) {
  // AVX2 doesn't support scatter.
  if (!ST->hasAVX512())
    return false;
  return isLegalMaskedGather(DataType);
}

bool X86TTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
  EVT VT = TLI->getValueType(DL, DataType);
  return TLI->isOperationLegal(IsSigned ? ISD::SDIVREM : ISD::UDIVREM, VT);
}

bool X86TTIImpl::isFCmpOrdCheaperThanFCmpZero(Type *Ty) {
  return false;
}

bool X86TTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();

  // Work this as a subsetting of subtarget features.
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();
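  // Illustrative: an AVX2 caller may inline an SSE4.2 callee, since the
  // callee's feature bits are a subset of the caller's, but not an AVX-512
  // callee.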
  // FIXME: This is likely too limiting as it will include subtarget features
  // that we might not care about for inlining, but it is conservatively
  // correct.
  return (CallerBits & CalleeBits) == CalleeBits;
}

const X86TTIImpl::TTI::MemCmpExpansionOptions *
X86TTIImpl::enableMemCmpExpansion(bool IsZeroCmp) const {
  // Only enable vector loads for equality comparison.
  // Right now the vector version is not as fast, see #33329.
  static const auto ThreeWayOptions = [this]() {
    TTI::MemCmpExpansionOptions Options;
    if (ST->is64Bit()) {
      Options.LoadSizes.push_back(8);
    }
    Options.LoadSizes.push_back(4);
    Options.LoadSizes.push_back(2);
    Options.LoadSizes.push_back(1);
    return Options;
  }();
  static const auto EqZeroOptions = [this]() {
    TTI::MemCmpExpansionOptions Options;
    // TODO: enable AVX512 when the DAG is ready.
    // if (ST->hasAVX512()) Options.LoadSizes.push_back(64);
    if (ST->hasAVX2()) Options.LoadSizes.push_back(32);
    if (ST->hasSSE2()) Options.LoadSizes.push_back(16);
    if (ST->is64Bit()) {
      Options.LoadSizes.push_back(8);
    }
    Options.LoadSizes.push_back(4);
    Options.LoadSizes.push_back(2);
    Options.LoadSizes.push_back(1);
    return Options;
  }();
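  // Illustrative: on a 64-bit SSE2 target the equality expansion may use
  // 16-, 8-, 4-, 2- and 1-byte loads, while the three-way expansion is
  // capped at 8-byte loads.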
  return IsZeroCmp ? &EqZeroOptions : &ThreeWayOptions;
}

bool X86TTIImpl::enableInterleavedAccessVectorization() {
  // TODO: We expect this to be beneficial regardless of arch,
  // but there are currently some unexplained performance artifacts on Atom.
  // As a temporary solution, disable on Atom.
  return !(ST->isAtom());
}

// Get estimation for interleaved load/store operations for AVX2.
// \p Factor is the interleaved-access factor (stride) - number of
// (interleaved) elements in the group.
// \p Indices contains the indices for a strided load: when the
// interleaved load has gaps they indicate which elements are used.
// If Indices is empty (or if the number of indices is equal to the size
// of the interleaved-access as given in \p Factor) the access has no gaps.
//
// As opposed to AVX-512, AVX2 does not have generic shuffles that allow
// computing the cost using a generic formula as a function of generic
// shuffles. We therefore use a lookup table instead, filled according to
// the instruction sequences that codegen currently generates.
int X86TTIImpl::getInterleavedMemoryOpCostAVX2(unsigned Opcode, Type *VecTy,
                                               unsigned Factor,
                                               ArrayRef<unsigned> Indices,
                                               unsigned Alignment,
                                               unsigned AddressSpace,
                                               bool UseMaskForCond,
                                               bool UseMaskForGaps) {

  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace,
                                             UseMaskForCond, UseMaskForGaps);

  // We currently support only fully-interleaved groups, with no gaps.
  // TODO: Support also strided loads (interleaved-groups with gaps).
  if (Indices.size() && Indices.size() != Factor)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace);

  // VecTy for interleave memop is <VF*Factor x Elt>.
  // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
  // VecTy = <12 x i32>.
  MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;

  // This function can be called with VecTy=<6xi128>, Factor=3, in which case
  // the VF=2, while v2i128 is an unsupported MVT vector type
  // (see MachineValueType.h::getVectorVT()).
  if (!LegalVT.isVector())
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace);

  unsigned VF = VecTy->getVectorNumElements() / Factor;
  Type *ScalarTy = VecTy->getVectorElementType();

  // Calculate the number of memory operations (NumOfMemOps) required for
  // load/store of the VecTy.
  unsigned VecTySize = DL.getTypeStoreSize(VecTy);
  unsigned LegalVTSize = LegalVT.getStoreSize();
  unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;

  // Get the cost of one memory operation.
  Type *SingleMemOpTy = VectorType::get(VecTy->getVectorElementType(),
                                        LegalVT.getVectorNumElements());
  unsigned MemOpCost =
      getMemoryOpCost(Opcode, SingleMemOpTy, Alignment, AddressSpace);

  VectorType *VT = VectorType::get(ScalarTy, VF);
  EVT ETy = TLI->getValueType(DL, VT);
  if (!ETy.isSimple())
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace);

  // TODO: Complete for other data-types and strides.
  // Each combination of Stride, ElementTy and VF results in a different
  // sequence; the cost tables are therefore accessed with:
  // Factor (stride) and VectorType=VFxElemType.
  // The cost accounts only for the shuffle sequence;
  // the cost of the loads/stores is accounted for separately.
  //
  static const CostTblEntry AVX2InterleavedLoadTbl[] = {
    { 2, MVT::v4i64, 6 },   // (load 8i64 and) deinterleave into 2 x 4i64
    { 2, MVT::v4f64, 6 },   // (load 8f64 and) deinterleave into 2 x 4f64

    { 3, MVT::v2i8, 10 },   // (load 6i8 and) deinterleave into 3 x 2i8
    { 3, MVT::v4i8, 4 },    // (load 12i8 and) deinterleave into 3 x 4i8
    { 3, MVT::v8i8, 9 },    // (load 24i8 and) deinterleave into 3 x 8i8
    { 3, MVT::v16i8, 11 },  // (load 48i8 and) deinterleave into 3 x 16i8
    { 3, MVT::v32i8, 13 },  // (load 96i8 and) deinterleave into 3 x 32i8
    { 3, MVT::v8f32, 17 },  // (load 24f32 and) deinterleave into 3 x 8f32

    { 4, MVT::v2i8, 12 },   // (load 8i8 and) deinterleave into 4 x 2i8
    { 4, MVT::v4i8, 4 },    // (load 16i8 and) deinterleave into 4 x 4i8
    { 4, MVT::v8i8, 20 },   // (load 32i8 and) deinterleave into 4 x 8i8
    { 4, MVT::v16i8, 39 },  // (load 64i8 and) deinterleave into 4 x 16i8
    { 4, MVT::v32i8, 80 },  // (load 128i8 and) deinterleave into 4 x 32i8

    { 8, MVT::v8f32, 40 }   // (load 64f32 and) deinterleave into 8 x 8f32
  };

  static const CostTblEntry AVX2InterleavedStoreTbl[] = {
    { 2, MVT::v4i64, 6 },   // interleave 2 x 4i64 into 8i64 (and store)
    { 2, MVT::v4f64, 6 },   // interleave 2 x 4f64 into 8f64 (and store)

    { 3, MVT::v2i8, 7 },    // interleave 3 x 2i8 into 6i8 (and store)
    { 3, MVT::v4i8, 8 },    // interleave 3 x 4i8 into 12i8 (and store)
    { 3, MVT::v8i8, 11 },   // interleave 3 x 8i8 into 24i8 (and store)
    { 3, MVT::v16i8, 11 },  // interleave 3 x 16i8 into 48i8 (and store)
    { 3, MVT::v32i8, 13 },  // interleave 3 x 32i8 into 96i8 (and store)

    { 4, MVT::v2i8, 12 },   // interleave 4 x 2i8 into 8i8 (and store)
    { 4, MVT::v4i8, 9 },    // interleave 4 x 4i8 into 16i8 (and store)
    { 4, MVT::v8i8, 10 },   // interleave 4 x 8i8 into 32i8 (and store)
    { 4, MVT::v16i8, 10 },  // interleave 4 x 16i8 into 64i8 (and store)
    { 4, MVT::v32i8, 12 }   // interleave 4 x 32i8 into 128i8 (and store)
  };
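  // Worked example (illustrative, assuming v24i8 widens to v32i8 during type
  // legalization): a stride-3 interleaved load of <24 x i8> (VF == 8) needs
  // a single v32i8 memory op (NumOfMemOps == 1), and the {3, v8i8, 9} entry
  // above contributes 9 shuffles, for a total of 1 * MemOpCost + 9.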
  if (Opcode == Instruction::Load) {
    if (const auto *Entry =
            CostTableLookup(AVX2InterleavedLoadTbl, Factor, ETy.getSimpleVT()))
      return NumOfMemOps * MemOpCost + Entry->Cost;
  } else {
    assert(Opcode == Instruction::Store &&
           "Expected Store Instruction at this point");
    if (const auto *Entry =
            CostTableLookup(AVX2InterleavedStoreTbl, Factor, ETy.getSimpleVT()))
      return NumOfMemOps * MemOpCost + Entry->Cost;
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace);
}

// Get an estimate of the cost of interleaved load/store operations and
// strided loads.
// \p Indices contains indices for a strided load.
// \p Factor - the factor of interleaving.
// AVX-512 provides 3-src shuffles that significantly reduce the cost.
int X86TTIImpl::getInterleavedMemoryOpCostAVX512(unsigned Opcode, Type *VecTy,
                                                 unsigned Factor,
                                                 ArrayRef<unsigned> Indices,
                                                 unsigned Alignment,
                                                 unsigned AddressSpace,
                                                 bool UseMaskForCond,
                                                 bool UseMaskForGaps) {

  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace,
                                             UseMaskForCond, UseMaskForGaps);

  // VecTy for an interleaved memop is <VF*Factor x Elt>.
  // So, for VF=4, Interleave Factor = 3, Element type = i32, we have
  // VecTy = <12 x i32>.

  // Calculate the number of memory operations (NumOfMemOps) required to
  // load/store VecTy.
  MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
  unsigned VecTySize = DL.getTypeStoreSize(VecTy);
  unsigned LegalVTSize = LegalVT.getStoreSize();
  unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;

  // Get the cost of one memory operation.
  Type *SingleMemOpTy = VectorType::get(VecTy->getVectorElementType(),
                                        LegalVT.getVectorNumElements());
  unsigned MemOpCost =
      getMemoryOpCost(Opcode, SingleMemOpTy, Alignment, AddressSpace);

  unsigned VF = VecTy->getVectorNumElements() / Factor;
  MVT VT = MVT::getVectorVT(MVT::getVT(VecTy->getScalarType()), VF);

  if (Opcode == Instruction::Load) {
    // The tables (AVX512InterleavedLoadTbl and AVX512InterleavedStoreTbl)
    // contain the cost of the optimized shuffle sequence that the
    // X86InterleavedAccess pass will generate.
    // The cost of the loads and stores is computed separately from the table.

    // X86InterleavedAccess supports only the following interleaved-access
    // groups.
    static const CostTblEntry AVX512InterleavedLoadTbl[] = {
        {3, MVT::v16i8, 12}, // (load 48i8 and) deinterleave into 3 x 16i8
        {3, MVT::v32i8, 14}, // (load 96i8 and) deinterleave into 3 x 32i8
        {3, MVT::v64i8, 22}, // (load 192i8 and) deinterleave into 3 x 64i8
    };

    if (const auto *Entry =
            CostTableLookup(AVX512InterleavedLoadTbl, Factor, VT))
      return NumOfMemOps * MemOpCost + Entry->Cost;
    // If an entry does not exist, fall back to the default implementation.

    // The kind of shuffle depends on the number of loaded values.
    // If we load the entire data in one register, we can use a 1-src shuffle.
    // Otherwise, we'll merge 2 sources in each operation.
    TTI::ShuffleKind ShuffleKind =
        (NumOfMemOps > 1) ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc;

    unsigned ShuffleCost =
        getShuffleCost(ShuffleKind, SingleMemOpTy, 0, nullptr);
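    // A worked example of the computation below (illustrative; ShuffleCost
    // and MemOpCost depend on the subtarget): a Factor = 2 load of
    // VecTy = <32 x i32> misses the table and legalizes to two v16i32 loads,
    // so NumOfMemOps = 2 and we use SK_PermuteTwoSrc. With no gaps,
    // NumOfResults = 2, NumOfShufflesPerResult = 1, no loads are folded, and
    // one extra move preserves a clobbered source, giving
    //   Cost = 2 * ShuffleCost + 2 * MemOpCost + 1.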
    unsigned NumOfLoadsInInterleaveGrp =
        Indices.size() ? Indices.size() : Factor;
    Type *ResultTy = VectorType::get(VecTy->getVectorElementType(),
                                     VecTy->getVectorNumElements() / Factor);
    unsigned NumOfResults =
        getTLI()->getTypeLegalizationCost(DL, ResultTy).first *
        NumOfLoadsInInterleaveGrp;

    // About half of the loads may be folded into the shuffles when we have
    // only one result. If we have more than one result, we do not fold loads
    // at all.
    unsigned NumOfUnfoldedLoads =
        NumOfResults > 1 ? NumOfMemOps : NumOfMemOps / 2;

    // Get the number of shuffle operations per result.
    unsigned NumOfShufflesPerResult =
        std::max((unsigned)1, (unsigned)(NumOfMemOps - 1));

    // The SK_PermuteTwoSrc shuffle clobbers one of its source operands.
    // When we have more than one destination, we need additional instructions
    // to keep the sources.
    unsigned NumOfMoves = 0;
    if (NumOfResults > 1 && ShuffleKind == TTI::SK_PermuteTwoSrc)
      NumOfMoves = NumOfResults * NumOfShufflesPerResult / 2;

    int Cost = NumOfResults * NumOfShufflesPerResult * ShuffleCost +
               NumOfUnfoldedLoads * MemOpCost + NumOfMoves;

    return Cost;
  }

  // Store.
  assert(Opcode == Instruction::Store &&
         "Expected Store Instruction at this point");
  // X86InterleavedAccess supports only the following interleaved-access
  // groups.
  static const CostTblEntry AVX512InterleavedStoreTbl[] = {
      {3, MVT::v16i8, 12}, // interleave 3 x 16i8 into 48i8 (and store)
      {3, MVT::v32i8, 14}, // interleave 3 x 32i8 into 96i8 (and store)
      {3, MVT::v64i8, 26}, // interleave 3 x 64i8 into 192i8 (and store)

      {4, MVT::v8i8,  10}, // interleave 4 x 8i8 into 32i8 (and store)
      {4, MVT::v16i8, 11}, // interleave 4 x 16i8 into 64i8 (and store)
      {4, MVT::v32i8, 14}, // interleave 4 x 32i8 into 128i8 (and store)
      {4, MVT::v64i8, 24}  // interleave 4 x 64i8 into 256i8 (and store)
  };

  if (const auto *Entry =
          CostTableLookup(AVX512InterleavedStoreTbl, Factor, VT))
    return NumOfMemOps * MemOpCost + Entry->Cost;
  // If an entry does not exist, fall back to the default implementation.

  // There are no strided stores at the moment, and a store cannot be folded
  // into a shuffle.
  unsigned NumOfSources = Factor; // The number of values to be merged.
  unsigned ShuffleCost =
      getShuffleCost(TTI::SK_PermuteTwoSrc, SingleMemOpTy, 0, nullptr);
  unsigned NumOfShufflesPerStore = NumOfSources - 1;
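  // A worked example of the computation below (illustrative; ShuffleCost and
  // MemOpCost depend on the subtarget): a Factor = 3 store of
  // VecTy = <48 x i32> misses the table and legalizes to three v16i32 stores,
  // so NumOfMemOps = 3 and NumOfShufflesPerStore = 2, giving
  //   Cost = 3 * (MemOpCost + 2 * ShuffleCost) + 3 /* moves */.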
  // The SK_PermuteTwoSrc shuffle clobbers one of its source operands.
  // We need additional instructions to keep the sources.
  unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2;
  int Cost = NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) +
             NumOfMoves;
  return Cost;
}

int X86TTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                           unsigned Factor,
                                           ArrayRef<unsigned> Indices,
                                           unsigned Alignment,
                                           unsigned AddressSpace,
                                           bool UseMaskForCond,
                                           bool UseMaskForGaps) {
  auto isSupportedOnAVX512 = [](Type *VecTy, bool HasBW) {
    Type *EltTy = VecTy->getVectorElementType();
    if (EltTy->isFloatTy() || EltTy->isDoubleTy() || EltTy->isIntegerTy(64) ||
        EltTy->isIntegerTy(32) || EltTy->isPointerTy())
      return true;
    if (EltTy->isIntegerTy(16) || EltTy->isIntegerTy(8))
      return HasBW;
    return false;
  };
  if (ST->hasAVX512() && isSupportedOnAVX512(VecTy, ST->hasBWI()))
    return getInterleavedMemoryOpCostAVX512(Opcode, VecTy, Factor, Indices,
                                            Alignment, AddressSpace,
                                            UseMaskForCond, UseMaskForGaps);
  if (ST->hasAVX2())
    return getInterleavedMemoryOpCostAVX2(Opcode, VecTy, Factor, Indices,
                                          Alignment, AddressSpace,
                                          UseMaskForCond, UseMaskForGaps);

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace,
                                           UseMaskForCond, UseMaskForGaps);
}
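// Usage sketch (illustrative, not part of the in-tree code): clients such as
// the loop vectorizer reach the hook above through the TargetTransformInfo
// facade, e.g.:
//
//   TargetTransformInfo &TTI =
//       getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
//   int C = TTI.getInterleavedMemoryOpCost(Instruction::Load, VecTy,
//                                          /*Factor=*/3, Indices,
//                                          /*Alignment=*/4,
//                                          /*AddressSpace=*/0);
//
// On an AVX2-only subtarget this dispatches to
// getInterleavedMemoryOpCostAVX2; with AVX-512 (and a supported element
// type) it takes the AVX-512 path.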