//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//
/// A note about the cost-model numbers used below: they correspond to some
/// "generic" X86 CPU rather than a concrete CPU model. Usually the numbers
/// correspond to the CPU where the feature first appeared. For example, if we
/// do Subtarget.hasSSE42() in the lookups below, the cost is based on Nehalem,
/// as that was the first CPU to support that feature level and thus most
/// likely has the worst-case cost.
/// Some examples of other technologies/CPUs:
///   SSE 3   - Pentium4 / Athlon64
///   SSE 4.1 - Penryn
///   SSE 4.2 - Nehalem
///   AVX     - Sandy Bridge
///   AVX2    - Haswell
///   AVX-512 - Xeon Phi / Skylake
/// And some examples of instruction target dependent costs (latency):
///                   divss     sqrtss     rsqrtss
///   AMD K7          11-16     19         3
///   Piledriver      9-24      13-15      5
///   Jaguar          14        16         2
///   Pentium II,III  18        30         2
///   Nehalem         7-14      7-18       3
///   Haswell         10-13     11         5
/// TODO: Develop and implement the target dependent cost model and
/// specialize cost numbers for different Cost Model Targets such as throughput,
/// code size, latency and uop count.
//===----------------------------------------------------------------------===//

#include "X86TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

#define DEBUG_TYPE "x86tti"

//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  //   instructions is inefficient. Once the problem is fixed, we should
  //   call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
}
llvm::Optional<unsigned> X86TTIImpl::getCacheSize(
    TargetTransformInfo::CacheLevel Level) const {
  switch (Level) {
  case TargetTransformInfo::CacheLevel::L1D:
    // - Penryn
    // - Nehalem
    // - Westmere
    // - Sandy Bridge
    // - Ivy Bridge
    // - Haswell
    // - Broadwell
    // - Skylake
    // - Kabylake
    return 32 * 1024; // 32 KByte
  case TargetTransformInfo::CacheLevel::L2D:
    // - Penryn
    // - Nehalem
    // - Westmere
    // - Sandy Bridge
    // - Ivy Bridge
    // - Haswell
    // - Broadwell
    // - Skylake
    // - Kabylake
    return 256 * 1024; // 256 KByte
  }

  llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
}

llvm::Optional<unsigned> X86TTIImpl::getCacheAssociativity(
    TargetTransformInfo::CacheLevel Level) const {
  // - Penryn
  // - Nehalem
  // - Westmere
  // - Sandy Bridge
  // - Ivy Bridge
  // - Haswell
  // - Broadwell
  // - Skylake
  // - Kabylake
  switch (Level) {
  case TargetTransformInfo::CacheLevel::L1D:
    LLVM_FALLTHROUGH;
  case TargetTransformInfo::CacheLevel::L2D:
    return 8;
  }

  llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
}

unsigned X86TTIImpl::getNumberOfRegisters(unsigned ClassID) const {
  bool Vector = (ClassID == 1);
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit()) {
    if (Vector && ST->hasAVX512())
      return 32;
    return 16;
  }
  return 8;
}

TypeSize
X86TTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
  unsigned PreferVectorWidth = ST->getPreferVectorWidth();
  switch (K) {
  case TargetTransformInfo::RGK_Scalar:
    return TypeSize::getFixed(ST->is64Bit() ? 64 : 32);
  case TargetTransformInfo::RGK_FixedWidthVector:
    if (ST->hasAVX512() && PreferVectorWidth >= 512)
      return TypeSize::getFixed(512);
    if (ST->hasAVX() && PreferVectorWidth >= 256)
      return TypeSize::getFixed(256);
    if (ST->hasSSE1() && PreferVectorWidth >= 128)
      return TypeSize::getFixed(128);
    return TypeSize::getFixed(0);
  case TargetTransformInfo::RGK_ScalableVector:
    return TypeSize::getScalable(0);
  }

  llvm_unreachable("Unsupported register kind");
}

unsigned X86TTIImpl::getLoadStoreVecRegBitWidth(unsigned) const {
  return getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
      .getFixedSize();
}

unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // If the loop will not be vectorized, don't interleave the loop.
  // Let the regular unroller handle it instead; this saves the overflow
  // check and memory check cost.
  if (VF == 1)
    return 1;

  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}

InstructionCost X86TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
    TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
    const Instruction *CxtI) {
  // TODO: Handle more cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                         Op2Info, Opd1PropInfo,
                                         Opd2PropInfo, Args, CxtI);

  // vXi8 multiplications are always promoted to vXi16.
  if (Opcode == Instruction::Mul && Ty->isVectorTy() &&
      Ty->getScalarSizeInBits() == 8) {
    Type *WideVecTy =
        VectorType::getExtendedElementVectorType(cast<VectorType>(Ty));
    return getCastInstrCost(Instruction::ZExt, WideVecTy, Ty,
                            TargetTransformInfo::CastContextHint::None,
                            CostKind) +
           getCastInstrCost(Instruction::Trunc, Ty, WideVecTy,
                            TargetTransformInfo::CastContextHint::None,
                            CostKind) +
           getArithmeticInstrCost(Opcode, WideVecTy, CostKind, Op1Info, Op2Info,
                                  Opd1PropInfo, Opd2PropInfo);
  }
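
  // As a sketch, the decomposition costed above corresponds to IR like:
  //   %w = zext <16 x i8> %a to <16 x i16>
  //   %m = mul <16 x i16> %w, %w2
  //   %r = trunc <16 x i16> %m to <16 x i8>
  // (Illustrative only; %a and %w2 are hypothetical values, and the actual
  // lowering is whatever the legalizer produces for the wide multiply.)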
  // Legalize the type.
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  if (ISD == ISD::MUL && Args.size() == 2 && LT.second.isVector() &&
      LT.second.getScalarType() == MVT::i32) {
    // Check if the operands can be represented as a smaller datatype.
    bool Op1Signed = false, Op2Signed = false;
    unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
    unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);
    unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);

    // If both are representable as i15 and at least one is constant,
    // zero-extended, or sign-extended from vXi16 (or less pre-SSE41) then we
    // can treat this as PMADDWD which has the same costs as a vXi16 multiply.
    if (OpMinSize <= 15 && !ST->isPMADDWDSlow()) {
      bool Op1Constant =
          isa<ConstantDataVector>(Args[0]) || isa<ConstantVector>(Args[0]);
      bool Op2Constant =
          isa<ConstantDataVector>(Args[1]) || isa<ConstantVector>(Args[1]);
      bool Op1Sext = isa<SExtInst>(Args[0]) &&
                     (Op1MinSize == 15 || (Op1MinSize < 15 && !ST->hasSSE41()));
      bool Op2Sext = isa<SExtInst>(Args[1]) &&
                     (Op2MinSize == 15 || (Op2MinSize < 15 && !ST->hasSSE41()));

      bool IsZeroExtended = !Op1Signed || !Op2Signed;
      bool IsConstant = Op1Constant || Op2Constant;
      bool IsSext = Op1Sext || Op2Sext;
      if (IsConstant || IsZeroExtended || IsSext)
        LT.second =
            MVT::getVectorVT(MVT::i16, 2 * LT.second.getVectorNumElements());
    }
  }
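
  // For example (a sketch, with hypothetical values): a v4i32 multiply whose
  // operands are both known to fit in i15, such as
  //   %m = mul <4 x i32> %zext_of_v4i16, <i32 3, i32 3, i32 3, i32 3>
  // is costed as if it were a v8i16 multiply, since it can be lowered to
  // PMADDWD.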
  // Vector multiply by pow2 will be simplified to shifts.
  if (ISD == ISD::MUL &&
      (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2)
    return getArithmeticInstrCost(Instruction::Shl, Ty, CostKind, Op1Info,
                                  Op2Info, TargetTransformInfo::OP_None,
                                  TargetTransformInfo::OP_None);

  // On X86, vector signed division by a constant power of two is normally
  // expanded to the sequence SRA + SRL + ADD + SRA.
  // The OperandValue properties may not be the same as that of the previous
  // operation; conservatively assume OP_None.
  if ((ISD == ISD::SDIV || ISD == ISD::SREM) &&
      (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    InstructionCost Cost =
        2 * getArithmeticInstrCost(Instruction::AShr, Ty, CostKind, Op1Info,
                                   Op2Info, TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::LShr, Ty, CostKind, Op1Info,
                                   Op2Info, TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Add, Ty, CostKind, Op1Info,
                                   Op2Info, TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);

    if (ISD == ISD::SREM) {
      // For SREM: (X % C) is the equivalent of (X - (X/C)*C).
      Cost += getArithmeticInstrCost(Instruction::Mul, Ty, CostKind, Op1Info,
                                     Op2Info);
      Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind, Op1Info,
                                     Op2Info);
    }

    return Cost;
  }
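
  // As a concrete sketch of that expansion, a signed v4i32 divide by 4 is
  // roughly:
  //   %sign = ashr <4 x i32> %x, <i32 31, ...>   ; broadcast the sign bit
  //   %bias = lshr <4 x i32> %sign, <i32 30, ...> ; 2^k - 1 rounding bias
  //   %sum  = add <4 x i32> %x, %bias
  //   %div  = ashr <4 x i32> %sum, <i32 2, ...>
  // which matches the 2*AShr + LShr + Add cost computed above; SREM then adds
  // the Mul and Sub needed for X - (X/C)*C. (Illustrative only; %x is a
  // hypothetical value.)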
  // Vector unsigned division/remainder will be simplified to shifts/masks.
  if ((ISD == ISD::UDIV || ISD == ISD::UREM) &&
      (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    if (ISD == ISD::UDIV)
      return getArithmeticInstrCost(Instruction::LShr, Ty, CostKind, Op1Info,
                                    Op2Info, TargetTransformInfo::OP_None,
                                    TargetTransformInfo::OP_None);
    // UREM
    return getArithmeticInstrCost(Instruction::And, Ty, CostKind, Op1Info,
                                  Op2Info, TargetTransformInfo::OP_None,
                                  TargetTransformInfo::OP_None);
  }

  static const CostTblEntry GLMCostTable[] = {
    { ISD::FDIV, MVT::f32,   18 }, // divss
    { ISD::FDIV, MVT::v4f32, 35 }, // divps
    { ISD::FDIV, MVT::f64,   33 }, // divsd
    { ISD::FDIV, MVT::v2f64, 65 }, // divpd
  };

  if (ST->useGLMDivSqrtCosts())
    if (const auto *Entry = CostTableLookup(GLMCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SLMCostTable[] = {
    { ISD::MUL,  MVT::v4i32, 11 }, // pmulld
    { ISD::MUL,  MVT::v8i16,  2 }, // pmullw
    { ISD::FMUL, MVT::f64,    2 }, // mulsd
    { ISD::FMUL, MVT::v2f64,  4 }, // mulpd
    { ISD::FMUL, MVT::v4f32,  2 }, // mulps
    { ISD::FDIV, MVT::f32,   17 }, // divss
    { ISD::FDIV, MVT::v4f32, 39 }, // divps
    { ISD::FDIV, MVT::f64,   32 }, // divsd
    { ISD::FDIV, MVT::v2f64, 69 }, // divpd
    { ISD::FADD, MVT::v2f64,  2 }, // addpd
    { ISD::FSUB, MVT::v2f64,  2 }, // subpd
    // v2i64/v4i64 mul is custom lowered as a series of long
    // multiplies(3), shifts(3) and adds(2)
    // slm muldq version throughput is 2 and addq throughput 4
    // thus: 3X2 (muldq throughput) + 3X1 (shift throughput) +
    // 2X4 (addq throughput) = 17
    { ISD::MUL,  MVT::v2i64, 17 },
    // slm addq\subq throughput is 4
    { ISD::ADD,  MVT::v2i64,  4 },
    { ISD::SUB,  MVT::v2i64,  4 },
  };

  if (ST->useSLMArithCosts()) {
    if (Args.size() == 2 && ISD == ISD::MUL && LT.second == MVT::v4i32) {
      // Check if the operands can be shrunk into a smaller datatype.
      // TODO: Merge this into generic vXi32 MUL patterns above.
      bool Op1Signed = false;
      unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
      bool Op2Signed = false;
      unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);

      bool SignedMode = Op1Signed || Op2Signed;
      unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);

      if (OpMinSize <= 7)
        return LT.first * 3; // pmullw/sext
      if (!SignedMode && OpMinSize <= 8)
        return LT.first * 3; // pmullw/zext
      if (OpMinSize <= 15)
        return LT.first * 5; // pmullw/pmulhw/pshuf
      if (!SignedMode && OpMinSize <= 16)
        return LT.first * 5; // pmullw/pmulhw/pshuf
    }

    if (const auto *Entry = CostTableLookup(SLMCostTable, ISD, LT.second)) {
      return LT.first * Entry->Cost;
    }
  }

  static const CostTblEntry AVX512BWUniformConstCostTable[] = {
    { ISD::SHL, MVT::v64i8, 2 }, // psllw + pand.
    { ISD::SRL, MVT::v64i8, 2 }, // psrlw + pand.
    { ISD::SRA, MVT::v64i8, 4 }, // psrlw, pand, pxor, psubb.
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasBWI()) {
    if (const auto *Entry = CostTableLookup(AVX512BWUniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512UniformConstCostTable[] = {
    { ISD::SRA,  MVT::v2i64,  1 },
    { ISD::SRA,  MVT::v4i64,  1 },
    { ISD::SRA,  MVT::v8i64,  1 },

    { ISD::SHL,  MVT::v64i8,  4 }, // psllw + pand.
    { ISD::SRL,  MVT::v64i8,  4 }, // psrlw + pand.
    { ISD::SRA,  MVT::v64i8,  8 }, // psrlw, pand, pxor, psubb.

    { ISD::SDIV, MVT::v16i32, 6 }, // pmuludq sequence
    { ISD::SREM, MVT::v16i32, 8 }, // pmuludq+mul+sub sequence
    { ISD::UDIV, MVT::v16i32, 5 }, // pmuludq sequence
    { ISD::UREM, MVT::v16i32, 7 }, // pmuludq+mul+sub sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX512()) {
    if (const auto *Entry = CostTableLookup(AVX512UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX2UniformConstCostTable[] = {
    { ISD::SHL,  MVT::v32i8, 2 }, // psllw + pand.
    { ISD::SRL,  MVT::v32i8, 2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v32i8, 4 }, // psrlw, pand, pxor, psubb.

    { ISD::SRA,  MVT::v4i64, 4 }, // 2 x psrad + shuffle.

    { ISD::SDIV, MVT::v8i32, 6 }, // pmuludq sequence
    { ISD::SREM, MVT::v8i32, 8 }, // pmuludq+mul+sub sequence
    { ISD::UDIV, MVT::v8i32, 5 }, // pmuludq sequence
    { ISD::UREM, MVT::v8i32, 7 }, // pmuludq+mul+sub sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX2()) {
    if (const auto *Entry = CostTableLookup(AVX2UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformConstCostTable[] = {
    { ISD::SHL,  MVT::v16i8,    2 }, // psllw + pand.
    { ISD::SRL,  MVT::v16i8,    2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v16i8,    4 }, // psrlw, pand, pxor, psubb.

    { ISD::SHL,  MVT::v32i8,  4+2 }, // 2*(psllw + pand) + split.
    { ISD::SRL,  MVT::v32i8,  4+2 }, // 2*(psrlw + pand) + split.
    { ISD::SRA,  MVT::v32i8,  8+2 }, // 2*(psrlw, pand, pxor, psubb) + split.

    { ISD::SDIV, MVT::v8i32, 12+2 }, // 2*pmuludq sequence + split.
    { ISD::SREM, MVT::v8i32, 16+2 }, // 2*pmuludq+mul+sub sequence + split.
    { ISD::SDIV, MVT::v4i32,    6 }, // pmuludq sequence
    { ISD::SREM, MVT::v4i32,    8 }, // pmuludq+mul+sub sequence
    { ISD::UDIV, MVT::v8i32, 10+2 }, // 2*pmuludq sequence + split.
    { ISD::UREM, MVT::v8i32, 14+2 }, // 2*pmuludq+mul+sub sequence + split.
    { ISD::UDIV, MVT::v4i32,    5 }, // pmuludq sequence
    { ISD::UREM, MVT::v4i32,    7 }, // pmuludq+mul+sub sequence
  };

  // XOP has faster vXi8 shifts.
  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2() && !ST->hasXOP()) {
    if (const auto *Entry =
            CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512BWConstCostTable[] = {
    { ISD::SDIV, MVT::v64i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::SREM, MVT::v64i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v64i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::UREM, MVT::v64i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v32i16,  6 }, // vpmulhw sequence
    { ISD::SREM, MVT::v32i16,  8 }, // vpmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i16,  6 }, // vpmulhuw sequence
    { ISD::UREM, MVT::v32i16,  8 }, // vpmulhuw+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasBWI()) {
    if (const auto *Entry =
            CostTableLookup(AVX512BWConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512ConstCostTable[] = {
    { ISD::SDIV, MVT::v16i32, 15 }, // vpmuldq sequence
    { ISD::SREM, MVT::v16i32, 17 }, // vpmuldq+mul+sub sequence
    { ISD::UDIV, MVT::v16i32, 15 }, // vpmuludq sequence
    { ISD::UREM, MVT::v16i32, 17 }, // vpmuludq+mul+sub sequence
    { ISD::SDIV, MVT::v64i8,  28 }, // 4*ext+4*pmulhw sequence
    { ISD::SREM, MVT::v64i8,  32 }, // 4*ext+4*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v64i8,  28 }, // 4*ext+4*pmulhw sequence
    { ISD::UREM, MVT::v64i8,  32 }, // 4*ext+4*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v32i16, 12 }, // 2*vpmulhw sequence
    { ISD::SREM, MVT::v32i16, 16 }, // 2*vpmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i16, 12 }, // 2*vpmulhuw sequence
    { ISD::UREM, MVT::v32i16, 16 }, // 2*vpmulhuw+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasAVX512()) {
    if (const auto *Entry =
            CostTableLookup(AVX512ConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX2ConstCostTable[] = {
    { ISD::SDIV, MVT::v32i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::SREM, MVT::v32i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::UREM, MVT::v32i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v16i16,  6 }, // vpmulhw sequence
    { ISD::SREM, MVT::v16i16,  8 }, // vpmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v16i16,  6 }, // vpmulhuw sequence
    { ISD::UREM, MVT::v16i16,  8 }, // vpmulhuw+mul+sub sequence
    { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
    { ISD::SREM, MVT::v8i32,  19 }, // vpmuldq+mul+sub sequence
    { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
    { ISD::UREM, MVT::v8i32,  19 }, // vpmuludq+mul+sub sequence
  };
  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasAVX2()) {
    if (const auto *Entry = CostTableLookup(AVX2ConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2ConstCostTable[] = {
    { ISD::SDIV, MVT::v32i8,  28+2 }, // 4*ext+4*pmulhw sequence + split.
    { ISD::SREM, MVT::v32i8,  32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
    { ISD::SDIV, MVT::v16i8,    14 }, // 2*ext+2*pmulhw sequence
    { ISD::SREM, MVT::v16i8,    16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i8,  28+2 }, // 4*ext+4*pmulhw sequence + split.
    { ISD::UREM, MVT::v32i8,  32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
    { ISD::UDIV, MVT::v16i8,    14 }, // 2*ext+2*pmulhw sequence
    { ISD::UREM, MVT::v16i8,    16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v16i16, 12+2 }, // 2*pmulhw sequence + split.
    { ISD::SREM, MVT::v16i16, 16+2 }, // 2*pmulhw+mul+sub sequence + split.
    { ISD::SDIV, MVT::v8i16,     6 }, // pmulhw sequence
    { ISD::SREM, MVT::v8i16,     8 }, // pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v16i16, 12+2 }, // 2*pmulhuw sequence + split.
    { ISD::UREM, MVT::v16i16, 16+2 }, // 2*pmulhuw+mul+sub sequence + split.
    { ISD::UDIV, MVT::v8i16,     6 }, // pmulhuw sequence
    { ISD::UREM, MVT::v8i16,     8 }, // pmulhuw+mul+sub sequence
    { ISD::SDIV, MVT::v8i32,  38+2 }, // 2*pmuludq sequence + split.
    { ISD::SREM, MVT::v8i32,  48+2 }, // 2*pmuludq+mul+sub sequence + split.
    { ISD::SDIV, MVT::v4i32,    19 }, // pmuludq sequence
    { ISD::SREM, MVT::v4i32,    24 }, // pmuludq+mul+sub sequence
    { ISD::UDIV, MVT::v8i32,  30+2 }, // 2*pmuludq sequence + split.
    { ISD::UREM, MVT::v8i32,  40+2 }, // 2*pmuludq+mul+sub sequence + split.
    { ISD::UDIV, MVT::v4i32,    15 }, // pmuludq sequence
    { ISD::UREM, MVT::v4i32,    20 }, // pmuludq+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasSSE2()) {
    // pmuldq sequence.
    if (ISD == ISD::SDIV && LT.second == MVT::v8i32 && ST->hasAVX())
      return LT.first * 32;
    if (ISD == ISD::SREM && LT.second == MVT::v8i32 && ST->hasAVX())
      return LT.first * 38;
    if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 15;
    if (ISD == ISD::SREM && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 20;

    if (const auto *Entry = CostTableLookup(SSE2ConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }
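
  // A note on the "pmulhw/pmuludq sequence" entries above: division by a
  // non-power-of-two constant is not lowered to a hardware divide at all, but
  // to a multiply by a magic constant (taking the high half of the product)
  // followed by a handful of shift/add/sub fixups. As a sketch, an unsigned
  // v8i16 divide by 7 becomes roughly a pmulhuw by the magic constant plus a
  // few such fixups. (Illustrative; the exact fixups depend on the divisor.)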
  static const CostTblEntry AVX512BWShiftCostTable[] = {
    { ISD::SHL, MVT::v16i8,   4 }, // extend/vpsllvw/pack sequence.
    { ISD::SRL, MVT::v16i8,   4 }, // extend/vpsrlvw/pack sequence.
    { ISD::SRA, MVT::v16i8,   4 }, // extend/vpsravw/pack sequence.
    { ISD::SHL, MVT::v32i8,   4 }, // extend/vpsllvw/pack sequence.
    { ISD::SRL, MVT::v32i8,   4 }, // extend/vpsrlvw/pack sequence.
    { ISD::SRA, MVT::v32i8,   6 }, // extend/vpsravw/pack sequence.
    { ISD::SHL, MVT::v64i8,   6 }, // extend/vpsllvw/pack sequence.
    { ISD::SRL, MVT::v64i8,   7 }, // extend/vpsrlvw/pack sequence.
    { ISD::SRA, MVT::v64i8,  15 }, // extend/vpsravw/pack sequence.

    { ISD::SHL, MVT::v8i16,   1 }, // vpsllvw
    { ISD::SRL, MVT::v8i16,   1 }, // vpsrlvw
    { ISD::SRA, MVT::v8i16,   1 }, // vpsravw
    { ISD::SHL, MVT::v16i16,  1 }, // vpsllvw
    { ISD::SRL, MVT::v16i16,  1 }, // vpsrlvw
    { ISD::SRA, MVT::v16i16,  1 }, // vpsravw
    { ISD::SHL, MVT::v32i16,  1 }, // vpsllvw
    { ISD::SRL, MVT::v32i16,  1 }, // vpsrlvw
    { ISD::SRA, MVT::v32i16,  1 }, // vpsravw
  };

  if (ST->hasBWI())
    if (const auto *Entry =
            CostTableLookup(AVX512BWShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX2UniformCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL, MVT::v16i16, 1 }, // psllw.
    { ISD::SRL, MVT::v16i16, 1 }, // psrlw.
    { ISD::SRA, MVT::v16i16, 1 }, // psraw.
    { ISD::SHL, MVT::v32i16, 2 }, // 2*psllw.
    { ISD::SRL, MVT::v32i16, 2 }, // 2*psrlw.
    { ISD::SRA, MVT::v32i16, 2 }, // 2*psraw.

    { ISD::SHL, MVT::v8i32,  1 }, // pslld
    { ISD::SRL, MVT::v8i32,  1 }, // psrld
    { ISD::SRA, MVT::v8i32,  1 }, // psrad
    { ISD::SHL, MVT::v4i64,  1 }, // psllq
    { ISD::SRL, MVT::v4i64,  1 }, // psrlq
  };

  if (ST->hasAVX2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(AVX2UniformCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL, MVT::v8i16, 1 }, // psllw.
    { ISD::SHL, MVT::v4i32, 1 }, // pslld
    { ISD::SHL, MVT::v2i64, 1 }, // psllq.

    { ISD::SRL, MVT::v8i16, 1 }, // psrlw.
    { ISD::SRL, MVT::v4i32, 1 }, // psrld.
    { ISD::SRL, MVT::v2i64, 1 }, // psrlq.

    { ISD::SRA, MVT::v8i16, 1 }, // psraw.
    { ISD::SRA, MVT::v4i32, 1 }, // psrad.
  };

  if (ST->hasSSE2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(SSE2UniformCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512DQCostTable[] = {
    { ISD::MUL, MVT::v2i64, 2 }, // pmullq
    { ISD::MUL, MVT::v4i64, 2 }, // pmullq
    { ISD::MUL, MVT::v8i64, 2 }  // pmullq
  };

  // Look for AVX512DQ lowering tricks for custom cases.
  if (ST->hasDQI())
    if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512BWCostTable[] = {
    { ISD::SHL, MVT::v64i8, 11 }, // vpblendvb sequence.
    { ISD::SRL, MVT::v64i8, 11 }, // vpblendvb sequence.
    { ISD::SRA, MVT::v64i8, 24 }, // vpblendvb sequence.
  };

  // Look for AVX512BW lowering tricks for custom cases.
  if (ST->hasBWI())
    if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512CostTable[] = {
    { ISD::SHL,  MVT::v4i32,   1 },
    { ISD::SRL,  MVT::v4i32,   1 },
    { ISD::SRA,  MVT::v4i32,   1 },
    { ISD::SHL,  MVT::v8i32,   1 },
    { ISD::SRL,  MVT::v8i32,   1 },
    { ISD::SRA,  MVT::v8i32,   1 },
    { ISD::SHL,  MVT::v16i32,  1 },
    { ISD::SRL,  MVT::v16i32,  1 },
    { ISD::SRA,  MVT::v16i32,  1 },

    { ISD::SHL,  MVT::v2i64,   1 },
    { ISD::SRL,  MVT::v2i64,   1 },
    { ISD::SHL,  MVT::v4i64,   1 },
    { ISD::SRL,  MVT::v4i64,   1 },
    { ISD::SHL,  MVT::v8i64,   1 },
    { ISD::SRL,  MVT::v8i64,   1 },

    { ISD::SRA,  MVT::v2i64,   1 },
    { ISD::SRA,  MVT::v4i64,   1 },
    { ISD::SRA,  MVT::v8i64,   1 },

    { ISD::MUL,  MVT::v16i32,  1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,  MVT::v8i32,   1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,  MVT::v4i32,   1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,  MVT::v8i64,   6 }, // 3*pmuludq/3*shift/2*add
    { ISD::MUL,  MVT::i64,     1 }, // Skylake from http://www.agner.org/

    { ISD::FNEG, MVT::v8f64,   1 }, // Skylake from http://www.agner.org/
    { ISD::FADD, MVT::v8f64,   1 }, // Skylake from http://www.agner.org/
    { ISD::FSUB, MVT::v8f64,   1 }, // Skylake from http://www.agner.org/
    { ISD::FMUL, MVT::v8f64,   1 }, // Skylake from http://www.agner.org/
    { ISD::FDIV, MVT::f64,     4 }, // Skylake from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,   4 }, // Skylake from http://www.agner.org/
    { ISD::FDIV, MVT::v4f64,   8 }, // Skylake from http://www.agner.org/
    { ISD::FDIV, MVT::v8f64,  16 }, // Skylake from http://www.agner.org/

    { ISD::FNEG, MVT::v16f32,  1 }, // Skylake from http://www.agner.org/
    { ISD::FADD, MVT::v16f32,  1 }, // Skylake from http://www.agner.org/
    { ISD::FSUB, MVT::v16f32,  1 }, // Skylake from http://www.agner.org/
    { ISD::FMUL, MVT::v16f32,  1 }, // Skylake from http://www.agner.org/
    { ISD::FDIV, MVT::f32,     3 }, // Skylake from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,   3 }, // Skylake from http://www.agner.org/
    { ISD::FDIV, MVT::v8f32,   5 }, // Skylake from http://www.agner.org/
    { ISD::FDIV, MVT::v16f32, 10 }, // Skylake from http://www.agner.org/
  };

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  static const CostTblEntry AVX2ShiftCostTable[] = {
    // Shifts on vXi64/vXi32 on AVX2 are legal even though we declare them
    // custom in order to detect cases where the shift amount is a scalar.
    { ISD::SHL, MVT::v4i32, 2 }, // vpsllvd (Haswell from agner.org)
    { ISD::SRL, MVT::v4i32, 2 }, // vpsrlvd (Haswell from agner.org)
    { ISD::SRA, MVT::v4i32, 2 }, // vpsravd (Haswell from agner.org)
    { ISD::SHL, MVT::v8i32, 2 }, // vpsllvd (Haswell from agner.org)
    { ISD::SRL, MVT::v8i32, 2 }, // vpsrlvd (Haswell from agner.org)
    { ISD::SRA, MVT::v8i32, 2 }, // vpsravd (Haswell from agner.org)
    { ISD::SHL, MVT::v2i64, 1 }, // vpsllvq (Haswell from agner.org)
    { ISD::SRL, MVT::v2i64, 1 }, // vpsrlvq (Haswell from agner.org)
    { ISD::SHL, MVT::v4i64, 1 }, // vpsllvq (Haswell from agner.org)
    { ISD::SRL, MVT::v4i64, 1 }, // vpsrlvq (Haswell from agner.org)
  };

  if (ST->hasAVX512()) {
    if (ISD == ISD::SHL && LT.second == MVT::v32i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX512, a packed v32i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
                                    Op1Info, Op2Info,
                                    TargetTransformInfo::OP_None,
                                    TargetTransformInfo::OP_None);
  }

  // Look for AVX2 lowering tricks (XOP is always better at v4i32 shifts).
  if (ST->hasAVX2() && !(ST->hasXOP() && LT.second == MVT::v4i32)) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
                                    Op1Info, Op2Info,
                                    TargetTransformInfo::OP_None,
                                    TargetTransformInfo::OP_None);

    if (const auto *Entry = CostTableLookup(AVX2ShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry XOPShiftCostTable[] = {
    // 128bit shifts take 1cy, but right shifts require negation beforehand.
    { ISD::SHL, MVT::v16i8,    1 },
    { ISD::SRL, MVT::v16i8,    2 },
    { ISD::SRA, MVT::v16i8,    2 },
    { ISD::SHL, MVT::v8i16,    1 },
    { ISD::SRL, MVT::v8i16,    2 },
    { ISD::SRA, MVT::v8i16,    2 },
    { ISD::SHL, MVT::v4i32,    1 },
    { ISD::SRL, MVT::v4i32,    2 },
    { ISD::SRA, MVT::v4i32,    2 },
    { ISD::SHL, MVT::v2i64,    1 },
    { ISD::SRL, MVT::v2i64,    2 },
    { ISD::SRA, MVT::v2i64,    2 },
    // 256bit shifts require splitting if AVX2 didn't catch them above.
    { ISD::SHL, MVT::v32i8,  2+2 },
    { ISD::SRL, MVT::v32i8,  4+2 },
    { ISD::SRA, MVT::v32i8,  4+2 },
    { ISD::SHL, MVT::v16i16, 2+2 },
    { ISD::SRL, MVT::v16i16, 4+2 },
    { ISD::SRA, MVT::v16i16, 4+2 },
    { ISD::SHL, MVT::v8i32,  2+2 },
    { ISD::SRL, MVT::v8i32,  4+2 },
    { ISD::SRA, MVT::v8i32,  4+2 },
    { ISD::SHL, MVT::v4i64,  2+2 },
    { ISD::SRL, MVT::v4i64,  4+2 },
    { ISD::SRA, MVT::v4i64,  4+2 },
  };

  // Look for XOP lowering tricks.
  if (ST->hasXOP()) {
    // If the right shift is constant then we'll fold the negation so
    // it's as cheap as a left shift.
    int ShiftISD = ISD;
    if ((ShiftISD == ISD::SRL || ShiftISD == ISD::SRA) &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      ShiftISD = ISD::SHL;
    if (const auto *Entry =
            CostTableLookup(XOPShiftCostTable, ShiftISD, LT.second))
      return LT.first * Entry->Cost;
  }
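
  // (Background sketch: XOP's variable shifts such as vpshlq only shift left,
  // by a per-element signed amount; a right shift is performed by shifting
  // left by a negated amount. When that amount is a constant, the negation
  // folds into the constant, which is why a constant right shift is costed as
  // a left shift above. This is a simplified description of the lowering.)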
  static const CostTblEntry SSE2UniformShiftCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL, MVT::v16i16, 2+2 }, // 2*psllw + split.
    { ISD::SHL, MVT::v8i32,  2+2 }, // 2*pslld + split.
    { ISD::SHL, MVT::v4i64,  2+2 }, // 2*psllq + split.

    { ISD::SRL, MVT::v16i16, 2+2 }, // 2*psrlw + split.
    { ISD::SRL, MVT::v8i32,  2+2 }, // 2*psrld + split.
    { ISD::SRL, MVT::v4i64,  2+2 }, // 2*psrlq + split.

    { ISD::SRA, MVT::v16i16, 2+2 }, // 2*psraw + split.
    { ISD::SRA, MVT::v8i32,  2+2 }, // 2*psrad + split.
    { ISD::SRA, MVT::v2i64,    4 }, // 2*psrad + shuffle.
    { ISD::SRA, MVT::v4i64,  8+2 }, // 2*(2*psrad + shuffle) + split.
  };

  if (ST->hasSSE2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {

    // Handle AVX2 uniform v4i64 ISD::SRA, it's not worth a table.
    if (ISD == ISD::SRA && LT.second == MVT::v4i64 && ST->hasAVX2())
      return LT.first * 4; // 2*psrad + shuffle.

    if (const auto *Entry =
            CostTableLookup(SSE2UniformShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  if (ISD == ISD::SHL &&
      Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
    MVT VT = LT.second;
    // A vector shift left by a non-uniform constant can be lowered into a
    // vector multiply.
    if (((VT == MVT::v8i16 || VT == MVT::v4i32) && ST->hasSSE2()) ||
        ((VT == MVT::v16i16 || VT == MVT::v8i32) && ST->hasAVX()))
      ISD = ISD::MUL;
  }

  static const CostTblEntry AVX2CostTable[] = {
    { ISD::SHL,  MVT::v16i8,   6 }, // vpblendvb sequence.
    { ISD::SHL,  MVT::v32i8,   6 }, // vpblendvb sequence.
    { ISD::SHL,  MVT::v64i8,  12 }, // 2*vpblendvb sequence.
    { ISD::SHL,  MVT::v8i16,   5 }, // extend/vpsrlvd/pack sequence.
    { ISD::SHL,  MVT::v16i16,  7 }, // extend/vpsrlvd/pack sequence.
    { ISD::SHL,  MVT::v32i16, 14 }, // 2*extend/vpsrlvd/pack sequence.

    { ISD::SRL,  MVT::v16i8,   6 }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v32i8,   6 }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v64i8,  12 }, // 2*vpblendvb sequence.
    { ISD::SRL,  MVT::v8i16,   5 }, // extend/vpsrlvd/pack sequence.
    { ISD::SRL,  MVT::v16i16,  7 }, // extend/vpsrlvd/pack sequence.
    { ISD::SRL,  MVT::v32i16, 14 }, // 2*extend/vpsrlvd/pack sequence.

    { ISD::SRA,  MVT::v16i8,  17 }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v32i8,  17 }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v64i8,  34 }, // 2*vpblendvb sequence.
    { ISD::SRA,  MVT::v8i16,   5 }, // extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v16i16,  7 }, // extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v32i16, 14 }, // 2*extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v2i64,   2 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,   2 }, // srl/xor/sub sequence.

    { ISD::SUB,  MVT::v32i8,   1 }, // psubb
    { ISD::ADD,  MVT::v32i8,   1 }, // paddb
    { ISD::SUB,  MVT::v16i16,  1 }, // psubw
    { ISD::ADD,  MVT::v16i16,  1 }, // paddw
    { ISD::SUB,  MVT::v8i32,   1 }, // psubd
    { ISD::ADD,  MVT::v8i32,   1 }, // paddd
    { ISD::SUB,  MVT::v4i64,   1 }, // psubq
    { ISD::ADD,  MVT::v4i64,   1 }, // paddq

    { ISD::MUL,  MVT::v16i16,  1 }, // pmullw
    { ISD::MUL,  MVT::v8i32,   2 }, // pmulld (Haswell from agner.org)
    { ISD::MUL,  MVT::v4i64,   6 }, // 3*pmuludq/3*shift/2*add

    { ISD::FNEG, MVT::v4f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FNEG, MVT::v8f32,   1 }, // Haswell from http://www.agner.org/
    { ISD::FADD, MVT::v4f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FADD, MVT::v8f32,   1 }, // Haswell from http://www.agner.org/
    { ISD::FSUB, MVT::v4f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FSUB, MVT::v8f32,   1 }, // Haswell from http://www.agner.org/
    { ISD::FMUL, MVT::f64,     1 }, // Haswell from http://www.agner.org/
    { ISD::FMUL, MVT::v2f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FMUL, MVT::v4f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FMUL, MVT::v8f32,   1 }, // Haswell from http://www.agner.org/

    { ISD::FDIV, MVT::f32,     7 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,   7 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v8f32,  14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::f64,    14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,  14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v4f64,  28 }, // Haswell from http://www.agner.org/
  };

  // Look for AVX2 lowering tricks for custom cases.
  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL,  MVT::v16i16,  4 },
    { ISD::MUL,  MVT::v8i32,   5 }, // BTVER2 from http://www.agner.org/
    { ISD::MUL,  MVT::v4i64,  12 },

    { ISD::SUB,  MVT::v32i8,   4 },
    { ISD::ADD,  MVT::v32i8,   4 },
    { ISD::SUB,  MVT::v16i16,  4 },
    { ISD::ADD,  MVT::v16i16,  4 },
    { ISD::SUB,  MVT::v8i32,   4 },
    { ISD::ADD,  MVT::v8i32,   4 },
    { ISD::SUB,  MVT::v4i64,   4 },
    { ISD::ADD,  MVT::v4i64,   4 },

    { ISD::SHL,  MVT::v32i8,  22 }, // pblendvb sequence + split.
    { ISD::SHL,  MVT::v8i16,   6 }, // pblendvb sequence.
    { ISD::SHL,  MVT::v16i16, 13 }, // pblendvb sequence + split.
    { ISD::SHL,  MVT::v4i32,   3 }, // pslld/paddd/cvttps2dq/pmulld
    { ISD::SHL,  MVT::v8i32,   9 }, // pslld/paddd/cvttps2dq/pmulld + split
    { ISD::SHL,  MVT::v2i64,   2 }, // Shift each lane + blend.
    { ISD::SHL,  MVT::v4i64,   6 }, // Shift each lane + blend + split.

    { ISD::SRL,  MVT::v32i8,  23 }, // pblendvb sequence + split.
    { ISD::SRL,  MVT::v16i16, 28 }, // pblendvb sequence + split.
    { ISD::SRL,  MVT::v4i32,   6 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v8i32,  14 }, // Shift each lane + blend + split.
    { ISD::SRL,  MVT::v2i64,   2 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v4i64,   6 }, // Shift each lane + blend + split.

    { ISD::SRA,  MVT::v32i8,  44 }, // pblendvb sequence + split.
    { ISD::SRA,  MVT::v16i16, 28 }, // pblendvb sequence + split.
    { ISD::SRA,  MVT::v4i32,   6 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v8i32,  14 }, // Shift each lane + blend + split.
    { ISD::SRA,  MVT::v2i64,   5 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v4i64,  12 }, // Shift each lane + blend + split.

    { ISD::FNEG, MVT::v4f64,   2 }, // BTVER2 from http://www.agner.org/
    { ISD::FNEG, MVT::v8f32,   2 }, // BTVER2 from http://www.agner.org/

    { ISD::FMUL, MVT::f64,     2 }, // BTVER2 from http://www.agner.org/
    { ISD::FMUL, MVT::v2f64,   2 }, // BTVER2 from http://www.agner.org/
    { ISD::FMUL, MVT::v4f64,   4 }, // BTVER2 from http://www.agner.org/

    { ISD::FDIV, MVT::f32,    14 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,  14 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v8f32,  28 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::f64,    22 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,  22 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v4f64,  44 }, // SNB from http://www.agner.org/
  };

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE42CostTable[] = {
    { ISD::FADD, MVT::f64,    1 }, // Nehalem from http://www.agner.org/
    { ISD::FADD, MVT::f32,    1 }, // Nehalem from http://www.agner.org/
    { ISD::FADD, MVT::v2f64,  1 }, // Nehalem from http://www.agner.org/
    { ISD::FADD, MVT::v4f32,  1 }, // Nehalem from http://www.agner.org/

    { ISD::FSUB, MVT::f64,    1 }, // Nehalem from http://www.agner.org/
    { ISD::FSUB, MVT::f32,    1 }, // Nehalem from http://www.agner.org/
    { ISD::FSUB, MVT::v2f64,  1 }, // Nehalem from http://www.agner.org/
    { ISD::FSUB, MVT::v4f32,  1 }, // Nehalem from http://www.agner.org/

    { ISD::FMUL, MVT::f64,    1 }, // Nehalem from http://www.agner.org/
    { ISD::FMUL, MVT::f32,    1 }, // Nehalem from http://www.agner.org/
    { ISD::FMUL, MVT::v2f64,  1 }, // Nehalem from http://www.agner.org/
    { ISD::FMUL, MVT::v4f32,  1 }, // Nehalem from http://www.agner.org/

    { ISD::FDIV, MVT::f32,   14 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32, 14 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::f64,   22 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64, 22 }, // Nehalem from http://www.agner.org/

    { ISD::MUL,  MVT::v2i64,  6 }  // 3*pmuludq/3*shift/2*add
  };

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE41CostTable[] = {
    { ISD::SHL,  MVT::v16i8, 10 }, // pblendvb sequence.
    { ISD::SHL,  MVT::v8i16, 11 }, // pblendvb sequence.
    { ISD::SHL,  MVT::v4i32,  4 }, // pslld/paddd/cvttps2dq/pmulld

    { ISD::SRL,  MVT::v16i8, 11 }, // pblendvb sequence.
    { ISD::SRL,  MVT::v8i16, 13 }, // pblendvb sequence.
    { ISD::SRL,  MVT::v4i32, 16 }, // Shift each lane + blend.

    { ISD::SRA,  MVT::v16i8, 21 }, // pblendvb sequence.
    { ISD::SRA,  MVT::v8i16, 13 }, // pblendvb sequence.

    { ISD::MUL,  MVT::v4i32,  2 }  // pmulld (Nehalem from agner.org)
  };

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    { ISD::SHL,  MVT::v16i8, 13 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v8i16, 25 }, // cmpgtw sequence.
    { ISD::SHL,  MVT::v4i32, 16 }, // pslld/paddd/cvttps2dq/pmuludq.
    { ISD::SHL,  MVT::v2i64,  4 }, // splat+shuffle sequence.

    { ISD::SRL,  MVT::v16i8, 14 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v8i16, 16 }, // cmpgtw sequence.
    { ISD::SRL,  MVT::v4i32, 12 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v2i64,  4 }, // splat+shuffle sequence.

    { ISD::SRA,  MVT::v16i8, 27 }, // unpacked cmpgtb sequence.
    { ISD::SRA,  MVT::v8i16, 16 }, // cmpgtw sequence.
    { ISD::SRA,  MVT::v4i32, 12 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v2i64,  8 }, // srl/xor/sub splat+shuffle sequence.

    { ISD::MUL,  MVT::v8i16,  1 }, // pmullw
    { ISD::MUL,  MVT::v4i32,  6 }, // 3*pmuludq/4*shuffle
    { ISD::MUL,  MVT::v2i64,  8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FDIV, MVT::f32,   23 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32, 39 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::f64,   38 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64, 69 }, // Pentium IV from http://www.agner.org/

    { ISD::FNEG, MVT::f32,    1 }, // Pentium IV from http://www.agner.org/
    { ISD::FNEG, MVT::f64,    1 }, // Pentium IV from http://www.agner.org/
    { ISD::FNEG, MVT::v4f32,  1 }, // Pentium IV from http://www.agner.org/
    { ISD::FNEG, MVT::v2f64,  1 }, // Pentium IV from http://www.agner.org/

    { ISD::FADD, MVT::f32,    2 }, // Pentium IV from http://www.agner.org/
    { ISD::FADD, MVT::f64,    2 }, // Pentium IV from http://www.agner.org/

    { ISD::FSUB, MVT::f32,    2 }, // Pentium IV from http://www.agner.org/
    { ISD::FSUB, MVT::f64,    2 }, // Pentium IV from http://www.agner.org/
  };

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE1CostTable[] = {
    { ISD::FDIV, MVT::f32,   17 }, // Pentium III from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32, 34 }, // Pentium III from http://www.agner.org/

    { ISD::FNEG, MVT::f32,    2 }, // Pentium III from http://www.agner.org/
    { ISD::FNEG, MVT::v4f32,  2 }, // Pentium III from http://www.agner.org/

    { ISD::FADD, MVT::f32,    1 }, // Pentium III from http://www.agner.org/
    { ISD::FADD, MVT::v4f32,  2 }, // Pentium III from http://www.agner.org/

    { ISD::FSUB, MVT::f32,    1 }, // Pentium III from http://www.agner.org/
    { ISD::FSUB, MVT::v4f32,  2 }, // Pentium III from http://www.agner.org/
  };

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry X64CostTbl[] = { // 64-bit targets
    { ISD::ADD, MVT::i64, 1 }, // Core (Merom) from http://www.agner.org/
    { ISD::SUB, MVT::i64, 1 }, // Core (Merom) from http://www.agner.org/
    { ISD::MUL, MVT::i64, 2 }, // Nehalem from http://www.agner.org/
  };

  if (ST->is64Bit())
    if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
    { ISD::ADD, MVT::i8,  1 }, // Pentium III from http://www.agner.org/
    { ISD::ADD, MVT::i16, 1 }, // Pentium III from http://www.agner.org/
    { ISD::ADD, MVT::i32, 1 }, // Pentium III from http://www.agner.org/

    { ISD::SUB, MVT::i8,  1 }, // Pentium III from http://www.agner.org/
    { ISD::SUB, MVT::i16, 1 }, // Pentium III from http://www.agner.org/
    { ISD::SUB, MVT::i32, 1 }, // Pentium III from http://www.agner.org/
  };

  if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, LT.second))
    return LT.first * Entry->Cost;
  // It is not a good idea to vectorize division. We have to scalarize it and
  // in the process we will often end up having to spill regular registers.
  // The overhead of division is going to dominate most kernels anyway, so try
  // hard to prevent vectorization of division - it is generally a bad idea.
  // Assume somewhat arbitrarily that we have to be able to hide "20 cycles"
  // for each lane.
  if (LT.second.isVector() && (ISD == ISD::SDIV || ISD == ISD::SREM ||
                               ISD == ISD::UDIV || ISD == ISD::UREM)) {
    InstructionCost ScalarCost = getArithmeticInstrCost(
        Opcode, Ty->getScalarType(), CostKind, Op1Info, Op2Info,
        TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
    return 20 * LT.first * LT.second.getVectorNumElements() * ScalarCost;
  }
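
  // As a worked instance of the formula above (a sketch): an illegal
  // <8 x i32> sdiv on SSE2 legalizes to two <4 x i32> registers
  // (LT.first == 2, 4 lanes each), giving 20 * 2 * 4 * ScalarCost, a
  // deliberately large number so the vectorizers avoid division.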
  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info);
}

InstructionCost X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
                                           VectorType *BaseTp,
                                           ArrayRef<int> Mask, int Index,
                                           VectorType *SubTp,
                                           ArrayRef<const Value *> Args) {
  // 64-bit packed float vectors (v2f32) are widened to type v4f32.
  // 64-bit packed integer vectors (v2i32) are widened to type v4i32.
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, BaseTp);

  Kind = improveShuffleKindFromMask(Kind, Mask);
  // Treat Transpose as 2-op shuffles - there's no difference in lowering.
  if (Kind == TTI::SK_Transpose)
    Kind = TTI::SK_PermuteTwoSrc;

  // For Broadcasts we are splatting the first element from the first input
  // register, so only need to reference that input and all the output
  // registers are the same.
  if (Kind == TTI::SK_Broadcast)
    LT.first = 1;

  // Subvector extractions are free if they start at the beginning of a
  // vector and cheap if the subvectors are aligned.
  if (Kind == TTI::SK_ExtractSubvector && LT.second.isVector()) {
    int NumElts = LT.second.getVectorNumElements();
    if ((Index % NumElts) == 0)
      return 0;
    std::pair<InstructionCost, MVT> SubLT =
        TLI->getTypeLegalizationCost(DL, SubTp);
    if (SubLT.second.isVector()) {
      int NumSubElts = SubLT.second.getVectorNumElements();
      if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
        return SubLT.first;
      // Handle some cases for widening legalization. For now we only handle
      // cases where the original subvector was naturally aligned and evenly
      // fit in its legalized subvector type.
      // FIXME: Remove some of the alignment restrictions.
      // FIXME: We can use permq for 64-bit or larger extracts from 256-bit
      // vectors.
      int OrigSubElts = cast<FixedVectorType>(SubTp)->getNumElements();
      if (NumSubElts > OrigSubElts && (Index % OrigSubElts) == 0 &&
          (NumSubElts % OrigSubElts) == 0 &&
          LT.second.getVectorElementType() ==
              SubLT.second.getVectorElementType() &&
          LT.second.getVectorElementType().getSizeInBits() ==
              BaseTp->getElementType()->getPrimitiveSizeInBits()) {
        assert(NumElts >= NumSubElts && NumElts > OrigSubElts &&
               "Unexpected number of elements!");
        auto *VecTy = FixedVectorType::get(BaseTp->getElementType(),
                                           LT.second.getVectorNumElements());
        auto *SubTy = FixedVectorType::get(BaseTp->getElementType(),
                                           SubLT.second.getVectorNumElements());
        int ExtractIndex = alignDown((Index % NumElts), NumSubElts);
        InstructionCost ExtractCost = getShuffleCost(
            TTI::SK_ExtractSubvector, VecTy, None, ExtractIndex, SubTy);

        // If the original size is 32-bits or more, we can use pshufd. Otherwise
        // if we have SSSE3 we can use pshufb.
        if (SubTp->getPrimitiveSizeInBits() >= 32 || ST->hasSSSE3())
          return ExtractCost + 1; // pshufd or pshufb

        assert(SubTp->getPrimitiveSizeInBits() == 16 &&
               "Unexpected vector size");

        return ExtractCost + 2; // worst case pshufhw + pshufd
      }
    }
  }
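
  // For example (a sketch): extracting the upper <4 x float> half of a legal
  // <8 x float> at Index 4 is subvector-aligned, so it costs SubLT.first;
  // extracting at Index 0 is free, since it is just a use of the lower
  // subregister.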
  // Subvector insertions are cheap if the subvectors are aligned.
  // Note that in general, the insertion starting at the beginning of a vector
  // isn't free, because we need to preserve the rest of the wide vector.
  if (Kind == TTI::SK_InsertSubvector && LT.second.isVector()) {
    int NumElts = LT.second.getVectorNumElements();
    std::pair<InstructionCost, MVT> SubLT =
        TLI->getTypeLegalizationCost(DL, SubTp);
    if (SubLT.second.isVector()) {
      int NumSubElts = SubLT.second.getVectorNumElements();
      if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
        return SubLT.first;
    }

    // If the insertion isn't aligned, treat it like a 2-op shuffle.
    Kind = TTI::SK_PermuteTwoSrc;
  }

  // Handle some common (illegal) sub-vector types as they are often very cheap
  // to shuffle even on targets without PSHUFB.
  EVT VT = TLI->getValueType(DL, BaseTp);
  if (VT.isSimple() && VT.isVector() && VT.getSizeInBits() < 128 &&
      !ST->hasSSSE3()) {
    static const CostTblEntry SSE2SubVectorShuffleTbl[] = {
      {TTI::SK_Broadcast,        MVT::v4i16, 1}, // pshuflw
      {TTI::SK_Broadcast,        MVT::v2i16, 1}, // pshuflw
      {TTI::SK_Broadcast,        MVT::v8i8,  2}, // punpck/pshuflw
      {TTI::SK_Broadcast,        MVT::v4i8,  2}, // punpck/pshuflw
      {TTI::SK_Broadcast,        MVT::v2i8,  1}, // punpck

      {TTI::SK_Reverse,          MVT::v4i16, 1}, // pshuflw
      {TTI::SK_Reverse,          MVT::v2i16, 1}, // pshuflw
      {TTI::SK_Reverse,          MVT::v4i8,  3}, // punpck/pshuflw/packus
      {TTI::SK_Reverse,          MVT::v2i8,  1}, // punpck

      {TTI::SK_PermuteTwoSrc,    MVT::v4i16, 2}, // punpck/pshuflw
      {TTI::SK_PermuteTwoSrc,    MVT::v2i16, 2}, // punpck/pshuflw
      {TTI::SK_PermuteTwoSrc,    MVT::v8i8,  7}, // punpck/pshuflw
      {TTI::SK_PermuteTwoSrc,    MVT::v4i8,  4}, // punpck/pshuflw
      {TTI::SK_PermuteTwoSrc,    MVT::v2i8,  2}, // punpck

      {TTI::SK_PermuteSingleSrc, MVT::v4i16, 1}, // pshuflw
      {TTI::SK_PermuteSingleSrc, MVT::v2i16, 1}, // pshuflw
      {TTI::SK_PermuteSingleSrc, MVT::v8i8,  5}, // punpck/pshuflw
      {TTI::SK_PermuteSingleSrc, MVT::v4i8,  3}, // punpck/pshuflw
      {TTI::SK_PermuteSingleSrc, MVT::v2i8,  1}, // punpck
    };

    if (ST->hasSSE2())
      if (const auto *Entry =
              CostTableLookup(SSE2SubVectorShuffleTbl, Kind, VT.getSimpleVT()))
        return Entry->Cost;
  }

  // We are going to permute multiple sources and the result will be in
  // multiple destinations. Provide an accurate cost only for splits where
  // the element type remains the same.
  if (Kind == TTI::SK_PermuteSingleSrc && LT.first != 1) {
    MVT LegalVT = LT.second;
    if (LegalVT.isVector() &&
        LegalVT.getVectorElementType().getSizeInBits() ==
            BaseTp->getElementType()->getPrimitiveSizeInBits() &&
        LegalVT.getVectorNumElements() <
            cast<FixedVectorType>(BaseTp)->getNumElements()) {

      unsigned VecTySize = DL.getTypeStoreSize(BaseTp);
      unsigned LegalVTSize = LegalVT.getStoreSize();
      // Number of source vectors after legalization:
      unsigned NumOfSrcs = (VecTySize + LegalVTSize - 1) / LegalVTSize;
      // Number of destination vectors after legalization:
      InstructionCost NumOfDests = LT.first;

      auto *SingleOpTy = FixedVectorType::get(BaseTp->getElementType(),
                                              LegalVT.getVectorNumElements());

      if (!Mask.empty() && NumOfDests.isValid()) {
        // Try to perform better estimation of the permutation.
        // 1. Split the source/destination vectors into real registers.
        // 2. Do the mask analysis to identify which real registers are
        //    permuted. If more than one source register is used to build a
        //    destination register, the cost for that destination register is
        //    (Number_of_source_registers - 1) * Cost_PermuteTwoSrc. If only
        //    one source register is used, build the mask and calculate the
        //    cost as a cost of PermuteSingleSrc.
        // Also, for the single register permute we try to identify if the
        // destination register is just a copy of the source register or the
        // copy of the previous destination register (the cost is
        // TTI::TCC_Basic). If the source register is just reused, the cost for
        // this operation is 0.
        unsigned E = *NumOfDests.getValue();
        unsigned NormalizedVF =
            LegalVT.getVectorNumElements() * std::max(NumOfSrcs, E);
        unsigned NumOfSrcRegs = NormalizedVF / LegalVT.getVectorNumElements();
        unsigned NumOfDestRegs = NormalizedVF / LegalVT.getVectorNumElements();
        SmallVector<int> NormalizedMask(NormalizedVF, UndefMaskElem);
        copy(Mask, NormalizedMask.begin());
        unsigned PrevSrcReg = 0;
        ArrayRef<int> PrevRegMask;
        InstructionCost Cost = 0;
        processShuffleMasks(
            NormalizedMask, NumOfSrcRegs, NumOfDestRegs, NumOfDestRegs, []() {},
            [this, SingleOpTy, &PrevSrcReg, &PrevRegMask,
             &Cost](ArrayRef<int> RegMask, unsigned SrcReg, unsigned DestReg) {
              if (!ShuffleVectorInst::isIdentityMask(RegMask)) {
                // Check if the previous register can be just copied to the
                // next one.
                if (PrevRegMask.empty() || PrevSrcReg != SrcReg ||
                    PrevRegMask != RegMask)
                  Cost += getShuffleCost(TTI::SK_PermuteSingleSrc, SingleOpTy,
                                         RegMask, 0, nullptr);
                else
                  // Just a copy of previous destination register.
                  Cost += TTI::TCC_Basic;
                return;
              }
              if (SrcReg != DestReg &&
                  any_of(RegMask, [](int I) { return I != UndefMaskElem; })) {
                // Just a copy of the source register.
                Cost += TTI::TCC_Basic;
              }
              PrevSrcReg = SrcReg;
              PrevRegMask = RegMask;
            },
            [this, SingleOpTy, &Cost](ArrayRef<int> RegMask,
                                      unsigned /*Unused*/,
                                      unsigned /*Unused*/) {
              Cost += getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy, RegMask,
                                     0, nullptr);
            });
        return Cost;
      }

      InstructionCost NumOfShuffles = (NumOfSrcs - 1) * NumOfDests;
      return NumOfShuffles * getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy,
                                            None, 0, nullptr);
    }

    return BaseT::getShuffleCost(Kind, BaseTp, Mask, Index, SubTp);
  }
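
  // For example (a sketch): when a <16 x i32> single-source permute is split
  // into two <8 x i32> registers, a destination register whose lanes come
  // from both source halves is costed as SK_PermuteTwoSrc, one built from a
  // single (non-identity) half as SK_PermuteSingleSrc, and an identity copy
  // as TTI::TCC_Basic or nothing, per the analysis above.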
  // For 2-input shuffles, we must account for splitting the 2 inputs into many.
  if (Kind == TTI::SK_PermuteTwoSrc && LT.first != 1) {
    // We assume that source and destination have the same vector type.
    InstructionCost NumOfDests = LT.first;
    InstructionCost NumOfShufflesPerDest = LT.first * 2 - 1;
    LT.first = NumOfDests * NumOfShufflesPerDest;
  }

  static const CostTblEntry AVX512FP16ShuffleTbl[] = {
      {TTI::SK_Broadcast, MVT::v32f16, 1}, // vpbroadcastw
      {TTI::SK_Broadcast, MVT::v16f16, 1}, // vpbroadcastw
      {TTI::SK_Broadcast, MVT::v8f16,  1}, // vpbroadcastw

      {TTI::SK_Reverse, MVT::v32f16, 2}, // vpermw
      {TTI::SK_Reverse, MVT::v16f16, 2}, // vpermw
      {TTI::SK_Reverse, MVT::v8f16,  1}, // vpshufb

      {TTI::SK_PermuteSingleSrc, MVT::v32f16, 2}, // vpermw
      {TTI::SK_PermuteSingleSrc, MVT::v16f16, 2}, // vpermw
      {TTI::SK_PermuteSingleSrc, MVT::v8f16,  1}, // vpshufb

      {TTI::SK_PermuteTwoSrc, MVT::v32f16, 2}, // vpermt2w
      {TTI::SK_PermuteTwoSrc, MVT::v16f16, 2}, // vpermt2w
      {TTI::SK_PermuteTwoSrc, MVT::v8f16,  2}  // vpermt2w
  };

  if (!ST->useSoftFloat() && ST->hasFP16())
    if (const auto *Entry =
            CostTableLookup(AVX512FP16ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512VBMIShuffleTbl[] = {
      {TTI::SK_Reverse, MVT::v64i8, 1}, // vpermb
      {TTI::SK_Reverse, MVT::v32i8, 1}, // vpermb

      {TTI::SK_PermuteSingleSrc, MVT::v64i8, 1}, // vpermb
      {TTI::SK_PermuteSingleSrc, MVT::v32i8, 1}, // vpermb

      {TTI::SK_PermuteTwoSrc, MVT::v64i8, 2}, // vpermt2b
      {TTI::SK_PermuteTwoSrc, MVT::v32i8, 2}, // vpermt2b
      {TTI::SK_PermuteTwoSrc, MVT::v16i8, 2}  // vpermt2b
  };

  if (ST->hasVBMI())
    if (const auto *Entry =
            CostTableLookup(AVX512VBMIShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512BWShuffleTbl[] = {
      {TTI::SK_Broadcast, MVT::v32i16, 1}, // vpbroadcastw
      {TTI::SK_Broadcast, MVT::v64i8,  1}, // vpbroadcastb

      {TTI::SK_Reverse, MVT::v32i16, 2}, // vpermw
      {TTI::SK_Reverse, MVT::v16i16, 2}, // vpermw
      {TTI::SK_Reverse, MVT::v64i8,  2}, // pshufb + vshufi64x2

      {TTI::SK_PermuteSingleSrc, MVT::v32i16, 2}, // vpermw
      {TTI::SK_PermuteSingleSrc, MVT::v16i16, 2}, // vpermw
      {TTI::SK_PermuteSingleSrc, MVT::v64i8,  8}, // extend to v32i16

      {TTI::SK_PermuteTwoSrc, MVT::v32i16, 2},  // vpermt2w
      {TTI::SK_PermuteTwoSrc, MVT::v16i16, 2},  // vpermt2w
      {TTI::SK_PermuteTwoSrc, MVT::v8i16,  2},  // vpermt2w
      {TTI::SK_PermuteTwoSrc, MVT::v64i8,  19}, // 6 * v32i8 + 1

      {TTI::SK_Select, MVT::v32i16, 1}, // vblendmw
      {TTI::SK_Select, MVT::v64i8,  1}, // vblendmb
  };

  if (ST->hasBWI())
    if (const auto *Entry =
            CostTableLookup(AVX512BWShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512ShuffleTbl[] = {
      {TTI::SK_Broadcast, MVT::v8f64,  1}, // vbroadcastpd
      {TTI::SK_Broadcast, MVT::v16f32, 1}, // vbroadcastps
      {TTI::SK_Broadcast, MVT::v8i64,  1}, // vpbroadcastq
      {TTI::SK_Broadcast, MVT::v16i32, 1}, // vpbroadcastd
      {TTI::SK_Broadcast, MVT::v32i16, 1}, // vpbroadcastw
      {TTI::SK_Broadcast, MVT::v64i8,  1}, // vpbroadcastb

      {TTI::SK_Reverse, MVT::v8f64,  1}, // vpermpd
      {TTI::SK_Reverse, MVT::v16f32, 1}, // vpermps
      {TTI::SK_Reverse, MVT::v8i64,  1}, // vpermq
      {TTI::SK_Reverse, MVT::v16i32, 1}, // vpermd
      {TTI::SK_Reverse, MVT::v32i16, 7}, // per mca
      {TTI::SK_Reverse, MVT::v64i8,  7}, // per mca

      {TTI::SK_PermuteSingleSrc, MVT::v8f64, 1}, // vpermpd
{TTI::SK_PermuteSingleSrc, MVT::v4f64, 1}, // vpermpd 1383 {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1}, // vpermpd 1384 {TTI::SK_PermuteSingleSrc, MVT::v16f32, 1}, // vpermps 1385 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1}, // vpermps 1386 {TTI::SK_PermuteSingleSrc, MVT::v4f32, 1}, // vpermps 1387 {TTI::SK_PermuteSingleSrc, MVT::v8i64, 1}, // vpermq 1388 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1}, // vpermq 1389 {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1}, // vpermq 1390 {TTI::SK_PermuteSingleSrc, MVT::v16i32, 1}, // vpermd 1391 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1}, // vpermd 1392 {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1}, // vpermd 1393 {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1}, // pshufb 1394 1395 {TTI::SK_PermuteTwoSrc, MVT::v8f64, 1}, // vpermt2pd 1396 {TTI::SK_PermuteTwoSrc, MVT::v16f32, 1}, // vpermt2ps 1397 {TTI::SK_PermuteTwoSrc, MVT::v8i64, 1}, // vpermt2q 1398 {TTI::SK_PermuteTwoSrc, MVT::v16i32, 1}, // vpermt2d 1399 {TTI::SK_PermuteTwoSrc, MVT::v4f64, 1}, // vpermt2pd 1400 {TTI::SK_PermuteTwoSrc, MVT::v8f32, 1}, // vpermt2ps 1401 {TTI::SK_PermuteTwoSrc, MVT::v4i64, 1}, // vpermt2q 1402 {TTI::SK_PermuteTwoSrc, MVT::v8i32, 1}, // vpermt2d 1403 {TTI::SK_PermuteTwoSrc, MVT::v2f64, 1}, // vpermt2pd 1404 {TTI::SK_PermuteTwoSrc, MVT::v4f32, 1}, // vpermt2ps 1405 {TTI::SK_PermuteTwoSrc, MVT::v2i64, 1}, // vpermt2q 1406 {TTI::SK_PermuteTwoSrc, MVT::v4i32, 1}, // vpermt2d 1407 1408 // FIXME: This just applies the type legalization cost rules above 1409 // assuming these completely split. 1410 {TTI::SK_PermuteSingleSrc, MVT::v32i16, 14}, 1411 {TTI::SK_PermuteSingleSrc, MVT::v64i8, 14}, 1412 {TTI::SK_PermuteTwoSrc, MVT::v32i16, 42}, 1413 {TTI::SK_PermuteTwoSrc, MVT::v64i8, 42}, 1414 1415 {TTI::SK_Select, MVT::v32i16, 1}, // vpternlogq 1416 {TTI::SK_Select, MVT::v64i8, 1}, // vpternlogq 1417 {TTI::SK_Select, MVT::v8f64, 1}, // vblendmpd 1418 {TTI::SK_Select, MVT::v16f32, 1}, // vblendmps 1419 {TTI::SK_Select, MVT::v8i64, 1}, // vblendmq 1420 {TTI::SK_Select, MVT::v16i32, 1}, // vblendmd 1421 }; 1422 1423 if (ST->hasAVX512()) 1424 if (const auto *Entry = CostTableLookup(AVX512ShuffleTbl, Kind, LT.second)) 1425 return LT.first * Entry->Cost; 1426 1427 static const CostTblEntry AVX2ShuffleTbl[] = { 1428 {TTI::SK_Broadcast, MVT::v4f64, 1}, // vbroadcastpd 1429 {TTI::SK_Broadcast, MVT::v8f32, 1}, // vbroadcastps 1430 {TTI::SK_Broadcast, MVT::v4i64, 1}, // vpbroadcastq 1431 {TTI::SK_Broadcast, MVT::v8i32, 1}, // vpbroadcastd 1432 {TTI::SK_Broadcast, MVT::v16i16, 1}, // vpbroadcastw 1433 {TTI::SK_Broadcast, MVT::v32i8, 1}, // vpbroadcastb 1434 1435 {TTI::SK_Reverse, MVT::v4f64, 1}, // vpermpd 1436 {TTI::SK_Reverse, MVT::v8f32, 1}, // vpermps 1437 {TTI::SK_Reverse, MVT::v4i64, 1}, // vpermq 1438 {TTI::SK_Reverse, MVT::v8i32, 1}, // vpermd 1439 {TTI::SK_Reverse, MVT::v16i16, 2}, // vperm2i128 + pshufb 1440 {TTI::SK_Reverse, MVT::v32i8, 2}, // vperm2i128 + pshufb 1441 1442 {TTI::SK_Select, MVT::v16i16, 1}, // vpblendvb 1443 {TTI::SK_Select, MVT::v32i8, 1}, // vpblendvb 1444 1445 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1}, // vpermpd 1446 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1}, // vpermps 1447 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1}, // vpermq 1448 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1}, // vpermd 1449 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vperm2i128 + 2*vpshufb 1450 // + vpblendvb 1451 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4}, // vperm2i128 + 2*vpshufb 1452 // + vpblendvb 1453 1454 {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3}, // 2*vpermpd + vblendpd 1455 {TTI::SK_PermuteTwoSrc, MVT::v8f32, 
3}, // 2*vpermps + vblendps 1456 {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3}, // 2*vpermq + vpblendd 1457 {TTI::SK_PermuteTwoSrc, MVT::v8i32, 3}, // 2*vpermd + vpblendd 1458 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 7}, // 2*vperm2i128 + 4*vpshufb 1459 // + vpblendvb 1460 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 7}, // 2*vperm2i128 + 4*vpshufb 1461 // + vpblendvb 1462 }; 1463 1464 if (ST->hasAVX2()) 1465 if (const auto *Entry = CostTableLookup(AVX2ShuffleTbl, Kind, LT.second)) 1466 return LT.first * Entry->Cost; 1467 1468 static const CostTblEntry XOPShuffleTbl[] = { 1469 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2}, // vperm2f128 + vpermil2pd 1470 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 2}, // vperm2f128 + vpermil2ps 1471 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2}, // vperm2f128 + vpermil2pd 1472 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 2}, // vperm2f128 + vpermil2ps 1473 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vextractf128 + 2*vpperm 1474 // + vinsertf128 1475 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4}, // vextractf128 + 2*vpperm 1476 // + vinsertf128 1477 1478 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 9}, // 2*vextractf128 + 6*vpperm 1479 // + vinsertf128 1480 {TTI::SK_PermuteTwoSrc, MVT::v8i16, 1}, // vpperm 1481 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 9}, // 2*vextractf128 + 6*vpperm 1482 // + vinsertf128 1483 {TTI::SK_PermuteTwoSrc, MVT::v16i8, 1}, // vpperm 1484 }; 1485 1486 if (ST->hasXOP()) 1487 if (const auto *Entry = CostTableLookup(XOPShuffleTbl, Kind, LT.second)) 1488 return LT.first * Entry->Cost; 1489 1490 static const CostTblEntry AVX1ShuffleTbl[] = { 1491 {TTI::SK_Broadcast, MVT::v4f64, 2}, // vperm2f128 + vpermilpd 1492 {TTI::SK_Broadcast, MVT::v8f32, 2}, // vperm2f128 + vpermilps 1493 {TTI::SK_Broadcast, MVT::v4i64, 2}, // vperm2f128 + vpermilpd 1494 {TTI::SK_Broadcast, MVT::v8i32, 2}, // vperm2f128 + vpermilps 1495 {TTI::SK_Broadcast, MVT::v16i16, 3}, // vpshuflw + vpshufd + vinsertf128 1496 {TTI::SK_Broadcast, MVT::v32i8, 2}, // vpshufb + vinsertf128 1497 1498 {TTI::SK_Reverse, MVT::v4f64, 2}, // vperm2f128 + vpermilpd 1499 {TTI::SK_Reverse, MVT::v8f32, 2}, // vperm2f128 + vpermilps 1500 {TTI::SK_Reverse, MVT::v4i64, 2}, // vperm2f128 + vpermilpd 1501 {TTI::SK_Reverse, MVT::v8i32, 2}, // vperm2f128 + vpermilps 1502 {TTI::SK_Reverse, MVT::v16i16, 4}, // vextractf128 + 2*pshufb 1503 // + vinsertf128 1504 {TTI::SK_Reverse, MVT::v32i8, 4}, // vextractf128 + 2*pshufb 1505 // + vinsertf128 1506 1507 {TTI::SK_Select, MVT::v4i64, 1}, // vblendpd 1508 {TTI::SK_Select, MVT::v4f64, 1}, // vblendpd 1509 {TTI::SK_Select, MVT::v8i32, 1}, // vblendps 1510 {TTI::SK_Select, MVT::v8f32, 1}, // vblendps 1511 {TTI::SK_Select, MVT::v16i16, 3}, // vpand + vpandn + vpor 1512 {TTI::SK_Select, MVT::v32i8, 3}, // vpand + vpandn + vpor 1513 1514 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2}, // vperm2f128 + vshufpd 1515 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2}, // vperm2f128 + vshufpd 1516 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 4}, // 2*vperm2f128 + 2*vshufps 1517 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 4}, // 2*vperm2f128 + 2*vshufps 1518 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 8}, // vextractf128 + 4*pshufb 1519 // + 2*por + vinsertf128 1520 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 8}, // vextractf128 + 4*pshufb 1521 // + 2*por + vinsertf128 1522 1523 {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3}, // 2*vperm2f128 + vshufpd 1524 {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3}, // 2*vperm2f128 + vshufpd 1525 {TTI::SK_PermuteTwoSrc, MVT::v8f32, 4}, // 2*vperm2f128 + 2*vshufps 1526 {TTI::SK_PermuteTwoSrc, MVT::v8i32, 4}, // 2*vperm2f128 + 
2*vshufps 1527 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 15}, // 2*vextractf128 + 8*pshufb 1528 // + 4*por + vinsertf128 1529 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 15}, // 2*vextractf128 + 8*pshufb 1530 // + 4*por + vinsertf128 1531 }; 1532 1533 if (ST->hasAVX()) 1534 if (const auto *Entry = CostTableLookup(AVX1ShuffleTbl, Kind, LT.second)) 1535 return LT.first * Entry->Cost; 1536 1537 static const CostTblEntry SSE41ShuffleTbl[] = { 1538 {TTI::SK_Select, MVT::v2i64, 1}, // pblendw 1539 {TTI::SK_Select, MVT::v2f64, 1}, // movsd 1540 {TTI::SK_Select, MVT::v4i32, 1}, // pblendw 1541 {TTI::SK_Select, MVT::v4f32, 1}, // blendps 1542 {TTI::SK_Select, MVT::v8i16, 1}, // pblendw 1543 {TTI::SK_Select, MVT::v16i8, 1} // pblendvb 1544 }; 1545 1546 if (ST->hasSSE41()) 1547 if (const auto *Entry = CostTableLookup(SSE41ShuffleTbl, Kind, LT.second)) 1548 return LT.first * Entry->Cost; 1549 1550 static const CostTblEntry SSSE3ShuffleTbl[] = { 1551 {TTI::SK_Broadcast, MVT::v8i16, 1}, // pshufb 1552 {TTI::SK_Broadcast, MVT::v16i8, 1}, // pshufb 1553 1554 {TTI::SK_Reverse, MVT::v8i16, 1}, // pshufb 1555 {TTI::SK_Reverse, MVT::v16i8, 1}, // pshufb 1556 1557 {TTI::SK_Select, MVT::v8i16, 3}, // 2*pshufb + por 1558 {TTI::SK_Select, MVT::v16i8, 3}, // 2*pshufb + por 1559 1560 {TTI::SK_PermuteSingleSrc, MVT::v8i16, 1}, // pshufb 1561 {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1}, // pshufb 1562 1563 {TTI::SK_PermuteTwoSrc, MVT::v8i16, 3}, // 2*pshufb + por 1564 {TTI::SK_PermuteTwoSrc, MVT::v16i8, 3}, // 2*pshufb + por 1565 }; 1566 1567 if (ST->hasSSSE3()) 1568 if (const auto *Entry = CostTableLookup(SSSE3ShuffleTbl, Kind, LT.second)) 1569 return LT.first * Entry->Cost; 1570 1571 static const CostTblEntry SSE2ShuffleTbl[] = { 1572 {TTI::SK_Broadcast, MVT::v2f64, 1}, // shufpd 1573 {TTI::SK_Broadcast, MVT::v2i64, 1}, // pshufd 1574 {TTI::SK_Broadcast, MVT::v4i32, 1}, // pshufd 1575 {TTI::SK_Broadcast, MVT::v8i16, 2}, // pshuflw + pshufd 1576 {TTI::SK_Broadcast, MVT::v16i8, 3}, // unpck + pshuflw + pshufd 1577 1578 {TTI::SK_Reverse, MVT::v2f64, 1}, // shufpd 1579 {TTI::SK_Reverse, MVT::v2i64, 1}, // pshufd 1580 {TTI::SK_Reverse, MVT::v4i32, 1}, // pshufd 1581 {TTI::SK_Reverse, MVT::v8i16, 3}, // pshuflw + pshufhw + pshufd 1582 {TTI::SK_Reverse, MVT::v16i8, 9}, // 2*pshuflw + 2*pshufhw 1583 // + 2*pshufd + 2*unpck + packus 1584 1585 {TTI::SK_Select, MVT::v2i64, 1}, // movsd 1586 {TTI::SK_Select, MVT::v2f64, 1}, // movsd 1587 {TTI::SK_Select, MVT::v4i32, 2}, // 2*shufps 1588 {TTI::SK_Select, MVT::v8i16, 3}, // pand + pandn + por 1589 {TTI::SK_Select, MVT::v16i8, 3}, // pand + pandn + por 1590 1591 {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1}, // shufpd 1592 {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1}, // pshufd 1593 {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1}, // pshufd 1594 {TTI::SK_PermuteSingleSrc, MVT::v8i16, 5}, // 2*pshuflw + 2*pshufhw 1595 // + pshufd/unpck 1596 { TTI::SK_PermuteSingleSrc, MVT::v16i8, 10 }, // 2*pshuflw + 2*pshufhw 1597 // + 2*pshufd + 2*unpck + 2*packus 1598 1599 { TTI::SK_PermuteTwoSrc, MVT::v2f64, 1 }, // shufpd 1600 { TTI::SK_PermuteTwoSrc, MVT::v2i64, 1 }, // shufpd 1601 { TTI::SK_PermuteTwoSrc, MVT::v4i32, 2 }, // 2*{unpck,movsd,pshufd} 1602 { TTI::SK_PermuteTwoSrc, MVT::v8i16, 8 }, // blend+permute 1603 { TTI::SK_PermuteTwoSrc, MVT::v16i8, 13 }, // blend+permute 1604 }; 1605 1606 static const CostTblEntry SSE3BroadcastLoadTbl[] = { 1607 {TTI::SK_Broadcast, MVT::v2f64, 0}, // broadcast handled by movddup 1608 }; 1609 1610 if (ST->hasSSE2()) { 1611 bool IsLoad = 1612 llvm::any_of(Args, [](const auto &V) { 
return isa<LoadInst>(V); }); 1613 if (ST->hasSSE3() && IsLoad) 1614 if (const auto *Entry = 1615 CostTableLookup(SSE3BroadcastLoadTbl, Kind, LT.second)) { 1616 assert(isLegalBroadcastLoad(BaseTp->getElementType(), 1617 LT.second.getVectorElementCount()) && 1618 "Table entry missing from isLegalBroadcastLoad()"); 1619 return LT.first * Entry->Cost; 1620 } 1621 1622 if (const auto *Entry = CostTableLookup(SSE2ShuffleTbl, Kind, LT.second)) 1623 return LT.first * Entry->Cost; 1624 } 1625 1626 static const CostTblEntry SSE1ShuffleTbl[] = { 1627 { TTI::SK_Broadcast, MVT::v4f32, 1 }, // shufps 1628 { TTI::SK_Reverse, MVT::v4f32, 1 }, // shufps 1629 { TTI::SK_Select, MVT::v4f32, 2 }, // 2*shufps 1630 { TTI::SK_PermuteSingleSrc, MVT::v4f32, 1 }, // shufps 1631 { TTI::SK_PermuteTwoSrc, MVT::v4f32, 2 }, // 2*shufps 1632 }; 1633 1634 if (ST->hasSSE1()) 1635 if (const auto *Entry = CostTableLookup(SSE1ShuffleTbl, Kind, LT.second)) 1636 return LT.first * Entry->Cost; 1637 1638 return BaseT::getShuffleCost(Kind, BaseTp, Mask, Index, SubTp); 1639 } 1640 1641 InstructionCost X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, 1642 Type *Src, 1643 TTI::CastContextHint CCH, 1644 TTI::TargetCostKind CostKind, 1645 const Instruction *I) { 1646 int ISD = TLI->InstructionOpcodeToISD(Opcode); 1647 assert(ISD && "Invalid opcode"); 1648 1649 // TODO: Allow non-throughput costs that aren't binary. 1650 auto AdjustCost = [&CostKind](InstructionCost Cost) -> InstructionCost { 1651 if (CostKind != TTI::TCK_RecipThroughput) 1652 return Cost == 0 ? 0 : 1; 1653 return Cost; 1654 }; 1655 1656 // The cost tables include both specific, custom (non-legal) src/dst type 1657 // conversions and generic, legalized types. We test for customs first, before 1658 // falling back to legalization. 1659 // FIXME: Need a better design of the cost table to handle non-simple types of 1660 // potential massive combinations (elem_num x src_type x dst_type). 1661 static const TypeConversionCostTblEntry AVX512BWConversionTbl[] { 1662 { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 1 }, 1663 { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 1 }, 1664 1665 // Mask sign extend has an instruction. 1666 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 1 }, 1667 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v2i1, 1 }, 1668 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 1 }, 1669 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v2i1, 1 }, 1670 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 1 }, 1671 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v4i1, 1 }, 1672 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 1 }, 1673 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v4i1, 1 }, 1674 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 1 }, 1675 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v8i1, 1 }, 1676 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 1 }, 1677 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 1 }, 1678 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 }, 1679 { ISD::SIGN_EXTEND, MVT::v32i8, MVT::v32i1, 1 }, 1680 { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i1, 1 }, 1681 { ISD::SIGN_EXTEND, MVT::v64i8, MVT::v64i1, 1 }, 1682 { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v64i1, 1 }, 1683 1684 // Mask zero extend is a sext + shift. 
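// (The sign extend yields 0/-1 lanes, and a logical right shift by the
// element width minus one then turns -1 into +1; hence the cost of 2 for
// each entry below.)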
1685 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 2 }, 1686 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v2i1, 2 }, 1687 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 2 }, 1688 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v2i1, 2 }, 1689 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 2 }, 1690 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v4i1, 2 }, 1691 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 2 }, 1692 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v4i1, 2 }, 1693 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 2 }, 1694 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v8i1, 2 }, 1695 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 2 }, 1696 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 2 }, 1697 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 }, 1698 { ISD::ZERO_EXTEND, MVT::v32i8, MVT::v32i1, 2 }, 1699 { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i1, 2 }, 1700 { ISD::ZERO_EXTEND, MVT::v64i8, MVT::v64i1, 2 }, 1701 { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v64i1, 2 }, 1702 1703 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 2 }, 1704 { ISD::TRUNCATE, MVT::v2i1, MVT::v16i8, 2 }, 1705 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 }, 1706 { ISD::TRUNCATE, MVT::v2i1, MVT::v8i16, 2 }, 1707 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 }, 1708 { ISD::TRUNCATE, MVT::v4i1, MVT::v16i8, 2 }, 1709 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 2 }, 1710 { ISD::TRUNCATE, MVT::v4i1, MVT::v8i16, 2 }, 1711 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 2 }, 1712 { ISD::TRUNCATE, MVT::v8i1, MVT::v16i8, 2 }, 1713 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 2 }, 1714 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 2 }, 1715 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 2 }, 1716 { ISD::TRUNCATE, MVT::v32i1, MVT::v32i8, 2 }, 1717 { ISD::TRUNCATE, MVT::v32i1, MVT::v32i16, 2 }, 1718 { ISD::TRUNCATE, MVT::v64i1, MVT::v64i8, 2 }, 1719 { ISD::TRUNCATE, MVT::v64i1, MVT::v32i16, 2 }, 1720 1721 { ISD::TRUNCATE, MVT::v32i8, MVT::v32i16, 2 }, 1722 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 }, // widen to zmm 1723 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i16, 2 }, // vpmovwb 1724 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 2 }, // vpmovwb 1725 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 2 }, // vpmovwb 1726 }; 1727 1728 static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = { 1729 // Mask sign extend has an instruction. 1730 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 }, 1731 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v2i1, 1 }, 1732 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 }, 1733 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 }, 1734 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 }, 1735 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v16i1, 1 }, 1736 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i1, 1 }, 1737 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1, 1 }, 1738 1739 // Mask zero extend is a sext + shift. 
1740 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 }, 1741 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v2i1, 2 }, 1742 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 }, 1743 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 }, 1744 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 }, 1745 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v16i1, 2 }, 1746 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i1, 2 }, 1747 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1, 2 }, 1748 1749 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 }, 1750 { ISD::TRUNCATE, MVT::v2i1, MVT::v4i32, 2 }, 1751 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 }, 1752 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 }, 1753 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 }, 1754 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i64, 2 }, 1755 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i32, 2 }, 1756 { ISD::TRUNCATE, MVT::v16i1, MVT::v8i64, 2 }, 1757 1758 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 }, 1759 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 }, 1760 1761 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 }, 1762 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 }, 1763 1764 { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f32, 1 }, 1765 { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f64, 1 }, 1766 1767 { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f32, 1 }, 1768 { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f64, 1 }, 1769 }; 1770 1771 // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and 1772 // 256-bit wide vectors. 1773 1774 static const TypeConversionCostTblEntry AVX512FConversionTbl[] = { 1775 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 1 }, 1776 { ISD::FP_EXTEND, MVT::v8f64, MVT::v16f32, 3 }, 1777 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 1 }, 1778 1779 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // sext+vpslld+vptestmd 1780 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 3 }, // sext+vpslld+vptestmd 1781 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 3 }, // sext+vpslld+vptestmd 1782 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 3 }, // sext+vpslld+vptestmd 1783 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 3 }, // sext+vpsllq+vptestmq 1784 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 3 }, // sext+vpsllq+vptestmq 1785 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 3 }, // sext+vpsllq+vptestmq 1786 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 3 }, // sext+vpslld+vptestmd 1787 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 2 }, // zmm vpslld+vptestmd 1788 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 }, // zmm vpslld+vptestmd 1789 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 }, // zmm vpslld+vptestmd 1790 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i32, 2 }, // vpslld+vptestmd 1791 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 }, // zmm vpsllq+vptestmq 1792 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 }, // zmm vpsllq+vptestmq 1793 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i64, 2 }, // vpsllq+vptestmq 1794 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i32, 2 }, // vpmovdb 1795 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 2 }, // vpmovdb 1796 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 2 }, // vpmovdb 1797 { ISD::TRUNCATE, MVT::v32i8, MVT::v16i32, 2 }, // vpmovdb 1798 { ISD::TRUNCATE, MVT::v64i8, MVT::v16i32, 2 }, // vpmovdb 1799 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 2 }, // vpmovdw 1800 { ISD::TRUNCATE, MVT::v32i16, MVT::v16i32, 2 }, // vpmovdw 1801 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i64, 2 }, // vpmovqb 1802 { ISD::TRUNCATE, MVT::v2i16, MVT::v2i64, 1 }, // vpshufb 1803 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i64, 2 }, // vpmovqb 1804 { ISD::TRUNCATE, MVT::v16i8, MVT::v8i64, 2 }, // vpmovqb 1805 { ISD::TRUNCATE, MVT::v32i8, MVT::v8i64, 2 }, // vpmovqb 1806 { ISD::TRUNCATE, MVT::v64i8, MVT::v8i64, 2 }, // vpmovqb 1807 { 
ISD::TRUNCATE, MVT::v8i16, MVT::v8i64, 2 }, // vpmovqw 1808 { ISD::TRUNCATE, MVT::v16i16, MVT::v8i64, 2 }, // vpmovqw 1809 { ISD::TRUNCATE, MVT::v32i16, MVT::v8i64, 2 }, // vpmovqw 1810 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 1 }, // vpmovqd 1811 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 }, // zmm vpmovqd 1812 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i64, 5 },// 2*vpmovqd+concat+vpmovdb 1813 1814 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 }, // extend to v16i32 1815 { ISD::TRUNCATE, MVT::v32i8, MVT::v32i16, 8 }, 1816 { ISD::TRUNCATE, MVT::v64i8, MVT::v32i16, 8 }, 1817 1818 // Sign extend is zmm vpternlogd+vptruncdb. 1819 // Zero extend is zmm broadcast load+vptruncdw. 1820 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 3 }, 1821 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 4 }, 1822 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 3 }, 1823 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 4 }, 1824 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 3 }, 1825 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 4 }, 1826 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 3 }, 1827 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 4 }, 1828 1829 // Sign extend is zmm vpternlogd+vptruncdw. 1830 // Zero extend is zmm vpternlogd+vptruncdw+vpsrlw. 1831 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 3 }, 1832 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 4 }, 1833 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 3 }, 1834 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 4 }, 1835 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 3 }, 1836 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 4 }, 1837 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 3 }, 1838 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 4 }, 1839 1840 { ISD::SIGN_EXTEND, MVT::v2i32, MVT::v2i1, 1 }, // zmm vpternlogd 1841 { ISD::ZERO_EXTEND, MVT::v2i32, MVT::v2i1, 2 }, // zmm vpternlogd+psrld 1842 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 }, // zmm vpternlogd 1843 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 }, // zmm vpternlogd+psrld 1844 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 }, // zmm vpternlogd 1845 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 }, // zmm vpternlogd+psrld 1846 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 }, // zmm vpternlogq 1847 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 }, // zmm vpternlogq+psrlq 1848 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 }, // zmm vpternlogq 1849 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 }, // zmm vpternlogq+psrlq 1850 1851 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1, 1 }, // vpternlogd 1852 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1, 2 }, // vpternlogd+psrld 1853 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i1, 1 }, // vpternlogq 1854 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i1, 2 }, // vpternlogq+psrlq 1855 1856 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 1 }, 1857 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 1 }, 1858 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 }, 1859 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 }, 1860 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 1 }, 1861 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 1 }, 1862 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 1 }, 1863 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 1 }, 1864 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i32, 1 }, 1865 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i32, 1 }, 1866 1867 { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 3 }, // FIXME: May not be right 1868 { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 3 }, // FIXME: May not be right 1869 1870 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 }, 1871 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 }, 1872 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v16i8, 2 }, 1873 
{ ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 1 }, 1874 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 }, 1875 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 1 }, 1876 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 }, 1877 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 }, 1878 1879 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 }, 1880 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 }, 1881 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v16i8, 2 }, 1882 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 1 }, 1883 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 }, 1884 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 1 }, 1885 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 }, 1886 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 }, 1887 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 26 }, 1888 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 5 }, 1889 1890 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f32, 2 }, 1891 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f64, 7 }, 1892 { ISD::FP_TO_SINT, MVT::v32i8, MVT::v32f64,15 }, 1893 { ISD::FP_TO_SINT, MVT::v64i8, MVT::v64f32,11 }, 1894 { ISD::FP_TO_SINT, MVT::v64i8, MVT::v64f64,31 }, 1895 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f64, 3 }, 1896 { ISD::FP_TO_SINT, MVT::v16i16, MVT::v16f64, 7 }, 1897 { ISD::FP_TO_SINT, MVT::v32i16, MVT::v32f32, 5 }, 1898 { ISD::FP_TO_SINT, MVT::v32i16, MVT::v32f64,15 }, 1899 { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f64, 1 }, 1900 { ISD::FP_TO_SINT, MVT::v16i32, MVT::v16f64, 3 }, 1901 1902 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f64, 1 }, 1903 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f64, 3 }, 1904 { ISD::FP_TO_UINT, MVT::v8i8, MVT::v8f64, 3 }, 1905 { ISD::FP_TO_UINT, MVT::v16i32, MVT::v16f32, 1 }, 1906 { ISD::FP_TO_UINT, MVT::v16i16, MVT::v16f32, 3 }, 1907 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v16f32, 3 }, 1908 }; 1909 1910 static const TypeConversionCostTblEntry AVX512BWVLConversionTbl[] { 1911 // Mask sign extend has an instruction. 1912 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 1 }, 1913 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v2i1, 1 }, 1914 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 1 }, 1915 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v2i1, 1 }, 1916 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 1 }, 1917 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v4i1, 1 }, 1918 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 1 }, 1919 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v4i1, 1 }, 1920 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 1 }, 1921 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v8i1, 1 }, 1922 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 1 }, 1923 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 1 }, 1924 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 }, 1925 { ISD::SIGN_EXTEND, MVT::v32i8, MVT::v32i1, 1 }, 1926 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v32i1, 1 }, 1927 { ISD::SIGN_EXTEND, MVT::v32i8, MVT::v64i1, 1 }, 1928 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v64i1, 1 }, 1929 1930 // Mask zero extend is a sext + shift. 
1931 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 2 }, 1932 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v2i1, 2 }, 1933 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 2 }, 1934 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v2i1, 2 }, 1935 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 2 }, 1936 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v4i1, 2 }, 1937 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 2 }, 1938 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v4i1, 2 }, 1939 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 2 }, 1940 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v8i1, 2 }, 1941 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 2 }, 1942 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 2 }, 1943 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 }, 1944 { ISD::ZERO_EXTEND, MVT::v32i8, MVT::v32i1, 2 }, 1945 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v32i1, 2 }, 1946 { ISD::ZERO_EXTEND, MVT::v32i8, MVT::v64i1, 2 }, 1947 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v64i1, 2 }, 1948 1949 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 2 }, 1950 { ISD::TRUNCATE, MVT::v2i1, MVT::v16i8, 2 }, 1951 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 }, 1952 { ISD::TRUNCATE, MVT::v2i1, MVT::v8i16, 2 }, 1953 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 }, 1954 { ISD::TRUNCATE, MVT::v4i1, MVT::v16i8, 2 }, 1955 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 2 }, 1956 { ISD::TRUNCATE, MVT::v4i1, MVT::v8i16, 2 }, 1957 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 2 }, 1958 { ISD::TRUNCATE, MVT::v8i1, MVT::v16i8, 2 }, 1959 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 2 }, 1960 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 2 }, 1961 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 2 }, 1962 { ISD::TRUNCATE, MVT::v32i1, MVT::v32i8, 2 }, 1963 { ISD::TRUNCATE, MVT::v32i1, MVT::v16i16, 2 }, 1964 { ISD::TRUNCATE, MVT::v64i1, MVT::v32i8, 2 }, 1965 { ISD::TRUNCATE, MVT::v64i1, MVT::v16i16, 2 }, 1966 1967 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 }, 1968 }; 1969 1970 static const TypeConversionCostTblEntry AVX512DQVLConversionTbl[] = { 1971 // Mask sign extend has an instruction. 1972 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 }, 1973 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v2i1, 1 }, 1974 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 }, 1975 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v16i1, 1 }, 1976 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 }, 1977 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v8i1, 1 }, 1978 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v16i1, 1 }, 1979 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 }, 1980 1981 // Mask zero extend is a sext + shift. 
1982 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 }, 1983 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v2i1, 2 }, 1984 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 }, 1985 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v16i1, 2 }, 1986 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 }, 1987 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v8i1, 2 }, 1988 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v16i1, 2 }, 1989 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 }, 1990 1991 { ISD::TRUNCATE, MVT::v16i1, MVT::v4i64, 2 }, 1992 { ISD::TRUNCATE, MVT::v16i1, MVT::v8i32, 2 }, 1993 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 }, 1994 { ISD::TRUNCATE, MVT::v2i1, MVT::v4i32, 2 }, 1995 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 }, 1996 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 }, 1997 { ISD::TRUNCATE, MVT::v8i1, MVT::v4i64, 2 }, 1998 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 }, 1999 2000 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 }, 2001 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 }, 2002 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 }, 2003 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 }, 2004 2005 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 }, 2006 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 }, 2007 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 }, 2008 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 }, 2009 2010 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v4f32, 1 }, 2011 { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f32, 1 }, 2012 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 }, 2013 { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f64, 1 }, 2014 2015 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v4f32, 1 }, 2016 { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f32, 1 }, 2017 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 }, 2018 { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f64, 1 }, 2019 }; 2020 2021 static const TypeConversionCostTblEntry AVX512VLConversionTbl[] = { 2022 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // sext+vpslld+vptestmd 2023 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 3 }, // sext+vpslld+vptestmd 2024 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 3 }, // sext+vpslld+vptestmd 2025 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 8 }, // split+2*v8i8 2026 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 3 }, // sext+vpsllq+vptestmq 2027 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 3 }, // sext+vpsllq+vptestmq 2028 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 3 }, // sext+vpsllq+vptestmq 2029 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 8 }, // split+2*v8i16 2030 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 2 }, // vpslld+vptestmd 2031 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 }, // vpslld+vptestmd 2032 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 }, // vpslld+vptestmd 2033 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 }, // vpsllq+vptestmq 2034 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 }, // vpsllq+vptestmq 2035 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 }, // vpmovqd 2036 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 2 }, // vpmovqb 2037 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 2 }, // vpmovqw 2038 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 2 }, // vpmovwb 2039 2040 // sign extend is vpcmpeq+maskedmove+vpmovdw+vpacksswb 2041 // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw+vpackuswb 2042 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 5 }, 2043 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 6 }, 2044 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 5 }, 2045 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 6 }, 2046 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 5 }, 2047 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 6 }, 2048 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 10 }, 2049 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 12 }, 2050 2051 // sign extend is 
vpcmpeq+maskedmove+vpmovdw 2052 // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw 2053 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 4 }, 2054 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 5 }, 2055 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 4 }, 2056 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 5 }, 2057 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 4 }, 2058 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 5 }, 2059 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 10 }, 2060 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 12 }, 2061 2062 { ISD::SIGN_EXTEND, MVT::v2i32, MVT::v2i1, 1 }, // vpternlogd 2063 { ISD::ZERO_EXTEND, MVT::v2i32, MVT::v2i1, 2 }, // vpternlogd+psrld 2064 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 }, // vpternlogd 2065 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 }, // vpternlogd+psrld 2066 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 }, // vpternlogd 2067 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 }, // vpternlogd+psrld 2068 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 }, // vpternlogq 2069 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 }, // vpternlogq+psrlq 2070 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 }, // vpternlogq 2071 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 }, // vpternlogq+psrlq 2072 2073 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v16i8, 1 }, 2074 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v16i8, 1 }, 2075 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v16i8, 1 }, 2076 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v16i8, 1 }, 2077 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 }, 2078 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 }, 2079 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v8i16, 1 }, 2080 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v8i16, 1 }, 2081 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 }, 2082 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 }, 2083 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 1 }, 2084 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1 }, 2085 2086 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 }, 2087 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v16i8, 1 }, 2088 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 }, 2089 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 1 }, 2090 2091 { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 1 }, 2092 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 1 }, 2093 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 }, 2094 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v16i8, 1 }, 2095 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 }, 2096 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 1 }, 2097 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 }, 2098 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 }, 2099 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 }, 2100 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 }, 2101 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 5 }, 2102 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 }, 2103 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 5 }, 2104 2105 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v8f32, 2 }, 2106 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f32, 2 }, 2107 { ISD::FP_TO_SINT, MVT::v32i8, MVT::v32f32, 5 }, 2108 2109 { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 1 }, 2110 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 1 }, 2111 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 }, 2112 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 1 }, 2113 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 1 }, 2114 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 1 }, 2115 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f64, 1 }, 2116 }; 2117 2118 static const TypeConversionCostTblEntry AVX2ConversionTbl[] = { 2119 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 3 }, 2120 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 3 }, 2121 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 3 }, 
2122 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 3 }, 2123 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 }, 2124 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 1 }, 2125 2126 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v16i8, 2 }, 2127 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v16i8, 2 }, 2128 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v16i8, 2 }, 2129 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v16i8, 2 }, 2130 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 }, 2131 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 }, 2132 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v8i16, 2 }, 2133 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v8i16, 2 }, 2134 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 2 }, 2135 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 2 }, 2136 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 3 }, 2137 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 3 }, 2138 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 2 }, 2139 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 2 }, 2140 2141 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 }, 2142 2143 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 4 }, 2144 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 4 }, 2145 { ISD::TRUNCATE, MVT::v16i8, MVT::v8i16, 1 }, 2146 { ISD::TRUNCATE, MVT::v16i8, MVT::v4i32, 1 }, 2147 { ISD::TRUNCATE, MVT::v16i8, MVT::v2i64, 1 }, 2148 { ISD::TRUNCATE, MVT::v16i8, MVT::v8i32, 4 }, 2149 { ISD::TRUNCATE, MVT::v16i8, MVT::v4i64, 4 }, 2150 { ISD::TRUNCATE, MVT::v8i16, MVT::v4i32, 1 }, 2151 { ISD::TRUNCATE, MVT::v8i16, MVT::v2i64, 1 }, 2152 { ISD::TRUNCATE, MVT::v8i16, MVT::v4i64, 5 }, 2153 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 }, 2154 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 2 }, 2155 2156 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 3 }, 2157 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 3 }, 2158 2159 { ISD::FP_TO_SINT, MVT::v16i16, MVT::v8f32, 1 }, 2160 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f64, 1 }, 2161 { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f32, 1 }, 2162 { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f64, 3 }, 2163 2164 { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 3 }, 2165 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 3 }, 2166 { ISD::FP_TO_UINT, MVT::v16i16, MVT::v8f32, 1 }, 2167 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 3 }, 2168 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 4 }, 2169 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 4 }, 2170 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 3 }, 2171 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v4f64, 4 }, 2172 2173 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 2 }, 2174 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v16i8, 2 }, 2175 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 2 }, 2176 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 }, 2177 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 }, 2178 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 }, 2179 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 3 }, 2180 2181 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 2 }, 2182 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v16i8, 2 }, 2183 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 2 }, 2184 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 }, 2185 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 2 }, 2186 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 1 }, 2187 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 2 }, 2188 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 2 }, 2189 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 2 }, 2190 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 4 }, 2191 }; 2192 2193 static const TypeConversionCostTblEntry AVXConversionTbl[] = { 2194 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 6 }, 2195 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 4 }, 2196 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 7 }, 2197 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 4 
}, 2198 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 4 }, 2199 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 4 }, 2200 2201 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v16i8, 3 }, 2202 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v16i8, 3 }, 2203 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v16i8, 3 }, 2204 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v16i8, 3 }, 2205 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 3 }, 2206 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 3 }, 2207 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v8i16, 3 }, 2208 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v8i16, 3 }, 2209 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 3 }, 2210 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 3 }, 2211 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 3 }, 2212 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 3 }, 2213 2214 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 4 }, 2215 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 5 }, 2216 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 4 }, 2217 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i64, 9 }, 2218 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i64, 11 }, 2219 2220 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 6 }, 2221 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 6 }, 2222 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 }, // and+extract+packuswb 2223 { ISD::TRUNCATE, MVT::v16i8, MVT::v8i32, 5 }, 2224 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 }, 2225 { ISD::TRUNCATE, MVT::v16i8, MVT::v4i64, 5 }, 2226 { ISD::TRUNCATE, MVT::v8i16, MVT::v4i64, 3 }, // and+extract+2*packusdw 2227 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 2 }, 2228 2229 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 }, 2230 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i1, 3 }, 2231 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i1, 8 }, 2232 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v16i8, 4 }, 2233 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v16i8, 2 }, 2234 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 }, 2235 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v8i16, 2 }, 2236 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 2 }, 2237 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 2 }, 2238 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 4 }, 2239 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 5 }, 2240 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i64, 8 }, 2241 2242 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 7 }, 2243 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i1, 7 }, 2244 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i1, 6 }, 2245 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v16i8, 4 }, 2246 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v16i8, 2 }, 2247 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 }, 2248 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v8i16, 2 }, 2249 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 4 }, 2250 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 4 }, 2251 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 }, 2252 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 6 }, 2253 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 8 }, 2254 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 10 }, 2255 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 10 }, 2256 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 18 }, 2257 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 }, 2258 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 10 }, 2259 2260 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v8f32, 2 }, 2261 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v4f64, 2 }, 2262 { ISD::FP_TO_SINT, MVT::v32i8, MVT::v8f32, 2 }, 2263 { ISD::FP_TO_SINT, MVT::v32i8, MVT::v4f64, 2 }, 2264 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f32, 2 }, 2265 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v4f64, 2 }, 2266 { ISD::FP_TO_SINT, MVT::v16i16, MVT::v8f32, 2 }, 2267 { ISD::FP_TO_SINT, MVT::v16i16, MVT::v4f64, 2 }, 2268 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f64, 2 }, 2269 { ISD::FP_TO_SINT, 
MVT::v8i32, MVT::v8f32, 2 }, 2270 { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f64, 5 }, 2271 2272 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v8f32, 2 }, 2273 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v4f64, 2 }, 2274 { ISD::FP_TO_UINT, MVT::v32i8, MVT::v8f32, 2 }, 2275 { ISD::FP_TO_UINT, MVT::v32i8, MVT::v4f64, 2 }, 2276 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f32, 2 }, 2277 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v4f64, 2 }, 2278 { ISD::FP_TO_UINT, MVT::v16i16, MVT::v8f32, 2 }, 2279 { ISD::FP_TO_UINT, MVT::v16i16, MVT::v4f64, 2 }, 2280 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 3 }, 2281 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 4 }, 2282 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 6 }, 2283 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 7 }, 2284 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v4f64, 7 }, 2285 2286 { ISD::FP_EXTEND, MVT::v4f64, MVT::v4f32, 1 }, 2287 { ISD::FP_ROUND, MVT::v4f32, MVT::v4f64, 1 }, 2288 }; 2289 2290 static const TypeConversionCostTblEntry SSE41ConversionTbl[] = { 2291 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v16i8, 1 }, 2292 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v16i8, 1 }, 2293 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v16i8, 1 }, 2294 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v16i8, 1 }, 2295 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v16i8, 1 }, 2296 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v16i8, 1 }, 2297 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v8i16, 1 }, 2298 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v8i16, 1 }, 2299 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v8i16, 1 }, 2300 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v8i16, 1 }, 2301 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v4i32, 1 }, 2302 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v4i32, 1 }, 2303 2304 // These truncates end up widening elements. 2305 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 1 }, // PMOVXZBQ 2306 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 1 }, // PMOVXZWQ 2307 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 1 }, // PMOVXZBD 2308 2309 { ISD::TRUNCATE, MVT::v16i8, MVT::v4i32, 2 }, 2310 { ISD::TRUNCATE, MVT::v8i16, MVT::v4i32, 2 }, 2311 { ISD::TRUNCATE, MVT::v16i8, MVT::v2i64, 2 }, 2312 2313 { ISD::SINT_TO_FP, MVT::f32, MVT::i32, 1 }, 2314 { ISD::SINT_TO_FP, MVT::f64, MVT::i32, 1 }, 2315 { ISD::SINT_TO_FP, MVT::f32, MVT::i64, 1 }, 2316 { ISD::SINT_TO_FP, MVT::f64, MVT::i64, 1 }, 2317 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 1 }, 2318 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 }, 2319 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 1 }, 2320 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 }, 2321 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 }, 2322 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 1 }, 2323 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 2 }, 2324 2325 { ISD::UINT_TO_FP, MVT::f32, MVT::i32, 1 }, 2326 { ISD::UINT_TO_FP, MVT::f64, MVT::i32, 1 }, 2327 { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 4 }, 2328 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 4 }, 2329 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 1 }, 2330 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 }, 2331 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 1 }, 2332 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 }, 2333 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 3 }, 2334 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 3 }, 2335 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 2 }, 2336 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 12 }, 2337 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 22 }, 2338 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 4 }, 2339 2340 { ISD::FP_TO_SINT, MVT::i32, MVT::f32, 1 }, 2341 { ISD::FP_TO_SINT, MVT::i64, MVT::f32, 1 }, 2342 { ISD::FP_TO_SINT, MVT::i32, MVT::f64, 1 }, 2343 { ISD::FP_TO_SINT, MVT::i64, MVT::f64, 1 }, 2344 { ISD::FP_TO_SINT, 
MVT::v16i8, MVT::v4f32, 2 }, 2345 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v2f64, 2 }, 2346 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v4f32, 1 }, 2347 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v2f64, 1 }, 2348 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 }, 2349 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v2f64, 1 }, 2350 2351 { ISD::FP_TO_UINT, MVT::i32, MVT::f32, 1 }, 2352 { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 4 }, 2353 { ISD::FP_TO_UINT, MVT::i32, MVT::f64, 1 }, 2354 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 4 }, 2355 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v4f32, 2 }, 2356 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v2f64, 2 }, 2357 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v4f32, 1 }, 2358 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v2f64, 1 }, 2359 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 4 }, 2360 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 4 }, 2361 }; 2362 2363 static const TypeConversionCostTblEntry SSE2ConversionTbl[] = { 2364 // These are somewhat magic numbers justified by comparing the 2365 // output of llvm-mca for our various supported scheduler models 2366 // and basing it off the worst case scenario. 2367 { ISD::SINT_TO_FP, MVT::f32, MVT::i32, 3 }, 2368 { ISD::SINT_TO_FP, MVT::f64, MVT::i32, 3 }, 2369 { ISD::SINT_TO_FP, MVT::f32, MVT::i64, 3 }, 2370 { ISD::SINT_TO_FP, MVT::f64, MVT::i64, 3 }, 2371 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 3 }, 2372 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 4 }, 2373 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 3 }, 2374 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 4 }, 2375 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 3 }, 2376 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4 }, 2377 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 8 }, 2378 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 8 }, 2379 2380 { ISD::UINT_TO_FP, MVT::f32, MVT::i32, 3 }, 2381 { ISD::UINT_TO_FP, MVT::f64, MVT::i32, 3 }, 2382 { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 8 }, 2383 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 9 }, 2384 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 4 }, 2385 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 4 }, 2386 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 4 }, 2387 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 4 }, 2388 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 7 }, 2389 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 7 }, 2390 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 }, 2391 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 15 }, 2392 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 18 }, 2393 2394 { ISD::FP_TO_SINT, MVT::i32, MVT::f32, 4 }, 2395 { ISD::FP_TO_SINT, MVT::i64, MVT::f32, 4 }, 2396 { ISD::FP_TO_SINT, MVT::i32, MVT::f64, 4 }, 2397 { ISD::FP_TO_SINT, MVT::i64, MVT::f64, 4 }, 2398 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v4f32, 6 }, 2399 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v2f64, 6 }, 2400 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v4f32, 5 }, 2401 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v2f64, 5 }, 2402 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 4 }, 2403 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v2f64, 4 }, 2404 2405 { ISD::FP_TO_UINT, MVT::i32, MVT::f32, 4 }, 2406 { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 4 }, 2407 { ISD::FP_TO_UINT, MVT::i32, MVT::f64, 4 }, 2408 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 15 }, 2409 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v4f32, 6 }, 2410 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v2f64, 6 }, 2411 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v4f32, 5 }, 2412 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v2f64, 5 }, 2413 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 8 }, 2414 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 8 }, 2415 2416 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v16i8, 4 }, 2417 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v16i8, 4 }, 2418 { 
ISD::ZERO_EXTEND, MVT::v4i32, MVT::v16i8, 2 }, 2419 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v16i8, 3 }, 2420 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v16i8, 1 }, 2421 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v16i8, 2 }, 2422 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v8i16, 2 }, 2423 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v8i16, 3 }, 2424 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v8i16, 1 }, 2425 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v8i16, 2 }, 2426 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v4i32, 1 }, 2427 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v4i32, 2 }, 2428 2429 // These truncates are really widening elements. 2430 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 1 }, // PSHUFD 2431 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 }, // PUNPCKLWD+DQ 2432 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // PUNPCKLBW+WD+PSHUFD 2433 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 1 }, // PUNPCKLWD 2434 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 }, // PUNPCKLBW+WD 2435 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 1 }, // PUNPCKLBW 2436 2437 { ISD::TRUNCATE, MVT::v16i8, MVT::v8i16, 2 }, // PAND+PACKUSWB 2438 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 }, 2439 { ISD::TRUNCATE, MVT::v16i8, MVT::v4i32, 3 }, // PAND+2*PACKUSWB 2440 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 7 }, 2441 { ISD::TRUNCATE, MVT::v2i16, MVT::v2i32, 1 }, 2442 { ISD::TRUNCATE, MVT::v8i16, MVT::v4i32, 3 }, 2443 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 }, 2444 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32,10 }, 2445 { ISD::TRUNCATE, MVT::v16i8, MVT::v2i64, 4 }, // PAND+3*PACKUSWB 2446 { ISD::TRUNCATE, MVT::v8i16, MVT::v2i64, 2 }, // PSHUFD+PSHUFLW 2447 { ISD::TRUNCATE, MVT::v4i32, MVT::v2i64, 1 }, // PSHUFD 2448 }; 2449 2450 // Attempt to map directly to (simple) MVT types to let us match custom entries. 2451 EVT SrcTy = TLI->getValueType(DL, Src); 2452 EVT DstTy = TLI->getValueType(DL, Dst); 2453 2454 // The function getSimpleVT only handles simple value types. 
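// e.g. <16 x i8> maps to the simple type MVT::v16i8 and can match the custom
// table entries directly, while a non-simple type such as <3 x i13> falls
// through to the legalized-type lookups further below. (Illustrative types;
// they are not taken from the tables above.)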
2455 if (SrcTy.isSimple() && DstTy.isSimple()) { 2456 MVT SimpleSrcTy = SrcTy.getSimpleVT(); 2457 MVT SimpleDstTy = DstTy.getSimpleVT(); 2458 2459 if (ST->useAVX512Regs()) { 2460 if (ST->hasBWI()) 2461 if (const auto *Entry = ConvertCostTableLookup( 2462 AVX512BWConversionTbl, ISD, SimpleDstTy, SimpleSrcTy)) 2463 return AdjustCost(Entry->Cost); 2464 2465 if (ST->hasDQI()) 2466 if (const auto *Entry = ConvertCostTableLookup( 2467 AVX512DQConversionTbl, ISD, SimpleDstTy, SimpleSrcTy)) 2468 return AdjustCost(Entry->Cost); 2469 2470 if (ST->hasAVX512()) 2471 if (const auto *Entry = ConvertCostTableLookup( 2472 AVX512FConversionTbl, ISD, SimpleDstTy, SimpleSrcTy)) 2473 return AdjustCost(Entry->Cost); 2474 } 2475 2476 if (ST->hasBWI()) 2477 if (const auto *Entry = ConvertCostTableLookup( 2478 AVX512BWVLConversionTbl, ISD, SimpleDstTy, SimpleSrcTy)) 2479 return AdjustCost(Entry->Cost); 2480 2481 if (ST->hasDQI()) 2482 if (const auto *Entry = ConvertCostTableLookup( 2483 AVX512DQVLConversionTbl, ISD, SimpleDstTy, SimpleSrcTy)) 2484 return AdjustCost(Entry->Cost); 2485 2486 if (ST->hasAVX512()) 2487 if (const auto *Entry = ConvertCostTableLookup(AVX512VLConversionTbl, ISD, 2488 SimpleDstTy, SimpleSrcTy)) 2489 return AdjustCost(Entry->Cost); 2490 2491 if (ST->hasAVX2()) { 2492 if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD, 2493 SimpleDstTy, SimpleSrcTy)) 2494 return AdjustCost(Entry->Cost); 2495 } 2496 2497 if (ST->hasAVX()) { 2498 if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD, 2499 SimpleDstTy, SimpleSrcTy)) 2500 return AdjustCost(Entry->Cost); 2501 } 2502 2503 if (ST->hasSSE41()) { 2504 if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD, 2505 SimpleDstTy, SimpleSrcTy)) 2506 return AdjustCost(Entry->Cost); 2507 } 2508 2509 if (ST->hasSSE2()) { 2510 if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD, 2511 SimpleDstTy, SimpleSrcTy)) 2512 return AdjustCost(Entry->Cost); 2513 } 2514 } 2515 2516 // Fall back to legalized types. 2517 std::pair<InstructionCost, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src); 2518 std::pair<InstructionCost, MVT> LTDest = 2519 TLI->getTypeLegalizationCost(DL, Dst); 2520 2521 // If we're truncating to the same legalized type - just assume it's free.
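// e.g. trunc <4 x i30> to <4 x i25>: both element types promote to i32
// during legalization, so source and destination end up as the same v4i32
// and the truncate needs no instructions. (Illustrative example; not taken
// from the cost tables.)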
2522 if (ISD == ISD::TRUNCATE && LTSrc.second == LTDest.second) 2523 return TTI::TCC_Free; 2524 2525 if (ST->useAVX512Regs()) { 2526 if (ST->hasBWI()) 2527 if (const auto *Entry = ConvertCostTableLookup( 2528 AVX512BWConversionTbl, ISD, LTDest.second, LTSrc.second)) 2529 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost); 2530 2531 if (ST->hasDQI()) 2532 if (const auto *Entry = ConvertCostTableLookup( 2533 AVX512DQConversionTbl, ISD, LTDest.second, LTSrc.second)) 2534 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost); 2535 2536 if (ST->hasAVX512()) 2537 if (const auto *Entry = ConvertCostTableLookup( 2538 AVX512FConversionTbl, ISD, LTDest.second, LTSrc.second)) 2539 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost); 2540 } 2541 2542 if (ST->hasBWI()) 2543 if (const auto *Entry = ConvertCostTableLookup(AVX512BWVLConversionTbl, ISD, 2544 LTDest.second, LTSrc.second)) 2545 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost); 2546 2547 if (ST->hasDQI()) 2548 if (const auto *Entry = ConvertCostTableLookup(AVX512DQVLConversionTbl, ISD, 2549 LTDest.second, LTSrc.second)) 2550 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost); 2551 2552 if (ST->hasAVX512()) 2553 if (const auto *Entry = ConvertCostTableLookup(AVX512VLConversionTbl, ISD, 2554 LTDest.second, LTSrc.second)) 2555 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost); 2556 2557 if (ST->hasAVX2()) 2558 if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD, 2559 LTDest.second, LTSrc.second)) 2560 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost); 2561 2562 if (ST->hasAVX()) 2563 if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD, 2564 LTDest.second, LTSrc.second)) 2565 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost); 2566 2567 if (ST->hasSSE41()) 2568 if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD, 2569 LTDest.second, LTSrc.second)) 2570 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost); 2571 2572 if (ST->hasSSE2()) 2573 if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD, 2574 LTDest.second, LTSrc.second)) 2575 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost); 2576 2577 // Fallback: for i8/i16 sitofp/uitofp cases we need to extend the source to 2578 // i32 first and then use sitofp. 2579 if ((ISD == ISD::SINT_TO_FP || ISD == ISD::UINT_TO_FP) && 2580 1 < Src->getScalarSizeInBits() && Src->getScalarSizeInBits() < 32) { 2581 Type *ExtSrc = Src->getWithNewBitWidth(32); 2582 unsigned ExtOpc = 2583 (ISD == ISD::SINT_TO_FP) ? Instruction::SExt : Instruction::ZExt; 2584 2585 // For scalar loads the extend would be free. 2586 InstructionCost ExtCost = 0; 2587 if (!(Src->isIntegerTy() && I && isa<LoadInst>(I->getOperand(0)))) 2588 ExtCost = getCastInstrCost(ExtOpc, ExtSrc, Src, CCH, CostKind); 2589 2590 return ExtCost + getCastInstrCost(Instruction::SIToFP, Dst, ExtSrc, 2591 TTI::CastContextHint::None, CostKind); 2592 } 2593 2594 // Fallback: for i8/i16 fptosi/fptoui cases we convert via fptosi to i32 2595 // first and then truncate.
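// e.g. fptoui float -> i16 is costed as (fptosi float -> i32) +
// (trunc i32 -> i16); the signed i32 conversion already covers the full
// unsigned i16 range, which is why FPToSI is used for both the signed and
// unsigned cases below.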
2596 if ((ISD == ISD::FP_TO_SINT || ISD == ISD::FP_TO_UINT) && 2597 1 < Dst->getScalarSizeInBits() && Dst->getScalarSizeInBits() < 32) { 2598 Type *TruncDst = Dst->getWithNewBitWidth(32); 2599 return getCastInstrCost(Instruction::FPToSI, TruncDst, Src, CCH, CostKind) + 2600 getCastInstrCost(Instruction::Trunc, Dst, TruncDst, 2601 TTI::CastContextHint::None, CostKind); 2602 } 2603 2604 return AdjustCost( 2605 BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I)); 2606 } 2607 2608 InstructionCost X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, 2609 Type *CondTy, 2610 CmpInst::Predicate VecPred, 2611 TTI::TargetCostKind CostKind, 2612 const Instruction *I) { 2613 // TODO: Handle other cost kinds. 2614 if (CostKind != TTI::TCK_RecipThroughput) 2615 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, 2616 I); 2617 2618 // Legalize the type. 2619 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy); 2620 2621 MVT MTy = LT.second; 2622 2623 int ISD = TLI->InstructionOpcodeToISD(Opcode); 2624 assert(ISD && "Invalid opcode"); 2625 2626 unsigned ExtraCost = 0; 2627 if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) { 2628 // Some vector comparison predicates cost extra instructions. 2629 // TODO: Should we invert this and assume worst case cmp costs 2630 // and reduce for particular predicates? 2631 if (MTy.isVector() && 2632 !((ST->hasXOP() && (!ST->hasAVX2() || MTy.is128BitVector())) || 2633 (ST->hasAVX512() && 32 <= MTy.getScalarSizeInBits()) || 2634 ST->hasBWI())) { 2635 // Fallback to I if a specific predicate wasn't specified. 2636 CmpInst::Predicate Pred = VecPred; 2637 if (I && (Pred == CmpInst::BAD_ICMP_PREDICATE || 2638 Pred == CmpInst::BAD_FCMP_PREDICATE)) 2639 Pred = cast<CmpInst>(I)->getPredicate(); 2640 2641 switch (Pred) { 2642 case CmpInst::Predicate::ICMP_NE: 2643 // xor(cmpeq(x,y),-1) 2644 ExtraCost = 1; 2645 break; 2646 case CmpInst::Predicate::ICMP_SGE: 2647 case CmpInst::Predicate::ICMP_SLE: 2648 // xor(cmpgt(x,y),-1) 2649 ExtraCost = 1; 2650 break; 2651 case CmpInst::Predicate::ICMP_ULT: 2652 case CmpInst::Predicate::ICMP_UGT: 2653 // cmpgt(xor(x,signbit),xor(y,signbit)) 2654 // xor(cmpeq(pmaxu(x,y),x),-1) 2655 ExtraCost = 2; 2656 break; 2657 case CmpInst::Predicate::ICMP_ULE: 2658 case CmpInst::Predicate::ICMP_UGE: 2659 if ((ST->hasSSE41() && MTy.getScalarSizeInBits() == 32) || 2660 (ST->hasSSE2() && MTy.getScalarSizeInBits() < 32)) { 2661 // cmpeq(psubus(x,y),0) 2662 // cmpeq(pminu(x,y),x) 2663 ExtraCost = 1; 2664 } else { 2665 // xor(cmpgt(xor(x,signbit),xor(y,signbit)),-1) 2666 ExtraCost = 3; 2667 } 2668 break; 2669 case CmpInst::Predicate::BAD_ICMP_PREDICATE: 2670 case CmpInst::Predicate::BAD_FCMP_PREDICATE: 2671 // Assume worst case scenario and add the maximum extra cost. 
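// (3 matches the most expensive expansion above:
// xor(cmpgt(xor(x,signbit),xor(y,signbit)),-1) for ICMP_ULE/ICMP_UGE.)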
2672 ExtraCost = 3; 2673 break; 2674 default: 2675 break; 2676 } 2677 } 2678 } 2679 2680 static const CostTblEntry SLMCostTbl[] = { 2681 // slm pcmpeq/pcmpgt throughput is 2 2682 { ISD::SETCC, MVT::v2i64, 2 }, 2683 }; 2684 2685 static const CostTblEntry AVX512BWCostTbl[] = { 2686 { ISD::SETCC, MVT::v32i16, 1 }, 2687 { ISD::SETCC, MVT::v64i8, 1 }, 2688 2689 { ISD::SELECT, MVT::v32i16, 1 }, 2690 { ISD::SELECT, MVT::v64i8, 1 }, 2691 }; 2692 2693 static const CostTblEntry AVX512CostTbl[] = { 2694 { ISD::SETCC, MVT::v8i64, 1 }, 2695 { ISD::SETCC, MVT::v16i32, 1 }, 2696 { ISD::SETCC, MVT::v8f64, 1 }, 2697 { ISD::SETCC, MVT::v16f32, 1 }, 2698 2699 { ISD::SELECT, MVT::v8i64, 1 }, 2700 { ISD::SELECT, MVT::v4i64, 1 }, 2701 { ISD::SELECT, MVT::v2i64, 1 }, 2702 { ISD::SELECT, MVT::v16i32, 1 }, 2703 { ISD::SELECT, MVT::v8i32, 1 }, 2704 { ISD::SELECT, MVT::v4i32, 1 }, 2705 { ISD::SELECT, MVT::v8f64, 1 }, 2706 { ISD::SELECT, MVT::v4f64, 1 }, 2707 { ISD::SELECT, MVT::v2f64, 1 }, 2708 { ISD::SELECT, MVT::f64, 1 }, 2709 { ISD::SELECT, MVT::v16f32, 1 }, 2710 { ISD::SELECT, MVT::v8f32 , 1 }, 2711 { ISD::SELECT, MVT::v4f32, 1 }, 2712 { ISD::SELECT, MVT::f32 , 1 }, 2713 2714 { ISD::SETCC, MVT::v32i16, 2 }, // FIXME: should probably be 4 2715 { ISD::SETCC, MVT::v64i8, 2 }, // FIXME: should probably be 4 2716 2717 { ISD::SELECT, MVT::v32i16, 2 }, 2718 { ISD::SELECT, MVT::v16i16, 1 }, 2719 { ISD::SELECT, MVT::v8i16, 1 }, 2720 { ISD::SELECT, MVT::v64i8, 2 }, 2721 { ISD::SELECT, MVT::v32i8, 1 }, 2722 { ISD::SELECT, MVT::v16i8, 1 }, 2723 }; 2724 2725 static const CostTblEntry AVX2CostTbl[] = { 2726 { ISD::SETCC, MVT::v4i64, 1 }, 2727 { ISD::SETCC, MVT::v8i32, 1 }, 2728 { ISD::SETCC, MVT::v16i16, 1 }, 2729 { ISD::SETCC, MVT::v32i8, 1 }, 2730 2731 { ISD::SELECT, MVT::v4f64, 2 }, // vblendvpd 2732 { ISD::SELECT, MVT::v8f32, 2 }, // vblendvps 2733 { ISD::SELECT, MVT::v4i64, 2 }, // pblendvb 2734 { ISD::SELECT, MVT::v8i32, 2 }, // pblendvb 2735 { ISD::SELECT, MVT::v16i16, 2 }, // pblendvb 2736 { ISD::SELECT, MVT::v32i8, 2 }, // pblendvb 2737 }; 2738 2739 static const CostTblEntry AVX1CostTbl[] = { 2740 { ISD::SETCC, MVT::v4f64, 1 }, 2741 { ISD::SETCC, MVT::v8f32, 1 }, 2742 // AVX1 does not support 8-wide integer compare. 
2743 { ISD::SETCC, MVT::v4i64, 4 }, 2744 { ISD::SETCC, MVT::v8i32, 4 }, 2745 { ISD::SETCC, MVT::v16i16, 4 }, 2746 { ISD::SETCC, MVT::v32i8, 4 }, 2747 2748 { ISD::SELECT, MVT::v4f64, 3 }, // vblendvpd 2749 { ISD::SELECT, MVT::v8f32, 3 }, // vblendvps 2750 { ISD::SELECT, MVT::v4i64, 3 }, // vblendvpd 2751 { ISD::SELECT, MVT::v8i32, 3 }, // vblendvps 2752 { ISD::SELECT, MVT::v16i16, 3 }, // vandps + vandnps + vorps 2753 { ISD::SELECT, MVT::v32i8, 3 }, // vandps + vandnps + vorps 2754 }; 2755 2756 static const CostTblEntry SSE42CostTbl[] = { 2757 { ISD::SETCC, MVT::v2i64, 1 }, 2758 }; 2759 2760 static const CostTblEntry SSE41CostTbl[] = { 2761 { ISD::SETCC, MVT::v2f64, 1 }, 2762 { ISD::SETCC, MVT::v4f32, 1 }, 2763 2764 { ISD::SELECT, MVT::v2f64, 2 }, // blendvpd 2765 { ISD::SELECT, MVT::f64, 2 }, // blendvpd 2766 { ISD::SELECT, MVT::v4f32, 2 }, // blendvps 2767 { ISD::SELECT, MVT::f32 , 2 }, // blendvps 2768 { ISD::SELECT, MVT::v2i64, 2 }, // pblendvb 2769 { ISD::SELECT, MVT::v4i32, 2 }, // pblendvb 2770 { ISD::SELECT, MVT::v8i16, 2 }, // pblendvb 2771 { ISD::SELECT, MVT::v16i8, 2 }, // pblendvb 2772 }; 2773 2774 static const CostTblEntry SSE2CostTbl[] = { 2775 { ISD::SETCC, MVT::v2f64, 2 }, 2776 { ISD::SETCC, MVT::f64, 1 }, 2777 { ISD::SETCC, MVT::v2i64, 5 }, // pcmpeqd/pcmpgtd expansion 2778 { ISD::SETCC, MVT::v4i32, 1 }, 2779 { ISD::SETCC, MVT::v8i16, 1 }, 2780 { ISD::SETCC, MVT::v16i8, 1 }, 2781 2782 { ISD::SELECT, MVT::v2f64, 2 }, // andpd + andnpd + orpd 2783 { ISD::SELECT, MVT::f64, 2 }, // andpd + andnpd + orpd 2784 { ISD::SELECT, MVT::v2i64, 2 }, // pand + pandn + por 2785 { ISD::SELECT, MVT::v4i32, 2 }, // pand + pandn + por 2786 { ISD::SELECT, MVT::v8i16, 2 }, // pand + pandn + por 2787 { ISD::SELECT, MVT::v16i8, 2 }, // pand + pandn + por 2788 }; 2789 2790 static const CostTblEntry SSE1CostTbl[] = { 2791 { ISD::SETCC, MVT::v4f32, 2 }, 2792 { ISD::SETCC, MVT::f32, 1 }, 2793 2794 { ISD::SELECT, MVT::v4f32, 2 }, // andps + andnps + orps 2795 { ISD::SELECT, MVT::f32, 2 }, // andps + andnps + orps 2796 }; 2797 2798 if (ST->useSLMArithCosts()) 2799 if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy)) 2800 return LT.first * (ExtraCost + Entry->Cost); 2801 2802 if (ST->hasBWI()) 2803 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy)) 2804 return LT.first * (ExtraCost + Entry->Cost); 2805 2806 if (ST->hasAVX512()) 2807 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy)) 2808 return LT.first * (ExtraCost + Entry->Cost); 2809 2810 if (ST->hasAVX2()) 2811 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy)) 2812 return LT.first * (ExtraCost + Entry->Cost); 2813 2814 if (ST->hasAVX()) 2815 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy)) 2816 return LT.first * (ExtraCost + Entry->Cost); 2817 2818 if (ST->hasSSE42()) 2819 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy)) 2820 return LT.first * (ExtraCost + Entry->Cost); 2821 2822 if (ST->hasSSE41()) 2823 if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy)) 2824 return LT.first * (ExtraCost + Entry->Cost); 2825 2826 if (ST->hasSSE2()) 2827 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy)) 2828 return LT.first * (ExtraCost + Entry->Cost); 2829 2830 if (ST->hasSSE1()) 2831 if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy)) 2832 return LT.first * (ExtraCost + Entry->Cost); 2833 2834 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I); 2835 } 2836 2837 unsigned 
X86TTIImpl::getAtomicMemIntrinsicMaxElementSize() const { return 16; } 2838 2839 InstructionCost 2840 X86TTIImpl::getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, 2841 TTI::TargetCostKind CostKind) { 2842 2843 // Costs should match the codegen from: 2844 // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll 2845 // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll 2846 // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll 2847 // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll 2848 // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll 2849 2850 // TODO: Overflow intrinsics (*ADDO, *SUBO, *MULO) with vector types are not 2851 // specialized in these tables yet. 2852 static const CostTblEntry AVX512BITALGCostTbl[] = { 2853 { ISD::CTPOP, MVT::v32i16, 1 }, 2854 { ISD::CTPOP, MVT::v64i8, 1 }, 2855 { ISD::CTPOP, MVT::v16i16, 1 }, 2856 { ISD::CTPOP, MVT::v32i8, 1 }, 2857 { ISD::CTPOP, MVT::v8i16, 1 }, 2858 { ISD::CTPOP, MVT::v16i8, 1 }, 2859 }; 2860 static const CostTblEntry AVX512VPOPCNTDQCostTbl[] = { 2861 { ISD::CTPOP, MVT::v8i64, 1 }, 2862 { ISD::CTPOP, MVT::v16i32, 1 }, 2863 { ISD::CTPOP, MVT::v4i64, 1 }, 2864 { ISD::CTPOP, MVT::v8i32, 1 }, 2865 { ISD::CTPOP, MVT::v2i64, 1 }, 2866 { ISD::CTPOP, MVT::v4i32, 1 }, 2867 }; 2868 static const CostTblEntry AVX512CDCostTbl[] = { 2869 { ISD::CTLZ, MVT::v8i64, 1 }, 2870 { ISD::CTLZ, MVT::v16i32, 1 }, 2871 { ISD::CTLZ, MVT::v32i16, 8 }, 2872 { ISD::CTLZ, MVT::v64i8, 20 }, 2873 { ISD::CTLZ, MVT::v4i64, 1 }, 2874 { ISD::CTLZ, MVT::v8i32, 1 }, 2875 { ISD::CTLZ, MVT::v16i16, 4 }, 2876 { ISD::CTLZ, MVT::v32i8, 10 }, 2877 { ISD::CTLZ, MVT::v2i64, 1 }, 2878 { ISD::CTLZ, MVT::v4i32, 1 }, 2879 { ISD::CTLZ, MVT::v8i16, 4 }, 2880 { ISD::CTLZ, MVT::v16i8, 4 }, 2881 }; 2882 static const CostTblEntry AVX512BWCostTbl[] = { 2883 { ISD::ABS, MVT::v32i16, 1 }, 2884 { ISD::ABS, MVT::v64i8, 1 }, 2885 { ISD::BITREVERSE, MVT::v8i64, 3 }, 2886 { ISD::BITREVERSE, MVT::v16i32, 3 }, 2887 { ISD::BITREVERSE, MVT::v32i16, 3 }, 2888 { ISD::BITREVERSE, MVT::v64i8, 2 }, 2889 { ISD::BSWAP, MVT::v8i64, 1 }, 2890 { ISD::BSWAP, MVT::v16i32, 1 }, 2891 { ISD::BSWAP, MVT::v32i16, 1 }, 2892 { ISD::CTLZ, MVT::v8i64, 23 }, 2893 { ISD::CTLZ, MVT::v16i32, 22 }, 2894 { ISD::CTLZ, MVT::v32i16, 18 }, 2895 { ISD::CTLZ, MVT::v64i8, 17 }, 2896 { ISD::CTPOP, MVT::v8i64, 7 }, 2897 { ISD::CTPOP, MVT::v16i32, 11 }, 2898 { ISD::CTPOP, MVT::v32i16, 9 }, 2899 { ISD::CTPOP, MVT::v64i8, 6 }, 2900 { ISD::CTTZ, MVT::v8i64, 10 }, 2901 { ISD::CTTZ, MVT::v16i32, 14 }, 2902 { ISD::CTTZ, MVT::v32i16, 12 }, 2903 { ISD::CTTZ, MVT::v64i8, 9 }, 2904 { ISD::SADDSAT, MVT::v32i16, 1 }, 2905 { ISD::SADDSAT, MVT::v64i8, 1 }, 2906 { ISD::SMAX, MVT::v32i16, 1 }, 2907 { ISD::SMAX, MVT::v64i8, 1 }, 2908 { ISD::SMIN, MVT::v32i16, 1 }, 2909 { ISD::SMIN, MVT::v64i8, 1 }, 2910 { ISD::SSUBSAT, MVT::v32i16, 1 }, 2911 { ISD::SSUBSAT, MVT::v64i8, 1 }, 2912 { ISD::UADDSAT, MVT::v32i16, 1 }, 2913 { ISD::UADDSAT, MVT::v64i8, 1 }, 2914 { ISD::UMAX, MVT::v32i16, 1 }, 2915 { ISD::UMAX, MVT::v64i8, 1 }, 2916 { ISD::UMIN, MVT::v32i16, 1 }, 2917 { ISD::UMIN, MVT::v64i8, 1 }, 2918 { ISD::USUBSAT, MVT::v32i16, 1 }, 2919 { ISD::USUBSAT, MVT::v64i8, 1 }, 2920 }; 2921 static const CostTblEntry AVX512CostTbl[] = { 2922 { ISD::ABS, MVT::v8i64, 1 }, 2923 { ISD::ABS, MVT::v16i32, 1 }, 2924 { ISD::ABS, MVT::v32i16, 2 }, 2925 { ISD::ABS, MVT::v64i8, 2 }, 2926 { ISD::ABS, MVT::v4i64, 1 }, 2927 { ISD::ABS, MVT::v2i64, 1 }, 2928 { ISD::BITREVERSE, MVT::v8i64, 36 }, 2929 { ISD::BITREVERSE, MVT::v16i32, 24 }, 2930 { ISD::BITREVERSE, MVT::v32i16, 10 }, 
2931 { ISD::BITREVERSE, MVT::v64i8, 10 }, 2932 { ISD::BSWAP, MVT::v8i64, 4 }, 2933 { ISD::BSWAP, MVT::v16i32, 4 }, 2934 { ISD::BSWAP, MVT::v32i16, 4 }, 2935 { ISD::CTLZ, MVT::v8i64, 29 }, 2936 { ISD::CTLZ, MVT::v16i32, 35 }, 2937 { ISD::CTLZ, MVT::v32i16, 28 }, 2938 { ISD::CTLZ, MVT::v64i8, 18 }, 2939 { ISD::CTPOP, MVT::v8i64, 16 }, 2940 { ISD::CTPOP, MVT::v16i32, 24 }, 2941 { ISD::CTPOP, MVT::v32i16, 18 }, 2942 { ISD::CTPOP, MVT::v64i8, 12 }, 2943 { ISD::CTTZ, MVT::v8i64, 20 }, 2944 { ISD::CTTZ, MVT::v16i32, 28 }, 2945 { ISD::CTTZ, MVT::v32i16, 24 }, 2946 { ISD::CTTZ, MVT::v64i8, 18 }, 2947 { ISD::SMAX, MVT::v8i64, 1 }, 2948 { ISD::SMAX, MVT::v16i32, 1 }, 2949 { ISD::SMAX, MVT::v32i16, 2 }, 2950 { ISD::SMAX, MVT::v64i8, 2 }, 2951 { ISD::SMAX, MVT::v4i64, 1 }, 2952 { ISD::SMAX, MVT::v2i64, 1 }, 2953 { ISD::SMIN, MVT::v8i64, 1 }, 2954 { ISD::SMIN, MVT::v16i32, 1 }, 2955 { ISD::SMIN, MVT::v32i16, 2 }, 2956 { ISD::SMIN, MVT::v64i8, 2 }, 2957 { ISD::SMIN, MVT::v4i64, 1 }, 2958 { ISD::SMIN, MVT::v2i64, 1 }, 2959 { ISD::UMAX, MVT::v8i64, 1 }, 2960 { ISD::UMAX, MVT::v16i32, 1 }, 2961 { ISD::UMAX, MVT::v32i16, 2 }, 2962 { ISD::UMAX, MVT::v64i8, 2 }, 2963 { ISD::UMAX, MVT::v4i64, 1 }, 2964 { ISD::UMAX, MVT::v2i64, 1 }, 2965 { ISD::UMIN, MVT::v8i64, 1 }, 2966 { ISD::UMIN, MVT::v16i32, 1 }, 2967 { ISD::UMIN, MVT::v32i16, 2 }, 2968 { ISD::UMIN, MVT::v64i8, 2 }, 2969 { ISD::UMIN, MVT::v4i64, 1 }, 2970 { ISD::UMIN, MVT::v2i64, 1 }, 2971 { ISD::USUBSAT, MVT::v16i32, 2 }, // pmaxud + psubd 2972 { ISD::USUBSAT, MVT::v2i64, 2 }, // pmaxuq + psubq 2973 { ISD::USUBSAT, MVT::v4i64, 2 }, // pmaxuq + psubq 2974 { ISD::USUBSAT, MVT::v8i64, 2 }, // pmaxuq + psubq 2975 { ISD::UADDSAT, MVT::v16i32, 3 }, // not + pminud + paddd 2976 { ISD::UADDSAT, MVT::v2i64, 3 }, // not + pminuq + paddq 2977 { ISD::UADDSAT, MVT::v4i64, 3 }, // not + pminuq + paddq 2978 { ISD::UADDSAT, MVT::v8i64, 3 }, // not + pminuq + paddq 2979 { ISD::SADDSAT, MVT::v32i16, 2 }, 2980 { ISD::SADDSAT, MVT::v64i8, 2 }, 2981 { ISD::SSUBSAT, MVT::v32i16, 2 }, 2982 { ISD::SSUBSAT, MVT::v64i8, 2 }, 2983 { ISD::UADDSAT, MVT::v32i16, 2 }, 2984 { ISD::UADDSAT, MVT::v64i8, 2 }, 2985 { ISD::USUBSAT, MVT::v32i16, 2 }, 2986 { ISD::USUBSAT, MVT::v64i8, 2 }, 2987 { ISD::FMAXNUM, MVT::f32, 2 }, 2988 { ISD::FMAXNUM, MVT::v4f32, 2 }, 2989 { ISD::FMAXNUM, MVT::v8f32, 2 }, 2990 { ISD::FMAXNUM, MVT::v16f32, 2 }, 2991 { ISD::FMAXNUM, MVT::f64, 2 }, 2992 { ISD::FMAXNUM, MVT::v2f64, 2 }, 2993 { ISD::FMAXNUM, MVT::v4f64, 2 }, 2994 { ISD::FMAXNUM, MVT::v8f64, 2 }, 2995 }; 2996 static const CostTblEntry XOPCostTbl[] = { 2997 { ISD::BITREVERSE, MVT::v4i64, 4 }, 2998 { ISD::BITREVERSE, MVT::v8i32, 4 }, 2999 { ISD::BITREVERSE, MVT::v16i16, 4 }, 3000 { ISD::BITREVERSE, MVT::v32i8, 4 }, 3001 { ISD::BITREVERSE, MVT::v2i64, 1 }, 3002 { ISD::BITREVERSE, MVT::v4i32, 1 }, 3003 { ISD::BITREVERSE, MVT::v8i16, 1 }, 3004 { ISD::BITREVERSE, MVT::v16i8, 1 }, 3005 { ISD::BITREVERSE, MVT::i64, 3 }, 3006 { ISD::BITREVERSE, MVT::i32, 3 }, 3007 { ISD::BITREVERSE, MVT::i16, 3 }, 3008 { ISD::BITREVERSE, MVT::i8, 3 } 3009 }; 3010 static const CostTblEntry AVX2CostTbl[] = { 3011 { ISD::ABS, MVT::v4i64, 2 }, // VBLENDVPD(X,VPSUBQ(0,X),X) 3012 { ISD::ABS, MVT::v8i32, 1 }, 3013 { ISD::ABS, MVT::v16i16, 1 }, 3014 { ISD::ABS, MVT::v32i8, 1 }, 3015 { ISD::BITREVERSE, MVT::v2i64, 3 }, 3016 { ISD::BITREVERSE, MVT::v4i64, 3 }, 3017 { ISD::BITREVERSE, MVT::v4i32, 3 }, 3018 { ISD::BITREVERSE, MVT::v8i32, 3 }, 3019 { ISD::BITREVERSE, MVT::v8i16, 3 }, 3020 { ISD::BITREVERSE, MVT::v16i16, 3 }, 3021 { 
ISD::BITREVERSE, MVT::v16i8, 3 }, 3022 { ISD::BITREVERSE, MVT::v32i8, 3 }, 3023 { ISD::BSWAP, MVT::v4i64, 1 }, 3024 { ISD::BSWAP, MVT::v8i32, 1 }, 3025 { ISD::BSWAP, MVT::v16i16, 1 }, 3026 { ISD::CTLZ, MVT::v2i64, 7 }, 3027 { ISD::CTLZ, MVT::v4i64, 7 }, 3028 { ISD::CTLZ, MVT::v4i32, 5 }, 3029 { ISD::CTLZ, MVT::v8i32, 5 }, 3030 { ISD::CTLZ, MVT::v8i16, 4 }, 3031 { ISD::CTLZ, MVT::v16i16, 4 }, 3032 { ISD::CTLZ, MVT::v16i8, 3 }, 3033 { ISD::CTLZ, MVT::v32i8, 3 }, 3034 { ISD::CTPOP, MVT::v2i64, 3 }, 3035 { ISD::CTPOP, MVT::v4i64, 3 }, 3036 { ISD::CTPOP, MVT::v4i32, 7 }, 3037 { ISD::CTPOP, MVT::v8i32, 7 }, 3038 { ISD::CTPOP, MVT::v8i16, 3 }, 3039 { ISD::CTPOP, MVT::v16i16, 3 }, 3040 { ISD::CTPOP, MVT::v16i8, 2 }, 3041 { ISD::CTPOP, MVT::v32i8, 2 }, 3042 { ISD::CTTZ, MVT::v2i64, 4 }, 3043 { ISD::CTTZ, MVT::v4i64, 4 }, 3044 { ISD::CTTZ, MVT::v4i32, 7 }, 3045 { ISD::CTTZ, MVT::v8i32, 7 }, 3046 { ISD::CTTZ, MVT::v8i16, 4 }, 3047 { ISD::CTTZ, MVT::v16i16, 4 }, 3048 { ISD::CTTZ, MVT::v16i8, 3 }, 3049 { ISD::CTTZ, MVT::v32i8, 3 }, 3050 { ISD::SADDSAT, MVT::v16i16, 1 }, 3051 { ISD::SADDSAT, MVT::v32i8, 1 }, 3052 { ISD::SMAX, MVT::v8i32, 1 }, 3053 { ISD::SMAX, MVT::v16i16, 1 }, 3054 { ISD::SMAX, MVT::v32i8, 1 }, 3055 { ISD::SMIN, MVT::v8i32, 1 }, 3056 { ISD::SMIN, MVT::v16i16, 1 }, 3057 { ISD::SMIN, MVT::v32i8, 1 }, 3058 { ISD::SSUBSAT, MVT::v16i16, 1 }, 3059 { ISD::SSUBSAT, MVT::v32i8, 1 }, 3060 { ISD::UADDSAT, MVT::v16i16, 1 }, 3061 { ISD::UADDSAT, MVT::v32i8, 1 }, 3062 { ISD::UADDSAT, MVT::v8i32, 3 }, // not + pminud + paddd 3063 { ISD::UMAX, MVT::v8i32, 1 }, 3064 { ISD::UMAX, MVT::v16i16, 1 }, 3065 { ISD::UMAX, MVT::v32i8, 1 }, 3066 { ISD::UMIN, MVT::v8i32, 1 }, 3067 { ISD::UMIN, MVT::v16i16, 1 }, 3068 { ISD::UMIN, MVT::v32i8, 1 }, 3069 { ISD::USUBSAT, MVT::v16i16, 1 }, 3070 { ISD::USUBSAT, MVT::v32i8, 1 }, 3071 { ISD::USUBSAT, MVT::v8i32, 2 }, // pmaxud + psubd 3072 { ISD::FMAXNUM, MVT::v8f32, 3 }, // MAXPS + CMPUNORDPS + BLENDVPS 3073 { ISD::FMAXNUM, MVT::v4f64, 3 }, // MAXPD + CMPUNORDPD + BLENDVPD 3074 { ISD::FSQRT, MVT::f32, 7 }, // Haswell from http://www.agner.org/ 3075 { ISD::FSQRT, MVT::v4f32, 7 }, // Haswell from http://www.agner.org/ 3076 { ISD::FSQRT, MVT::v8f32, 14 }, // Haswell from http://www.agner.org/ 3077 { ISD::FSQRT, MVT::f64, 14 }, // Haswell from http://www.agner.org/ 3078 { ISD::FSQRT, MVT::v2f64, 14 }, // Haswell from http://www.agner.org/ 3079 { ISD::FSQRT, MVT::v4f64, 28 }, // Haswell from http://www.agner.org/ 3080 }; 3081 static const CostTblEntry AVX1CostTbl[] = { 3082 { ISD::ABS, MVT::v4i64, 5 }, // VBLENDVPD(X,VPSUBQ(0,X),X) 3083 { ISD::ABS, MVT::v8i32, 3 }, 3084 { ISD::ABS, MVT::v16i16, 3 }, 3085 { ISD::ABS, MVT::v32i8, 3 }, 3086 { ISD::BITREVERSE, MVT::v4i64, 12 }, // 2 x 128-bit Op + extract/insert 3087 { ISD::BITREVERSE, MVT::v8i32, 12 }, // 2 x 128-bit Op + extract/insert 3088 { ISD::BITREVERSE, MVT::v16i16, 12 }, // 2 x 128-bit Op + extract/insert 3089 { ISD::BITREVERSE, MVT::v32i8, 12 }, // 2 x 128-bit Op + extract/insert 3090 { ISD::BSWAP, MVT::v4i64, 4 }, 3091 { ISD::BSWAP, MVT::v8i32, 4 }, 3092 { ISD::BSWAP, MVT::v16i16, 4 }, 3093 { ISD::CTLZ, MVT::v4i64, 48 }, // 2 x 128-bit Op + extract/insert 3094 { ISD::CTLZ, MVT::v8i32, 38 }, // 2 x 128-bit Op + extract/insert 3095 { ISD::CTLZ, MVT::v16i16, 30 }, // 2 x 128-bit Op + extract/insert 3096 { ISD::CTLZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert 3097 { ISD::CTPOP, MVT::v4i64, 16 }, // 2 x 128-bit Op + extract/insert 3098 { ISD::CTPOP, MVT::v8i32, 24 }, // 2 x 128-bit Op + extract/insert 3099 { 
ISD::CTPOP, MVT::v16i16, 20 }, // 2 x 128-bit Op + extract/insert 3100 { ISD::CTPOP, MVT::v32i8, 14 }, // 2 x 128-bit Op + extract/insert 3101 { ISD::CTTZ, MVT::v4i64, 22 }, // 2 x 128-bit Op + extract/insert 3102 { ISD::CTTZ, MVT::v8i32, 30 }, // 2 x 128-bit Op + extract/insert 3103 { ISD::CTTZ, MVT::v16i16, 26 }, // 2 x 128-bit Op + extract/insert 3104 { ISD::CTTZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert 3105 { ISD::SADDSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert 3106 { ISD::SADDSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 3107 { ISD::SMAX, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert 3108 { ISD::SMAX, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert 3109 { ISD::SMAX, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 3110 { ISD::SMIN, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert 3111 { ISD::SMIN, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert 3112 { ISD::SMIN, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 3113 { ISD::SSUBSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert 3114 { ISD::SSUBSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 3115 { ISD::UADDSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert 3116 { ISD::UADDSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 3117 { ISD::UADDSAT, MVT::v8i32, 8 }, // 2 x 128-bit Op + extract/insert 3118 { ISD::UMAX, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert 3119 { ISD::UMAX, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert 3120 { ISD::UMAX, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 3121 { ISD::UMIN, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert 3122 { ISD::UMIN, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert 3123 { ISD::UMIN, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 3124 { ISD::USUBSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert 3125 { ISD::USUBSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 3126 { ISD::USUBSAT, MVT::v8i32, 6 }, // 2 x 128-bit Op + extract/insert 3127 { ISD::FMAXNUM, MVT::f32, 3 }, // MAXSS + CMPUNORDSS + BLENDVPS 3128 { ISD::FMAXNUM, MVT::v4f32, 3 }, // MAXPS + CMPUNORDPS + BLENDVPS 3129 { ISD::FMAXNUM, MVT::v8f32, 5 }, // MAXPS + CMPUNORDPS + BLENDVPS + ? 3130 { ISD::FMAXNUM, MVT::f64, 3 }, // MAXSD + CMPUNORDSD + BLENDVPD 3131 { ISD::FMAXNUM, MVT::v2f64, 3 }, // MAXPD + CMPUNORDPD + BLENDVPD 3132 { ISD::FMAXNUM, MVT::v4f64, 5 }, // MAXPD + CMPUNORDPD + BLENDVPD + ? 
3133 { ISD::FSQRT, MVT::f32, 14 }, // SNB from http://www.agner.org/ 3134 { ISD::FSQRT, MVT::v4f32, 14 }, // SNB from http://www.agner.org/ 3135 { ISD::FSQRT, MVT::v8f32, 28 }, // SNB from http://www.agner.org/ 3136 { ISD::FSQRT, MVT::f64, 21 }, // SNB from http://www.agner.org/ 3137 { ISD::FSQRT, MVT::v2f64, 21 }, // SNB from http://www.agner.org/ 3138 { ISD::FSQRT, MVT::v4f64, 43 }, // SNB from http://www.agner.org/ 3139 }; 3140 static const CostTblEntry GLMCostTbl[] = { 3141 { ISD::FSQRT, MVT::f32, 19 }, // sqrtss 3142 { ISD::FSQRT, MVT::v4f32, 37 }, // sqrtps 3143 { ISD::FSQRT, MVT::f64, 34 }, // sqrtsd 3144 { ISD::FSQRT, MVT::v2f64, 67 }, // sqrtpd 3145 }; 3146 static const CostTblEntry SLMCostTbl[] = { 3147 { ISD::FSQRT, MVT::f32, 20 }, // sqrtss 3148 { ISD::FSQRT, MVT::v4f32, 40 }, // sqrtps 3149 { ISD::FSQRT, MVT::f64, 35 }, // sqrtsd 3150 { ISD::FSQRT, MVT::v2f64, 70 }, // sqrtpd 3151 }; 3152 static const CostTblEntry SSE42CostTbl[] = { 3153 { ISD::USUBSAT, MVT::v4i32, 2 }, // pmaxud + psubd 3154 { ISD::UADDSAT, MVT::v4i32, 3 }, // not + pminud + paddd 3155 { ISD::FSQRT, MVT::f32, 18 }, // Nehalem from http://www.agner.org/ 3156 { ISD::FSQRT, MVT::v4f32, 18 }, // Nehalem from http://www.agner.org/ 3157 }; 3158 static const CostTblEntry SSE41CostTbl[] = { 3159 { ISD::ABS, MVT::v2i64, 2 }, // BLENDVPD(X,PSUBQ(0,X),X) 3160 { ISD::SMAX, MVT::v4i32, 1 }, 3161 { ISD::SMAX, MVT::v16i8, 1 }, 3162 { ISD::SMIN, MVT::v4i32, 1 }, 3163 { ISD::SMIN, MVT::v16i8, 1 }, 3164 { ISD::UMAX, MVT::v4i32, 1 }, 3165 { ISD::UMAX, MVT::v8i16, 1 }, 3166 { ISD::UMIN, MVT::v4i32, 1 }, 3167 { ISD::UMIN, MVT::v8i16, 1 }, 3168 }; 3169 static const CostTblEntry SSSE3CostTbl[] = { 3170 { ISD::ABS, MVT::v4i32, 1 }, 3171 { ISD::ABS, MVT::v8i16, 1 }, 3172 { ISD::ABS, MVT::v16i8, 1 }, 3173 { ISD::BITREVERSE, MVT::v2i64, 5 }, 3174 { ISD::BITREVERSE, MVT::v4i32, 5 }, 3175 { ISD::BITREVERSE, MVT::v8i16, 5 }, 3176 { ISD::BITREVERSE, MVT::v16i8, 5 }, 3177 { ISD::BSWAP, MVT::v2i64, 1 }, 3178 { ISD::BSWAP, MVT::v4i32, 1 }, 3179 { ISD::BSWAP, MVT::v8i16, 1 }, 3180 { ISD::CTLZ, MVT::v2i64, 23 }, 3181 { ISD::CTLZ, MVT::v4i32, 18 }, 3182 { ISD::CTLZ, MVT::v8i16, 14 }, 3183 { ISD::CTLZ, MVT::v16i8, 9 }, 3184 { ISD::CTPOP, MVT::v2i64, 7 }, 3185 { ISD::CTPOP, MVT::v4i32, 11 }, 3186 { ISD::CTPOP, MVT::v8i16, 9 }, 3187 { ISD::CTPOP, MVT::v16i8, 6 }, 3188 { ISD::CTTZ, MVT::v2i64, 10 }, 3189 { ISD::CTTZ, MVT::v4i32, 14 }, 3190 { ISD::CTTZ, MVT::v8i16, 12 }, 3191 { ISD::CTTZ, MVT::v16i8, 9 } 3192 }; 3193 static const CostTblEntry SSE2CostTbl[] = { 3194 { ISD::ABS, MVT::v2i64, 4 }, 3195 { ISD::ABS, MVT::v4i32, 3 }, 3196 { ISD::ABS, MVT::v8i16, 2 }, 3197 { ISD::ABS, MVT::v16i8, 2 }, 3198 { ISD::BITREVERSE, MVT::v2i64, 29 }, 3199 { ISD::BITREVERSE, MVT::v4i32, 27 }, 3200 { ISD::BITREVERSE, MVT::v8i16, 27 }, 3201 { ISD::BITREVERSE, MVT::v16i8, 20 }, 3202 { ISD::BSWAP, MVT::v2i64, 7 }, 3203 { ISD::BSWAP, MVT::v4i32, 7 }, 3204 { ISD::BSWAP, MVT::v8i16, 7 }, 3205 { ISD::CTLZ, MVT::v2i64, 25 }, 3206 { ISD::CTLZ, MVT::v4i32, 26 }, 3207 { ISD::CTLZ, MVT::v8i16, 20 }, 3208 { ISD::CTLZ, MVT::v16i8, 17 }, 3209 { ISD::CTPOP, MVT::v2i64, 12 }, 3210 { ISD::CTPOP, MVT::v4i32, 15 }, 3211 { ISD::CTPOP, MVT::v8i16, 13 }, 3212 { ISD::CTPOP, MVT::v16i8, 10 }, 3213 { ISD::CTTZ, MVT::v2i64, 14 }, 3214 { ISD::CTTZ, MVT::v4i32, 18 }, 3215 { ISD::CTTZ, MVT::v8i16, 16 }, 3216 { ISD::CTTZ, MVT::v16i8, 13 }, 3217 { ISD::SADDSAT, MVT::v8i16, 1 }, 3218 { ISD::SADDSAT, MVT::v16i8, 1 }, 3219 { ISD::SMAX, MVT::v8i16, 1 }, 3220 { ISD::SMIN, MVT::v8i16, 1 }, 3221 { 
ISD::SSUBSAT, MVT::v8i16, 1 }, 3222 { ISD::SSUBSAT, MVT::v16i8, 1 }, 3223 { ISD::UADDSAT, MVT::v8i16, 1 }, 3224 { ISD::UADDSAT, MVT::v16i8, 1 }, 3225 { ISD::UMAX, MVT::v8i16, 2 }, 3226 { ISD::UMAX, MVT::v16i8, 1 }, 3227 { ISD::UMIN, MVT::v8i16, 2 }, 3228 { ISD::UMIN, MVT::v16i8, 1 }, 3229 { ISD::USUBSAT, MVT::v8i16, 1 }, 3230 { ISD::USUBSAT, MVT::v16i8, 1 }, 3231 { ISD::FMAXNUM, MVT::f64, 4 }, 3232 { ISD::FMAXNUM, MVT::v2f64, 4 }, 3233 { ISD::FSQRT, MVT::f64, 32 }, // Nehalem from http://www.agner.org/ 3234 { ISD::FSQRT, MVT::v2f64, 32 }, // Nehalem from http://www.agner.org/ 3235 }; 3236 static const CostTblEntry SSE1CostTbl[] = { 3237 { ISD::FMAXNUM, MVT::f32, 4 }, 3238 { ISD::FMAXNUM, MVT::v4f32, 4 }, 3239 { ISD::FSQRT, MVT::f32, 28 }, // Pentium III from http://www.agner.org/ 3240 { ISD::FSQRT, MVT::v4f32, 56 }, // Pentium III from http://www.agner.org/ 3241 }; 3242 static const CostTblEntry BMI64CostTbl[] = { // 64-bit targets 3243 { ISD::CTTZ, MVT::i64, 1 }, 3244 }; 3245 static const CostTblEntry BMI32CostTbl[] = { // 32 or 64-bit targets 3246 { ISD::CTTZ, MVT::i32, 1 }, 3247 { ISD::CTTZ, MVT::i16, 1 }, 3248 { ISD::CTTZ, MVT::i8, 1 }, 3249 }; 3250 static const CostTblEntry LZCNT64CostTbl[] = { // 64-bit targets 3251 { ISD::CTLZ, MVT::i64, 1 }, 3252 }; 3253 static const CostTblEntry LZCNT32CostTbl[] = { // 32 or 64-bit targets 3254 { ISD::CTLZ, MVT::i32, 1 }, 3255 { ISD::CTLZ, MVT::i16, 1 }, 3256 { ISD::CTLZ, MVT::i8, 1 }, 3257 }; 3258 static const CostTblEntry POPCNT64CostTbl[] = { // 64-bit targets 3259 { ISD::CTPOP, MVT::i64, 1 }, 3260 }; 3261 static const CostTblEntry POPCNT32CostTbl[] = { // 32 or 64-bit targets 3262 { ISD::CTPOP, MVT::i32, 1 }, 3263 { ISD::CTPOP, MVT::i16, 1 }, 3264 { ISD::CTPOP, MVT::i8, 1 }, 3265 }; 3266 static const CostTblEntry X64CostTbl[] = { // 64-bit targets 3267 { ISD::ABS, MVT::i64, 2 }, // SUB+CMOV 3268 { ISD::BITREVERSE, MVT::i64, 14 }, 3269 { ISD::BSWAP, MVT::i64, 1 }, 3270 { ISD::CTLZ, MVT::i64, 4 }, // BSR+XOR or BSR+XOR+CMOV 3271 { ISD::CTTZ, MVT::i64, 3 }, // TEST+BSF+CMOV/BRANCH 3272 { ISD::CTPOP, MVT::i64, 10 }, 3273 { ISD::SADDO, MVT::i64, 1 }, 3274 { ISD::UADDO, MVT::i64, 1 }, 3275 { ISD::UMULO, MVT::i64, 2 }, // mulq + seto 3276 }; 3277 static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets 3278 { ISD::ABS, MVT::i32, 2 }, // SUB+CMOV 3279 { ISD::ABS, MVT::i16, 2 }, // SUB+CMOV 3280 { ISD::BITREVERSE, MVT::i32, 14 }, 3281 { ISD::BITREVERSE, MVT::i16, 14 }, 3282 { ISD::BITREVERSE, MVT::i8, 11 }, 3283 { ISD::BSWAP, MVT::i32, 1 }, 3284 { ISD::BSWAP, MVT::i16, 1 }, // ROL 3285 { ISD::CTLZ, MVT::i32, 4 }, // BSR+XOR or BSR+XOR+CMOV 3286 { ISD::CTLZ, MVT::i16, 4 }, // BSR+XOR or BSR+XOR+CMOV 3287 { ISD::CTLZ, MVT::i8, 4 }, // BSR+XOR or BSR+XOR+CMOV 3288 { ISD::CTTZ, MVT::i32, 3 }, // TEST+BSF+CMOV/BRANCH 3289 { ISD::CTTZ, MVT::i16, 3 }, // TEST+BSF+CMOV/BRANCH 3290 { ISD::CTTZ, MVT::i8, 3 }, // TEST+BSF+CMOV/BRANCH 3291 { ISD::CTPOP, MVT::i32, 8 }, 3292 { ISD::CTPOP, MVT::i16, 9 }, 3293 { ISD::CTPOP, MVT::i8, 7 }, 3294 { ISD::SADDO, MVT::i32, 1 }, 3295 { ISD::SADDO, MVT::i16, 1 }, 3296 { ISD::SADDO, MVT::i8, 1 }, 3297 { ISD::UADDO, MVT::i32, 1 }, 3298 { ISD::UADDO, MVT::i16, 1 }, 3299 { ISD::UADDO, MVT::i8, 1 }, 3300 { ISD::UMULO, MVT::i32, 2 }, // mul + seto 3301 { ISD::UMULO, MVT::i16, 2 }, 3302 { ISD::UMULO, MVT::i8, 2 }, 3303 }; 3304 3305 Type *RetTy = ICA.getReturnType(); 3306 Type *OpTy = RetTy; 3307 Intrinsic::ID IID = ICA.getID(); 3308 unsigned ISD = ISD::DELETED_NODE; 3309 switch (IID) { 3310 default: 3311 break; 3312 case 
Intrinsic::abs: 3313 ISD = ISD::ABS; 3314 break; 3315 case Intrinsic::bitreverse: 3316 ISD = ISD::BITREVERSE; 3317 break; 3318 case Intrinsic::bswap: 3319 ISD = ISD::BSWAP; 3320 break; 3321 case Intrinsic::ctlz: 3322 ISD = ISD::CTLZ; 3323 break; 3324 case Intrinsic::ctpop: 3325 ISD = ISD::CTPOP; 3326 break; 3327 case Intrinsic::cttz: 3328 ISD = ISD::CTTZ; 3329 break; 3330 case Intrinsic::maxnum: 3331 case Intrinsic::minnum: 3332 // FMINNUM has same costs so don't duplicate. 3333 ISD = ISD::FMAXNUM; 3334 break; 3335 case Intrinsic::sadd_sat: 3336 ISD = ISD::SADDSAT; 3337 break; 3338 case Intrinsic::smax: 3339 ISD = ISD::SMAX; 3340 break; 3341 case Intrinsic::smin: 3342 ISD = ISD::SMIN; 3343 break; 3344 case Intrinsic::ssub_sat: 3345 ISD = ISD::SSUBSAT; 3346 break; 3347 case Intrinsic::uadd_sat: 3348 ISD = ISD::UADDSAT; 3349 break; 3350 case Intrinsic::umax: 3351 ISD = ISD::UMAX; 3352 break; 3353 case Intrinsic::umin: 3354 ISD = ISD::UMIN; 3355 break; 3356 case Intrinsic::usub_sat: 3357 ISD = ISD::USUBSAT; 3358 break; 3359 case Intrinsic::sqrt: 3360 ISD = ISD::FSQRT; 3361 break; 3362 case Intrinsic::sadd_with_overflow: 3363 case Intrinsic::ssub_with_overflow: 3364 // SSUBO has same costs so don't duplicate. 3365 ISD = ISD::SADDO; 3366 OpTy = RetTy->getContainedType(0); 3367 break; 3368 case Intrinsic::uadd_with_overflow: 3369 case Intrinsic::usub_with_overflow: 3370 // USUBO has same costs so don't duplicate. 3371 ISD = ISD::UADDO; 3372 OpTy = RetTy->getContainedType(0); 3373 break; 3374 case Intrinsic::umul_with_overflow: 3375 case Intrinsic::smul_with_overflow: 3376 // SMULO has same costs so don't duplicate. 3377 ISD = ISD::UMULO; 3378 OpTy = RetTy->getContainedType(0); 3379 break; 3380 } 3381 3382 if (ISD != ISD::DELETED_NODE) { 3383 // Legalize the type. 3384 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, OpTy); 3385 MVT MTy = LT.second; 3386 3387 // Attempt to lookup cost. 3388 if (ISD == ISD::BITREVERSE && ST->hasGFNI() && ST->hasSSSE3() && 3389 MTy.isVector()) { 3390 // With PSHUFB the code is very similar for all types. If we have integer 3391 // byte operations, we just need a GF2P8AFFINEQB for vXi8. For other types 3392 // we also need a PSHUFB. 3393 unsigned Cost = MTy.getVectorElementType() == MVT::i8 ? 1 : 2; 3394 3395 // Without byte operations, we need twice as many GF2P8AFFINEQB and PSHUFB 3396 // instructions. We also need an extract and an insert. 3397 if (!(MTy.is128BitVector() || (ST->hasAVX2() && MTy.is256BitVector()) || 3398 (ST->hasBWI() && MTy.is512BitVector()))) 3399 Cost = Cost * 2 + 2; 3400 3401 return LT.first * Cost; 3402 } 3403 3404 auto adjustTableCost = [](const CostTblEntry &Entry, 3405 InstructionCost LegalizationCost, 3406 FastMathFlags FMF) { 3407 // If there are no NANs to deal with, then these are reduced to a 3408 // single MIN** or MAX** instruction instead of the MIN/CMP/SELECT that we 3409 // assume is used in the non-fast case. 
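// E.g. llvm.maxnum.v4f32 with nnan can be a single MAXPS (cost 1), whereas
// the AVX1 table above prices the general case as MAXPS + CMPUNORDPS +
// BLENDVPS (cost 3).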
3410 if (Entry.ISD == ISD::FMAXNUM || Entry.ISD == ISD::FMINNUM) { 3411 if (FMF.noNaNs()) 3412 return LegalizationCost * 1; 3413 } 3414 return LegalizationCost * (int)Entry.Cost; 3415 }; 3416 3417 if (ST->useGLMDivSqrtCosts()) 3418 if (const auto *Entry = CostTableLookup(GLMCostTbl, ISD, MTy)) 3419 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3420 3421 if (ST->useSLMArithCosts()) 3422 if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy)) 3423 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3424 3425 if (ST->hasBITALG()) 3426 if (const auto *Entry = CostTableLookup(AVX512BITALGCostTbl, ISD, MTy)) 3427 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3428 3429 if (ST->hasVPOPCNTDQ()) 3430 if (const auto *Entry = CostTableLookup(AVX512VPOPCNTDQCostTbl, ISD, MTy)) 3431 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3432 3433 if (ST->hasCDI()) 3434 if (const auto *Entry = CostTableLookup(AVX512CDCostTbl, ISD, MTy)) 3435 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3436 3437 if (ST->hasBWI()) 3438 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy)) 3439 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3440 3441 if (ST->hasAVX512()) 3442 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy)) 3443 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3444 3445 if (ST->hasXOP()) 3446 if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy)) 3447 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3448 3449 if (ST->hasAVX2()) 3450 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy)) 3451 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3452 3453 if (ST->hasAVX()) 3454 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy)) 3455 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3456 3457 if (ST->hasSSE42()) 3458 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy)) 3459 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3460 3461 if (ST->hasSSE41()) 3462 if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy)) 3463 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3464 3465 if (ST->hasSSSE3()) 3466 if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy)) 3467 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3468 3469 if (ST->hasSSE2()) 3470 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy)) 3471 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3472 3473 if (ST->hasSSE1()) 3474 if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy)) 3475 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3476 3477 if (ST->hasBMI()) { 3478 if (ST->is64Bit()) 3479 if (const auto *Entry = CostTableLookup(BMI64CostTbl, ISD, MTy)) 3480 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3481 3482 if (const auto *Entry = CostTableLookup(BMI32CostTbl, ISD, MTy)) 3483 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3484 } 3485 3486 if (ST->hasLZCNT()) { 3487 if (ST->is64Bit()) 3488 if (const auto *Entry = CostTableLookup(LZCNT64CostTbl, ISD, MTy)) 3489 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3490 3491 if (const auto *Entry = CostTableLookup(LZCNT32CostTbl, ISD, MTy)) 3492 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3493 } 3494 3495 if (ST->hasPOPCNT()) { 3496 if (ST->is64Bit()) 3497 if (const auto *Entry = CostTableLookup(POPCNT64CostTbl, ISD, MTy)) 3498 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3499 3500 if (const auto *Entry 
= CostTableLookup(POPCNT32CostTbl, ISD, MTy)) 3501 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3502 } 3503 3504 if (ISD == ISD::BSWAP && ST->hasMOVBE() && ST->hasFastMOVBE()) { 3505 if (const Instruction *II = ICA.getInst()) { 3506 if (II->hasOneUse() && isa<StoreInst>(II->user_back())) 3507 return TTI::TCC_Free; 3508 if (auto *LI = dyn_cast<LoadInst>(II->getOperand(0))) { 3509 if (LI->hasOneUse()) 3510 return TTI::TCC_Free; 3511 } 3512 } 3513 } 3514 3515 if (ST->is64Bit()) 3516 if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy)) 3517 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3518 3519 if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy)) 3520 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3521 } 3522 3523 return BaseT::getIntrinsicInstrCost(ICA, CostKind); 3524 } 3525 3526 InstructionCost 3527 X86TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, 3528 TTI::TargetCostKind CostKind) { 3529 if (ICA.isTypeBasedOnly()) 3530 return getTypeBasedIntrinsicInstrCost(ICA, CostKind); 3531 3532 static const CostTblEntry AVX512BWCostTbl[] = { 3533 { ISD::ROTL, MVT::v32i16, 2 }, 3534 { ISD::ROTL, MVT::v16i16, 2 }, 3535 { ISD::ROTL, MVT::v8i16, 2 }, 3536 { ISD::ROTL, MVT::v64i8, 5 }, 3537 { ISD::ROTL, MVT::v32i8, 5 }, 3538 { ISD::ROTL, MVT::v16i8, 5 }, 3539 { ISD::ROTR, MVT::v32i16, 2 }, 3540 { ISD::ROTR, MVT::v16i16, 2 }, 3541 { ISD::ROTR, MVT::v8i16, 2 }, 3542 { ISD::ROTR, MVT::v64i8, 5 }, 3543 { ISD::ROTR, MVT::v32i8, 5 }, 3544 { ISD::ROTR, MVT::v16i8, 5 } 3545 }; 3546 static const CostTblEntry AVX512CostTbl[] = { 3547 { ISD::ROTL, MVT::v8i64, 1 }, 3548 { ISD::ROTL, MVT::v4i64, 1 }, 3549 { ISD::ROTL, MVT::v2i64, 1 }, 3550 { ISD::ROTL, MVT::v16i32, 1 }, 3551 { ISD::ROTL, MVT::v8i32, 1 }, 3552 { ISD::ROTL, MVT::v4i32, 1 }, 3553 { ISD::ROTR, MVT::v8i64, 1 }, 3554 { ISD::ROTR, MVT::v4i64, 1 }, 3555 { ISD::ROTR, MVT::v2i64, 1 }, 3556 { ISD::ROTR, MVT::v16i32, 1 }, 3557 { ISD::ROTR, MVT::v8i32, 1 }, 3558 { ISD::ROTR, MVT::v4i32, 1 } 3559 }; 3560 // XOP: ROTL = VPROT(X,Y), ROTR = VPROT(X,SUB(0,Y)) 3561 static const CostTblEntry XOPCostTbl[] = { 3562 { ISD::ROTL, MVT::v4i64, 4 }, 3563 { ISD::ROTL, MVT::v8i32, 4 }, 3564 { ISD::ROTL, MVT::v16i16, 4 }, 3565 { ISD::ROTL, MVT::v32i8, 4 }, 3566 { ISD::ROTL, MVT::v2i64, 1 }, 3567 { ISD::ROTL, MVT::v4i32, 1 }, 3568 { ISD::ROTL, MVT::v8i16, 1 }, 3569 { ISD::ROTL, MVT::v16i8, 1 }, 3570 { ISD::ROTR, MVT::v4i64, 6 }, 3571 { ISD::ROTR, MVT::v8i32, 6 }, 3572 { ISD::ROTR, MVT::v16i16, 6 }, 3573 { ISD::ROTR, MVT::v32i8, 6 }, 3574 { ISD::ROTR, MVT::v2i64, 2 }, 3575 { ISD::ROTR, MVT::v4i32, 2 }, 3576 { ISD::ROTR, MVT::v8i16, 2 }, 3577 { ISD::ROTR, MVT::v16i8, 2 } 3578 }; 3579 static const CostTblEntry X64CostTbl[] = { // 64-bit targets 3580 { ISD::ROTL, MVT::i64, 1 }, 3581 { ISD::ROTR, MVT::i64, 1 }, 3582 { ISD::FSHL, MVT::i64, 4 } 3583 }; 3584 static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets 3585 { ISD::ROTL, MVT::i32, 1 }, 3586 { ISD::ROTL, MVT::i16, 1 }, 3587 { ISD::ROTL, MVT::i8, 1 }, 3588 { ISD::ROTR, MVT::i32, 1 }, 3589 { ISD::ROTR, MVT::i16, 1 }, 3590 { ISD::ROTR, MVT::i8, 1 }, 3591 { ISD::FSHL, MVT::i32, 4 }, 3592 { ISD::FSHL, MVT::i16, 4 }, 3593 { ISD::FSHL, MVT::i8, 4 } 3594 }; 3595 3596 Intrinsic::ID IID = ICA.getID(); 3597 Type *RetTy = ICA.getReturnType(); 3598 const SmallVectorImpl<const Value *> &Args = ICA.getArgs(); 3599 unsigned ISD = ISD::DELETED_NODE; 3600 switch (IID) { 3601 default: 3602 break; 3603 case Intrinsic::fshl: 3604 ISD = ISD::FSHL; 3605 if (Args[0] == 
Args[1])
3606 ISD = ISD::ROTL;
3607 break;
3608 case Intrinsic::fshr:
3609 // FSHR has same costs so don't duplicate.
3610 ISD = ISD::FSHL;
3611 if (Args[0] == Args[1])
3612 ISD = ISD::ROTR;
3613 break;
3614 }
3615
3616 if (ISD != ISD::DELETED_NODE) {
3617 // Legalize the type.
3618 std::pair<InstructionCost, MVT> LT =
3619 TLI->getTypeLegalizationCost(DL, RetTy);
3620 MVT MTy = LT.second;
3621
3622 // Attempt to lookup cost.
3623 if (ST->hasBWI())
3624 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
3625 return LT.first * Entry->Cost;
3626
3627 if (ST->hasAVX512())
3628 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
3629 return LT.first * Entry->Cost;
3630
3631 if (ST->hasXOP())
3632 if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
3633 return LT.first * Entry->Cost;
3634
3635 if (ST->is64Bit())
3636 if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
3637 return LT.first * Entry->Cost;
3638
3639 if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
3640 return LT.first * Entry->Cost;
3641 }
3642
3643 return BaseT::getIntrinsicInstrCost(ICA, CostKind);
3644 }
3645
3646 InstructionCost X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
3647 unsigned Index) {
3648 static const CostTblEntry SLMCostTbl[] = {
3649 { ISD::EXTRACT_VECTOR_ELT, MVT::i8, 4 },
3650 { ISD::EXTRACT_VECTOR_ELT, MVT::i16, 4 },
3651 { ISD::EXTRACT_VECTOR_ELT, MVT::i32, 4 },
3652 { ISD::EXTRACT_VECTOR_ELT, MVT::i64, 7 }
3653 };
3654
3655 assert(Val->isVectorTy() && "This must be a vector type");
3656 Type *ScalarType = Val->getScalarType();
3657 int RegisterFileMoveCost = 0;
3658
3659 // Non-immediate extraction/insertion can be handled as a sequence of
3660 // aliased loads+stores via the stack.
3661 if (Index == -1U && (Opcode == Instruction::ExtractElement ||
3662 Opcode == Instruction::InsertElement)) {
3663 // TODO: On some SSE41+ targets, we expand to cmp+splat+select patterns:
3664 // inselt N0, N1, N2 --> select (SplatN2 == {0,1,2...}) ? SplatN1 : N0.
3665
3666 // TODO: Move this to BasicTTIImpl.h? We'd need better gep + index handling.
3667 assert(isa<FixedVectorType>(Val) && "Fixed vector type expected");
3668 Align VecAlign = DL.getPrefTypeAlign(Val);
3669 Align SclAlign = DL.getPrefTypeAlign(ScalarType);
3670
3671 // Extract - store vector to stack, load scalar.
3672 if (Opcode == Instruction::ExtractElement) {
3673 return getMemoryOpCost(Instruction::Store, Val, VecAlign, 0,
3674 TTI::TargetCostKind::TCK_RecipThroughput) +
3675 getMemoryOpCost(Instruction::Load, ScalarType, SclAlign, 0,
3676 TTI::TargetCostKind::TCK_RecipThroughput);
3677 }
3678 // Insert - store vector to stack, store scalar, load vector.
3679 if (Opcode == Instruction::InsertElement) {
3680 return getMemoryOpCost(Instruction::Store, Val, VecAlign, 0,
3681 TTI::TargetCostKind::TCK_RecipThroughput) +
3682 getMemoryOpCost(Instruction::Store, ScalarType, SclAlign, 0,
3683 TTI::TargetCostKind::TCK_RecipThroughput) +
3684 getMemoryOpCost(Instruction::Load, Val, VecAlign, 0,
3685 TTI::TargetCostKind::TCK_RecipThroughput);
3686 }
3687 }
3688
3689 if (Index != -1U && (Opcode == Instruction::ExtractElement ||
3690 Opcode == Instruction::InsertElement)) {
3691 // Extraction of vXi1 elements is now efficiently handled by MOVMSK.
3692 if (Opcode == Instruction::ExtractElement &&
3693 ScalarType->getScalarSizeInBits() == 1 &&
3694 cast<FixedVectorType>(Val)->getNumElements() > 1)
3695 return 1;
3696
3697 // Legalize the type.
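// Worked example (illustrative): a v8i32 extract on plain SSE2 legalizes
// to two v4i32 registers (LT.first == 2), so below the index is remapped
// into whichever v4i32 half actually holds the element.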
3698 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);
3699
3700 // This type is legalized to a scalar type.
3701 if (!LT.second.isVector())
3702 return 0;
3703
3704 // The type may be split. Normalize the index to the new type.
3705 unsigned SizeInBits = LT.second.getSizeInBits();
3706 unsigned NumElts = LT.second.getVectorNumElements();
3707 unsigned SubNumElts = NumElts;
3708 Index = Index % NumElts;
3709
3710 // For >128-bit vectors, we need to extract higher 128-bit subvectors.
3711 // For inserts, we also need to insert the subvector back.
3712 if (SizeInBits > 128) {
3713 assert((SizeInBits % 128) == 0 && "Illegal vector");
3714 unsigned NumSubVecs = SizeInBits / 128;
3715 SubNumElts = NumElts / NumSubVecs;
3716 if (SubNumElts <= Index) {
3717 RegisterFileMoveCost += (Opcode == Instruction::InsertElement ? 2 : 1);
3718 Index %= SubNumElts;
3719 }
3720 }
3721
3722 if (Index == 0) {
3723 // Floating point scalars are already located in index #0.
3724 // Many insertions to #0 can fold away for scalar fp-ops, so let's assume
3725 // true for all.
3726 if (ScalarType->isFloatingPointTy())
3727 return RegisterFileMoveCost;
3728
3729 // Assume movd/movq XMM -> GPR is relatively cheap on all targets.
3730 if (ScalarType->isIntegerTy() && Opcode == Instruction::ExtractElement)
3731 return 1 + RegisterFileMoveCost;
3732 }
3733
3734 int ISD = TLI->InstructionOpcodeToISD(Opcode);
3735 assert(ISD && "Unexpected vector opcode");
3736 MVT MScalarTy = LT.second.getScalarType();
3737 if (ST->useSLMArithCosts())
3738 if (auto *Entry = CostTableLookup(SLMCostTbl, ISD, MScalarTy))
3739 return Entry->Cost + RegisterFileMoveCost;
3740
3741 // Assume pinsr/pextr XMM <-> GPR is relatively cheap on all targets.
3742 if ((MScalarTy == MVT::i16 && ST->hasSSE2()) ||
3743 (MScalarTy.isInteger() && ST->hasSSE41()))
3744 return 1 + RegisterFileMoveCost;
3745
3746 // Assume insertps is relatively cheap on all targets.
3747 if (MScalarTy == MVT::f32 && ST->hasSSE41() &&
3748 Opcode == Instruction::InsertElement)
3749 return 1 + RegisterFileMoveCost;
3750
3751 // For extractions we just need to shuffle the element to index 0, which
3752 // should be very cheap (assume cost = 1). For insertions we need to shuffle
3753 // the element to its destination. In both cases we must handle the
3754 // subvector move(s).
3755 // If the vector type is already less than 128-bits then don't reduce it.
3756 // TODO: Under what circumstances should we shuffle using the full width?
3757 InstructionCost ShuffleCost = 1;
3758 if (Opcode == Instruction::InsertElement) {
3759 auto *SubTy = cast<VectorType>(Val);
3760 EVT VT = TLI->getValueType(DL, Val);
3761 if (VT.getScalarType() != MScalarTy || VT.getSizeInBits() >= 128)
3762 SubTy = FixedVectorType::get(ScalarType, SubNumElts);
3763 ShuffleCost =
3764 getShuffleCost(TTI::SK_PermuteTwoSrc, SubTy, None, 0, SubTy);
3765 }
3766 int IntOrFpCost = ScalarType->isFloatingPointTy() ? 0 : 1;
3767 return ShuffleCost + IntOrFpCost + RegisterFileMoveCost;
3768 }
3769
3770 // Add to the base cost if we know that the extracted element of a vector is
3771 // destined to be moved to and used in the integer register file.
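// E.g. extracting an element of a vector of pointers: the pointer ends up
// in a GPR, so we charge one extra cross-register-file move on top of the
// base cost. (Illustrative reading of the check below.)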
3772 if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy())
3773 RegisterFileMoveCost += 1;
3774
3775 return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost;
3776 }
3777
3778 InstructionCost X86TTIImpl::getScalarizationOverhead(VectorType *Ty,
3779 const APInt &DemandedElts,
3780 bool Insert,
3781 bool Extract) {
3782 InstructionCost Cost = 0;
3783
3784 // For insertions, an ISD::BUILD_VECTOR style vector initialization can be
3785 // much cheaper than an accumulation of ISD::INSERT_VECTOR_ELT.
3786 if (Insert) {
3787 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
3788 MVT MScalarTy = LT.second.getScalarType();
3789 unsigned SizeInBits = LT.second.getSizeInBits();
3790
3791 if ((MScalarTy == MVT::i16 && ST->hasSSE2()) ||
3792 (MScalarTy.isInteger() && ST->hasSSE41()) ||
3793 (MScalarTy == MVT::f32 && ST->hasSSE41())) {
3794 // For types we can insert directly, insertion into 128-bit subvectors is
3795 // cheap, followed by a cheap chain of concatenations.
3796 if (SizeInBits <= 128) {
3797 Cost +=
3798 BaseT::getScalarizationOverhead(Ty, DemandedElts, Insert, false);
3799 } else {
3800 // In each 128-lane, if at least one index is demanded but not all
3801 // indices are demanded and this 128-lane is not the first 128-lane of
3802 // the legalized vector, then this 128-lane needs an extracti128; if in
3803 // each 128-lane, there is at least one demanded index, this 128-lane
3804 // needs an inserti128.
3805
3806 // The following cases will help you build a better understanding:
3807 // Assume we insert several elements into a v8i32 vector in avx2,
3808 // Case#1: inserting into 1st index needs vpinsrd + inserti128.
3809 // Case#2: inserting into 5th index needs extracti128 + vpinsrd +
3810 // inserti128.
3811 // Case#3: inserting into indices 4,5,6,7 needs 4*vpinsrd + inserti128.
3812 const int CostValue = *LT.first.getValue();
3813 assert(CostValue >= 0 && "Negative cost!");
3814 unsigned Num128Lanes = SizeInBits / 128 * CostValue;
3815 unsigned NumElts = LT.second.getVectorNumElements() * CostValue;
3816 APInt WidenedDemandedElts = DemandedElts.zextOrSelf(NumElts);
3817 unsigned Scale = NumElts / Num128Lanes;
3818 // We iterate each 128-lane, and check if we need an
3819 // extracti128/inserti128 for this 128-lane.
3820 for (unsigned I = 0; I < NumElts; I += Scale) {
3821 APInt Mask = WidenedDemandedElts.getBitsSet(NumElts, I, I + Scale);
3822 APInt MaskedDE = Mask & WidenedDemandedElts;
3823 unsigned Population = MaskedDE.countPopulation();
3824 Cost += (Population > 0 && Population != Scale &&
3825 I % LT.second.getVectorNumElements() != 0);
3826 Cost += Population > 0;
3827 }
3828 Cost += DemandedElts.countPopulation();
3829
3830 // For vXf32 cases, insertion into the 0'th index in each v4f32
3831 // 128-bit vector is free.
3832 // NOTE: This assumes legalization widens vXf32 vectors.
3833 if (MScalarTy == MVT::f32)
3834 for (unsigned i = 0, e = cast<FixedVectorType>(Ty)->getNumElements();
3835 i < e; i += 4)
3836 if (DemandedElts[i])
3837 Cost--;
3838 }
3839 } else if (LT.second.isVector()) {
3840 // Without fast insertion, we need to use MOVD/MOVQ to pass each demanded
3841 // integer element as a SCALAR_TO_VECTOR, then we build the vector as a
3842 // series of UNPCK followed by CONCAT_VECTORS - all of these can be
3843 // considered cheap.
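// Worked example (illustrative): on SSE2-only, building a v4i32 from 4
// demanded scalars is 4 MOVDs plus (4 - 1) unpack/concat steps, i.e. cost
// 7 under this model.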
3844 if (Ty->isIntOrIntVectorTy()) 3845 Cost += DemandedElts.countPopulation(); 3846 3847 // Get the smaller of the legalized or original pow2-extended number of 3848 // vector elements, which represents the number of unpacks we'll end up 3849 // performing. 3850 unsigned NumElts = LT.second.getVectorNumElements(); 3851 unsigned Pow2Elts = 3852 PowerOf2Ceil(cast<FixedVectorType>(Ty)->getNumElements()); 3853 Cost += (std::min<unsigned>(NumElts, Pow2Elts) - 1) * LT.first; 3854 } 3855 } 3856 3857 if (Extract) { 3858 // vXi1 can be efficiently extracted with MOVMSK. 3859 // TODO: AVX512 predicate mask handling. 3860 // NOTE: This doesn't work well for roundtrip scalarization. 3861 if (!Insert && Ty->getScalarSizeInBits() == 1 && !ST->hasAVX512()) { 3862 unsigned NumElts = cast<FixedVectorType>(Ty)->getNumElements(); 3863 unsigned MaxElts = ST->hasAVX2() ? 32 : 16; 3864 unsigned MOVMSKCost = (NumElts + MaxElts - 1) / MaxElts; 3865 return MOVMSKCost; 3866 } 3867 3868 // TODO: Use default extraction for now, but we should investigate extending 3869 // this to handle repeated subvector extraction. 3870 Cost += BaseT::getScalarizationOverhead(Ty, DemandedElts, false, Extract); 3871 } 3872 3873 return Cost; 3874 } 3875 3876 InstructionCost 3877 X86TTIImpl::getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, 3878 int VF, const APInt &DemandedDstElts, 3879 TTI::TargetCostKind CostKind) { 3880 const unsigned EltTyBits = DL.getTypeSizeInBits(EltTy); 3881 // We don't differentiate element types here, only element bit width. 3882 EltTy = IntegerType::getIntNTy(EltTy->getContext(), EltTyBits); 3883 3884 auto bailout = [&]() { 3885 return BaseT::getReplicationShuffleCost(EltTy, ReplicationFactor, VF, 3886 DemandedDstElts, CostKind); 3887 }; 3888 3889 // For now, only deal with AVX512 cases. 3890 if (!ST->hasAVX512()) 3891 return bailout(); 3892 3893 // Do we have a native shuffle for this element type, or should we promote? 3894 unsigned PromEltTyBits = EltTyBits; 3895 switch (EltTyBits) { 3896 case 32: 3897 case 64: 3898 break; // AVX512F. 3899 case 16: 3900 if (!ST->hasBWI()) 3901 PromEltTyBits = 32; // promote to i32, AVX512F. 3902 break; // AVX512BW 3903 case 8: 3904 if (!ST->hasVBMI()) 3905 PromEltTyBits = 32; // promote to i32, AVX512F. 3906 break; // AVX512VBMI 3907 case 1: 3908 // There is no support for shuffling i1 elements. We *must* promote. 3909 if (ST->hasBWI()) { 3910 if (ST->hasVBMI()) 3911 PromEltTyBits = 8; // promote to i8, AVX512VBMI. 3912 else 3913 PromEltTyBits = 16; // promote to i16, AVX512BW. 3914 break; 3915 } 3916 if (ST->hasDQI()) { 3917 PromEltTyBits = 32; // promote to i32, AVX512F. 3918 break; 3919 } 3920 return bailout(); 3921 default: 3922 return bailout(); 3923 } 3924 auto *PromEltTy = IntegerType::getIntNTy(EltTy->getContext(), PromEltTyBits); 3925 3926 auto *SrcVecTy = FixedVectorType::get(EltTy, VF); 3927 auto *PromSrcVecTy = FixedVectorType::get(PromEltTy, VF); 3928 3929 int NumDstElements = VF * ReplicationFactor; 3930 auto *PromDstVecTy = FixedVectorType::get(PromEltTy, NumDstElements); 3931 auto *DstVecTy = FixedVectorType::get(EltTy, NumDstElements); 3932 3933 // Legalize the types. 3934 MVT LegalSrcVecTy = TLI->getTypeLegalizationCost(DL, SrcVecTy).second; 3935 MVT LegalPromSrcVecTy = TLI->getTypeLegalizationCost(DL, PromSrcVecTy).second; 3936 MVT LegalPromDstVecTy = TLI->getTypeLegalizationCost(DL, PromDstVecTy).second; 3937 MVT LegalDstVecTy = TLI->getTypeLegalizationCost(DL, DstVecTy).second; 3938 // They should have legalized into vector types. 
3939 if (!LegalSrcVecTy.isVector() || !LegalPromSrcVecTy.isVector() ||
3940 !LegalPromDstVecTy.isVector() || !LegalDstVecTy.isVector())
3941 return bailout();
3942
3943 if (PromEltTyBits != EltTyBits) {
3944 // If we have to perform the shuffle with a wider elt type than our data
3945 // type, then we will first need to anyext (we don't care about the new
3946 // bits) the source elements, and then truncate the Dst elements.
3947 InstructionCost PromotionCost;
3948 PromotionCost += getCastInstrCost(
3949 Instruction::SExt, /*Dst=*/PromSrcVecTy, /*Src=*/SrcVecTy,
3950 TargetTransformInfo::CastContextHint::None, CostKind);
3951 PromotionCost +=
3952 getCastInstrCost(Instruction::Trunc, /*Dst=*/DstVecTy,
3953 /*Src=*/PromDstVecTy,
3954 TargetTransformInfo::CastContextHint::None, CostKind);
3955 return PromotionCost + getReplicationShuffleCost(PromEltTy,
3956 ReplicationFactor, VF,
3957 DemandedDstElts, CostKind);
3958 }
3959
3960 assert(LegalSrcVecTy.getScalarSizeInBits() == EltTyBits &&
3961 LegalSrcVecTy.getScalarType() == LegalDstVecTy.getScalarType() &&
3962 "We expect that the legalization doesn't affect the element width, "
3963 "doesn't coalesce/split elements.");
3964
3965 unsigned NumEltsPerDstVec = LegalDstVecTy.getVectorNumElements();
3966 unsigned NumDstVectors =
3967 divideCeil(DstVecTy->getNumElements(), NumEltsPerDstVec);
3968
3969 auto *SingleDstVecTy = FixedVectorType::get(EltTy, NumEltsPerDstVec);
3970
3971 // Not all the produced Dst elements may be demanded. In our case,
3972 // given that a single Dst vector is formed by a single shuffle,
3973 // if all elements that will form a single Dst vector aren't demanded,
3974 // then we won't need to do that shuffle, so adjust the cost accordingly.
3975 APInt DemandedDstVectors = APIntOps::ScaleBitMask(
3976 DemandedDstElts.zextOrSelf(NumDstVectors * NumEltsPerDstVec),
3977 NumDstVectors);
3978 unsigned NumDstVectorsDemanded = DemandedDstVectors.countPopulation();
3979
3980 InstructionCost SingleShuffleCost =
3981 getShuffleCost(TTI::SK_PermuteSingleSrc, SingleDstVecTy,
3982 /*Mask=*/None, /*Index=*/0, /*SubTp=*/nullptr);
3983 return NumDstVectorsDemanded * SingleShuffleCost;
3984 }
3985
3986 InstructionCost X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
3987 MaybeAlign Alignment,
3988 unsigned AddressSpace,
3989 TTI::TargetCostKind CostKind,
3990 const Instruction *I) {
3991 // TODO: Handle other cost kinds.
3992 if (CostKind != TTI::TCK_RecipThroughput) {
3993 if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
3994 // Store instruction with index and scale costs 2 Uops.
3995 // Check the preceding GEP to identify non-const indices.
3996 if (auto *GEP = dyn_cast<GetElementPtrInst>(SI->getPointerOperand())) {
3997 if (!all_of(GEP->indices(), [](Value *V) { return isa<Constant>(V); }))
3998 return TTI::TCC_Basic * 2;
3999 }
4000 }
4001 return TTI::TCC_Basic;
4002 }
4003
4004 assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
4005 "Invalid Opcode");
4006 // Type legalization can't handle structs.
4007 if (TLI->getValueType(DL, Src, true) == MVT::Other)
4008 return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
4009 CostKind);
4010
4011 // Legalize the type.
4012 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
4013
4014 auto *VTy = dyn_cast<FixedVectorType>(Src);
4015
4016 // Handle the simple case of non-vectors.
4017 // NOTE: this assumes that legalization never creates a vector from scalars!
4018 if (!VTy || !LT.second.isVector())
4019 // Each load/store unit costs 1.
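// E.g. a scalar i64 load is LT.first == 1 -> cost 1, while an i128 load
// legalizes to two i64 ops -> cost 2. (Worked example for illustration.)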
4020 return LT.first * 1;
4021
4022 bool IsLoad = Opcode == Instruction::Load;
4023
4024 Type *EltTy = VTy->getElementType();
4025
4026 const int EltTyBits = DL.getTypeSizeInBits(EltTy);
4027
4028 InstructionCost Cost = 0;
4029
4030 // Source of truth: how many elements were there in the original IR vector?
4031 const unsigned SrcNumElt = VTy->getNumElements();
4032
4033 // How far have we gotten?
4034 int NumEltRemaining = SrcNumElt;
4035 // Note that we intentionally capture by-reference, NumEltRemaining changes.
4036 auto NumEltDone = [&]() { return SrcNumElt - NumEltRemaining; };
4037
4038 const int MaxLegalOpSizeBytes = divideCeil(LT.second.getSizeInBits(), 8);
4039
4040 // Note that even if we can store 64 bits of an XMM, we still operate on XMM.
4041 const unsigned XMMBits = 128;
4042 if (XMMBits % EltTyBits != 0)
4043 // Vector size must be a multiple of the element size. I.e. no padding.
4044 return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
4045 CostKind);
4046 const int NumEltPerXMM = XMMBits / EltTyBits;
4047
4048 auto *XMMVecTy = FixedVectorType::get(EltTy, NumEltPerXMM);
4049
4050 for (int CurrOpSizeBytes = MaxLegalOpSizeBytes, SubVecEltsLeft = 0;
4051 NumEltRemaining > 0; CurrOpSizeBytes /= 2) {
4052 // How many elements would a single op deal with at once?
4053 if ((8 * CurrOpSizeBytes) % EltTyBits != 0)
4054 // Vector size must be a multiple of the element size. I.e. no padding.
4055 return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
4056 CostKind);
4057 int CurrNumEltPerOp = (8 * CurrOpSizeBytes) / EltTyBits;
4058
4059 assert(CurrOpSizeBytes > 0 && CurrNumEltPerOp > 0 && "How'd we get here?");
4060 assert((((NumEltRemaining * EltTyBits) < (2 * 8 * CurrOpSizeBytes)) ||
4061 (CurrOpSizeBytes == MaxLegalOpSizeBytes)) &&
4062 "Unless we haven't halved the op size yet, "
4063 "we have less than two op-sized units of work left.");
4064
4065 auto *CurrVecTy = CurrNumEltPerOp > NumEltPerXMM
4066 ? FixedVectorType::get(EltTy, CurrNumEltPerOp)
4067 : XMMVecTy;
4068
4069 assert(CurrVecTy->getNumElements() % CurrNumEltPerOp == 0 &&
4070 "After halving sizes, the vector elt count is no longer a multiple "
4071 "of number of elements per operation?");
4072 auto *CoalescedVecTy =
4073 CurrNumEltPerOp == 1
4074 ? CurrVecTy
4075 : FixedVectorType::get(
4076 IntegerType::get(Src->getContext(),
4077 EltTyBits * CurrNumEltPerOp),
4078 CurrVecTy->getNumElements() / CurrNumEltPerOp);
4079 assert(DL.getTypeSizeInBits(CoalescedVecTy) ==
4080 DL.getTypeSizeInBits(CurrVecTy) &&
4081 "coalescing elements doesn't change vector width.");
4082
4083 while (NumEltRemaining > 0) {
4084 assert(SubVecEltsLeft >= 0 && "Subreg element count overconsumption?");
4085
4086 // Can we use this vector size, as per the remaining element count?
4087 // Iff the vector is naturally aligned, we can do a wide load regardless.
4088 if (NumEltRemaining < CurrNumEltPerOp &&
4089 (!IsLoad || Alignment.valueOrOne() < CurrOpSizeBytes) &&
4090 CurrOpSizeBytes != 1)
4091 break; // Try smaller vector size.
4092
4093 bool Is0thSubVec = (NumEltDone() % LT.second.getVectorNumElements()) == 0;
4094
4095 // If we have fully processed the previous reg, we need to replenish it.
4096 if (SubVecEltsLeft == 0) {
4097 SubVecEltsLeft += CurrVecTy->getNumElements();
4098 // And that's free only for the 0'th subvector of a legalized vector.
4099 if (!Is0thSubVec)
4100 Cost += getShuffleCost(IsLoad ?
      }

      // While we can directly load/store ZMM, YMM, and 64-bit halves of XMM,
      // for smaller widths (32/16/8) we have to insert/extract them separately.
      // Again, it's free for the 0'th subreg (if op is 32/64 bit wide,
      // but let's pretend that it is also true for 16/8 bit wide ops...)
      if (CurrOpSizeBytes <= 32 / 8 && !Is0thSubVec) {
        int NumEltDoneInCurrXMM = NumEltDone() % NumEltPerXMM;
        assert(NumEltDoneInCurrXMM % CurrNumEltPerOp == 0 &&
               "Element offset not aligned to the current op width?");
        int CoalescedVecEltIdx = NumEltDoneInCurrXMM / CurrNumEltPerOp;
        APInt DemandedElts =
            APInt::getBitsSet(CoalescedVecTy->getNumElements(),
                              CoalescedVecEltIdx, CoalescedVecEltIdx + 1);
        assert(DemandedElts.countPopulation() == 1 && "Inserting single value");
        Cost += getScalarizationOverhead(CoalescedVecTy, DemandedElts, IsLoad,
                                         !IsLoad);
      }

      // This isn't exactly right. We're using slow unaligned 32-byte accesses
      // as a proxy for a double-pumped AVX memory interface such as on
      // Sandybridge.
      if (CurrOpSizeBytes == 32 && ST->isUnalignedMem32Slow())
        Cost += 2;
      else
        Cost += 1;

      SubVecEltsLeft -= CurrNumEltPerOp;
      NumEltRemaining -= CurrNumEltPerOp;
      Alignment = commonAlignment(Alignment.valueOrOne(), CurrOpSizeBytes);
    }
  }

  assert(NumEltRemaining <= 0 && "Should have processed all the elements.");

  return Cost;
}

InstructionCost
X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy, Align Alignment,
                                  unsigned AddressSpace,
                                  TTI::TargetCostKind CostKind) {
  bool IsLoad = (Instruction::Load == Opcode);
  bool IsStore = (Instruction::Store == Opcode);

  auto *SrcVTy = dyn_cast<FixedVectorType>(SrcTy);
  if (!SrcVTy)
    // For scalars, take the regular (unmasked) memory op cost.
    return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace, CostKind);

  unsigned NumElem = SrcVTy->getNumElements();
  auto *MaskTy =
      FixedVectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem);
  if ((IsLoad && !isLegalMaskedLoad(SrcVTy, Alignment)) ||
      (IsStore && !isLegalMaskedStore(SrcVTy, Alignment))) {
    // Scalarization
    APInt DemandedElts = APInt::getAllOnes(NumElem);
    InstructionCost MaskSplitCost =
        getScalarizationOverhead(MaskTy, DemandedElts, false, true);
    InstructionCost ScalarCompareCost = getCmpSelInstrCost(
        Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr,
        CmpInst::BAD_ICMP_PREDICATE, CostKind);
    InstructionCost BranchCost = getCFInstrCost(Instruction::Br, CostKind);
    InstructionCost MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);
    InstructionCost ValueSplitCost =
        getScalarizationOverhead(SrcVTy, DemandedElts, IsLoad, IsStore);
    InstructionCost MemopCost =
        NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                         Alignment, AddressSpace, CostKind);
    return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
  }

  // Legalize the type.
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
  auto VT = TLI->getValueType(DL, SrcVTy);
  InstructionCost Cost = 0;
  if (VT.isSimple() && LT.second != VT.getSimpleVT() &&
      LT.second.getVectorNumElements() == NumElem)
    // Promotion requires extend/truncate for data and a shuffle for mask.
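    // An illustrative reading of the check above (a sketch, not a specific
    // CPU's lowering): when legalization widens the element type but keeps
    // the element count (vXiN -> vXiM with M > N), the data needs an
    // extend/truncate and the vXi8 mask needs to be rearranged to match,
    // which is modeled below as two two-source shuffles.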
    Cost += getShuffleCost(TTI::SK_PermuteTwoSrc, SrcVTy, None, 0, nullptr) +
            getShuffleCost(TTI::SK_PermuteTwoSrc, MaskTy, None, 0, nullptr);

  else if (LT.first * LT.second.getVectorNumElements() > NumElem) {
    auto *NewMaskTy = FixedVectorType::get(MaskTy->getElementType(),
                                           LT.second.getVectorNumElements());
    // Expanding requires filling the mask with zeroes.
    Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, None, 0, MaskTy);
  }

  // Pre-AVX512 - each maskmov load costs 2 + store costs ~8.
  if (!ST->hasAVX512())
    return Cost + LT.first * (IsLoad ? 2 : 8);

  // AVX-512 masked load/store is cheaper.
  return Cost + LT.first;
}

InstructionCost X86TTIImpl::getAddressComputationCost(Type *Ty,
                                                      ScalarEvolution *SE,
                                                      const SCEV *Ptr) {
  // Address computations in vectorized code with non-consecutive addresses will
  // likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  const unsigned NumVectorInstToHideOverhead = 10;

  // Cost modeling of Strided Access Computation is hidden by the indexing
  // modes of X86 regardless of the stride value. We don't believe that there
  // is a difference between constant strided access in general and constant
  // strided access whose stride value is less than or equal to 64.
  // Even in the case of (loop invariant) stride whose value is not known at
  // compile time, the address computation will not incur more than one extra
  // ADD instruction.
  if (Ty->isVectorTy() && SE && !ST->hasAVX2()) {
    // TODO: AVX2 is the current cut-off because we don't have correct
    // interleaving costs for prior ISA's.
    if (!BaseT::isStridedAccess(Ptr))
      return NumVectorInstToHideOverhead;
    if (!BaseT::getConstantStrideStep(SE, Ptr))
      return 1;
  }

  return BaseT::getAddressComputationCost(Ty, SE, Ptr);
}

InstructionCost
X86TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
                                       Optional<FastMathFlags> FMF,
                                       TTI::TargetCostKind CostKind) {
  if (TTI::requiresOrderedReduction(FMF))
    return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use it as the cost.

  static const CostTblEntry SLMCostTblNoPairWise[] = {
    { ISD::FADD, MVT::v2f64, 3 },
    { ISD::ADD,  MVT::v2i64, 5 },
  };

  static const CostTblEntry SSE2CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v2f32, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD,  MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,  MVT::v2i32, 2 }, // FIXME: chosen to be less than v4i32
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3".
    { ISD::ADD,  MVT::v2i16, 2 }, // The data reported by the IACA tool is "4.3".
    { ISD::ADD,  MVT::v4i16, 3 }, // The data reported by the IACA tool is "4.3".
    { ISD::ADD,  MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3".
4252 { ISD::ADD, MVT::v2i8, 2 }, 4253 { ISD::ADD, MVT::v4i8, 2 }, 4254 { ISD::ADD, MVT::v8i8, 2 }, 4255 { ISD::ADD, MVT::v16i8, 3 }, 4256 }; 4257 4258 static const CostTblEntry AVX1CostTblNoPairWise[] = { 4259 { ISD::FADD, MVT::v4f64, 3 }, 4260 { ISD::FADD, MVT::v4f32, 3 }, 4261 { ISD::FADD, MVT::v8f32, 4 }, 4262 { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5". 4263 { ISD::ADD, MVT::v4i64, 3 }, 4264 { ISD::ADD, MVT::v8i32, 5 }, 4265 { ISD::ADD, MVT::v16i16, 5 }, 4266 { ISD::ADD, MVT::v32i8, 4 }, 4267 }; 4268 4269 int ISD = TLI->InstructionOpcodeToISD(Opcode); 4270 assert(ISD && "Invalid opcode"); 4271 4272 // Before legalizing the type, give a chance to look up illegal narrow types 4273 // in the table. 4274 // FIXME: Is there a better way to do this? 4275 EVT VT = TLI->getValueType(DL, ValTy); 4276 if (VT.isSimple()) { 4277 MVT MTy = VT.getSimpleVT(); 4278 if (ST->useSLMArithCosts()) 4279 if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy)) 4280 return Entry->Cost; 4281 4282 if (ST->hasAVX()) 4283 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy)) 4284 return Entry->Cost; 4285 4286 if (ST->hasSSE2()) 4287 if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy)) 4288 return Entry->Cost; 4289 } 4290 4291 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy); 4292 4293 MVT MTy = LT.second; 4294 4295 auto *ValVTy = cast<FixedVectorType>(ValTy); 4296 4297 // Special case: vXi8 mul reductions are performed as vXi16. 4298 if (ISD == ISD::MUL && MTy.getScalarType() == MVT::i8) { 4299 auto *WideSclTy = IntegerType::get(ValVTy->getContext(), 16); 4300 auto *WideVecTy = FixedVectorType::get(WideSclTy, ValVTy->getNumElements()); 4301 return getCastInstrCost(Instruction::ZExt, WideVecTy, ValTy, 4302 TargetTransformInfo::CastContextHint::None, 4303 CostKind) + 4304 getArithmeticReductionCost(Opcode, WideVecTy, FMF, CostKind); 4305 } 4306 4307 InstructionCost ArithmeticCost = 0; 4308 if (LT.first != 1 && MTy.isVector() && 4309 MTy.getVectorNumElements() < ValVTy->getNumElements()) { 4310 // Type needs to be split. We need LT.first - 1 arithmetic ops. 4311 auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(), 4312 MTy.getVectorNumElements()); 4313 ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind); 4314 ArithmeticCost *= LT.first - 1; 4315 } 4316 4317 if (ST->useSLMArithCosts()) 4318 if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy)) 4319 return ArithmeticCost + Entry->Cost; 4320 4321 if (ST->hasAVX()) 4322 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy)) 4323 return ArithmeticCost + Entry->Cost; 4324 4325 if (ST->hasSSE2()) 4326 if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy)) 4327 return ArithmeticCost + Entry->Cost; 4328 4329 // FIXME: These assume a naive kshift+binop lowering, which is probably 4330 // conservative in most cases. 
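  // As a rough sketch of that assumed lowering (illustrative, not necessarily
  // what codegen emits): an AND reduction of v16i1 is modeled as four
  // kshift+kand pairs followed by a final mask move and test, i.e.
  // 2 * log2(16) + 1 = 9, which is where the table entries below come from.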
4331 static const CostTblEntry AVX512BoolReduction[] = { 4332 { ISD::AND, MVT::v2i1, 3 }, 4333 { ISD::AND, MVT::v4i1, 5 }, 4334 { ISD::AND, MVT::v8i1, 7 }, 4335 { ISD::AND, MVT::v16i1, 9 }, 4336 { ISD::AND, MVT::v32i1, 11 }, 4337 { ISD::AND, MVT::v64i1, 13 }, 4338 { ISD::OR, MVT::v2i1, 3 }, 4339 { ISD::OR, MVT::v4i1, 5 }, 4340 { ISD::OR, MVT::v8i1, 7 }, 4341 { ISD::OR, MVT::v16i1, 9 }, 4342 { ISD::OR, MVT::v32i1, 11 }, 4343 { ISD::OR, MVT::v64i1, 13 }, 4344 }; 4345 4346 static const CostTblEntry AVX2BoolReduction[] = { 4347 { ISD::AND, MVT::v16i16, 2 }, // vpmovmskb + cmp 4348 { ISD::AND, MVT::v32i8, 2 }, // vpmovmskb + cmp 4349 { ISD::OR, MVT::v16i16, 2 }, // vpmovmskb + cmp 4350 { ISD::OR, MVT::v32i8, 2 }, // vpmovmskb + cmp 4351 }; 4352 4353 static const CostTblEntry AVX1BoolReduction[] = { 4354 { ISD::AND, MVT::v4i64, 2 }, // vmovmskpd + cmp 4355 { ISD::AND, MVT::v8i32, 2 }, // vmovmskps + cmp 4356 { ISD::AND, MVT::v16i16, 4 }, // vextractf128 + vpand + vpmovmskb + cmp 4357 { ISD::AND, MVT::v32i8, 4 }, // vextractf128 + vpand + vpmovmskb + cmp 4358 { ISD::OR, MVT::v4i64, 2 }, // vmovmskpd + cmp 4359 { ISD::OR, MVT::v8i32, 2 }, // vmovmskps + cmp 4360 { ISD::OR, MVT::v16i16, 4 }, // vextractf128 + vpor + vpmovmskb + cmp 4361 { ISD::OR, MVT::v32i8, 4 }, // vextractf128 + vpor + vpmovmskb + cmp 4362 }; 4363 4364 static const CostTblEntry SSE2BoolReduction[] = { 4365 { ISD::AND, MVT::v2i64, 2 }, // movmskpd + cmp 4366 { ISD::AND, MVT::v4i32, 2 }, // movmskps + cmp 4367 { ISD::AND, MVT::v8i16, 2 }, // pmovmskb + cmp 4368 { ISD::AND, MVT::v16i8, 2 }, // pmovmskb + cmp 4369 { ISD::OR, MVT::v2i64, 2 }, // movmskpd + cmp 4370 { ISD::OR, MVT::v4i32, 2 }, // movmskps + cmp 4371 { ISD::OR, MVT::v8i16, 2 }, // pmovmskb + cmp 4372 { ISD::OR, MVT::v16i8, 2 }, // pmovmskb + cmp 4373 }; 4374 4375 // Handle bool allof/anyof patterns. 4376 if (ValVTy->getElementType()->isIntegerTy(1)) { 4377 InstructionCost ArithmeticCost = 0; 4378 if (LT.first != 1 && MTy.isVector() && 4379 MTy.getVectorNumElements() < ValVTy->getNumElements()) { 4380 // Type needs to be split. We need LT.first - 1 arithmetic ops. 4381 auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(), 4382 MTy.getVectorNumElements()); 4383 ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind); 4384 ArithmeticCost *= LT.first - 1; 4385 } 4386 4387 if (ST->hasAVX512()) 4388 if (const auto *Entry = CostTableLookup(AVX512BoolReduction, ISD, MTy)) 4389 return ArithmeticCost + Entry->Cost; 4390 if (ST->hasAVX2()) 4391 if (const auto *Entry = CostTableLookup(AVX2BoolReduction, ISD, MTy)) 4392 return ArithmeticCost + Entry->Cost; 4393 if (ST->hasAVX()) 4394 if (const auto *Entry = CostTableLookup(AVX1BoolReduction, ISD, MTy)) 4395 return ArithmeticCost + Entry->Cost; 4396 if (ST->hasSSE2()) 4397 if (const auto *Entry = CostTableLookup(SSE2BoolReduction, ISD, MTy)) 4398 return ArithmeticCost + Entry->Cost; 4399 4400 return BaseT::getArithmeticReductionCost(Opcode, ValVTy, FMF, CostKind); 4401 } 4402 4403 unsigned NumVecElts = ValVTy->getNumElements(); 4404 unsigned ScalarSize = ValVTy->getScalarSizeInBits(); 4405 4406 // Special case power of 2 reductions where the scalar type isn't changed 4407 // by type legalization. 
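  // For example, a v8i32 add reduction (power-of-2 element count, i32
  // preserved by legalization) is handled by the halving loop below, while
  // something like v3i32 falls back to the generic implementation.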
  if (!isPowerOf2_32(NumVecElts) || ScalarSize != MTy.getScalarSizeInBits())
    return BaseT::getArithmeticReductionCost(Opcode, ValVTy, FMF, CostKind);

  InstructionCost ReductionCost = 0;

  auto *Ty = ValVTy;
  if (LT.first != 1 && MTy.isVector() &&
      MTy.getVectorNumElements() < ValVTy->getNumElements()) {
    // Type needs to be split. We need LT.first - 1 arithmetic ops.
    Ty = FixedVectorType::get(ValVTy->getElementType(),
                              MTy.getVectorNumElements());
    ReductionCost = getArithmeticInstrCost(Opcode, Ty, CostKind);
    ReductionCost *= LT.first - 1;
    NumVecElts = MTy.getVectorNumElements();
  }

  // Now handle reduction with the legal type, taking into account size changes
  // at each level.
  while (NumVecElts > 1) {
    // Determine the size of the remaining vector we need to reduce.
    unsigned Size = NumVecElts * ScalarSize;
    NumVecElts /= 2;
    // If we're reducing from 256/512 bits, use an extract_subvector.
    if (Size > 128) {
      auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts);
      ReductionCost +=
          getShuffleCost(TTI::SK_ExtractSubvector, Ty, None, NumVecElts, SubTy);
      Ty = SubTy;
    } else if (Size == 128) {
      // Reducing from 128 bits is a permute of v2f64/v2i64.
      FixedVectorType *ShufTy;
      if (ValVTy->getElementType()->isFloatingPointTy())
        ShufTy =
            FixedVectorType::get(Type::getDoubleTy(ValVTy->getContext()), 2);
      else
        ShufTy =
            FixedVectorType::get(Type::getInt64Ty(ValVTy->getContext()), 2);
      ReductionCost +=
          getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
    } else if (Size == 64) {
      // Reducing from 64 bits is a shuffle of v4f32/v4i32.
      FixedVectorType *ShufTy;
      if (ValVTy->getElementType()->isFloatingPointTy())
        ShufTy =
            FixedVectorType::get(Type::getFloatTy(ValVTy->getContext()), 4);
      else
        ShufTy =
            FixedVectorType::get(Type::getInt32Ty(ValVTy->getContext()), 4);
      ReductionCost +=
          getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
    } else {
      // Reducing from smaller size is a shift by immediate.
      auto *ShiftTy = FixedVectorType::get(
          Type::getIntNTy(ValVTy->getContext(), Size), 128 / Size);
      ReductionCost += getArithmeticInstrCost(
          Instruction::LShr, ShiftTy, CostKind,
          TargetTransformInfo::OK_AnyValue,
          TargetTransformInfo::OK_UniformConstantValue,
          TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
    }

    // Add the arithmetic op for this level.
    ReductionCost += getArithmeticInstrCost(Opcode, Ty, CostKind);
  }

  // Add the final extract element to the cost.
  return ReductionCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
}

InstructionCost X86TTIImpl::getMinMaxCost(Type *Ty, Type *CondTy,
                                          bool IsUnsigned) {
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  MVT MTy = LT.second;

  int ISD;
  if (Ty->isIntOrIntVectorTy()) {
    ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
  } else {
    assert(Ty->isFPOrFPVectorTy() &&
           "Expected floating point or integer vector type.");
    ISD = ISD::FMINNUM;
  }

  static const CostTblEntry SSE1CostTbl[] = {
    {ISD::FMINNUM, MVT::v4f32, 1},
  };

  static const CostTblEntry SSE2CostTbl[] = {
    {ISD::FMINNUM, MVT::v2f64, 1},
    {ISD::SMIN, MVT::v8i16, 1},
    {ISD::UMIN, MVT::v16i8, 1},
  };

  static const CostTblEntry SSE41CostTbl[] = {
    {ISD::SMIN, MVT::v4i32, 1},
    {ISD::UMIN, MVT::v4i32, 1},
    {ISD::UMIN, MVT::v8i16, 1},
    {ISD::SMIN, MVT::v16i8, 1},
  };

  static const CostTblEntry SSE42CostTbl[] = {
    {ISD::UMIN, MVT::v2i64, 3}, // xor+pcmpgtq+blendvpd
  };

  static const CostTblEntry AVX1CostTbl[] = {
    {ISD::FMINNUM, MVT::v8f32, 1},
    {ISD::FMINNUM, MVT::v4f64, 1},
    {ISD::SMIN, MVT::v8i32, 3},
    {ISD::UMIN, MVT::v8i32, 3},
    {ISD::SMIN, MVT::v16i16, 3},
    {ISD::UMIN, MVT::v16i16, 3},
    {ISD::SMIN, MVT::v32i8, 3},
    {ISD::UMIN, MVT::v32i8, 3},
  };

  static const CostTblEntry AVX2CostTbl[] = {
    {ISD::SMIN, MVT::v8i32, 1},
    {ISD::UMIN, MVT::v8i32, 1},
    {ISD::SMIN, MVT::v16i16, 1},
    {ISD::UMIN, MVT::v16i16, 1},
    {ISD::SMIN, MVT::v32i8, 1},
    {ISD::UMIN, MVT::v32i8, 1},
  };

  static const CostTblEntry AVX512CostTbl[] = {
    {ISD::FMINNUM, MVT::v16f32, 1},
    {ISD::FMINNUM, MVT::v8f64, 1},
    {ISD::SMIN, MVT::v2i64, 1},
    {ISD::UMIN, MVT::v2i64, 1},
    {ISD::SMIN, MVT::v4i64, 1},
    {ISD::UMIN, MVT::v4i64, 1},
    {ISD::SMIN, MVT::v8i64, 1},
    {ISD::UMIN, MVT::v8i64, 1},
    {ISD::SMIN, MVT::v16i32, 1},
    {ISD::UMIN, MVT::v16i32, 1},
  };

  static const CostTblEntry AVX512BWCostTbl[] = {
    {ISD::SMIN, MVT::v32i16, 1},
    {ISD::UMIN, MVT::v32i16, 1},
    {ISD::SMIN, MVT::v64i8, 1},
    {ISD::UMIN, MVT::v64i8, 1},
  };

  // If we have a native MIN/MAX instruction for this type, use it.
  if (ST->hasBWI())
    if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  unsigned CmpOpcode;
  if (Ty->isFPOrFPVectorTy()) {
    CmpOpcode = Instruction::FCmp;
  } else {
    assert(Ty->isIntOrIntVectorTy() &&
           "expecting floating point or integer type for min/max reduction");
    CmpOpcode = Instruction::ICmp;
  }

  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
  // Otherwise fall back to cmp+select.
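  // That is, each min/max step is modeled as the IR pair (a hypothetical
  // example; %a/%b stand for the two reduction operands):
  //   %c = icmp ult <4 x i32> %a, %b
  //   %r = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %b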
  InstructionCost Result =
      getCmpSelInstrCost(CmpOpcode, Ty, CondTy, CmpInst::BAD_ICMP_PREDICATE,
                         CostKind) +
      getCmpSelInstrCost(Instruction::Select, Ty, CondTy,
                         CmpInst::BAD_ICMP_PREDICATE, CostKind);
  return Result;
}

InstructionCost
X86TTIImpl::getMinMaxReductionCost(VectorType *ValTy, VectorType *CondTy,
                                   bool IsUnsigned,
                                   TTI::TargetCostKind CostKind) {
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD;
  if (ValTy->isIntOrIntVectorTy()) {
    ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
  } else {
    assert(ValTy->isFPOrFPVectorTy() &&
           "Expected floating point or integer vector type.");
    ISD = ISD::FMINNUM;
  }

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use it as the cost.

  static const CostTblEntry SSE2CostTblNoPairWise[] = {
    {ISD::UMIN, MVT::v2i16, 5}, // need pxors to use pminsw/pmaxsw
    {ISD::UMIN, MVT::v4i16, 7}, // need pxors to use pminsw/pmaxsw
    {ISD::UMIN, MVT::v8i16, 9}, // need pxors to use pminsw/pmaxsw
  };

  static const CostTblEntry SSE41CostTblNoPairWise[] = {
    {ISD::SMIN, MVT::v2i16, 3}, // same as sse2
    {ISD::SMIN, MVT::v4i16, 5}, // same as sse2
    {ISD::UMIN, MVT::v2i16, 5}, // same as sse2
    {ISD::UMIN, MVT::v4i16, 7}, // same as sse2
    {ISD::SMIN, MVT::v8i16, 4}, // phminposuw+xor
    {ISD::UMIN, MVT::v8i16, 4}, // FIXME: umin is cheaper than umax
    {ISD::SMIN, MVT::v2i8, 3},  // pminsb
    {ISD::SMIN, MVT::v4i8, 5},  // pminsb
    {ISD::SMIN, MVT::v8i8, 7},  // pminsb
    {ISD::SMIN, MVT::v16i8, 6},
    {ISD::UMIN, MVT::v2i8, 3},  // same as sse2
    {ISD::UMIN, MVT::v4i8, 5},  // same as sse2
    {ISD::UMIN, MVT::v8i8, 7},  // same as sse2
    {ISD::UMIN, MVT::v16i8, 6}, // FIXME: umin is cheaper than umax
  };

  static const CostTblEntry AVX1CostTblNoPairWise[] = {
    {ISD::SMIN, MVT::v16i16, 6},
    {ISD::UMIN, MVT::v16i16, 6}, // FIXME: umin is cheaper than umax
    {ISD::SMIN, MVT::v32i8, 8},
    {ISD::UMIN, MVT::v32i8, 8},
  };

  static const CostTblEntry AVX512BWCostTblNoPairWise[] = {
    {ISD::SMIN, MVT::v32i16, 8},
    {ISD::UMIN, MVT::v32i16, 8}, // FIXME: umin is cheaper than umax
    {ISD::SMIN, MVT::v64i8, 10},
    {ISD::UMIN, MVT::v64i8, 10},
  };

  // Before legalizing the type, give a chance to look up illegal narrow types
  // in the table.
  // FIXME: Is there a better way to do this?
  EVT VT = TLI->getValueType(DL, ValTy);
  if (VT.isSimple()) {
    MVT MTy = VT.getSimpleVT();
    if (ST->hasBWI())
      if (const auto *Entry =
              CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy))
        return Entry->Cost;

    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
        return Entry->Cost;

    if (ST->hasSSE41())
      if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy))
        return Entry->Cost;

    if (ST->hasSSE2())
      if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
        return Entry->Cost;
  }

  auto *ValVTy = cast<FixedVectorType>(ValTy);
  unsigned NumVecElts = ValVTy->getNumElements();

  auto *Ty = ValVTy;
  InstructionCost MinMaxCost = 0;
  if (LT.first != 1 && MTy.isVector() &&
      MTy.getVectorNumElements() < ValVTy->getNumElements()) {
    // Type needs to be split. We need LT.first - 1 operations.
    Ty = FixedVectorType::get(ValVTy->getElementType(),
                              MTy.getVectorNumElements());
    auto *SubCondTy = FixedVectorType::get(CondTy->getElementType(),
                                           MTy.getVectorNumElements());
    MinMaxCost = getMinMaxCost(Ty, SubCondTy, IsUnsigned);
    MinMaxCost *= LT.first - 1;
    NumVecElts = MTy.getVectorNumElements();
  }

  if (ST->hasBWI())
    if (const auto *Entry =
            CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy))
      return MinMaxCost + Entry->Cost;

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
      return MinMaxCost + Entry->Cost;

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy))
      return MinMaxCost + Entry->Cost;

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
      return MinMaxCost + Entry->Cost;

  unsigned ScalarSize = ValTy->getScalarSizeInBits();

  // Special case power of 2 reductions where the scalar type isn't changed
  // by type legalization.
  if (!isPowerOf2_32(ValVTy->getNumElements()) ||
      ScalarSize != MTy.getScalarSizeInBits())
    return BaseT::getMinMaxReductionCost(ValTy, CondTy, IsUnsigned, CostKind);

  // Now handle reduction with the legal type, taking into account size changes
  // at each level.
  while (NumVecElts > 1) {
    // Determine the size of the remaining vector we need to reduce.
    unsigned Size = NumVecElts * ScalarSize;
    NumVecElts /= 2;
    // If we're reducing from 256/512 bits, use an extract_subvector.
    if (Size > 128) {
      auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts);
      MinMaxCost +=
          getShuffleCost(TTI::SK_ExtractSubvector, Ty, None, NumVecElts, SubTy);
      Ty = SubTy;
    } else if (Size == 128) {
      // Reducing from 128 bits is a permute of v2f64/v2i64.
      VectorType *ShufTy;
      if (ValTy->getScalarType()->isFloatingPointTy())
        ShufTy =
            FixedVectorType::get(Type::getDoubleTy(ValTy->getContext()), 2);
      else
        ShufTy = FixedVectorType::get(Type::getInt64Ty(ValTy->getContext()), 2);
      MinMaxCost +=
          getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
    } else if (Size == 64) {
      // Reducing from 64 bits is a shuffle of v4f32/v4i32.
      FixedVectorType *ShufTy;
      if (ValTy->getScalarType()->isFloatingPointTy())
        ShufTy = FixedVectorType::get(Type::getFloatTy(ValTy->getContext()), 4);
      else
        ShufTy = FixedVectorType::get(Type::getInt32Ty(ValTy->getContext()), 4);
      MinMaxCost +=
          getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
    } else {
      // Reducing from smaller size is a shift by immediate.
      auto *ShiftTy = FixedVectorType::get(
          Type::getIntNTy(ValTy->getContext(), Size), 128 / Size);
      MinMaxCost += getArithmeticInstrCost(
          Instruction::LShr, ShiftTy, TTI::TCK_RecipThroughput,
          TargetTransformInfo::OK_AnyValue,
          TargetTransformInfo::OK_UniformConstantValue,
          TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
    }

    // Add the min/max op for this level.
    auto *SubCondTy =
        FixedVectorType::get(CondTy->getElementType(), Ty->getNumElements());
    MinMaxCost += getMinMaxCost(Ty, SubCondTy, IsUnsigned);
  }

  // Add the final extract element to the cost.
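  // Putting the loop above together (roughly, assuming AVX2; not exact
  // codegen): a v8i32 umin reduction is modeled as one extract_subvector plus
  // vpminud, then two 128-bit shuffles each followed by vpminud, and finally
  // the element-0 extract added below.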
  return MinMaxCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
}

/// Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
InstructionCost X86TTIImpl::getIntImmCost(int64_t Val) {
  if (Val == 0)
    return TTI::TCC_Free;

  if (isInt<32>(Val))
    return TTI::TCC_Basic;

  return 2 * TTI::TCC_Basic;
}

InstructionCost X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
                                          TTI::TargetCostKind CostKind) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Never hoist constants larger than 128 bits, because this might lead to
  // incorrect code generation or assertions in codegen.
  // FIXME: Create a cost model for types larger than i128 once the codegen
  // issues have been fixed.
  if (BitSize > 128)
    return TTI::TCC_Free;

  if (Imm == 0)
    return TTI::TCC_Free;

  // Sign-extend all constants to a multiple of 64-bit.
  APInt ImmVal = Imm;
  if (BitSize % 64 != 0)
    ImmVal = Imm.sext(alignTo(BitSize, 64));

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  InstructionCost Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max<InstructionCost>(1, Cost);
}

InstructionCost X86TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                              const APInt &Imm, Type *Ty,
                                              TTI::TargetCostKind CostKind,
                                              Instruction *Inst) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::ICmp:
    // This is an imperfect hack to prevent constant hoisting of
    // compares that might be trying to check if a 64-bit value fits in
    // 32-bits. The backend can optimize these cases using a right shift by 32.
    // Ideally we would check the compare predicate here. There are also other
    // similar immediates the backend can use shifts for.
    if (Idx == 1 && Imm.getBitWidth() == 64) {
      uint64_t ImmVal = Imm.getZExtValue();
      if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff)
        return TTI::TCC_Free;
    }
    ImmIdx = 1;
    break;
  case Instruction::And:
    // We support 64-bit ANDs with immediates with 32-bits of leading zeroes
    // by using a 32-bit operation with implicit zero extension. Detect such
    // immediates here as the normal path expects bit 31 to be sign extended.
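    // e.g. 'and i64 %x, 4294967295' can be lowered as a 32-bit operation
    // whose result implicitly zero-extends to 64 bits, so no 64-bit immediate
    // has to be materialized (an illustrative case; exact lowering may vary).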
4869 if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue())) 4870 return TTI::TCC_Free; 4871 ImmIdx = 1; 4872 break; 4873 case Instruction::Add: 4874 case Instruction::Sub: 4875 // For add/sub, we can use the opposite instruction for INT32_MIN. 4876 if (Idx == 1 && Imm.getBitWidth() == 64 && Imm.getZExtValue() == 0x80000000) 4877 return TTI::TCC_Free; 4878 ImmIdx = 1; 4879 break; 4880 case Instruction::UDiv: 4881 case Instruction::SDiv: 4882 case Instruction::URem: 4883 case Instruction::SRem: 4884 // Division by constant is typically expanded later into a different 4885 // instruction sequence. This completely changes the constants. 4886 // Report them as "free" to stop ConstantHoist from marking them as opaque. 4887 return TTI::TCC_Free; 4888 case Instruction::Mul: 4889 case Instruction::Or: 4890 case Instruction::Xor: 4891 ImmIdx = 1; 4892 break; 4893 // Always return TCC_Free for the shift value of a shift instruction. 4894 case Instruction::Shl: 4895 case Instruction::LShr: 4896 case Instruction::AShr: 4897 if (Idx == 1) 4898 return TTI::TCC_Free; 4899 break; 4900 case Instruction::Trunc: 4901 case Instruction::ZExt: 4902 case Instruction::SExt: 4903 case Instruction::IntToPtr: 4904 case Instruction::PtrToInt: 4905 case Instruction::BitCast: 4906 case Instruction::PHI: 4907 case Instruction::Call: 4908 case Instruction::Select: 4909 case Instruction::Ret: 4910 case Instruction::Load: 4911 break; 4912 } 4913 4914 if (Idx == ImmIdx) { 4915 int NumConstants = divideCeil(BitSize, 64); 4916 InstructionCost Cost = X86TTIImpl::getIntImmCost(Imm, Ty, CostKind); 4917 return (Cost <= NumConstants * TTI::TCC_Basic) 4918 ? static_cast<int>(TTI::TCC_Free) 4919 : Cost; 4920 } 4921 4922 return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind); 4923 } 4924 4925 InstructionCost X86TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, 4926 const APInt &Imm, Type *Ty, 4927 TTI::TargetCostKind CostKind) { 4928 assert(Ty->isIntegerTy()); 4929 4930 unsigned BitSize = Ty->getPrimitiveSizeInBits(); 4931 // There is no cost model for constants with a bit size of 0. Return TCC_Free 4932 // here, so that constant hoisting will ignore this constant. 4933 if (BitSize == 0) 4934 return TTI::TCC_Free; 4935 4936 switch (IID) { 4937 default: 4938 return TTI::TCC_Free; 4939 case Intrinsic::sadd_with_overflow: 4940 case Intrinsic::uadd_with_overflow: 4941 case Intrinsic::ssub_with_overflow: 4942 case Intrinsic::usub_with_overflow: 4943 case Intrinsic::smul_with_overflow: 4944 case Intrinsic::umul_with_overflow: 4945 if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue())) 4946 return TTI::TCC_Free; 4947 break; 4948 case Intrinsic::experimental_stackmap: 4949 if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue()))) 4950 return TTI::TCC_Free; 4951 break; 4952 case Intrinsic::experimental_patchpoint_void: 4953 case Intrinsic::experimental_patchpoint_i64: 4954 if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue()))) 4955 return TTI::TCC_Free; 4956 break; 4957 } 4958 return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind); 4959 } 4960 4961 InstructionCost X86TTIImpl::getCFInstrCost(unsigned Opcode, 4962 TTI::TargetCostKind CostKind, 4963 const Instruction *I) { 4964 if (CostKind != TTI::TCK_RecipThroughput) 4965 return Opcode == Instruction::PHI ? 0 : 1; 4966 // Branches are assumed to be predicted. 4967 return 0; 4968 } 4969 4970 int X86TTIImpl::getGatherOverhead() const { 4971 // Some CPUs have more overhead for gather. 
// The specified overhead is relative
// to the Load operation. "2" is the number provided by Intel architects. This
// parameter is used for cost estimation of Gather Op and comparison with
// other alternatives.
// TODO: Remove the explicit hasAVX512()? That would mean we would only
// enable gather with a -march.
  if (ST->hasAVX512() || (ST->hasAVX2() && ST->hasFastGather()))
    return 2;

  return 1024;
}

int X86TTIImpl::getScatterOverhead() const {
  if (ST->hasAVX512())
    return 2;

  return 1024;
}

// Return an average cost of Gather / Scatter instruction, maybe improved later.
// FIXME: Add TargetCostKind support.
InstructionCost X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy,
                                            const Value *Ptr, Align Alignment,
                                            unsigned AddressSpace) {

  assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost");
  unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();

  // Try to reduce index size from 64 bit (default for GEP)
  // to 32. It is essential for VF 16. If the index can't be reduced to 32, the
  // operation will use 16 x 64 indices which do not fit in a zmm and needs
  // to split. Also check that the base pointer is the same for all lanes,
  // and that there's at most one variable index.
  auto getIndexSizeInBits = [](const Value *Ptr, const DataLayout &DL) {
    unsigned IndexSize = DL.getPointerSizeInBits();
    const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
    if (IndexSize < 64 || !GEP)
      return IndexSize;

    unsigned NumOfVarIndices = 0;
    const Value *Ptrs = GEP->getPointerOperand();
    if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs))
      return IndexSize;
    for (unsigned i = 1; i < GEP->getNumOperands(); ++i) {
      if (isa<Constant>(GEP->getOperand(i)))
        continue;
      Type *IndxTy = GEP->getOperand(i)->getType();
      if (auto *IndexVTy = dyn_cast<VectorType>(IndxTy))
        IndxTy = IndexVTy->getElementType();
      if ((IndxTy->getPrimitiveSizeInBits() == 64 &&
           !isa<SExtInst>(GEP->getOperand(i))) ||
          ++NumOfVarIndices > 1)
        return IndexSize; // 64
    }
    return (unsigned)32;
  };

  // Trying to reduce IndexSize to 32 bits for vector 16.
  // By default the IndexSize is equal to pointer size.
  unsigned IndexSize = (ST->hasAVX512() && VF >= 16)
                           ? getIndexSizeInBits(Ptr, DL)
                           : DL.getPointerSizeInBits();

  auto *IndexVTy = FixedVectorType::get(
      IntegerType::get(SrcVTy->getContext(), IndexSize), VF);
  std::pair<InstructionCost, MVT> IdxsLT =
      TLI->getTypeLegalizationCost(DL, IndexVTy);
  std::pair<InstructionCost, MVT> SrcLT =
      TLI->getTypeLegalizationCost(DL, SrcVTy);
  InstructionCost::CostType SplitFactor =
      *std::max(IdxsLT.first, SrcLT.first).getValue();
  if (SplitFactor > 1) {
    // Handle splitting of vector of pointers
    auto *SplitSrcTy =
        FixedVectorType::get(SrcVTy->getScalarType(), VF / SplitFactor);
    return SplitFactor * getGSVectorCost(Opcode, SplitSrcTy, Ptr, Alignment,
                                         AddressSpace);
  }

  // The gather / scatter cost is given by Intel architects. It is a rough
  // number since we are looking at one instruction at a time.
  const int GSOverhead = (Opcode == Instruction::Load) ?
getGatherOverhead() 5054 : getScatterOverhead(); 5055 return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(), 5056 MaybeAlign(Alignment), AddressSpace, 5057 TTI::TCK_RecipThroughput); 5058 } 5059 5060 /// Return the cost of full scalarization of gather / scatter operation. 5061 /// 5062 /// Opcode - Load or Store instruction. 5063 /// SrcVTy - The type of the data vector that should be gathered or scattered. 5064 /// VariableMask - The mask is non-constant at compile time. 5065 /// Alignment - Alignment for one element. 5066 /// AddressSpace - pointer[s] address space. 5067 /// 5068 /// FIXME: Add TargetCostKind support. 5069 InstructionCost X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy, 5070 bool VariableMask, Align Alignment, 5071 unsigned AddressSpace) { 5072 Type *ScalarTy = SrcVTy->getScalarType(); 5073 unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements(); 5074 APInt DemandedElts = APInt::getAllOnes(VF); 5075 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 5076 5077 InstructionCost MaskUnpackCost = 0; 5078 if (VariableMask) { 5079 auto *MaskTy = 5080 FixedVectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF); 5081 MaskUnpackCost = getScalarizationOverhead( 5082 MaskTy, DemandedElts, /*Insert=*/false, /*Extract=*/true); 5083 InstructionCost ScalarCompareCost = getCmpSelInstrCost( 5084 Instruction::ICmp, Type::getInt1Ty(SrcVTy->getContext()), nullptr, 5085 CmpInst::BAD_ICMP_PREDICATE, CostKind); 5086 InstructionCost BranchCost = getCFInstrCost(Instruction::Br, CostKind); 5087 MaskUnpackCost += VF * (BranchCost + ScalarCompareCost); 5088 } 5089 5090 InstructionCost AddressUnpackCost = getScalarizationOverhead( 5091 FixedVectorType::get(ScalarTy->getPointerTo(), VF), DemandedElts, 5092 /*Insert=*/false, /*Extract=*/true); 5093 5094 // The cost of the scalar loads/stores. 5095 InstructionCost MemoryOpCost = 5096 VF * getMemoryOpCost(Opcode, ScalarTy, MaybeAlign(Alignment), 5097 AddressSpace, CostKind); 5098 5099 // The cost of forming the vector from loaded scalars/ 5100 // scalarizing the vector to perform scalar stores. 
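  // For a gather of <4 x float>, this sketch adds up to: four pointer
  // extracts, four scalar loads, and four inserts to rebuild the vector (plus
  // the per-lane compare+branch above when the mask is variable).
  // Illustrative arithmetic only; the real sequence depends on the subtarget.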
  InstructionCost InsertExtractCost =
      getScalarizationOverhead(cast<FixedVectorType>(SrcVTy), DemandedElts,
                               /*Insert=*/Opcode == Instruction::Load,
                               /*Extract=*/Opcode == Instruction::Store);

  return AddressUnpackCost + MemoryOpCost + MaskUnpackCost + InsertExtractCost;
}

/// Calculate the cost of Gather / Scatter operation
InstructionCost X86TTIImpl::getGatherScatterOpCost(
    unsigned Opcode, Type *SrcVTy, const Value *Ptr, bool VariableMask,
    Align Alignment, TTI::TargetCostKind CostKind,
    const Instruction *I = nullptr) {
  if (CostKind != TTI::TCK_RecipThroughput) {
    if ((Opcode == Instruction::Load &&
         isLegalMaskedGather(SrcVTy, Align(Alignment)) &&
         !forceScalarizeMaskedGather(cast<VectorType>(SrcVTy),
                                     Align(Alignment))) ||
        (Opcode == Instruction::Store &&
         isLegalMaskedScatter(SrcVTy, Align(Alignment)) &&
         !forceScalarizeMaskedScatter(cast<VectorType>(SrcVTy),
                                      Align(Alignment))))
      return 1;
    return BaseT::getGatherScatterOpCost(Opcode, SrcVTy, Ptr, VariableMask,
                                         Alignment, CostKind, I);
  }

  assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter");
  PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
  if (!PtrTy && Ptr->getType()->isVectorTy())
    PtrTy = dyn_cast<PointerType>(
        cast<VectorType>(Ptr->getType())->getElementType());
  assert(PtrTy && "Unexpected type for Ptr argument");
  unsigned AddressSpace = PtrTy->getAddressSpace();

  if ((Opcode == Instruction::Load &&
       (!isLegalMaskedGather(SrcVTy, Align(Alignment)) ||
        forceScalarizeMaskedGather(cast<VectorType>(SrcVTy),
                                   Align(Alignment)))) ||
      (Opcode == Instruction::Store &&
       (!isLegalMaskedScatter(SrcVTy, Align(Alignment)) ||
        forceScalarizeMaskedScatter(cast<VectorType>(SrcVTy),
                                    Align(Alignment)))))
    return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment,
                           AddressSpace);

  return getGSVectorCost(Opcode, SrcVTy, Ptr, Alignment, AddressSpace);
}

bool X86TTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                               TargetTransformInfo::LSRCost &C2) {
  // X86-specific here: instruction count gets first priority.
  return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost,
                  C1.NumIVMuls, C1.NumBaseAdds,
                  C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
         std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost,
                  C2.NumIVMuls, C2.NumBaseAdds,
                  C2.ScaleCost, C2.ImmCost, C2.SetupCost);
}

bool X86TTIImpl::canMacroFuseCmp() {
  return ST->hasMacroFusion() || ST->hasBranchFusion();
}

bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) {
  if (!ST->hasAVX())
    return false;

  // The backend can't handle a single element vector.
  if (isa<VectorType>(DataTy) &&
      cast<FixedVectorType>(DataTy)->getNumElements() == 1)
    return false;

  Type *ScalarTy = DataTy->getScalarType();

  if (ScalarTy->isPointerTy())
    return true;

  if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
    return true;

  if (ScalarTy->isHalfTy() && ST->hasBWI() && ST->hasFP16())
    return true;

  if (!ScalarTy->isIntegerTy())
    return false;

  unsigned IntWidth = ScalarTy->getIntegerBitWidth();
  return IntWidth == 32 || IntWidth == 64 ||
         ((IntWidth == 8 || IntWidth == 16) && ST->hasBWI());
}

bool X86TTIImpl::isLegalMaskedStore(Type *DataType, Align Alignment) {
  return isLegalMaskedLoad(DataType, Alignment);
}

bool X86TTIImpl::isLegalNTLoad(Type *DataType, Align Alignment) {
  unsigned DataSize = DL.getTypeStoreSize(DataType);
  // The only supported nontemporal loads are for aligned vectors of 16 or 32
  // bytes. Note that 32-byte nontemporal vector loads are supported by AVX2
  // (the equivalent stores only require AVX).
  if (Alignment >= DataSize && (DataSize == 16 || DataSize == 32))
    return DataSize == 16 ? ST->hasSSE1() : ST->hasAVX2();

  return false;
}

bool X86TTIImpl::isLegalNTStore(Type *DataType, Align Alignment) {
  unsigned DataSize = DL.getTypeStoreSize(DataType);

  // SSE4A supports nontemporal stores of float and double at arbitrary
  // alignment.
  if (ST->hasSSE4A() && (DataType->isFloatTy() || DataType->isDoubleTy()))
    return true;

  // Besides the SSE4A subtarget exception above, only aligned stores are
  // available nontemporally on any other subtarget. And only stores with a
  // size of 4..32 bytes (powers of 2, only) are permitted.
  if (Alignment < DataSize || DataSize < 4 || DataSize > 32 ||
      !isPowerOf2_32(DataSize))
    return false;

  // 32-byte vector nontemporal stores are supported by AVX (the equivalent
  // loads require AVX2).
  if (DataSize == 32)
    return ST->hasAVX();
  if (DataSize == 16)
    return ST->hasSSE1();
  return true;
}

bool X86TTIImpl::isLegalBroadcastLoad(Type *ElementTy,
                                      ElementCount NumElements) const {
  // movddup
  return ST->hasSSE3() && !NumElements.isScalable() &&
         NumElements.getFixedValue() == 2 &&
         ElementTy == Type::getDoubleTy(ElementTy->getContext());
}

bool X86TTIImpl::isLegalMaskedExpandLoad(Type *DataTy) {
  if (!isa<VectorType>(DataTy))
    return false;

  if (!ST->hasAVX512())
    return false;

  // The backend can't handle a single element vector.
  if (cast<FixedVectorType>(DataTy)->getNumElements() == 1)
    return false;

  Type *ScalarTy = cast<VectorType>(DataTy)->getElementType();

  if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
    return true;

  if (!ScalarTy->isIntegerTy())
    return false;

  unsigned IntWidth = ScalarTy->getIntegerBitWidth();
  return IntWidth == 32 || IntWidth == 64 ||
         ((IntWidth == 8 || IntWidth == 16) && ST->hasVBMI2());
}

bool X86TTIImpl::isLegalMaskedCompressStore(Type *DataTy) {
  return isLegalMaskedExpandLoad(DataTy);
}

bool X86TTIImpl::supportsGather() const {
  // Some CPUs have better gather performance than others.
  // TODO: Remove the explicit ST->hasAVX512()? That would mean we would only
  // enable gather with a -march.
5271 return ST->hasAVX512() || (ST->hasFastGather() && ST->hasAVX2()); 5272 } 5273 5274 bool X86TTIImpl::forceScalarizeMaskedGather(VectorType *VTy, Align Alignment) { 5275 // Gather / Scatter for vector 2 is not profitable on KNL / SKX 5276 // Vector-4 of gather/scatter instruction does not exist on KNL. We can extend 5277 // it to 8 elements, but zeroing upper bits of the mask vector will add more 5278 // instructions. Right now we give the scalar cost of vector-4 for KNL. TODO: 5279 // Check, maybe the gather/scatter instruction is better in the VariableMask 5280 // case. 5281 unsigned NumElts = cast<FixedVectorType>(VTy)->getNumElements(); 5282 return NumElts == 1 || 5283 (ST->hasAVX512() && (NumElts == 2 || (NumElts == 4 && !ST->hasVLX()))); 5284 } 5285 5286 bool X86TTIImpl::isLegalMaskedGather(Type *DataTy, Align Alignment) { 5287 if (!supportsGather()) 5288 return false; 5289 Type *ScalarTy = DataTy->getScalarType(); 5290 if (ScalarTy->isPointerTy()) 5291 return true; 5292 5293 if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy()) 5294 return true; 5295 5296 if (!ScalarTy->isIntegerTy()) 5297 return false; 5298 5299 unsigned IntWidth = ScalarTy->getIntegerBitWidth(); 5300 return IntWidth == 32 || IntWidth == 64; 5301 } 5302 5303 bool X86TTIImpl::isLegalMaskedScatter(Type *DataType, Align Alignment) { 5304 // AVX2 doesn't support scatter 5305 if (!ST->hasAVX512()) 5306 return false; 5307 return isLegalMaskedGather(DataType, Alignment); 5308 } 5309 5310 bool X86TTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) { 5311 EVT VT = TLI->getValueType(DL, DataType); 5312 return TLI->isOperationLegal(IsSigned ? ISD::SDIVREM : ISD::UDIVREM, VT); 5313 } 5314 5315 bool X86TTIImpl::isFCmpOrdCheaperThanFCmpZero(Type *Ty) { 5316 return false; 5317 } 5318 5319 bool X86TTIImpl::areInlineCompatible(const Function *Caller, 5320 const Function *Callee) const { 5321 const TargetMachine &TM = getTLI()->getTargetMachine(); 5322 5323 // Work this as a subsetting of subtarget features. 5324 const FeatureBitset &CallerBits = 5325 TM.getSubtargetImpl(*Caller)->getFeatureBits(); 5326 const FeatureBitset &CalleeBits = 5327 TM.getSubtargetImpl(*Callee)->getFeatureBits(); 5328 5329 // Check whether features are the same (apart from the ignore list). 5330 FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList; 5331 FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList; 5332 if (RealCallerBits == RealCalleeBits) 5333 return true; 5334 5335 // If the features are a subset, we need to additionally check for calls 5336 // that may become ABI-incompatible as a result of inlining. 5337 if ((RealCallerBits & RealCalleeBits) != RealCalleeBits) 5338 return false; 5339 5340 for (const Instruction &I : instructions(Callee)) { 5341 if (const auto *CB = dyn_cast<CallBase>(&I)) { 5342 SmallVector<Type *, 8> Types; 5343 for (Value *Arg : CB->args()) 5344 Types.push_back(Arg->getType()); 5345 if (!CB->getType()->isVoidTy()) 5346 Types.push_back(CB->getType()); 5347 5348 // Simple types are always ABI compatible. 5349 auto IsSimpleTy = [](Type *Ty) { 5350 return !Ty->isVectorTy() && !Ty->isAggregateType(); 5351 }; 5352 if (all_of(Types, IsSimpleTy)) 5353 continue; 5354 5355 if (Function *NestedCallee = CB->getCalledFunction()) { 5356 // Assume that intrinsics are always ABI compatible. 5357 if (NestedCallee->isIntrinsic()) 5358 continue; 5359 5360 // Do a precise compatibility check. 
        if (!areTypesABICompatible(Caller, NestedCallee, Types))
          return false;
      } else {
        // We don't know the target features of the callee,
        // assume it is incompatible.
        return false;
      }
    }
  }
  return true;
}

bool X86TTIImpl::areTypesABICompatible(const Function *Caller,
                                       const Function *Callee,
                                       const ArrayRef<Type *> &Types) const {
  if (!BaseT::areTypesABICompatible(Caller, Callee, Types))
    return false;

  // If we get here, we know the target features match. If one function
  // considers 512-bit vectors legal and the other does not, consider them
  // incompatible.
  const TargetMachine &TM = getTLI()->getTargetMachine();

  if (TM.getSubtarget<X86Subtarget>(*Caller).useAVX512Regs() ==
      TM.getSubtarget<X86Subtarget>(*Callee).useAVX512Regs())
    return true;

  // Consider the arguments compatible if they aren't vectors or aggregates.
  // FIXME: Look at the size of vectors.
  // FIXME: Look at the element types of aggregates to see if there are vectors.
  return llvm::none_of(Types,
      [](Type *T) { return T->isVectorTy() || T->isAggregateType(); });
}

X86TTIImpl::TTI::MemCmpExpansionOptions
X86TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  TTI::MemCmpExpansionOptions Options;
  Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
  Options.NumLoadsPerBlock = 2;
  // All GPR and vector loads can be unaligned.
  Options.AllowOverlappingLoads = true;
  if (IsZeroCmp) {
    // Only enable vector loads for equality comparison. Right now the vector
    // version is not as fast for three way compare (see #33329).
    const unsigned PreferredWidth = ST->getPreferVectorWidth();
    if (PreferredWidth >= 512 && ST->hasAVX512())
      Options.LoadSizes.push_back(64);
    if (PreferredWidth >= 256 && ST->hasAVX())
      Options.LoadSizes.push_back(32);
    if (PreferredWidth >= 128 && ST->hasSSE2())
      Options.LoadSizes.push_back(16);
  }
  if (ST->is64Bit()) {
    Options.LoadSizes.push_back(8);
  }
  Options.LoadSizes.push_back(4);
  Options.LoadSizes.push_back(2);
  Options.LoadSizes.push_back(1);
  return Options;
}

bool X86TTIImpl::prefersVectorizedAddressing() const {
  return supportsGather();
}

bool X86TTIImpl::supportsEfficientVectorElementLoadStore() const {
  return false;
}

bool X86TTIImpl::enableInterleavedAccessVectorization() {
  // TODO: We expect this to be beneficial regardless of arch,
  // but there are currently some unexplained performance artifacts on Atom.
  // As a temporary solution, disable on Atom.
  return !(ST->isAtom());
}

// Get estimation for interleaved load/store operations and strided load.
// \p Indices contains indices for strided load.
// \p Factor - the factor of interleaving.
// AVX-512 provides 3-src shuffles that significantly reduce the cost.
InstructionCost X86TTIImpl::getInterleavedMemoryOpCostAVX512(
    unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
    ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
    TTI::TargetCostKind CostKind, bool UseMaskForCond, bool UseMaskForGaps) {
  // VecTy for interleave memop is <VF*Factor x Elt>.
  // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
  // VecTy = <12 x i32>.
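  // Continuing that example on AVX-512 with 512-bit vectors enabled (a rough
  // sketch): <12 x i32> widens to the legal v16i32, so VecTySize = 48 bytes,
  // LegalVTSize = 64 bytes, and the computation below yields
  // NumOfMemOps = ceil(48 / 64) = 1.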

  // Calculate the number of memory operations (NumOfMemOps), required
  // for load/store the VecTy.
  MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
  unsigned VecTySize = DL.getTypeStoreSize(VecTy);
  unsigned LegalVTSize = LegalVT.getStoreSize();
  unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;

  // Get the cost of one memory operation.
  auto *SingleMemOpTy = FixedVectorType::get(VecTy->getElementType(),
                                             LegalVT.getVectorNumElements());
  InstructionCost MemOpCost;
  bool UseMaskedMemOp = UseMaskForCond || UseMaskForGaps;
  if (UseMaskedMemOp)
    MemOpCost = getMaskedMemoryOpCost(Opcode, SingleMemOpTy, Alignment,
                                      AddressSpace, CostKind);
  else
    MemOpCost = getMemoryOpCost(Opcode, SingleMemOpTy, MaybeAlign(Alignment),
                                AddressSpace, CostKind);

  unsigned VF = VecTy->getNumElements() / Factor;
  MVT VT = MVT::getVectorVT(MVT::getVT(VecTy->getScalarType()), VF);

  InstructionCost MaskCost;
  if (UseMaskedMemOp) {
    APInt DemandedLoadStoreElts = APInt::getZero(VecTy->getNumElements());
    for (unsigned Index : Indices) {
      assert(Index < Factor && "Invalid index for interleaved memory op");
      for (unsigned Elm = 0; Elm < VF; Elm++)
        DemandedLoadStoreElts.setBit(Index + Elm * Factor);
    }

    Type *I1Type = Type::getInt1Ty(VecTy->getContext());

    MaskCost = getReplicationShuffleCost(
        I1Type, Factor, VF,
        UseMaskForGaps ? DemandedLoadStoreElts
                       : APInt::getAllOnes(VecTy->getNumElements()),
        CostKind);

    // The Gaps mask is invariant and created outside the loop, therefore the
    // cost of creating it is not accounted for here. However if we have both
    // a MaskForGaps and some other mask that guards the execution of the
    // memory access, we need to account for the cost of And-ing the two masks
    // inside the loop.
    if (UseMaskForGaps) {
      auto *MaskVT = FixedVectorType::get(I1Type, VecTy->getNumElements());
      MaskCost += getArithmeticInstrCost(BinaryOperator::And, MaskVT, CostKind);
    }
  }

  if (Opcode == Instruction::Load) {
    // The tables (AVX512InterleavedLoadTbl and AVX512InterleavedStoreTbl)
    // contain the cost of the optimized shuffle sequence that the
    // X86InterleavedAccess pass will generate.
    // The cost of loads and stores are computed separately from the table.

    // X86InterleavedAccess supports only the following interleaved-access
    // groups.
    static const CostTblEntry AVX512InterleavedLoadTbl[] = {
        {3, MVT::v16i8, 12}, //(load 48i8 and) deinterleave into 3 x 16i8
        {3, MVT::v32i8, 14}, //(load 96i8 and) deinterleave into 3 x 32i8
        {3, MVT::v64i8, 22}, //(load 192i8 and) deinterleave into 3 x 64i8
    };

    if (const auto *Entry =
            CostTableLookup(AVX512InterleavedLoadTbl, Factor, VT))
      return MaskCost + NumOfMemOps * MemOpCost + Entry->Cost;
    // If an entry does not exist, fall back to the default implementation.

    // Kind of shuffle depends on number of loaded values.
    // If we load the entire data in one register, we can use a 1-src shuffle.
    // Otherwise, we'll merge 2 sources in each operation.
    TTI::ShuffleKind ShuffleKind =
        (NumOfMemOps > 1) ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc;

    InstructionCost ShuffleCost =
        getShuffleCost(ShuffleKind, SingleMemOpTy, None, 0, nullptr);

    unsigned NumOfLoadsInInterleaveGrp =
        Indices.size() ? Indices.size() : Factor;
    auto *ResultTy = FixedVectorType::get(VecTy->getElementType(),
                                          VecTy->getNumElements() / Factor);
    InstructionCost NumOfResults =
        getTLI()->getTypeLegalizationCost(DL, ResultTy).first *
        NumOfLoadsInInterleaveGrp;

    // About a half of the loads may be folded in shuffles when we have only
    // one result. If we have more than one result, or the loads are masked,
    // we do not fold loads at all.
    unsigned NumOfUnfoldedLoads =
        UseMaskedMemOp || NumOfResults > 1 ? NumOfMemOps : NumOfMemOps / 2;

    // Get a number of shuffle operations per result.
    unsigned NumOfShufflesPerResult =
        std::max((unsigned)1, (unsigned)(NumOfMemOps - 1));

    // The SK_PermuteTwoSrc shuffle clobbers one of its source operands.
    // When we have more than one destination, we need additional instructions
    // to keep sources.
    InstructionCost NumOfMoves = 0;
    if (NumOfResults > 1 && ShuffleKind == TTI::SK_PermuteTwoSrc)
      NumOfMoves = NumOfResults * NumOfShufflesPerResult / 2;

    InstructionCost Cost = NumOfResults * NumOfShufflesPerResult * ShuffleCost +
                           MaskCost + NumOfUnfoldedLoads * MemOpCost +
                           NumOfMoves;

    return Cost;
  }

  // Store.
  assert(Opcode == Instruction::Store &&
         "Expected Store Instruction at this point");
  // X86InterleavedAccess supports only the following interleaved-access
  // groups.
  static const CostTblEntry AVX512InterleavedStoreTbl[] = {
      {3, MVT::v16i8, 12}, // interleave 3 x 16i8 into 48i8 (and store)
      {3, MVT::v32i8, 14}, // interleave 3 x 32i8 into 96i8 (and store)
      {3, MVT::v64i8, 26}, // interleave 3 x 64i8 into 192i8 (and store)

      {4, MVT::v8i8, 10},  // interleave 4 x 8i8 into 32i8 (and store)
      {4, MVT::v16i8, 11}, // interleave 4 x 16i8 into 64i8 (and store)
      {4, MVT::v32i8, 14}, // interleave 4 x 32i8 into 128i8 (and store)
      {4, MVT::v64i8, 24}  // interleave 4 x 64i8 into 256i8 (and store)
  };

  if (const auto *Entry =
          CostTableLookup(AVX512InterleavedStoreTbl, Factor, VT))
    return MaskCost + NumOfMemOps * MemOpCost + Entry->Cost;
  // If an entry does not exist, fall back to the default implementation.

  // There are no strided stores at the moment, and a store can't be folded
  // into a shuffle.
  unsigned NumOfSources = Factor; // The number of values to be merged.
  InstructionCost ShuffleCost =
      getShuffleCost(TTI::SK_PermuteTwoSrc, SingleMemOpTy, None, 0, nullptr);
  unsigned NumOfShufflesPerStore = NumOfSources - 1;

  // The SK_PermuteTwoSrc shuffle clobbers one of its source operands.
  // We need additional instructions to keep sources.
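  // Illustrative arithmetic only: for a Factor == 3 store,
  // NumOfShufflesPerStore == 2, so with NumOfMemOps == 2 the formula below
  // adds 2 * 2 / 2 = 2 extra register moves to preserve the shuffle sources.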
  unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2;
  InstructionCost Cost =
      MaskCost +
      NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) +
      NumOfMoves;
  return Cost;
}

InstructionCost X86TTIImpl::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *BaseTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) {
  auto *VecTy = cast<FixedVectorType>(BaseTy);

  auto isSupportedOnAVX512 = [&](Type *VecTy, bool HasBW) {
    Type *EltTy = cast<VectorType>(VecTy)->getElementType();
    if (EltTy->isFloatTy() || EltTy->isDoubleTy() || EltTy->isIntegerTy(64) ||
        EltTy->isIntegerTy(32) || EltTy->isPointerTy())
      return true;
    if (EltTy->isIntegerTy(16) || EltTy->isIntegerTy(8) ||
        (!ST->useSoftFloat() && ST->hasFP16() && EltTy->isHalfTy()))
      return HasBW;
    return false;
  };
  if (ST->hasAVX512() && isSupportedOnAVX512(VecTy, ST->hasBWI()))
    return getInterleavedMemoryOpCostAVX512(
        Opcode, VecTy, Factor, Indices, Alignment,
        AddressSpace, CostKind, UseMaskForCond, UseMaskForGaps);

  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind,
                                             UseMaskForCond, UseMaskForGaps);

  // Get an estimate for interleaved load/store operations on SSE-AVX2
  // targets. Unlike AVX-512, SSE-AVX2 targets do not have generic shuffles
  // that would allow computing the cost with a generic formula as a function
  // of generic shuffles. We therefore use a lookup table instead, filled
  // according to the instruction sequences that codegen currently generates.

  // VecTy for an interleaved memop is <VF*Factor x Elt>.
  // So, for VF=4, Interleave Factor = 3, Element type = i32, we have
  // VecTy = <12 x i32>.
  MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;

  // This function can be called with VecTy=<6 x i128>, Factor=3, in which
  // case VF=2, while v2i128 is an unsupported MVT vector type
  // (see MachineValueType.h::getVectorVT()).
  if (!LegalVT.isVector())
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind);

  unsigned VF = VecTy->getNumElements() / Factor;
  Type *ScalarTy = VecTy->getElementType();
  // To deduplicate entries, model floats/pointers as appropriately-sized
  // integers.
  if (!ScalarTy->isIntegerTy())
    ScalarTy =
        Type::getIntNTy(ScalarTy->getContext(), DL.getTypeSizeInBits(ScalarTy));

  // Get the cost of all the memory operations.
  // FIXME: discount dead loads.
  InstructionCost MemOpCosts = getMemoryOpCost(
      Opcode, VecTy, MaybeAlign(Alignment), AddressSpace, CostKind);

  auto *VT = FixedVectorType::get(ScalarTy, VF);
  EVT ETy = TLI->getValueType(DL, VT);
  if (!ETy.isSimple())
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind);

  // TODO: Complete for other data-types and strides.
  // Each combination of Stride, element bit width and VF results in a
  // different sequence; the cost tables are therefore accessed with
  // Factor (stride) and VectorType=VFxiN.
  // The Cost accounts only for the shuffle sequence;
  // the cost of the loads/stores is accounted for separately.
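  // For example (illustration only), a <24 x i16> load with Factor == 3 is
  // looked up as {3, v8i16} since VF == 24 / 3 == 8, and a <12 x float>
  // load with Factor == 3 is looked up as {3, v4i32} once the float
  // elements have been modeled as i32.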
  static const CostTblEntry AVX2InterleavedLoadTbl[] = {
      {2, MVT::v2i8, 2},  // (load 4i8 and) deinterleave into 2 x 2i8
      {2, MVT::v4i8, 2},  // (load 8i8 and) deinterleave into 2 x 4i8
      {2, MVT::v8i8, 2},  // (load 16i8 and) deinterleave into 2 x 8i8
      {2, MVT::v16i8, 4}, // (load 32i8 and) deinterleave into 2 x 16i8
      {2, MVT::v32i8, 6}, // (load 64i8 and) deinterleave into 2 x 32i8

      {2, MVT::v8i16, 6},   // (load 16i16 and) deinterleave into 2 x 8i16
      {2, MVT::v16i16, 9},  // (load 32i16 and) deinterleave into 2 x 16i16
      {2, MVT::v32i16, 18}, // (load 64i16 and) deinterleave into 2 x 32i16

      {2, MVT::v8i32, 4},   // (load 16i32 and) deinterleave into 2 x 8i32
      {2, MVT::v16i32, 8},  // (load 32i32 and) deinterleave into 2 x 16i32
      {2, MVT::v32i32, 16}, // (load 64i32 and) deinterleave into 2 x 32i32

      {2, MVT::v4i64, 4},   // (load 8i64 and) deinterleave into 2 x 4i64
      {2, MVT::v8i64, 8},   // (load 16i64 and) deinterleave into 2 x 8i64
      {2, MVT::v16i64, 16}, // (load 32i64 and) deinterleave into 2 x 16i64
      {2, MVT::v32i64, 32}, // (load 64i64 and) deinterleave into 2 x 32i64

      {3, MVT::v2i8, 3},   // (load 6i8 and) deinterleave into 3 x 2i8
      {3, MVT::v4i8, 3},   // (load 12i8 and) deinterleave into 3 x 4i8
      {3, MVT::v8i8, 6},   // (load 24i8 and) deinterleave into 3 x 8i8
      {3, MVT::v16i8, 11}, // (load 48i8 and) deinterleave into 3 x 16i8
      {3, MVT::v32i8, 14}, // (load 96i8 and) deinterleave into 3 x 32i8

      {3, MVT::v2i16, 5},   // (load 6i16 and) deinterleave into 3 x 2i16
      {3, MVT::v4i16, 7},   // (load 12i16 and) deinterleave into 3 x 4i16
      {3, MVT::v8i16, 9},   // (load 24i16 and) deinterleave into 3 x 8i16
      {3, MVT::v16i16, 28}, // (load 48i16 and) deinterleave into 3 x 16i16
      {3, MVT::v32i16, 56}, // (load 96i16 and) deinterleave into 3 x 32i16

      {3, MVT::v2i32, 3},   // (load 6i32 and) deinterleave into 3 x 2i32
      {3, MVT::v4i32, 3},   // (load 12i32 and) deinterleave into 3 x 4i32
      {3, MVT::v8i32, 7},   // (load 24i32 and) deinterleave into 3 x 8i32
      {3, MVT::v16i32, 14}, // (load 48i32 and) deinterleave into 3 x 16i32
      {3, MVT::v32i32, 32}, // (load 96i32 and) deinterleave into 3 x 32i32

      {3, MVT::v2i64, 1},   // (load 6i64 and) deinterleave into 3 x 2i64
      {3, MVT::v4i64, 5},   // (load 12i64 and) deinterleave into 3 x 4i64
      {3, MVT::v8i64, 10},  // (load 24i64 and) deinterleave into 3 x 8i64
      {3, MVT::v16i64, 20}, // (load 48i64 and) deinterleave into 3 x 16i64

      {4, MVT::v2i8, 4},   // (load 8i8 and) deinterleave into 4 x 2i8
      {4, MVT::v4i8, 4},   // (load 16i8 and) deinterleave into 4 x 4i8
      {4, MVT::v8i8, 12},  // (load 32i8 and) deinterleave into 4 x 8i8
      {4, MVT::v16i8, 24}, // (load 64i8 and) deinterleave into 4 x 16i8
      {4, MVT::v32i8, 56}, // (load 128i8 and) deinterleave into 4 x 32i8

      {4, MVT::v2i16, 6},    // (load 8i16 and) deinterleave into 4 x 2i16
      {4, MVT::v4i16, 17},   // (load 16i16 and) deinterleave into 4 x 4i16
      {4, MVT::v8i16, 33},   // (load 32i16 and) deinterleave into 4 x 8i16
      {4, MVT::v16i16, 75},  // (load 64i16 and) deinterleave into 4 x 16i16
      {4, MVT::v32i16, 150}, // (load 128i16 and) deinterleave into 4 x 32i16

      {4, MVT::v2i32, 4},   // (load 8i32 and) deinterleave into 4 x 2i32
      {4, MVT::v4i32, 8},   // (load 16i32 and) deinterleave into 4 x 4i32
      {4, MVT::v8i32, 16},  // (load 32i32 and) deinterleave into 4 x 8i32
      {4, MVT::v16i32, 32}, // (load 64i32 and) deinterleave into 4 x 16i32
      {4, MVT::v32i32, 68}, // (load 128i32 and) deinterleave into 4 x 32i32

      {4, MVT::v2i64, 6},   // (load 8i64 and) deinterleave into 4 x 2i64
      {4, MVT::v4i64, 8},   // (load 16i64 and) deinterleave into 4 x 4i64
      {4, MVT::v8i64, 20},  // (load 32i64 and) deinterleave into 4 x 8i64
      {4, MVT::v16i64, 40}, // (load 64i64 and) deinterleave into 4 x 16i64

      {6, MVT::v2i8, 6},   // (load 12i8 and) deinterleave into 6 x 2i8
      {6, MVT::v4i8, 14},  // (load 24i8 and) deinterleave into 6 x 4i8
      {6, MVT::v8i8, 18},  // (load 48i8 and) deinterleave into 6 x 8i8
      {6, MVT::v16i8, 43}, // (load 96i8 and) deinterleave into 6 x 16i8
      {6, MVT::v32i8, 82}, // (load 192i8 and) deinterleave into 6 x 32i8

      {6, MVT::v2i16, 13},   // (load 12i16 and) deinterleave into 6 x 2i16
      {6, MVT::v4i16, 9},    // (load 24i16 and) deinterleave into 6 x 4i16
      {6, MVT::v8i16, 39},   // (load 48i16 and) deinterleave into 6 x 8i16
      {6, MVT::v16i16, 106}, // (load 96i16 and) deinterleave into 6 x 16i16
      {6, MVT::v32i16, 212}, // (load 192i16 and) deinterleave into 6 x 32i16

      {6, MVT::v2i32, 6},   // (load 12i32 and) deinterleave into 6 x 2i32
      {6, MVT::v4i32, 15},  // (load 24i32 and) deinterleave into 6 x 4i32
      {6, MVT::v8i32, 31},  // (load 48i32 and) deinterleave into 6 x 8i32
      {6, MVT::v16i32, 64}, // (load 96i32 and) deinterleave into 6 x 16i32

      {6, MVT::v2i64, 6},  // (load 12i64 and) deinterleave into 6 x 2i64
      {6, MVT::v4i64, 18}, // (load 24i64 and) deinterleave into 6 x 4i64
      {6, MVT::v8i64, 36}, // (load 48i64 and) deinterleave into 6 x 8i64

      {8, MVT::v8i32, 40} // (load 64i32 and) deinterleave into 8 x 8i32
  };

  static const CostTblEntry SSSE3InterleavedLoadTbl[] = {
      {2, MVT::v4i16, 2}, // (load 8i16 and) deinterleave into 2 x 4i16
  };

  static const CostTblEntry SSE2InterleavedLoadTbl[] = {
      {2, MVT::v2i16, 2}, // (load 4i16 and) deinterleave into 2 x 2i16
      {2, MVT::v4i16, 7}, // (load 8i16 and) deinterleave into 2 x 4i16

      {2, MVT::v2i32, 2}, // (load 4i32 and) deinterleave into 2 x 2i32
      {2, MVT::v4i32, 2}, // (load 8i32 and) deinterleave into 2 x 4i32

      {2, MVT::v2i64, 2}, // (load 4i64 and) deinterleave into 2 x 2i64
  };

  static const CostTblEntry AVX2InterleavedStoreTbl[] = {
      {2, MVT::v16i8, 3}, // interleave 2 x 16i8 into 32i8 (and store)
      {2, MVT::v32i8, 4}, // interleave 2 x 32i8 into 64i8 (and store)

      {2, MVT::v8i16, 3},  // interleave 2 x 8i16 into 16i16 (and store)
      {2, MVT::v16i16, 4}, // interleave 2 x 16i16 into 32i16 (and store)
      {2, MVT::v32i16, 8}, // interleave 2 x 32i16 into 64i16 (and store)

      {2, MVT::v4i32, 2},   // interleave 2 x 4i32 into 8i32 (and store)
      {2, MVT::v8i32, 4},   // interleave 2 x 8i32 into 16i32 (and store)
      {2, MVT::v16i32, 8},  // interleave 2 x 16i32 into 32i32 (and store)
      {2, MVT::v32i32, 16}, // interleave 2 x 32i32 into 64i32 (and store)

      {2, MVT::v2i64, 2},   // interleave 2 x 2i64 into 4i64 (and store)
      {2, MVT::v4i64, 4},   // interleave 2 x 4i64 into 8i64 (and store)
      {2, MVT::v8i64, 8},   // interleave 2 x 8i64 into 16i64 (and store)
      {2, MVT::v16i64, 16}, // interleave 2 x 16i64 into 32i64 (and store)
      {2, MVT::v32i64, 32}, // interleave 2 x 32i64 into 64i64 (and store)

      {3, MVT::v2i8, 4}, // interleave 3 x 2i8 into 6i8 (and store)
      {3, MVT::v4i8, 4}, // interleave 3 x 4i8 into 12i8 (and store)
      {3, MVT::v8i8, 6},   // interleave 3 x 8i8 into 24i8 (and store)
      {3, MVT::v16i8, 11}, // interleave 3 x 16i8 into 48i8 (and store)
      {3, MVT::v32i8, 13}, // interleave 3 x 32i8 into 96i8 (and store)

      {3, MVT::v2i16, 4},   // interleave 3 x 2i16 into 6i16 (and store)
      {3, MVT::v4i16, 6},   // interleave 3 x 4i16 into 12i16 (and store)
      {3, MVT::v8i16, 12},  // interleave 3 x 8i16 into 24i16 (and store)
      {3, MVT::v16i16, 27}, // interleave 3 x 16i16 into 48i16 (and store)
      {3, MVT::v32i16, 54}, // interleave 3 x 32i16 into 96i16 (and store)

      {3, MVT::v2i32, 4},   // interleave 3 x 2i32 into 6i32 (and store)
      {3, MVT::v4i32, 5},   // interleave 3 x 4i32 into 12i32 (and store)
      {3, MVT::v8i32, 11},  // interleave 3 x 8i32 into 24i32 (and store)
      {3, MVT::v16i32, 22}, // interleave 3 x 16i32 into 48i32 (and store)
      {3, MVT::v32i32, 48}, // interleave 3 x 32i32 into 96i32 (and store)

      {3, MVT::v2i64, 4},   // interleave 3 x 2i64 into 6i64 (and store)
      {3, MVT::v4i64, 6},   // interleave 3 x 4i64 into 12i64 (and store)
      {3, MVT::v8i64, 12},  // interleave 3 x 8i64 into 24i64 (and store)
      {3, MVT::v16i64, 24}, // interleave 3 x 16i64 into 48i64 (and store)

      {4, MVT::v2i8, 4},   // interleave 4 x 2i8 into 8i8 (and store)
      {4, MVT::v4i8, 4},   // interleave 4 x 4i8 into 16i8 (and store)
      {4, MVT::v8i8, 4},   // interleave 4 x 8i8 into 32i8 (and store)
      {4, MVT::v16i8, 8},  // interleave 4 x 16i8 into 64i8 (and store)
      {4, MVT::v32i8, 12}, // interleave 4 x 32i8 into 128i8 (and store)

      {4, MVT::v2i16, 2},   // interleave 4 x 2i16 into 8i16 (and store)
      {4, MVT::v4i16, 6},   // interleave 4 x 4i16 into 16i16 (and store)
      {4, MVT::v8i16, 10},  // interleave 4 x 8i16 into 32i16 (and store)
      {4, MVT::v16i16, 32}, // interleave 4 x 16i16 into 64i16 (and store)
      {4, MVT::v32i16, 64}, // interleave 4 x 32i16 into 128i16 (and store)

      {4, MVT::v2i32, 5},   // interleave 4 x 2i32 into 8i32 (and store)
      {4, MVT::v4i32, 6},   // interleave 4 x 4i32 into 16i32 (and store)
      {4, MVT::v8i32, 16},  // interleave 4 x 8i32 into 32i32 (and store)
      {4, MVT::v16i32, 32}, // interleave 4 x 16i32 into 64i32 (and store)
      {4, MVT::v32i32, 64}, // interleave 4 x 32i32 into 128i32 (and store)

      {4, MVT::v2i64, 6},   // interleave 4 x 2i64 into 8i64 (and store)
      {4, MVT::v4i64, 8},   // interleave 4 x 4i64 into 16i64 (and store)
      {4, MVT::v8i64, 20},  // interleave 4 x 8i64 into 32i64 (and store)
      {4, MVT::v16i64, 40}, // interleave 4 x 16i64 into 64i64 (and store)

      {6, MVT::v2i8, 7},   // interleave 6 x 2i8 into 12i8 (and store)
      {6, MVT::v4i8, 9},   // interleave 6 x 4i8 into 24i8 (and store)
      {6, MVT::v8i8, 16},  // interleave 6 x 8i8 into 48i8 (and store)
      {6, MVT::v16i8, 27}, // interleave 6 x 16i8 into 96i8 (and store)
      {6, MVT::v32i8, 90}, // interleave 6 x 32i8 into 192i8 (and store)

      {6, MVT::v2i16, 10},  // interleave 6 x 2i16 into 12i16 (and store)
      {6, MVT::v4i16, 15},  // interleave 6 x 4i16 into 24i16 (and store)
      {6, MVT::v8i16, 21},  // interleave 6 x 8i16 into 48i16 (and store)
      {6, MVT::v16i16, 58}, // interleave 6 x 16i16 into 96i16 (and store)
      {6, MVT::v32i16, 90}, // interleave 6 x 32i16 into 192i16 (and store)

      {6, MVT::v2i32, 9},  // interleave 6 x 2i32 into 12i32 (and store)
      {6, MVT::v4i32, 12}, // interleave 6 x 4i32 into 24i32 (and store)
      {6, MVT::v8i32, 33}, // interleave 6 x 8i32 into 48i32 (and store)
      {6, MVT::v16i32, 66}, // interleave 6 x 16i32 into 96i32 (and store)

      {6, MVT::v2i64, 8},  // interleave 6 x 2i64 into 12i64 (and store)
      {6, MVT::v4i64, 15}, // interleave 6 x 4i64 into 24i64 (and store)
      {6, MVT::v8i64, 30}, // interleave 6 x 8i64 into 48i64 (and store)
  };

  static const CostTblEntry SSE2InterleavedStoreTbl[] = {
      {2, MVT::v2i8, 1}, // interleave 2 x 2i8 into 4i8 (and store)
      {2, MVT::v4i8, 1}, // interleave 2 x 4i8 into 8i8 (and store)
      {2, MVT::v8i8, 1}, // interleave 2 x 8i8 into 16i8 (and store)

      {2, MVT::v2i16, 1}, // interleave 2 x 2i16 into 4i16 (and store)
      {2, MVT::v4i16, 1}, // interleave 2 x 4i16 into 8i16 (and store)

      {2, MVT::v2i32, 1}, // interleave 2 x 2i32 into 4i32 (and store)
  };

  if (Opcode == Instruction::Load) {
    auto GetDiscountedCost = [Factor, NumMembers = Indices.size(),
                              MemOpCosts](const CostTblEntry *Entry) {
      // NOTE: this is just an approximation; it can over- or under-estimate
      // the cost. The table cost is pro-rated by the fraction of the group's
      // members that are actually used (a worked example follows this
      // function).
      return MemOpCosts + divideCeil(NumMembers * Entry->Cost, Factor);
    };

    if (ST->hasAVX2())
      if (const auto *Entry = CostTableLookup(AVX2InterleavedLoadTbl, Factor,
                                              ETy.getSimpleVT()))
        return GetDiscountedCost(Entry);

    if (ST->hasSSSE3())
      if (const auto *Entry = CostTableLookup(SSSE3InterleavedLoadTbl, Factor,
                                              ETy.getSimpleVT()))
        return GetDiscountedCost(Entry);

    if (ST->hasSSE2())
      if (const auto *Entry = CostTableLookup(SSE2InterleavedLoadTbl, Factor,
                                              ETy.getSimpleVT()))
        return GetDiscountedCost(Entry);
  } else {
    assert(Opcode == Instruction::Store &&
           "Expected Store Instruction at this point");
    assert((!Indices.size() || Indices.size() == Factor) &&
           "Interleaved store only supports fully-interleaved groups.");
    if (ST->hasAVX2())
      if (const auto *Entry = CostTableLookup(AVX2InterleavedStoreTbl, Factor,
                                              ETy.getSimpleVT()))
        return MemOpCosts + Entry->Cost;

    if (ST->hasSSE2())
      if (const auto *Entry = CostTableLookup(SSE2InterleavedStoreTbl, Factor,
                                              ETy.getSimpleVT()))
        return MemOpCosts + Entry->Cost;
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace, CostKind,
                                           UseMaskForCond, UseMaskForGaps);
}
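
// A worked example for GetDiscountedCost above (illustration only, numbers
// taken from AVX2InterleavedLoadTbl): an AVX2 load of <16 x i32> with
// Factor == 2 maps to the {2, MVT::v8i32, 4} entry, so with both group
// members used the shuffle part is divideCeil(2 * 4, 2) == 4, while with a
// single used member it drops to divideCeil(1 * 4, 2) == 2; MemOpCosts is
// added in both cases.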