//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//
/// A note about the cost model numbers used below: the numbers correspond to
/// some "generic" X86 CPU rather than a concrete CPU model. Usually the
/// numbers correspond to the CPU where the feature first appeared. For
/// example, if we do Subtarget.hasSSE42() in the lookups below the cost is
/// based on Nehalem as that was the first CPU to support that feature level
/// and thus most likely has the worst case cost.
/// Some examples of other technologies/CPUs:
///   SSE 3   - Pentium4 / Athlon64
///   SSE 4.1 - Penryn
///   SSE 4.2 - Nehalem
///   AVX     - Sandy Bridge
///   AVX2    - Haswell
///   AVX-512 - Xeon Phi / Skylake
/// And some examples of instruction target dependent costs (latency):
///                   divss     sqrtss     rsqrtss
///   AMD K7          11-16     19         3
///   Piledriver      9-24      13-15      5
///   Jaguar          14        16         2
///   Pentium II,III  18        30         2
///   Nehalem         7-14      7-18       3
///   Haswell         10-13     11         5
/// TODO: Develop and implement the target dependent cost model and
/// specialize cost numbers for different Cost Model Targets such as
/// throughput, code size, latency and uop count.
//===----------------------------------------------------------------------===//

#include "X86TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

#define DEBUG_TYPE "x86tti"

//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  // instructions is inefficient. Once the problem is fixed, we should
  // call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
}

llvm::Optional<unsigned> X86TTIImpl::getCacheSize(
    TargetTransformInfo::CacheLevel Level) const {
  switch (Level) {
  case TargetTransformInfo::CacheLevel::L1D:
    //   - Penryn
    //   - Nehalem
    //   - Westmere
    //   - Sandy Bridge
    //   - Ivy Bridge
    //   - Haswell
    //   - Broadwell
    //   - Skylake
    //   - Kabylake
    return 32 * 1024; // 32 KByte
  case TargetTransformInfo::CacheLevel::L2D:
    //   - Penryn
    //   - Nehalem
    //   - Westmere
    //   - Sandy Bridge
    //   - Ivy Bridge
    //   - Haswell
    //   - Broadwell
    //   - Skylake
    //   - Kabylake
    return 256 * 1024; // 256 KByte
  }

  llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
}

llvm::Optional<unsigned> X86TTIImpl::getCacheAssociativity(
    TargetTransformInfo::CacheLevel Level) const {
  //   - Penryn
  //   - Nehalem
  //   - Westmere
  //   - Sandy Bridge
  //   - Ivy Bridge
  //   - Haswell
  //   - Broadwell
  //   - Skylake
  //   - Kabylake
  switch (Level) {
  case TargetTransformInfo::CacheLevel::L1D:
    LLVM_FALLTHROUGH;
  case TargetTransformInfo::CacheLevel::L2D:
    return 8;
  }

  llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
}

unsigned X86TTIImpl::getNumberOfRegisters(unsigned ClassID) const {
  bool Vector = (ClassID == 1);
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit()) {
    if (Vector && ST->hasAVX512())
      return 32;
    return 16;
  }
  return 8;
}

TypeSize
X86TTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
  unsigned PreferVectorWidth = ST->getPreferVectorWidth();
  switch (K) {
  case TargetTransformInfo::RGK_Scalar:
    return TypeSize::getFixed(ST->is64Bit() ? 64 : 32);
  case TargetTransformInfo::RGK_FixedWidthVector:
    if (ST->hasAVX512() && PreferVectorWidth >= 512)
      return TypeSize::getFixed(512);
    if (ST->hasAVX() && PreferVectorWidth >= 256)
      return TypeSize::getFixed(256);
    if (ST->hasSSE1() && PreferVectorWidth >= 128)
      return TypeSize::getFixed(128);
    return TypeSize::getFixed(0);
  case TargetTransformInfo::RGK_ScalableVector:
    return TypeSize::getScalable(0);
  }

  llvm_unreachable("Unsupported register kind");
}

unsigned X86TTIImpl::getLoadStoreVecRegBitWidth(unsigned) const {
  return getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
      .getFixedSize();
}

unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // If the loop will not be vectorized, don't interleave the loop.
  // Let the regular unroller unroll the loop instead, which saves the
  // overflow check and memory check cost.
  if (VF == 1)
    return 1;

  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}

InstructionCost X86TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
    TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
    const Instruction *CxtI) {
  // TODO: Handle more cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                         Op2Info, Opd1PropInfo,
                                         Opd2PropInfo, Args, CxtI);

  // vXi8 multiplications are always promoted to vXi16.
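  // For example, the cost of a <16 x i8> multiply below is the cost of a
  // ZExt to <16 x i16>, plus a Trunc back to <16 x i8>, plus a <16 x i16>
  // multiply, mirroring the promotion the backend performs.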
  if (Opcode == Instruction::Mul && Ty->isVectorTy() &&
      Ty->getScalarSizeInBits() == 8) {
    Type *WideVecTy =
        VectorType::getExtendedElementVectorType(cast<VectorType>(Ty));
    return getCastInstrCost(Instruction::ZExt, WideVecTy, Ty,
                            TargetTransformInfo::CastContextHint::None,
                            CostKind) +
           getCastInstrCost(Instruction::Trunc, Ty, WideVecTy,
                            TargetTransformInfo::CastContextHint::None,
                            CostKind) +
           getArithmeticInstrCost(Opcode, WideVecTy, CostKind, Op1Info, Op2Info,
                                  Opd1PropInfo, Opd2PropInfo);
  }

  // Legalize the type.
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  if (ISD == ISD::MUL && Args.size() == 2 && LT.second.isVector() &&
      LT.second.getScalarType() == MVT::i32) {
    // Check if the operands can be represented as a smaller datatype.
    bool Op1Signed = false, Op2Signed = false;
    unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
    unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);
    unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);

    // If both are representable as i15 and at least one is constant,
    // zero-extended, or sign-extended from vXi16 (or less pre-SSE41) then we
    // can treat this as PMADDWD which has the same costs as a vXi16 multiply.
    if (OpMinSize <= 15 && !ST->isPMADDWDSlow()) {
      bool Op1Constant =
          isa<ConstantDataVector>(Args[0]) || isa<ConstantVector>(Args[0]);
      bool Op2Constant =
          isa<ConstantDataVector>(Args[1]) || isa<ConstantVector>(Args[1]);
      bool Op1Sext = isa<SExtInst>(Args[0]) &&
                     (Op1MinSize == 15 || (Op1MinSize < 15 && !ST->hasSSE41()));
      bool Op2Sext = isa<SExtInst>(Args[1]) &&
                     (Op2MinSize == 15 || (Op2MinSize < 15 && !ST->hasSSE41()));

      bool IsZeroExtended = !Op1Signed || !Op2Signed;
      bool IsConstant = Op1Constant || Op2Constant;
      bool IsSext = Op1Sext || Op2Sext;
      if (IsConstant || IsZeroExtended || IsSext)
        LT.second =
            MVT::getVectorVT(MVT::i16, 2 * LT.second.getVectorNumElements());
    }
  }

  // Vector multiply by pow2 will be simplified to shifts.
  if (ISD == ISD::MUL &&
      (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2)
    return getArithmeticInstrCost(Instruction::Shl, Ty, CostKind, Op1Info,
                                  Op2Info, TargetTransformInfo::OP_None,
                                  TargetTransformInfo::OP_None);

  // On X86, vector signed division by a power-of-two constant is normally
  // expanded to the sequence SRA + SRL + ADD + SRA.
  // The OperandValue properties may not be the same as that of the previous
  // operation; conservatively assume OP_None.
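  // For example, for i32 lanes, (X sdiv 8) expands to:
  //   T = X >>s 31  (SRA: broadcast the sign bit)
  //   T = T >>u 29  (SRL: extract a 3-bit bias)
  //   X = X + T     (ADD: round towards zero)
  //   X = X >>s 3   (SRA)
  // hence the 2*AShr + LShr + Add cost computed below.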
  if ((ISD == ISD::SDIV || ISD == ISD::SREM) &&
      (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    InstructionCost Cost =
        2 * getArithmeticInstrCost(Instruction::AShr, Ty, CostKind, Op1Info,
                                   Op2Info, TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::LShr, Ty, CostKind, Op1Info,
                                   Op2Info, TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Add, Ty, CostKind, Op1Info,
                                   Op2Info, TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);

    if (ISD == ISD::SREM) {
      // For SREM: (X % C) is the equivalent of (X - (X/C)*C)
      Cost += getArithmeticInstrCost(Instruction::Mul, Ty, CostKind, Op1Info,
                                     Op2Info);
      Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind, Op1Info,
                                     Op2Info);
    }

    return Cost;
  }

  // Vector unsigned division/remainder will be simplified to shifts/masks.
  if ((ISD == ISD::UDIV || ISD == ISD::UREM) &&
      (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    if (ISD == ISD::UDIV)
      return getArithmeticInstrCost(Instruction::LShr, Ty, CostKind, Op1Info,
                                    Op2Info, TargetTransformInfo::OP_None,
                                    TargetTransformInfo::OP_None);
    // UREM
    return getArithmeticInstrCost(Instruction::And, Ty, CostKind, Op1Info,
                                  Op2Info, TargetTransformInfo::OP_None,
                                  TargetTransformInfo::OP_None);
  }

  static const CostTblEntry GLMCostTable[] = {
    { ISD::FDIV,  MVT::f32,   18 }, // divss
    { ISD::FDIV,  MVT::v4f32, 35 }, // divps
    { ISD::FDIV,  MVT::f64,   33 }, // divsd
    { ISD::FDIV,  MVT::v2f64, 65 }, // divpd
  };

  if (ST->useGLMDivSqrtCosts())
    if (const auto *Entry = CostTableLookup(GLMCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SLMCostTable[] = {
    { ISD::MUL,   MVT::v4i32, 11 }, // pmulld
    { ISD::MUL,   MVT::v8i16,  2 }, // pmullw
    { ISD::FMUL,  MVT::f64,    2 }, // mulsd
    { ISD::FMUL,  MVT::v2f64,  4 }, // mulpd
    { ISD::FMUL,  MVT::v4f32,  2 }, // mulps
    { ISD::FDIV,  MVT::f32,   17 }, // divss
    { ISD::FDIV,  MVT::v4f32, 39 }, // divps
    { ISD::FDIV,  MVT::f64,   32 }, // divsd
    { ISD::FDIV,  MVT::v2f64, 69 }, // divpd
    { ISD::FADD,  MVT::v2f64,  2 }, // addpd
    { ISD::FSUB,  MVT::v2f64,  2 }, // subpd
    // v2i64/v4i64 mul is custom lowered as a series of long:
    // multiplies(3), shifts(3) and adds(2)
    // slm muldq version throughput is 2 and addq throughput 4
    // thus: 3X2 (muldq throughput) + 3X1 (shift throughput) +
    // 2X4 (addq throughput) = 17
    { ISD::MUL,   MVT::v2i64, 17 },
    // slm addq\subq throughput is 4
    { ISD::ADD,   MVT::v2i64,  4 },
    { ISD::SUB,   MVT::v2i64,  4 },
  };

  if (ST->useSLMArithCosts()) {
    if (Args.size() == 2 && ISD == ISD::MUL && LT.second == MVT::v4i32) {
      // Check if the operands can be shrunk into a smaller datatype.
      // TODO: Merge this into generic vXi32 MUL patterns above.
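      // For example, a v4i32 multiply whose operands are both zero-extended
      // from 8 bits or less is costed below at LT.first * 3 (pmullw + extend)
      // instead of the SLMCostTable's 11 for a full v4i32 pmulld.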
      bool Op1Signed = false;
      unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
      bool Op2Signed = false;
      unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);

      bool SignedMode = Op1Signed || Op2Signed;
      unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);

      if (OpMinSize <= 7)
        return LT.first * 3; // pmullw/sext
      if (!SignedMode && OpMinSize <= 8)
        return LT.first * 3; // pmullw/zext
      if (OpMinSize <= 15)
        return LT.first * 5; // pmullw/pmulhw/pshuf
      if (!SignedMode && OpMinSize <= 16)
        return LT.first * 5; // pmullw/pmulhw/pshuf
    }

    if (const auto *Entry = CostTableLookup(SLMCostTable, ISD, LT.second)) {
      return LT.first * Entry->Cost;
    }
  }

  static const CostTblEntry AVX512BWUniformConstCostTable[] = {
    { ISD::SHL,  MVT::v64i8, 2 }, // psllw + pand.
    { ISD::SRL,  MVT::v64i8, 2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v64i8, 4 }, // psrlw, pand, pxor, psubb.
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasBWI()) {
    if (const auto *Entry = CostTableLookup(AVX512BWUniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512UniformConstCostTable[] = {
    { ISD::SRA,  MVT::v2i64,  1 },
    { ISD::SRA,  MVT::v4i64,  1 },
    { ISD::SRA,  MVT::v8i64,  1 },

    { ISD::SHL,  MVT::v64i8,  4 }, // psllw + pand.
    { ISD::SRL,  MVT::v64i8,  4 }, // psrlw + pand.
    { ISD::SRA,  MVT::v64i8,  8 }, // psrlw, pand, pxor, psubb.

    { ISD::SDIV, MVT::v16i32, 6 }, // pmuludq sequence
    { ISD::SREM, MVT::v16i32, 8 }, // pmuludq+mul+sub sequence
    { ISD::UDIV, MVT::v16i32, 5 }, // pmuludq sequence
    { ISD::UREM, MVT::v16i32, 7 }, // pmuludq+mul+sub sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX512()) {
    if (const auto *Entry = CostTableLookup(AVX512UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX2UniformConstCostTable[] = {
    { ISD::SHL,  MVT::v32i8, 2 }, // psllw + pand.
    { ISD::SRL,  MVT::v32i8, 2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v32i8, 4 }, // psrlw, pand, pxor, psubb.

    { ISD::SRA,  MVT::v4i64, 4 }, // 2 x psrad + shuffle.

    { ISD::SDIV, MVT::v8i32, 6 }, // pmuludq sequence
    { ISD::SREM, MVT::v8i32, 8 }, // pmuludq+mul+sub sequence
    { ISD::UDIV, MVT::v8i32, 5 }, // pmuludq sequence
    { ISD::UREM, MVT::v8i32, 7 }, // pmuludq+mul+sub sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX2()) {
    if (const auto *Entry = CostTableLookup(AVX2UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformConstCostTable[] = {
    { ISD::SHL,  MVT::v16i8,    2 }, // psllw + pand.
    { ISD::SRL,  MVT::v16i8,    2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v16i8,    4 }, // psrlw, pand, pxor, psubb.

    { ISD::SHL,  MVT::v32i8,  4+2 }, // 2*(psllw + pand) + split.
    { ISD::SRL,  MVT::v32i8,  4+2 }, // 2*(psrlw + pand) + split.
    { ISD::SRA,  MVT::v32i8,  8+2 }, // 2*(psrlw, pand, pxor, psubb) + split.

    { ISD::SDIV, MVT::v8i32, 12+2 }, // 2*pmuludq sequence + split.
    { ISD::SREM, MVT::v8i32, 16+2 }, // 2*pmuludq+mul+sub sequence + split.
    { ISD::SDIV, MVT::v4i32,    6 }, // pmuludq sequence
    { ISD::SREM, MVT::v4i32,    8 }, // pmuludq+mul+sub sequence
    { ISD::UDIV, MVT::v8i32, 10+2 }, // 2*pmuludq sequence + split.
    { ISD::UREM, MVT::v8i32, 14+2 }, // 2*pmuludq+mul+sub sequence + split.
    { ISD::UDIV, MVT::v4i32,    5 }, // pmuludq sequence
    { ISD::UREM, MVT::v4i32,    7 }, // pmuludq+mul+sub sequence
  };

  // XOP has faster vXi8 shifts.
  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2() && !ST->hasXOP()) {
    if (const auto *Entry =
            CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512BWConstCostTable[] = {
    { ISD::SDIV, MVT::v64i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::SREM, MVT::v64i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v64i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::UREM, MVT::v64i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v32i16,  6 }, // vpmulhw sequence
    { ISD::SREM, MVT::v32i16,  8 }, // vpmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i16,  6 }, // vpmulhuw sequence
    { ISD::UREM, MVT::v32i16,  8 }, // vpmulhuw+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasBWI()) {
    if (const auto *Entry =
            CostTableLookup(AVX512BWConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512ConstCostTable[] = {
    { ISD::SDIV, MVT::v16i32, 15 }, // vpmuldq sequence
    { ISD::SREM, MVT::v16i32, 17 }, // vpmuldq+mul+sub sequence
    { ISD::UDIV, MVT::v16i32, 15 }, // vpmuludq sequence
    { ISD::UREM, MVT::v16i32, 17 }, // vpmuludq+mul+sub sequence
    { ISD::SDIV, MVT::v64i8,  28 }, // 4*ext+4*pmulhw sequence
    { ISD::SREM, MVT::v64i8,  32 }, // 4*ext+4*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v64i8,  28 }, // 4*ext+4*pmulhw sequence
    { ISD::UREM, MVT::v64i8,  32 }, // 4*ext+4*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v32i16, 12 }, // 2*vpmulhw sequence
    { ISD::SREM, MVT::v32i16, 16 }, // 2*vpmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i16, 12 }, // 2*vpmulhuw sequence
    { ISD::UREM, MVT::v32i16, 16 }, // 2*vpmulhuw+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasAVX512()) {
    if (const auto *Entry =
            CostTableLookup(AVX512ConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX2ConstCostTable[] = {
    { ISD::SDIV, MVT::v32i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::SREM, MVT::v32i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::UREM, MVT::v32i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v16i16,  6 }, // vpmulhw sequence
    { ISD::SREM, MVT::v16i16,  8 }, // vpmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v16i16,  6 }, // vpmulhuw sequence
    { ISD::UREM, MVT::v16i16,  8 }, // vpmulhuw+mul+sub sequence
    { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
    { ISD::SREM, MVT::v8i32,  19 }, // vpmuldq+mul+sub sequence
    { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
    { ISD::UREM, MVT::v8i32,  19 }, // vpmuludq+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasAVX2()) {
    if (const auto *Entry = CostTableLookup(AVX2ConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2ConstCostTable[] = {
    { ISD::SDIV, MVT::v32i8,  28+2 }, // 4*ext+4*pmulhw sequence + split.
    { ISD::SREM, MVT::v32i8,  32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
    { ISD::SDIV, MVT::v16i8,    14 }, // 2*ext+2*pmulhw sequence
    { ISD::SREM, MVT::v16i8,    16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i8,  28+2 }, // 4*ext+4*pmulhw sequence + split.
    { ISD::UREM, MVT::v32i8,  32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
    { ISD::UDIV, MVT::v16i8,    14 }, // 2*ext+2*pmulhw sequence
    { ISD::UREM, MVT::v16i8,    16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v16i16, 12+2 }, // 2*pmulhw sequence + split.
    { ISD::SREM, MVT::v16i16, 16+2 }, // 2*pmulhw+mul+sub sequence + split.
    { ISD::SDIV, MVT::v8i16,     6 }, // pmulhw sequence
    { ISD::SREM, MVT::v8i16,     8 }, // pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v16i16, 12+2 }, // 2*pmulhuw sequence + split.
    { ISD::UREM, MVT::v16i16, 16+2 }, // 2*pmulhuw+mul+sub sequence + split.
    { ISD::UDIV, MVT::v8i16,     6 }, // pmulhuw sequence
    { ISD::UREM, MVT::v8i16,     8 }, // pmulhuw+mul+sub sequence
    { ISD::SDIV, MVT::v8i32,  38+2 }, // 2*pmuludq sequence + split.
    { ISD::SREM, MVT::v8i32,  48+2 }, // 2*pmuludq+mul+sub sequence + split.
    { ISD::SDIV, MVT::v4i32,    19 }, // pmuludq sequence
    { ISD::SREM, MVT::v4i32,    24 }, // pmuludq+mul+sub sequence
    { ISD::UDIV, MVT::v8i32,  30+2 }, // 2*pmuludq sequence + split.
    { ISD::UREM, MVT::v8i32,  40+2 }, // 2*pmuludq+mul+sub sequence + split.
    { ISD::UDIV, MVT::v4i32,    15 }, // pmuludq sequence
    { ISD::UREM, MVT::v4i32,    20 }, // pmuludq+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasSSE2()) {
    // pmuldq sequence.
    if (ISD == ISD::SDIV && LT.second == MVT::v8i32 && ST->hasAVX())
      return LT.first * 32;
    if (ISD == ISD::SREM && LT.second == MVT::v8i32 && ST->hasAVX())
      return LT.first * 38;
    if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 15;
    if (ISD == ISD::SREM && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 20;

    if (const auto *Entry = CostTableLookup(SSE2ConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512BWShiftCostTable[] = {
    { ISD::SHL,   MVT::v16i8,  4 }, // extend/vpsllvw/pack sequence.
    { ISD::SRL,   MVT::v16i8,  4 }, // extend/vpsrlvw/pack sequence.
    { ISD::SRA,   MVT::v16i8,  4 }, // extend/vpsravw/pack sequence.
    { ISD::SHL,   MVT::v32i8,  4 }, // extend/vpsllvw/pack sequence.
    { ISD::SRL,   MVT::v32i8,  4 }, // extend/vpsrlvw/pack sequence.
    { ISD::SRA,   MVT::v32i8,  6 }, // extend/vpsravw/pack sequence.
    { ISD::SHL,   MVT::v64i8,  6 }, // extend/vpsllvw/pack sequence.
    { ISD::SRL,   MVT::v64i8,  7 }, // extend/vpsrlvw/pack sequence.
    { ISD::SRA,   MVT::v64i8, 15 }, // extend/vpsravw/pack sequence.

    { ISD::SHL,   MVT::v8i16,  1 }, // vpsllvw
    { ISD::SRL,   MVT::v8i16,  1 }, // vpsrlvw
    { ISD::SRA,   MVT::v8i16,  1 }, // vpsravw
    { ISD::SHL,   MVT::v16i16, 1 }, // vpsllvw
    { ISD::SRL,   MVT::v16i16, 1 }, // vpsrlvw
    { ISD::SRA,   MVT::v16i16, 1 }, // vpsravw
    { ISD::SHL,   MVT::v32i16, 1 }, // vpsllvw
    { ISD::SRL,   MVT::v32i16, 1 }, // vpsrlvw
    { ISD::SRA,   MVT::v32i16, 1 }, // vpsravw
  };

  if (ST->hasBWI())
    if (const auto *Entry =
            CostTableLookup(AVX512BWShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX2UniformCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i16, 1 }, // psllw.
    { ISD::SRL,  MVT::v16i16, 1 }, // psrlw.
    { ISD::SRA,  MVT::v16i16, 1 }, // psraw.
    { ISD::SHL,  MVT::v32i16, 2 }, // 2*psllw.
    { ISD::SRL,  MVT::v32i16, 2 }, // 2*psrlw.
    { ISD::SRA,  MVT::v32i16, 2 }, // 2*psraw.

    { ISD::SHL,  MVT::v8i32,  1 }, // pslld
    { ISD::SRL,  MVT::v8i32,  1 }, // psrld
    { ISD::SRA,  MVT::v8i32,  1 }, // psrad
    { ISD::SHL,  MVT::v4i64,  1 }, // psllq
    { ISD::SRL,  MVT::v4i64,  1 }, // psrlq
  };

  if (ST->hasAVX2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(AVX2UniformCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v8i16, 1 }, // psllw.
    { ISD::SHL,  MVT::v4i32, 1 }, // pslld
    { ISD::SHL,  MVT::v2i64, 1 }, // psllq.

    { ISD::SRL,  MVT::v8i16, 1 }, // psrlw.
    { ISD::SRL,  MVT::v4i32, 1 }, // psrld.
    { ISD::SRL,  MVT::v2i64, 1 }, // psrlq.

    { ISD::SRA,  MVT::v8i16, 1 }, // psraw.
    { ISD::SRA,  MVT::v4i32, 1 }, // psrad.
  };

  if (ST->hasSSE2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(SSE2UniformCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512DQCostTable[] = {
    { ISD::MUL,  MVT::v2i64, 2 }, // pmullq
    { ISD::MUL,  MVT::v4i64, 2 }, // pmullq
    { ISD::MUL,  MVT::v8i64, 2 }  // pmullq
  };

  // Look for AVX512DQ lowering tricks for custom cases.
  if (ST->hasDQI())
    if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512BWCostTable[] = {
    { ISD::SHL,   MVT::v64i8, 11 }, // vpblendvb sequence.
    { ISD::SRL,   MVT::v64i8, 11 }, // vpblendvb sequence.
    { ISD::SRA,   MVT::v64i8, 24 }, // vpblendvb sequence.
  };

  // Look for AVX512BW lowering tricks for custom cases.
  if (ST->hasBWI())
    if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512CostTable[] = {
    { ISD::SHL,     MVT::v4i32,   1 },
    { ISD::SRL,     MVT::v4i32,   1 },
    { ISD::SRA,     MVT::v4i32,   1 },
    { ISD::SHL,     MVT::v8i32,   1 },
    { ISD::SRL,     MVT::v8i32,   1 },
    { ISD::SRA,     MVT::v8i32,   1 },
    { ISD::SHL,     MVT::v16i32,  1 },
    { ISD::SRL,     MVT::v16i32,  1 },
    { ISD::SRA,     MVT::v16i32,  1 },

    { ISD::SHL,     MVT::v2i64,   1 },
    { ISD::SRL,     MVT::v2i64,   1 },
    { ISD::SHL,     MVT::v4i64,   1 },
    { ISD::SRL,     MVT::v4i64,   1 },
    { ISD::SHL,     MVT::v8i64,   1 },
    { ISD::SRL,     MVT::v8i64,   1 },

    { ISD::SRA,     MVT::v2i64,   1 },
    { ISD::SRA,     MVT::v4i64,   1 },
    { ISD::SRA,     MVT::v8i64,   1 },

    { ISD::MUL,     MVT::v16i32,  1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,     MVT::v8i32,   1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,     MVT::v4i32,   1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,     MVT::v8i64,   6 }, // 3*pmuludq/3*shift/2*add
    { ISD::MUL,     MVT::i64,     1 }, // Skylake from http://www.agner.org/

    { ISD::FNEG,    MVT::v8f64,   1 }, // Skylake from http://www.agner.org/
    { ISD::FADD,    MVT::v8f64,   1 }, // Skylake from http://www.agner.org/
    { ISD::FSUB,    MVT::v8f64,   1 }, // Skylake from http://www.agner.org/
    { ISD::FMUL,    MVT::v8f64,   1 }, // Skylake from http://www.agner.org/
    { ISD::FDIV,    MVT::f64,     4 }, // Skylake from http://www.agner.org/
    { ISD::FDIV,    MVT::v2f64,   4 }, // Skylake from http://www.agner.org/
    { ISD::FDIV,    MVT::v4f64,   8 }, // Skylake from http://www.agner.org/
    { ISD::FDIV,    MVT::v8f64,  16 }, // Skylake from http://www.agner.org/

    { ISD::FNEG,    MVT::v16f32,  1 }, // Skylake from http://www.agner.org/
    { ISD::FADD,    MVT::v16f32,  1 }, // Skylake from http://www.agner.org/
    { ISD::FSUB,    MVT::v16f32,  1 }, // Skylake from http://www.agner.org/
    { ISD::FMUL,    MVT::v16f32,  1 }, // Skylake from http://www.agner.org/
    { ISD::FDIV,    MVT::f32,     3 }, // Skylake from http://www.agner.org/
    { ISD::FDIV,    MVT::v4f32,   3 }, // Skylake from http://www.agner.org/
    { ISD::FDIV,    MVT::v8f32,   5 }, // Skylake from http://www.agner.org/
    { ISD::FDIV,    MVT::v16f32, 10 }, // Skylake from http://www.agner.org/
  };

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX2ShiftCostTable[] = {
    // Shifts on vXi64/vXi32 on AVX2 are legal even though we declare them
    // custom so we can detect the cases where the shift amount is a scalar.
    { ISD::SHL,  MVT::v4i32, 2 }, // vpsllvd (Haswell from agner.org)
    { ISD::SRL,  MVT::v4i32, 2 }, // vpsrlvd (Haswell from agner.org)
    { ISD::SRA,  MVT::v4i32, 2 }, // vpsravd (Haswell from agner.org)
    { ISD::SHL,  MVT::v8i32, 2 }, // vpsllvd (Haswell from agner.org)
    { ISD::SRL,  MVT::v8i32, 2 }, // vpsrlvd (Haswell from agner.org)
    { ISD::SRA,  MVT::v8i32, 2 }, // vpsravd (Haswell from agner.org)
    { ISD::SHL,  MVT::v2i64, 1 }, // vpsllvq (Haswell from agner.org)
    { ISD::SRL,  MVT::v2i64, 1 }, // vpsrlvq (Haswell from agner.org)
    { ISD::SHL,  MVT::v4i64, 1 }, // vpsllvq (Haswell from agner.org)
    { ISD::SRL,  MVT::v4i64, 1 }, // vpsrlvq (Haswell from agner.org)
  };

  if (ST->hasAVX512()) {
    if (ISD == ISD::SHL && LT.second == MVT::v32i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX512, a packed v32i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
                                    Op1Info, Op2Info,
                                    TargetTransformInfo::OP_None,
                                    TargetTransformInfo::OP_None);
  }

  // Look for AVX2 lowering tricks (XOP is always better at v4i32 shifts).
  if (ST->hasAVX2() && !(ST->hasXOP() && LT.second == MVT::v4i32)) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
                                    Op1Info, Op2Info,
                                    TargetTransformInfo::OP_None,
                                    TargetTransformInfo::OP_None);

    if (const auto *Entry = CostTableLookup(AVX2ShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry XOPShiftCostTable[] = {
    // 128bit shifts take 1cy, but right shifts require negation beforehand.
    { ISD::SHL,     MVT::v16i8,    1 },
    { ISD::SRL,     MVT::v16i8,    2 },
    { ISD::SRA,     MVT::v16i8,    2 },
    { ISD::SHL,     MVT::v8i16,    1 },
    { ISD::SRL,     MVT::v8i16,    2 },
    { ISD::SRA,     MVT::v8i16,    2 },
    { ISD::SHL,     MVT::v4i32,    1 },
    { ISD::SRL,     MVT::v4i32,    2 },
    { ISD::SRA,     MVT::v4i32,    2 },
    { ISD::SHL,     MVT::v2i64,    1 },
    { ISD::SRL,     MVT::v2i64,    2 },
    { ISD::SRA,     MVT::v2i64,    2 },
    // 256bit shifts require splitting if AVX2 didn't catch them above.
    { ISD::SHL,     MVT::v32i8,  2+2 },
    { ISD::SRL,     MVT::v32i8,  4+2 },
    { ISD::SRA,     MVT::v32i8,  4+2 },
    { ISD::SHL,     MVT::v16i16, 2+2 },
    { ISD::SRL,     MVT::v16i16, 4+2 },
    { ISD::SRA,     MVT::v16i16, 4+2 },
    { ISD::SHL,     MVT::v8i32,  2+2 },
    { ISD::SRL,     MVT::v8i32,  4+2 },
    { ISD::SRA,     MVT::v8i32,  4+2 },
    { ISD::SHL,     MVT::v4i64,  2+2 },
    { ISD::SRL,     MVT::v4i64,  4+2 },
    { ISD::SRA,     MVT::v4i64,  4+2 },
  };

  // Look for XOP lowering tricks.
  if (ST->hasXOP()) {
    // If the right shift is constant then we'll fold the negation so
    // it's as cheap as a left shift.
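    // e.g. a SRL-by-constant can use the XOP variable shift directly: those
    // shifts interpret a negative shift amount as a right shift, and the
    // negated constant folds into the shift-amount operand.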
    int ShiftISD = ISD;
    if ((ShiftISD == ISD::SRL || ShiftISD == ISD::SRA) &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      ShiftISD = ISD::SHL;
    if (const auto *Entry =
            CostTableLookup(XOPShiftCostTable, ShiftISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformShiftCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i16, 2+2 }, // 2*psllw + split.
    { ISD::SHL,  MVT::v8i32,  2+2 }, // 2*pslld + split.
    { ISD::SHL,  MVT::v4i64,  2+2 }, // 2*psllq + split.

    { ISD::SRL,  MVT::v16i16, 2+2 }, // 2*psrlw + split.
    { ISD::SRL,  MVT::v8i32,  2+2 }, // 2*psrld + split.
    { ISD::SRL,  MVT::v4i64,  2+2 }, // 2*psrlq + split.

    { ISD::SRA,  MVT::v16i16, 2+2 }, // 2*psraw + split.
    { ISD::SRA,  MVT::v8i32,  2+2 }, // 2*psrad + split.
    { ISD::SRA,  MVT::v2i64,    4 }, // 2*psrad + shuffle.
    { ISD::SRA,  MVT::v4i64,  8+2 }, // 2*(2*psrad + shuffle) + split.
  };

  if (ST->hasSSE2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {

    // Handle AVX2 uniform v4i64 ISD::SRA, it's not worth a table.
    if (ISD == ISD::SRA && LT.second == MVT::v4i64 && ST->hasAVX2())
      return LT.first * 4; // 2*psrad + shuffle.

    if (const auto *Entry =
            CostTableLookup(SSE2UniformShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  if (ISD == ISD::SHL &&
      Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
    MVT VT = LT.second;
    // Vector shift left by non uniform constant can be lowered
    // into vector multiply.
    if (((VT == MVT::v8i16 || VT == MVT::v4i32) && ST->hasSSE2()) ||
        ((VT == MVT::v16i16 || VT == MVT::v8i32) && ST->hasAVX()))
      ISD = ISD::MUL;
  }

  static const CostTblEntry AVX2CostTable[] = {
    { ISD::SHL,  MVT::v16i8,   6 }, // vpblendvb sequence.
    { ISD::SHL,  MVT::v32i8,   6 }, // vpblendvb sequence.
    { ISD::SHL,  MVT::v64i8,  12 }, // 2*vpblendvb sequence.
    { ISD::SHL,  MVT::v8i16,   5 }, // extend/vpsrlvd/pack sequence.
    { ISD::SHL,  MVT::v16i16,  7 }, // extend/vpsrlvd/pack sequence.
    { ISD::SHL,  MVT::v32i16, 14 }, // 2*extend/vpsrlvd/pack sequence.

    { ISD::SRL,  MVT::v16i8,   6 }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v32i8,   6 }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v64i8,  12 }, // 2*vpblendvb sequence.
    { ISD::SRL,  MVT::v8i16,   5 }, // extend/vpsrlvd/pack sequence.
    { ISD::SRL,  MVT::v16i16,  7 }, // extend/vpsrlvd/pack sequence.
    { ISD::SRL,  MVT::v32i16, 14 }, // 2*extend/vpsrlvd/pack sequence.

    { ISD::SRA,  MVT::v16i8,  17 }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v32i8,  17 }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v64i8,  34 }, // 2*vpblendvb sequence.
    { ISD::SRA,  MVT::v8i16,   5 }, // extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v16i16,  7 }, // extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v32i16, 14 }, // 2*extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v2i64,   2 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,   2 }, // srl/xor/sub sequence.

    { ISD::SUB,  MVT::v32i8,   1 }, // psubb
    { ISD::ADD,  MVT::v32i8,   1 }, // paddb
    { ISD::SUB,  MVT::v16i16,  1 }, // psubw
    { ISD::ADD,  MVT::v16i16,  1 }, // paddw
    { ISD::SUB,  MVT::v8i32,   1 }, // psubd
    { ISD::ADD,  MVT::v8i32,   1 }, // paddd
    { ISD::SUB,  MVT::v4i64,   1 }, // psubq
    { ISD::ADD,  MVT::v4i64,   1 }, // paddq

    { ISD::MUL,  MVT::v16i16,  1 }, // pmullw
    { ISD::MUL,  MVT::v8i32,   2 }, // pmulld (Haswell from agner.org)
    { ISD::MUL,  MVT::v4i64,   6 }, // 3*pmuludq/3*shift/2*add

    { ISD::FNEG, MVT::v4f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FNEG, MVT::v8f32,   1 }, // Haswell from http://www.agner.org/
    { ISD::FADD, MVT::v4f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FADD, MVT::v8f32,   1 }, // Haswell from http://www.agner.org/
    { ISD::FSUB, MVT::v4f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FSUB, MVT::v8f32,   1 }, // Haswell from http://www.agner.org/
    { ISD::FMUL, MVT::f64,     1 }, // Haswell from http://www.agner.org/
    { ISD::FMUL, MVT::v2f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FMUL, MVT::v4f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FMUL, MVT::v8f32,   1 }, // Haswell from http://www.agner.org/

    { ISD::FDIV, MVT::f32,     7 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,   7 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v8f32,  14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::f64,    14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,  14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v4f64,  28 }, // Haswell from http://www.agner.org/
  };

  // Look for AVX2 lowering tricks for custom cases.
  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL,  MVT::v16i16,  4 },
    { ISD::MUL,  MVT::v8i32,   5 }, // BTVER2 from http://www.agner.org/
    { ISD::MUL,  MVT::v4i64,  12 },

    { ISD::SUB,  MVT::v32i8,   4 },
    { ISD::ADD,  MVT::v32i8,   4 },
    { ISD::SUB,  MVT::v16i16,  4 },
    { ISD::ADD,  MVT::v16i16,  4 },
    { ISD::SUB,  MVT::v8i32,   4 },
    { ISD::ADD,  MVT::v8i32,   4 },
    { ISD::SUB,  MVT::v4i64,   4 },
    { ISD::ADD,  MVT::v4i64,   4 },

    { ISD::SHL,  MVT::v32i8,  22 }, // pblendvb sequence + split.
    { ISD::SHL,  MVT::v8i16,   6 }, // pblendvb sequence.
    { ISD::SHL,  MVT::v16i16, 13 }, // pblendvb sequence + split.
    { ISD::SHL,  MVT::v4i32,   3 }, // pslld/paddd/cvttps2dq/pmulld
    { ISD::SHL,  MVT::v8i32,   9 }, // pslld/paddd/cvttps2dq/pmulld + split
    { ISD::SHL,  MVT::v2i64,   2 }, // Shift each lane + blend.
    { ISD::SHL,  MVT::v4i64,   6 }, // Shift each lane + blend + split.

    { ISD::SRL,  MVT::v32i8,  23 }, // pblendvb sequence + split.
    { ISD::SRL,  MVT::v16i16, 28 }, // pblendvb sequence + split.
    { ISD::SRL,  MVT::v4i32,   6 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v8i32,  14 }, // Shift each lane + blend + split.
    { ISD::SRL,  MVT::v2i64,   2 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v4i64,   6 }, // Shift each lane + blend + split.

    { ISD::SRA,  MVT::v32i8,  44 }, // pblendvb sequence + split.
    { ISD::SRA,  MVT::v16i16, 28 }, // pblendvb sequence + split.
    { ISD::SRA,  MVT::v4i32,   6 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v8i32,  14 }, // Shift each lane + blend + split.
    { ISD::SRA,  MVT::v2i64,   5 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v4i64,  12 }, // Shift each lane + blend + split.

    { ISD::FNEG, MVT::v4f64,   2 }, // BTVER2 from http://www.agner.org/
    { ISD::FNEG, MVT::v8f32,   2 }, // BTVER2 from http://www.agner.org/

    { ISD::FMUL, MVT::f64,     2 }, // BTVER2 from http://www.agner.org/
    { ISD::FMUL, MVT::v2f64,   2 }, // BTVER2 from http://www.agner.org/
    { ISD::FMUL, MVT::v4f64,   4 }, // BTVER2 from http://www.agner.org/

    { ISD::FDIV, MVT::f32,    14 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,  14 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v8f32,  28 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::f64,    22 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,  22 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v4f64,  44 }, // SNB from http://www.agner.org/
  };

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE42CostTable[] = {
    { ISD::FADD, MVT::f64,     1 }, // Nehalem from http://www.agner.org/
    { ISD::FADD, MVT::f32,     1 }, // Nehalem from http://www.agner.org/
    { ISD::FADD, MVT::v2f64,   1 }, // Nehalem from http://www.agner.org/
    { ISD::FADD, MVT::v4f32,   1 }, // Nehalem from http://www.agner.org/

    { ISD::FSUB, MVT::f64,     1 }, // Nehalem from http://www.agner.org/
    { ISD::FSUB, MVT::f32,     1 }, // Nehalem from http://www.agner.org/
    { ISD::FSUB, MVT::v2f64,   1 }, // Nehalem from http://www.agner.org/
    { ISD::FSUB, MVT::v4f32,   1 }, // Nehalem from http://www.agner.org/

    { ISD::FMUL, MVT::f64,     1 }, // Nehalem from http://www.agner.org/
    { ISD::FMUL, MVT::f32,     1 }, // Nehalem from http://www.agner.org/
    { ISD::FMUL, MVT::v2f64,   1 }, // Nehalem from http://www.agner.org/
    { ISD::FMUL, MVT::v4f32,   1 }, // Nehalem from http://www.agner.org/

    { ISD::FDIV, MVT::f32,    14 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,  14 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::f64,    22 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,  22 }, // Nehalem from http://www.agner.org/

    { ISD::MUL,  MVT::v2i64,   6 }  // 3*pmuludq/3*shift/2*add
  };

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE41CostTable[] = {
    { ISD::SHL,  MVT::v16i8,  10 }, // pblendvb sequence.
    { ISD::SHL,  MVT::v8i16,  11 }, // pblendvb sequence.
    { ISD::SHL,  MVT::v4i32,   4 }, // pslld/paddd/cvttps2dq/pmulld

    { ISD::SRL,  MVT::v16i8,  11 }, // pblendvb sequence.
    { ISD::SRL,  MVT::v8i16,  13 }, // pblendvb sequence.
    { ISD::SRL,  MVT::v4i32,  16 }, // Shift each lane + blend.

    { ISD::SRA,  MVT::v16i8,  21 }, // pblendvb sequence.
    { ISD::SRA,  MVT::v8i16,  13 }, // pblendvb sequence.

    { ISD::MUL,  MVT::v4i32,   2 }  // pmulld (Nehalem from agner.org)
  };

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    { ISD::SHL,  MVT::v16i8,  13 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v8i16,  25 }, // cmpgtw sequence.
    { ISD::SHL,  MVT::v4i32,  16 }, // pslld/paddd/cvttps2dq/pmuludq.
    { ISD::SHL,  MVT::v2i64,   4 }, // splat+shuffle sequence.

    { ISD::SRL,  MVT::v16i8,  14 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v8i16,  16 }, // cmpgtw sequence.
    { ISD::SRL,  MVT::v4i32,  12 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v2i64,   4 }, // splat+shuffle sequence.

    { ISD::SRA,  MVT::v16i8,  27 }, // unpacked cmpgtb sequence.
    { ISD::SRA,  MVT::v8i16,  16 }, // cmpgtw sequence.
    { ISD::SRA,  MVT::v4i32,  12 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v2i64,   8 }, // srl/xor/sub splat+shuffle sequence.

    { ISD::MUL,  MVT::v8i16,   1 }, // pmullw
    { ISD::MUL,  MVT::v4i32,   6 }, // 3*pmuludq/4*shuffle
    { ISD::MUL,  MVT::v2i64,   8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FDIV, MVT::f32,    23 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,  39 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::f64,    38 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,  69 }, // Pentium IV from http://www.agner.org/

    { ISD::FNEG, MVT::f32,     1 }, // Pentium IV from http://www.agner.org/
    { ISD::FNEG, MVT::f64,     1 }, // Pentium IV from http://www.agner.org/
    { ISD::FNEG, MVT::v4f32,   1 }, // Pentium IV from http://www.agner.org/
    { ISD::FNEG, MVT::v2f64,   1 }, // Pentium IV from http://www.agner.org/

    { ISD::FADD, MVT::f32,     2 }, // Pentium IV from http://www.agner.org/
    { ISD::FADD, MVT::f64,     2 }, // Pentium IV from http://www.agner.org/

    { ISD::FSUB, MVT::f32,     2 }, // Pentium IV from http://www.agner.org/
    { ISD::FSUB, MVT::f64,     2 }, // Pentium IV from http://www.agner.org/
  };

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE1CostTable[] = {
    { ISD::FDIV, MVT::f32,    17 }, // Pentium III from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,  34 }, // Pentium III from http://www.agner.org/

    { ISD::FNEG, MVT::f32,     2 }, // Pentium III from http://www.agner.org/
    { ISD::FNEG, MVT::v4f32,   2 }, // Pentium III from http://www.agner.org/

    { ISD::FADD, MVT::f32,     1 }, // Pentium III from http://www.agner.org/
    { ISD::FADD, MVT::v4f32,   2 }, // Pentium III from http://www.agner.org/

    { ISD::FSUB, MVT::f32,     1 }, // Pentium III from http://www.agner.org/
    { ISD::FSUB, MVT::v4f32,   2 }, // Pentium III from http://www.agner.org/
  };

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry X64CostTbl[] = { // 64-bit targets
    { ISD::ADD,  MVT::i64,  1 }, // Core (Merom) from http://www.agner.org/
    { ISD::SUB,  MVT::i64,  1 }, // Core (Merom) from http://www.agner.org/
    { ISD::MUL,  MVT::i64,  2 }, // Nehalem from http://www.agner.org/
  };

  if (ST->is64Bit())
    if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
    { ISD::ADD,  MVT::i8,  1 }, // Pentium III from http://www.agner.org/
    { ISD::ADD,  MVT::i16, 1 }, // Pentium III from http://www.agner.org/
    { ISD::ADD,  MVT::i32, 1 }, // Pentium III from http://www.agner.org/

    { ISD::SUB,  MVT::i8,  1 }, // Pentium III from http://www.agner.org/
    { ISD::SUB,  MVT::i16, 1 }, // Pentium III from http://www.agner.org/
    { ISD::SUB,  MVT::i32, 1 }, // Pentium III from http://www.agner.org/
  };

  if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, LT.second))
    return LT.first * Entry->Cost;

  // It is not a good idea to vectorize division. We have to scalarize it and
  // in the process we will often end up having to spill regular
  // registers. The overhead of division is going to dominate most kernels
  // anyway so try hard to prevent vectorization of division - it is
  // generally a bad idea. Assume somewhat arbitrarily that we have to be able
  // to hide "20 cycles" for each lane.
  if (LT.second.isVector() && (ISD == ISD::SDIV || ISD == ISD::SREM ||
                               ISD == ISD::UDIV || ISD == ISD::UREM)) {
    InstructionCost ScalarCost = getArithmeticInstrCost(
        Opcode, Ty->getScalarType(), CostKind, Op1Info, Op2Info,
        TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
    return 20 * LT.first * LT.second.getVectorNumElements() * ScalarCost;
  }

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info);
}

InstructionCost X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
                                           VectorType *BaseTp,
                                           ArrayRef<int> Mask, int Index,
                                           VectorType *SubTp) {
  // 64-bit packed float vectors (v2f32) are widened to type v4f32.
  // 64-bit packed integer vectors (v2i32) are widened to type v4i32.
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, BaseTp);

  Kind = improveShuffleKindFromMask(Kind, Mask);
  // Treat Transpose as 2-op shuffles - there's no difference in lowering.
  if (Kind == TTI::SK_Transpose)
    Kind = TTI::SK_PermuteTwoSrc;

  // For Broadcasts we are splatting the first element from the first input
  // register, so only need to reference that input and all the output
  // registers are the same.
  if (Kind == TTI::SK_Broadcast)
    LT.first = 1;

  // Subvector extractions are free if they start at the beginning of a
  // vector and cheap if the subvectors are aligned.
  if (Kind == TTI::SK_ExtractSubvector && LT.second.isVector()) {
    int NumElts = LT.second.getVectorNumElements();
    if ((Index % NumElts) == 0)
      return 0;
    std::pair<InstructionCost, MVT> SubLT =
        TLI->getTypeLegalizationCost(DL, SubTp);
    if (SubLT.second.isVector()) {
      int NumSubElts = SubLT.second.getVectorNumElements();
      if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
        return SubLT.first;
      // Handle some cases for widening legalization. For now we only handle
      // cases where the original subvector was naturally aligned and evenly
      // fit in its legalized subvector type.
      // FIXME: Remove some of the alignment restrictions.
      // FIXME: We can use permq for 64-bit or larger extracts from 256-bit
      // vectors.
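      // For example, extracting <2 x i16> at index 2 from <8 x i16>: the
      // sub-vector type widens to <8 x i16>, the aligned extract at index 0
      // is free, and one pshufd moves the pair into place (the sub-vector
      // is 32 bits, so the "+ 1" case below applies).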
      int OrigSubElts = cast<FixedVectorType>(SubTp)->getNumElements();
      if (NumSubElts > OrigSubElts && (Index % OrigSubElts) == 0 &&
          (NumSubElts % OrigSubElts) == 0 &&
          LT.second.getVectorElementType() ==
              SubLT.second.getVectorElementType() &&
          LT.second.getVectorElementType().getSizeInBits() ==
              BaseTp->getElementType()->getPrimitiveSizeInBits()) {
        assert(NumElts >= NumSubElts && NumElts > OrigSubElts &&
               "Unexpected number of elements!");
        auto *VecTy = FixedVectorType::get(BaseTp->getElementType(),
                                           LT.second.getVectorNumElements());
        auto *SubTy = FixedVectorType::get(BaseTp->getElementType(),
                                           SubLT.second.getVectorNumElements());
        int ExtractIndex = alignDown((Index % NumElts), NumSubElts);
        InstructionCost ExtractCost = getShuffleCost(
            TTI::SK_ExtractSubvector, VecTy, None, ExtractIndex, SubTy);

        // If the original size is 32-bits or more, we can use pshufd. Otherwise
        // if we have SSSE3 we can use pshufb.
        if (SubTp->getPrimitiveSizeInBits() >= 32 || ST->hasSSSE3())
          return ExtractCost + 1; // pshufd or pshufb

        assert(SubTp->getPrimitiveSizeInBits() == 16 &&
               "Unexpected vector size");

        return ExtractCost + 2; // worst case pshufhw + pshufd
      }
    }
  }

  // Subvector insertions are cheap if the subvectors are aligned.
  // Note that in general, the insertion starting at the beginning of a vector
  // isn't free, because we need to preserve the rest of the wide vector.
  if (Kind == TTI::SK_InsertSubvector && LT.second.isVector()) {
    int NumElts = LT.second.getVectorNumElements();
    std::pair<InstructionCost, MVT> SubLT =
        TLI->getTypeLegalizationCost(DL, SubTp);
    if (SubLT.second.isVector()) {
      int NumSubElts = SubLT.second.getVectorNumElements();
      if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
        return SubLT.first;
    }

    // If the insertion isn't aligned, treat it like a 2-op shuffle.
    Kind = TTI::SK_PermuteTwoSrc;
  }

  // Handle some common (illegal) sub-vector types as they are often very cheap
  // to shuffle even on targets without PSHUFB.
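  // e.g. per the SSE2 table below, a v4i16 broadcast is a single pshuflw
  // and a v2i8 reverse is a single punpck.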
  EVT VT = TLI->getValueType(DL, BaseTp);
  if (VT.isSimple() && VT.isVector() && VT.getSizeInBits() < 128 &&
      !ST->hasSSSE3()) {
    static const CostTblEntry SSE2SubVectorShuffleTbl[] = {
      {TTI::SK_Broadcast,        MVT::v4i16, 1}, // pshuflw
      {TTI::SK_Broadcast,        MVT::v2i16, 1}, // pshuflw
      {TTI::SK_Broadcast,        MVT::v8i8,  2}, // punpck/pshuflw
      {TTI::SK_Broadcast,        MVT::v4i8,  2}, // punpck/pshuflw
      {TTI::SK_Broadcast,        MVT::v2i8,  1}, // punpck

      {TTI::SK_Reverse,          MVT::v4i16, 1}, // pshuflw
      {TTI::SK_Reverse,          MVT::v2i16, 1}, // pshuflw
      {TTI::SK_Reverse,          MVT::v4i8,  3}, // punpck/pshuflw/packus
      {TTI::SK_Reverse,          MVT::v2i8,  1}, // punpck

      {TTI::SK_PermuteTwoSrc,    MVT::v4i16, 2}, // punpck/pshuflw
      {TTI::SK_PermuteTwoSrc,    MVT::v2i16, 2}, // punpck/pshuflw
      {TTI::SK_PermuteTwoSrc,    MVT::v8i8,  7}, // punpck/pshuflw
      {TTI::SK_PermuteTwoSrc,    MVT::v4i8,  4}, // punpck/pshuflw
      {TTI::SK_PermuteTwoSrc,    MVT::v2i8,  2}, // punpck

      {TTI::SK_PermuteSingleSrc, MVT::v4i16, 1}, // pshuflw
      {TTI::SK_PermuteSingleSrc, MVT::v2i16, 1}, // pshuflw
      {TTI::SK_PermuteSingleSrc, MVT::v8i8,  5}, // punpck/pshuflw
      {TTI::SK_PermuteSingleSrc, MVT::v4i8,  3}, // punpck/pshuflw
      {TTI::SK_PermuteSingleSrc, MVT::v2i8,  1}, // punpck
    };

    if (ST->hasSSE2())
      if (const auto *Entry =
              CostTableLookup(SSE2SubVectorShuffleTbl, Kind, VT.getSimpleVT()))
        return Entry->Cost;
  }

  // We are going to permute multiple sources and the result will be in
  // multiple destinations. Provide an accurate cost only for splits where
  // the element type remains the same.
  if (Kind == TTI::SK_PermuteSingleSrc && LT.first != 1) {
    MVT LegalVT = LT.second;
    if (LegalVT.isVector() &&
        LegalVT.getVectorElementType().getSizeInBits() ==
            BaseTp->getElementType()->getPrimitiveSizeInBits() &&
        LegalVT.getVectorNumElements() <
            cast<FixedVectorType>(BaseTp)->getNumElements()) {

      unsigned VecTySize = DL.getTypeStoreSize(BaseTp);
      unsigned LegalVTSize = LegalVT.getStoreSize();
      // Number of source vectors after legalization:
      unsigned NumOfSrcs = (VecTySize + LegalVTSize - 1) / LegalVTSize;
      // Number of destination vectors after legalization:
      InstructionCost NumOfDests = LT.first;

      auto *SingleOpTy = FixedVectorType::get(BaseTp->getElementType(),
                                              LegalVT.getVectorNumElements());

      InstructionCost NumOfShuffles = (NumOfSrcs - 1) * NumOfDests;
      return NumOfShuffles * getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy,
                                            None, 0, nullptr);
    }

    return BaseT::getShuffleCost(Kind, BaseTp, Mask, Index, SubTp);
  }

  // For 2-input shuffles, we must account for splitting the 2 inputs into many.
  if (Kind == TTI::SK_PermuteTwoSrc && LT.first != 1) {
    // We assume that source and destination have the same vector type.
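    // e.g. a v32i8 two-source shuffle on SSE2 has LT.first == 2, so it is
    // costed as 2 destinations * (2 * 2 - 1) = 6 legalized shuffles.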
    InstructionCost NumOfDests = LT.first;
    InstructionCost NumOfShufflesPerDest = LT.first * 2 - 1;
    LT.first = NumOfDests * NumOfShufflesPerDest;
  }

  static const CostTblEntry AVX512FP16ShuffleTbl[] = {
      {TTI::SK_Broadcast, MVT::v32f16, 1}, // vpbroadcastw
      {TTI::SK_Broadcast, MVT::v16f16, 1}, // vpbroadcastw
      {TTI::SK_Broadcast, MVT::v8f16,  1}, // vpbroadcastw

      {TTI::SK_Reverse, MVT::v32f16, 2}, // vpermw
      {TTI::SK_Reverse, MVT::v16f16, 2}, // vpermw
      {TTI::SK_Reverse, MVT::v8f16,  1}, // vpshufb

      {TTI::SK_PermuteSingleSrc, MVT::v32f16, 2}, // vpermw
      {TTI::SK_PermuteSingleSrc, MVT::v16f16, 2}, // vpermw
      {TTI::SK_PermuteSingleSrc, MVT::v8f16,  1}, // vpshufb

      {TTI::SK_PermuteTwoSrc, MVT::v32f16, 2}, // vpermt2w
      {TTI::SK_PermuteTwoSrc, MVT::v16f16, 2}, // vpermt2w
      {TTI::SK_PermuteTwoSrc, MVT::v8f16,  2}  // vpermt2w
  };

  if (!ST->useSoftFloat() && ST->hasFP16())
    if (const auto *Entry =
            CostTableLookup(AVX512FP16ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512VBMIShuffleTbl[] = {
      {TTI::SK_Reverse, MVT::v64i8, 1}, // vpermb
      {TTI::SK_Reverse, MVT::v32i8, 1}, // vpermb

      {TTI::SK_PermuteSingleSrc, MVT::v64i8, 1}, // vpermb
      {TTI::SK_PermuteSingleSrc, MVT::v32i8, 1}, // vpermb

      {TTI::SK_PermuteTwoSrc, MVT::v64i8, 2}, // vpermt2b
      {TTI::SK_PermuteTwoSrc, MVT::v32i8, 2}, // vpermt2b
      {TTI::SK_PermuteTwoSrc, MVT::v16i8, 2}  // vpermt2b
  };

  if (ST->hasVBMI())
    if (const auto *Entry =
            CostTableLookup(AVX512VBMIShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512BWShuffleTbl[] = {
      {TTI::SK_Broadcast, MVT::v32i16, 1}, // vpbroadcastw
      {TTI::SK_Broadcast, MVT::v64i8,  1}, // vpbroadcastb

      {TTI::SK_Reverse, MVT::v32i16, 2}, // vpermw
      {TTI::SK_Reverse, MVT::v16i16, 2}, // vpermw
      {TTI::SK_Reverse, MVT::v64i8,  2}, // pshufb + vshufi64x2

      {TTI::SK_PermuteSingleSrc, MVT::v32i16, 2}, // vpermw
      {TTI::SK_PermuteSingleSrc, MVT::v16i16, 2}, // vpermw
      {TTI::SK_PermuteSingleSrc, MVT::v64i8,  8}, // extend to v32i16

      {TTI::SK_PermuteTwoSrc, MVT::v32i16, 2},  // vpermt2w
      {TTI::SK_PermuteTwoSrc, MVT::v16i16, 2},  // vpermt2w
      {TTI::SK_PermuteTwoSrc, MVT::v8i16,  2},  // vpermt2w
      {TTI::SK_PermuteTwoSrc, MVT::v64i8,  19}, // 6 * v32i8 + 1

      {TTI::SK_Select, MVT::v32i16, 1}, // vblendmw
      {TTI::SK_Select, MVT::v64i8,  1}, // vblendmb
  };

  if (ST->hasBWI())
    if (const auto *Entry =
            CostTableLookup(AVX512BWShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512ShuffleTbl[] = {
      {TTI::SK_Broadcast, MVT::v8f64,  1}, // vbroadcastpd
      {TTI::SK_Broadcast, MVT::v16f32, 1}, // vbroadcastps
      {TTI::SK_Broadcast, MVT::v8i64,  1}, // vpbroadcastq
      {TTI::SK_Broadcast, MVT::v16i32, 1}, // vpbroadcastd
      {TTI::SK_Broadcast, MVT::v32i16, 1}, // vpbroadcastw
      {TTI::SK_Broadcast, MVT::v64i8,  1}, // vpbroadcastb

      {TTI::SK_Reverse, MVT::v8f64,  1}, // vpermpd
      {TTI::SK_Reverse, MVT::v16f32, 1}, // vpermps
      {TTI::SK_Reverse, MVT::v8i64,  1}, // vpermq
      {TTI::SK_Reverse, MVT::v16i32, 1}, // vpermd
      {TTI::SK_Reverse, MVT::v32i16, 7}, // per mca
      {TTI::SK_Reverse, MVT::v64i8,  7}, // per mca

      {TTI::SK_PermuteSingleSrc, MVT::v8f64,  1}, // vpermpd
{TTI::SK_PermuteSingleSrc, MVT::v4f64, 1}, // vpermpd 1325 {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1}, // vpermpd 1326 {TTI::SK_PermuteSingleSrc, MVT::v16f32, 1}, // vpermps 1327 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1}, // vpermps 1328 {TTI::SK_PermuteSingleSrc, MVT::v4f32, 1}, // vpermps 1329 {TTI::SK_PermuteSingleSrc, MVT::v8i64, 1}, // vpermq 1330 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1}, // vpermq 1331 {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1}, // vpermq 1332 {TTI::SK_PermuteSingleSrc, MVT::v16i32, 1}, // vpermd 1333 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1}, // vpermd 1334 {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1}, // vpermd 1335 {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1}, // pshufb 1336 1337 {TTI::SK_PermuteTwoSrc, MVT::v8f64, 1}, // vpermt2pd 1338 {TTI::SK_PermuteTwoSrc, MVT::v16f32, 1}, // vpermt2ps 1339 {TTI::SK_PermuteTwoSrc, MVT::v8i64, 1}, // vpermt2q 1340 {TTI::SK_PermuteTwoSrc, MVT::v16i32, 1}, // vpermt2d 1341 {TTI::SK_PermuteTwoSrc, MVT::v4f64, 1}, // vpermt2pd 1342 {TTI::SK_PermuteTwoSrc, MVT::v8f32, 1}, // vpermt2ps 1343 {TTI::SK_PermuteTwoSrc, MVT::v4i64, 1}, // vpermt2q 1344 {TTI::SK_PermuteTwoSrc, MVT::v8i32, 1}, // vpermt2d 1345 {TTI::SK_PermuteTwoSrc, MVT::v2f64, 1}, // vpermt2pd 1346 {TTI::SK_PermuteTwoSrc, MVT::v4f32, 1}, // vpermt2ps 1347 {TTI::SK_PermuteTwoSrc, MVT::v2i64, 1}, // vpermt2q 1348 {TTI::SK_PermuteTwoSrc, MVT::v4i32, 1}, // vpermt2d 1349 1350 // FIXME: This just applies the type legalization cost rules above 1351 // assuming these completely split. 1352 {TTI::SK_PermuteSingleSrc, MVT::v32i16, 14}, 1353 {TTI::SK_PermuteSingleSrc, MVT::v64i8, 14}, 1354 {TTI::SK_PermuteTwoSrc, MVT::v32i16, 42}, 1355 {TTI::SK_PermuteTwoSrc, MVT::v64i8, 42}, 1356 1357 {TTI::SK_Select, MVT::v32i16, 1}, // vpternlogq 1358 {TTI::SK_Select, MVT::v64i8, 1}, // vpternlogq 1359 {TTI::SK_Select, MVT::v8f64, 1}, // vblendmpd 1360 {TTI::SK_Select, MVT::v16f32, 1}, // vblendmps 1361 {TTI::SK_Select, MVT::v8i64, 1}, // vblendmq 1362 {TTI::SK_Select, MVT::v16i32, 1}, // vblendmd 1363 }; 1364 1365 if (ST->hasAVX512()) 1366 if (const auto *Entry = CostTableLookup(AVX512ShuffleTbl, Kind, LT.second)) 1367 return LT.first * Entry->Cost; 1368 1369 static const CostTblEntry AVX2ShuffleTbl[] = { 1370 {TTI::SK_Broadcast, MVT::v4f64, 1}, // vbroadcastpd 1371 {TTI::SK_Broadcast, MVT::v8f32, 1}, // vbroadcastps 1372 {TTI::SK_Broadcast, MVT::v4i64, 1}, // vpbroadcastq 1373 {TTI::SK_Broadcast, MVT::v8i32, 1}, // vpbroadcastd 1374 {TTI::SK_Broadcast, MVT::v16i16, 1}, // vpbroadcastw 1375 {TTI::SK_Broadcast, MVT::v32i8, 1}, // vpbroadcastb 1376 1377 {TTI::SK_Reverse, MVT::v4f64, 1}, // vpermpd 1378 {TTI::SK_Reverse, MVT::v8f32, 1}, // vpermps 1379 {TTI::SK_Reverse, MVT::v4i64, 1}, // vpermq 1380 {TTI::SK_Reverse, MVT::v8i32, 1}, // vpermd 1381 {TTI::SK_Reverse, MVT::v16i16, 2}, // vperm2i128 + pshufb 1382 {TTI::SK_Reverse, MVT::v32i8, 2}, // vperm2i128 + pshufb 1383 1384 {TTI::SK_Select, MVT::v16i16, 1}, // vpblendvb 1385 {TTI::SK_Select, MVT::v32i8, 1}, // vpblendvb 1386 1387 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1}, // vpermpd 1388 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1}, // vpermps 1389 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1}, // vpermq 1390 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1}, // vpermd 1391 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vperm2i128 + 2*vpshufb 1392 // + vpblendvb 1393 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4}, // vperm2i128 + 2*vpshufb 1394 // + vpblendvb 1395 1396 {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3}, // 2*vpermpd + vblendpd 1397 {TTI::SK_PermuteTwoSrc, MVT::v8f32, 
3}, // 2*vpermps + vblendps 1398 {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3}, // 2*vpermq + vpblendd 1399 {TTI::SK_PermuteTwoSrc, MVT::v8i32, 3}, // 2*vpermd + vpblendd 1400 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 7}, // 2*vperm2i128 + 4*vpshufb 1401 // + vpblendvb 1402 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 7}, // 2*vperm2i128 + 4*vpshufb 1403 // + vpblendvb 1404 }; 1405 1406 if (ST->hasAVX2()) 1407 if (const auto *Entry = CostTableLookup(AVX2ShuffleTbl, Kind, LT.second)) 1408 return LT.first * Entry->Cost; 1409 1410 static const CostTblEntry XOPShuffleTbl[] = { 1411 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2}, // vperm2f128 + vpermil2pd 1412 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 2}, // vperm2f128 + vpermil2ps 1413 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2}, // vperm2f128 + vpermil2pd 1414 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 2}, // vperm2f128 + vpermil2ps 1415 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vextractf128 + 2*vpperm 1416 // + vinsertf128 1417 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4}, // vextractf128 + 2*vpperm 1418 // + vinsertf128 1419 1420 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 9}, // 2*vextractf128 + 6*vpperm 1421 // + vinsertf128 1422 {TTI::SK_PermuteTwoSrc, MVT::v8i16, 1}, // vpperm 1423 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 9}, // 2*vextractf128 + 6*vpperm 1424 // + vinsertf128 1425 {TTI::SK_PermuteTwoSrc, MVT::v16i8, 1}, // vpperm 1426 }; 1427 1428 if (ST->hasXOP()) 1429 if (const auto *Entry = CostTableLookup(XOPShuffleTbl, Kind, LT.second)) 1430 return LT.first * Entry->Cost; 1431 1432 static const CostTblEntry AVX1ShuffleTbl[] = { 1433 {TTI::SK_Broadcast, MVT::v4f64, 2}, // vperm2f128 + vpermilpd 1434 {TTI::SK_Broadcast, MVT::v8f32, 2}, // vperm2f128 + vpermilps 1435 {TTI::SK_Broadcast, MVT::v4i64, 2}, // vperm2f128 + vpermilpd 1436 {TTI::SK_Broadcast, MVT::v8i32, 2}, // vperm2f128 + vpermilps 1437 {TTI::SK_Broadcast, MVT::v16i16, 3}, // vpshuflw + vpshufd + vinsertf128 1438 {TTI::SK_Broadcast, MVT::v32i8, 2}, // vpshufb + vinsertf128 1439 1440 {TTI::SK_Reverse, MVT::v4f64, 2}, // vperm2f128 + vpermilpd 1441 {TTI::SK_Reverse, MVT::v8f32, 2}, // vperm2f128 + vpermilps 1442 {TTI::SK_Reverse, MVT::v4i64, 2}, // vperm2f128 + vpermilpd 1443 {TTI::SK_Reverse, MVT::v8i32, 2}, // vperm2f128 + vpermilps 1444 {TTI::SK_Reverse, MVT::v16i16, 4}, // vextractf128 + 2*pshufb 1445 // + vinsertf128 1446 {TTI::SK_Reverse, MVT::v32i8, 4}, // vextractf128 + 2*pshufb 1447 // + vinsertf128 1448 1449 {TTI::SK_Select, MVT::v4i64, 1}, // vblendpd 1450 {TTI::SK_Select, MVT::v4f64, 1}, // vblendpd 1451 {TTI::SK_Select, MVT::v8i32, 1}, // vblendps 1452 {TTI::SK_Select, MVT::v8f32, 1}, // vblendps 1453 {TTI::SK_Select, MVT::v16i16, 3}, // vpand + vpandn + vpor 1454 {TTI::SK_Select, MVT::v32i8, 3}, // vpand + vpandn + vpor 1455 1456 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2}, // vperm2f128 + vshufpd 1457 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2}, // vperm2f128 + vshufpd 1458 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 4}, // 2*vperm2f128 + 2*vshufps 1459 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 4}, // 2*vperm2f128 + 2*vshufps 1460 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 8}, // vextractf128 + 4*pshufb 1461 // + 2*por + vinsertf128 1462 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 8}, // vextractf128 + 4*pshufb 1463 // + 2*por + vinsertf128 1464 1465 {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3}, // 2*vperm2f128 + vshufpd 1466 {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3}, // 2*vperm2f128 + vshufpd 1467 {TTI::SK_PermuteTwoSrc, MVT::v8f32, 4}, // 2*vperm2f128 + 2*vshufps 1468 {TTI::SK_PermuteTwoSrc, MVT::v8i32, 4}, // 2*vperm2f128 + 
2*vshufps 1469 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 15}, // 2*vextractf128 + 8*pshufb 1470 // + 4*por + vinsertf128 1471 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 15}, // 2*vextractf128 + 8*pshufb 1472 // + 4*por + vinsertf128 1473 }; 1474 1475 if (ST->hasAVX()) 1476 if (const auto *Entry = CostTableLookup(AVX1ShuffleTbl, Kind, LT.second)) 1477 return LT.first * Entry->Cost; 1478 1479 static const CostTblEntry SSE41ShuffleTbl[] = { 1480 {TTI::SK_Select, MVT::v2i64, 1}, // pblendw 1481 {TTI::SK_Select, MVT::v2f64, 1}, // movsd 1482 {TTI::SK_Select, MVT::v4i32, 1}, // pblendw 1483 {TTI::SK_Select, MVT::v4f32, 1}, // blendps 1484 {TTI::SK_Select, MVT::v8i16, 1}, // pblendw 1485 {TTI::SK_Select, MVT::v16i8, 1} // pblendvb 1486 }; 1487 1488 if (ST->hasSSE41()) 1489 if (const auto *Entry = CostTableLookup(SSE41ShuffleTbl, Kind, LT.second)) 1490 return LT.first * Entry->Cost; 1491 1492 static const CostTblEntry SSSE3ShuffleTbl[] = { 1493 {TTI::SK_Broadcast, MVT::v8i16, 1}, // pshufb 1494 {TTI::SK_Broadcast, MVT::v16i8, 1}, // pshufb 1495 1496 {TTI::SK_Reverse, MVT::v8i16, 1}, // pshufb 1497 {TTI::SK_Reverse, MVT::v16i8, 1}, // pshufb 1498 1499 {TTI::SK_Select, MVT::v8i16, 3}, // 2*pshufb + por 1500 {TTI::SK_Select, MVT::v16i8, 3}, // 2*pshufb + por 1501 1502 {TTI::SK_PermuteSingleSrc, MVT::v8i16, 1}, // pshufb 1503 {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1}, // pshufb 1504 1505 {TTI::SK_PermuteTwoSrc, MVT::v8i16, 3}, // 2*pshufb + por 1506 {TTI::SK_PermuteTwoSrc, MVT::v16i8, 3}, // 2*pshufb + por 1507 }; 1508 1509 if (ST->hasSSSE3()) 1510 if (const auto *Entry = CostTableLookup(SSSE3ShuffleTbl, Kind, LT.second)) 1511 return LT.first * Entry->Cost; 1512 1513 static const CostTblEntry SSE2ShuffleTbl[] = { 1514 {TTI::SK_Broadcast, MVT::v2f64, 1}, // shufpd 1515 {TTI::SK_Broadcast, MVT::v2i64, 1}, // pshufd 1516 {TTI::SK_Broadcast, MVT::v4i32, 1}, // pshufd 1517 {TTI::SK_Broadcast, MVT::v8i16, 2}, // pshuflw + pshufd 1518 {TTI::SK_Broadcast, MVT::v16i8, 3}, // unpck + pshuflw + pshufd 1519 1520 {TTI::SK_Reverse, MVT::v2f64, 1}, // shufpd 1521 {TTI::SK_Reverse, MVT::v2i64, 1}, // pshufd 1522 {TTI::SK_Reverse, MVT::v4i32, 1}, // pshufd 1523 {TTI::SK_Reverse, MVT::v8i16, 3}, // pshuflw + pshufhw + pshufd 1524 {TTI::SK_Reverse, MVT::v16i8, 9}, // 2*pshuflw + 2*pshufhw 1525 // + 2*pshufd + 2*unpck + packus 1526 1527 {TTI::SK_Select, MVT::v2i64, 1}, // movsd 1528 {TTI::SK_Select, MVT::v2f64, 1}, // movsd 1529 {TTI::SK_Select, MVT::v4i32, 2}, // 2*shufps 1530 {TTI::SK_Select, MVT::v8i16, 3}, // pand + pandn + por 1531 {TTI::SK_Select, MVT::v16i8, 3}, // pand + pandn + por 1532 1533 {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1}, // shufpd 1534 {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1}, // pshufd 1535 {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1}, // pshufd 1536 {TTI::SK_PermuteSingleSrc, MVT::v8i16, 5}, // 2*pshuflw + 2*pshufhw 1537 // + pshufd/unpck 1538 { TTI::SK_PermuteSingleSrc, MVT::v16i8, 10 }, // 2*pshuflw + 2*pshufhw 1539 // + 2*pshufd + 2*unpck + 2*packus 1540 1541 { TTI::SK_PermuteTwoSrc, MVT::v2f64, 1 }, // shufpd 1542 { TTI::SK_PermuteTwoSrc, MVT::v2i64, 1 }, // shufpd 1543 { TTI::SK_PermuteTwoSrc, MVT::v4i32, 2 }, // 2*{unpck,movsd,pshufd} 1544 { TTI::SK_PermuteTwoSrc, MVT::v8i16, 8 }, // blend+permute 1545 { TTI::SK_PermuteTwoSrc, MVT::v16i8, 13 }, // blend+permute 1546 }; 1547 1548 if (ST->hasSSE2()) 1549 if (const auto *Entry = CostTableLookup(SSE2ShuffleTbl, Kind, LT.second)) 1550 return LT.first * Entry->Cost; 1551 1552 static const CostTblEntry SSE1ShuffleTbl[] = { 1553 { TTI::SK_Broadcast, MVT::v4f32, 1 
}, // shufps 1554 { TTI::SK_Reverse, MVT::v4f32, 1 }, // shufps 1555 { TTI::SK_Select, MVT::v4f32, 2 }, // 2*shufps 1556 { TTI::SK_PermuteSingleSrc, MVT::v4f32, 1 }, // shufps 1557 { TTI::SK_PermuteTwoSrc, MVT::v4f32, 2 }, // 2*shufps 1558 }; 1559 1560 if (ST->hasSSE1()) 1561 if (const auto *Entry = CostTableLookup(SSE1ShuffleTbl, Kind, LT.second)) 1562 return LT.first * Entry->Cost; 1563 1564 return BaseT::getShuffleCost(Kind, BaseTp, Mask, Index, SubTp); 1565 } 1566 1567 InstructionCost X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, 1568 Type *Src, 1569 TTI::CastContextHint CCH, 1570 TTI::TargetCostKind CostKind, 1571 const Instruction *I) { 1572 int ISD = TLI->InstructionOpcodeToISD(Opcode); 1573 assert(ISD && "Invalid opcode"); 1574 1575 // TODO: Allow non-throughput costs that aren't binary. 1576 auto AdjustCost = [&CostKind](InstructionCost Cost) -> InstructionCost { 1577 if (CostKind != TTI::TCK_RecipThroughput) 1578 return Cost == 0 ? 0 : 1; 1579 return Cost; 1580 }; 1581 1582 // The cost tables include both specific, custom (non-legal) src/dst type 1583 // conversions and generic, legalized types. We test for customs first, before 1584 // falling back to legalization. 1585 // FIXME: Need a better design of the cost table to handle non-simple types of 1586 // potential massive combinations (elem_num x src_type x dst_type). 1587 static const TypeConversionCostTblEntry AVX512BWConversionTbl[] { 1588 { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 1 }, 1589 { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 1 }, 1590 1591 // Mask sign extend has an instruction. 1592 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 1 }, 1593 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v2i1, 1 }, 1594 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 1 }, 1595 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v2i1, 1 }, 1596 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 1 }, 1597 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v4i1, 1 }, 1598 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 1 }, 1599 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v4i1, 1 }, 1600 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 1 }, 1601 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v8i1, 1 }, 1602 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 1 }, 1603 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 1 }, 1604 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 }, 1605 { ISD::SIGN_EXTEND, MVT::v32i8, MVT::v32i1, 1 }, 1606 { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i1, 1 }, 1607 { ISD::SIGN_EXTEND, MVT::v64i8, MVT::v64i1, 1 }, 1608 { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v64i1, 1 }, 1609 1610 // Mask zero extend is a sext + shift. 
1611 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 2 }, 1612 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v2i1, 2 }, 1613 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 2 }, 1614 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v2i1, 2 }, 1615 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 2 }, 1616 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v4i1, 2 }, 1617 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 2 }, 1618 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v4i1, 2 }, 1619 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 2 }, 1620 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v8i1, 2 }, 1621 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 2 }, 1622 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 2 }, 1623 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 }, 1624 { ISD::ZERO_EXTEND, MVT::v32i8, MVT::v32i1, 2 }, 1625 { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i1, 2 }, 1626 { ISD::ZERO_EXTEND, MVT::v64i8, MVT::v64i1, 2 }, 1627 { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v64i1, 2 }, 1628 1629 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 2 }, 1630 { ISD::TRUNCATE, MVT::v2i1, MVT::v16i8, 2 }, 1631 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 }, 1632 { ISD::TRUNCATE, MVT::v2i1, MVT::v8i16, 2 }, 1633 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 }, 1634 { ISD::TRUNCATE, MVT::v4i1, MVT::v16i8, 2 }, 1635 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 2 }, 1636 { ISD::TRUNCATE, MVT::v4i1, MVT::v8i16, 2 }, 1637 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 2 }, 1638 { ISD::TRUNCATE, MVT::v8i1, MVT::v16i8, 2 }, 1639 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 2 }, 1640 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 2 }, 1641 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 2 }, 1642 { ISD::TRUNCATE, MVT::v32i1, MVT::v32i8, 2 }, 1643 { ISD::TRUNCATE, MVT::v32i1, MVT::v32i16, 2 }, 1644 { ISD::TRUNCATE, MVT::v64i1, MVT::v64i8, 2 }, 1645 { ISD::TRUNCATE, MVT::v64i1, MVT::v32i16, 2 }, 1646 1647 { ISD::TRUNCATE, MVT::v32i8, MVT::v32i16, 2 }, 1648 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 }, // widen to zmm 1649 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i16, 2 }, // vpmovwb 1650 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 2 }, // vpmovwb 1651 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 2 }, // vpmovwb 1652 }; 1653 1654 static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = { 1655 // Mask sign extend has an instruction. 1656 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 }, 1657 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v2i1, 1 }, 1658 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 }, 1659 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 }, 1660 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 }, 1661 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v16i1, 1 }, 1662 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i1, 1 }, 1663 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1, 1 }, 1664 1665 // Mask zero extend is a sext + shift. 
1666 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 }, 1667 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v2i1, 2 }, 1668 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 }, 1669 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 }, 1670 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 }, 1671 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v16i1, 2 }, 1672 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i1, 2 }, 1673 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1, 2 }, 1674 1675 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 }, 1676 { ISD::TRUNCATE, MVT::v2i1, MVT::v4i32, 2 }, 1677 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 }, 1678 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 }, 1679 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 }, 1680 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i64, 2 }, 1681 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i32, 2 }, 1682 { ISD::TRUNCATE, MVT::v16i1, MVT::v8i64, 2 }, 1683 1684 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 }, 1685 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 }, 1686 1687 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 }, 1688 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 }, 1689 1690 { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f32, 1 }, 1691 { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f64, 1 }, 1692 1693 { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f32, 1 }, 1694 { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f64, 1 }, 1695 }; 1696 1697 // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and 1698 // 256-bit wide vectors. 1699 1700 static const TypeConversionCostTblEntry AVX512FConversionTbl[] = { 1701 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 1 }, 1702 { ISD::FP_EXTEND, MVT::v8f64, MVT::v16f32, 3 }, 1703 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 1 }, 1704 1705 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // sext+vpslld+vptestmd 1706 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 3 }, // sext+vpslld+vptestmd 1707 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 3 }, // sext+vpslld+vptestmd 1708 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 3 }, // sext+vpslld+vptestmd 1709 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 3 }, // sext+vpsllq+vptestmq 1710 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 3 }, // sext+vpsllq+vptestmq 1711 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 3 }, // sext+vpsllq+vptestmq 1712 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 3 }, // sext+vpslld+vptestmd 1713 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 2 }, // zmm vpslld+vptestmd 1714 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 }, // zmm vpslld+vptestmd 1715 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 }, // zmm vpslld+vptestmd 1716 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i32, 2 }, // vpslld+vptestmd 1717 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 }, // zmm vpsllq+vptestmq 1718 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 }, // zmm vpsllq+vptestmq 1719 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i64, 2 }, // vpsllq+vptestmq 1720 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i32, 2 }, // vpmovdb 1721 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 2 }, // vpmovdb 1722 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 2 }, // vpmovdb 1723 { ISD::TRUNCATE, MVT::v32i8, MVT::v16i32, 2 }, // vpmovdb 1724 { ISD::TRUNCATE, MVT::v64i8, MVT::v16i32, 2 }, // vpmovdb 1725 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 2 }, // vpmovdw 1726 { ISD::TRUNCATE, MVT::v32i16, MVT::v16i32, 2 }, // vpmovdw 1727 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i64, 2 }, // vpmovqb 1728 { ISD::TRUNCATE, MVT::v2i16, MVT::v2i64, 1 }, // vpshufb 1729 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i64, 2 }, // vpmovqb 1730 { ISD::TRUNCATE, MVT::v16i8, MVT::v8i64, 2 }, // vpmovqb 1731 { ISD::TRUNCATE, MVT::v32i8, MVT::v8i64, 2 }, // vpmovqb 1732 { ISD::TRUNCATE, MVT::v64i8, MVT::v8i64, 2 }, // vpmovqb 1733 { 
ISD::TRUNCATE, MVT::v8i16, MVT::v8i64, 2 }, // vpmovqw 1734 { ISD::TRUNCATE, MVT::v16i16, MVT::v8i64, 2 }, // vpmovqw 1735 { ISD::TRUNCATE, MVT::v32i16, MVT::v8i64, 2 }, // vpmovqw 1736 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 1 }, // vpmovqd 1737 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 }, // zmm vpmovqd 1738 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i64, 5 },// 2*vpmovqd+concat+vpmovdb 1739 1740 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 }, // extend to v16i32 1741 { ISD::TRUNCATE, MVT::v32i8, MVT::v32i16, 8 }, 1742 { ISD::TRUNCATE, MVT::v64i8, MVT::v32i16, 8 }, 1743 1744 // Sign extend is zmm vpternlogd+vptruncdb. 1745 // Zero extend is zmm broadcast load+vptruncdw. 1746 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 3 }, 1747 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 4 }, 1748 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 3 }, 1749 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 4 }, 1750 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 3 }, 1751 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 4 }, 1752 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 3 }, 1753 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 4 }, 1754 1755 // Sign extend is zmm vpternlogd+vptruncdw. 1756 // Zero extend is zmm vpternlogd+vptruncdw+vpsrlw. 1757 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 3 }, 1758 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 4 }, 1759 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 3 }, 1760 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 4 }, 1761 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 3 }, 1762 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 4 }, 1763 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 3 }, 1764 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 4 }, 1765 1766 { ISD::SIGN_EXTEND, MVT::v2i32, MVT::v2i1, 1 }, // zmm vpternlogd 1767 { ISD::ZERO_EXTEND, MVT::v2i32, MVT::v2i1, 2 }, // zmm vpternlogd+psrld 1768 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 }, // zmm vpternlogd 1769 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 }, // zmm vpternlogd+psrld 1770 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 }, // zmm vpternlogd 1771 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 }, // zmm vpternlogd+psrld 1772 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 }, // zmm vpternlogq 1773 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 }, // zmm vpternlogq+psrlq 1774 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 }, // zmm vpternlogq 1775 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 }, // zmm vpternlogq+psrlq 1776 1777 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1, 1 }, // vpternlogd 1778 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1, 2 }, // vpternlogd+psrld 1779 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i1, 1 }, // vpternlogq 1780 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i1, 2 }, // vpternlogq+psrlq 1781 1782 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 1 }, 1783 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 1 }, 1784 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 }, 1785 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 }, 1786 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 1 }, 1787 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 1 }, 1788 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 1 }, 1789 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 1 }, 1790 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i32, 1 }, 1791 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i32, 1 }, 1792 1793 { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 3 }, // FIXME: May not be right 1794 { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 3 }, // FIXME: May not be right 1795 1796 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 }, 1797 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 }, 1798 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v16i8, 2 }, 1799 
{ ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 1 }, 1800 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 }, 1801 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 1 }, 1802 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 }, 1803 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 }, 1804 1805 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 }, 1806 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 }, 1807 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v16i8, 2 }, 1808 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 1 }, 1809 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 }, 1810 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 1 }, 1811 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 }, 1812 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 }, 1813 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 26 }, 1814 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 5 }, 1815 1816 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f32, 2 }, 1817 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f64, 7 }, 1818 { ISD::FP_TO_SINT, MVT::v32i8, MVT::v32f64,15 }, 1819 { ISD::FP_TO_SINT, MVT::v64i8, MVT::v64f32,11 }, 1820 { ISD::FP_TO_SINT, MVT::v64i8, MVT::v64f64,31 }, 1821 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f64, 3 }, 1822 { ISD::FP_TO_SINT, MVT::v16i16, MVT::v16f64, 7 }, 1823 { ISD::FP_TO_SINT, MVT::v32i16, MVT::v32f32, 5 }, 1824 { ISD::FP_TO_SINT, MVT::v32i16, MVT::v32f64,15 }, 1825 { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f64, 1 }, 1826 { ISD::FP_TO_SINT, MVT::v16i32, MVT::v16f64, 3 }, 1827 1828 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f64, 1 }, 1829 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f64, 3 }, 1830 { ISD::FP_TO_UINT, MVT::v8i8, MVT::v8f64, 3 }, 1831 { ISD::FP_TO_UINT, MVT::v16i32, MVT::v16f32, 1 }, 1832 { ISD::FP_TO_UINT, MVT::v16i16, MVT::v16f32, 3 }, 1833 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v16f32, 3 }, 1834 }; 1835 1836 static const TypeConversionCostTblEntry AVX512BWVLConversionTbl[] { 1837 // Mask sign extend has an instruction. 1838 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 1 }, 1839 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v2i1, 1 }, 1840 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 1 }, 1841 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v2i1, 1 }, 1842 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 1 }, 1843 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v4i1, 1 }, 1844 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 1 }, 1845 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v4i1, 1 }, 1846 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 1 }, 1847 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v8i1, 1 }, 1848 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 1 }, 1849 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 1 }, 1850 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 }, 1851 { ISD::SIGN_EXTEND, MVT::v32i8, MVT::v32i1, 1 }, 1852 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v32i1, 1 }, 1853 { ISD::SIGN_EXTEND, MVT::v32i8, MVT::v64i1, 1 }, 1854 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v64i1, 1 }, 1855 1856 // Mask zero extend is a sext + shift. 
1857 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 2 }, 1858 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v2i1, 2 }, 1859 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 2 }, 1860 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v2i1, 2 }, 1861 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 2 }, 1862 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v4i1, 2 }, 1863 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 2 }, 1864 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v4i1, 2 }, 1865 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 2 }, 1866 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v8i1, 2 }, 1867 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 2 }, 1868 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 2 }, 1869 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 }, 1870 { ISD::ZERO_EXTEND, MVT::v32i8, MVT::v32i1, 2 }, 1871 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v32i1, 2 }, 1872 { ISD::ZERO_EXTEND, MVT::v32i8, MVT::v64i1, 2 }, 1873 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v64i1, 2 }, 1874 1875 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 2 }, 1876 { ISD::TRUNCATE, MVT::v2i1, MVT::v16i8, 2 }, 1877 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 }, 1878 { ISD::TRUNCATE, MVT::v2i1, MVT::v8i16, 2 }, 1879 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 }, 1880 { ISD::TRUNCATE, MVT::v4i1, MVT::v16i8, 2 }, 1881 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 2 }, 1882 { ISD::TRUNCATE, MVT::v4i1, MVT::v8i16, 2 }, 1883 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 2 }, 1884 { ISD::TRUNCATE, MVT::v8i1, MVT::v16i8, 2 }, 1885 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 2 }, 1886 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 2 }, 1887 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 2 }, 1888 { ISD::TRUNCATE, MVT::v32i1, MVT::v32i8, 2 }, 1889 { ISD::TRUNCATE, MVT::v32i1, MVT::v16i16, 2 }, 1890 { ISD::TRUNCATE, MVT::v64i1, MVT::v32i8, 2 }, 1891 { ISD::TRUNCATE, MVT::v64i1, MVT::v16i16, 2 }, 1892 1893 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 }, 1894 }; 1895 1896 static const TypeConversionCostTblEntry AVX512DQVLConversionTbl[] = { 1897 // Mask sign extend has an instruction. 1898 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 }, 1899 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v2i1, 1 }, 1900 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 }, 1901 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v16i1, 1 }, 1902 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 }, 1903 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v8i1, 1 }, 1904 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v16i1, 1 }, 1905 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 }, 1906 1907 // Mask zero extend is a sext + shift. 
1908 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 }, 1909 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v2i1, 2 }, 1910 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 }, 1911 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v16i1, 2 }, 1912 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 }, 1913 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v8i1, 2 }, 1914 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v16i1, 2 }, 1915 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 }, 1916 1917 { ISD::TRUNCATE, MVT::v16i1, MVT::v4i64, 2 }, 1918 { ISD::TRUNCATE, MVT::v16i1, MVT::v8i32, 2 }, 1919 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 }, 1920 { ISD::TRUNCATE, MVT::v2i1, MVT::v4i32, 2 }, 1921 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 }, 1922 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 }, 1923 { ISD::TRUNCATE, MVT::v8i1, MVT::v4i64, 2 }, 1924 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 }, 1925 1926 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 }, 1927 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 }, 1928 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 }, 1929 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 }, 1930 1931 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 }, 1932 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 }, 1933 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 }, 1934 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 }, 1935 1936 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v4f32, 1 }, 1937 { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f32, 1 }, 1938 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 }, 1939 { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f64, 1 }, 1940 1941 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v4f32, 1 }, 1942 { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f32, 1 }, 1943 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 }, 1944 { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f64, 1 }, 1945 }; 1946 1947 static const TypeConversionCostTblEntry AVX512VLConversionTbl[] = { 1948 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // sext+vpslld+vptestmd 1949 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 3 }, // sext+vpslld+vptestmd 1950 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 3 }, // sext+vpslld+vptestmd 1951 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 8 }, // split+2*v8i8 1952 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 3 }, // sext+vpsllq+vptestmq 1953 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 3 }, // sext+vpsllq+vptestmq 1954 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 3 }, // sext+vpsllq+vptestmq 1955 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 8 }, // split+2*v8i16 1956 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 2 }, // vpslld+vptestmd 1957 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 }, // vpslld+vptestmd 1958 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 }, // vpslld+vptestmd 1959 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 }, // vpsllq+vptestmq 1960 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 }, // vpsllq+vptestmq 1961 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 }, // vpmovqd 1962 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 2 }, // vpmovqb 1963 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 2 }, // vpmovqw 1964 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 2 }, // vpmovwb 1965 1966 // sign extend is vpcmpeq+maskedmove+vpmovdw+vpacksswb 1967 // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw+vpackuswb 1968 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 5 }, 1969 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 6 }, 1970 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 5 }, 1971 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 6 }, 1972 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 5 }, 1973 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 6 }, 1974 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 10 }, 1975 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 12 }, 1976 1977 // sign extend is 
vpcmpeq+maskedmove+vpmovdw 1978 // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw 1979 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 4 }, 1980 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 5 }, 1981 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 4 }, 1982 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 5 }, 1983 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 4 }, 1984 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 5 }, 1985 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 10 }, 1986 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 12 }, 1987 1988 { ISD::SIGN_EXTEND, MVT::v2i32, MVT::v2i1, 1 }, // vpternlogd 1989 { ISD::ZERO_EXTEND, MVT::v2i32, MVT::v2i1, 2 }, // vpternlogd+psrld 1990 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 }, // vpternlogd 1991 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 }, // vpternlogd+psrld 1992 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 }, // vpternlogd 1993 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 }, // vpternlogd+psrld 1994 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 }, // vpternlogq 1995 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 }, // vpternlogq+psrlq 1996 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 }, // vpternlogq 1997 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 }, // vpternlogq+psrlq 1998 1999 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v16i8, 1 }, 2000 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v16i8, 1 }, 2001 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v16i8, 1 }, 2002 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v16i8, 1 }, 2003 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 }, 2004 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 }, 2005 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v8i16, 1 }, 2006 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v8i16, 1 }, 2007 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 }, 2008 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 }, 2009 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 1 }, 2010 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1 }, 2011 2012 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 }, 2013 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v16i8, 1 }, 2014 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 }, 2015 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 1 }, 2016 2017 { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 1 }, 2018 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 1 }, 2019 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 }, 2020 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v16i8, 1 }, 2021 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 }, 2022 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 1 }, 2023 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 }, 2024 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 }, 2025 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 }, 2026 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 }, 2027 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 5 }, 2028 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 }, 2029 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 5 }, 2030 2031 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v8f32, 2 }, 2032 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f32, 2 }, 2033 { ISD::FP_TO_SINT, MVT::v32i8, MVT::v32f32, 5 }, 2034 2035 { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 1 }, 2036 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 1 }, 2037 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 }, 2038 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 1 }, 2039 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 1 }, 2040 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 1 }, 2041 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f64, 1 }, 2042 }; 2043 2044 static const TypeConversionCostTblEntry AVX2ConversionTbl[] = { 2045 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 3 }, 2046 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 3 }, 2047 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 3 }, 
2048 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 3 }, 2049 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 }, 2050 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 1 }, 2051 2052 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v16i8, 2 }, 2053 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v16i8, 2 }, 2054 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v16i8, 2 }, 2055 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v16i8, 2 }, 2056 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 }, 2057 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 }, 2058 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v8i16, 2 }, 2059 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v8i16, 2 }, 2060 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 2 }, 2061 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 2 }, 2062 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 3 }, 2063 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 3 }, 2064 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 2 }, 2065 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 2 }, 2066 2067 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 }, 2068 2069 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 4 }, 2070 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 4 }, 2071 { ISD::TRUNCATE, MVT::v16i8, MVT::v8i16, 1 }, 2072 { ISD::TRUNCATE, MVT::v16i8, MVT::v4i32, 1 }, 2073 { ISD::TRUNCATE, MVT::v16i8, MVT::v2i64, 1 }, 2074 { ISD::TRUNCATE, MVT::v16i8, MVT::v8i32, 4 }, 2075 { ISD::TRUNCATE, MVT::v16i8, MVT::v4i64, 4 }, 2076 { ISD::TRUNCATE, MVT::v8i16, MVT::v4i32, 1 }, 2077 { ISD::TRUNCATE, MVT::v8i16, MVT::v2i64, 1 }, 2078 { ISD::TRUNCATE, MVT::v8i16, MVT::v4i64, 5 }, 2079 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 }, 2080 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 2 }, 2081 2082 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 3 }, 2083 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 3 }, 2084 2085 { ISD::FP_TO_SINT, MVT::v16i16, MVT::v8f32, 1 }, 2086 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f64, 1 }, 2087 { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f32, 1 }, 2088 { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f64, 3 }, 2089 2090 { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 3 }, 2091 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 3 }, 2092 { ISD::FP_TO_UINT, MVT::v16i16, MVT::v8f32, 1 }, 2093 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 3 }, 2094 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 4 }, 2095 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 4 }, 2096 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 3 }, 2097 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v4f64, 4 }, 2098 2099 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 2 }, 2100 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v16i8, 2 }, 2101 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 2 }, 2102 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 }, 2103 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 }, 2104 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 }, 2105 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 3 }, 2106 2107 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 2 }, 2108 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v16i8, 2 }, 2109 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 2 }, 2110 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 }, 2111 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 2 }, 2112 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 1 }, 2113 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 2 }, 2114 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 2 }, 2115 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 2 }, 2116 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 4 }, 2117 }; 2118 2119 static const TypeConversionCostTblEntry AVXConversionTbl[] = { 2120 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 6 }, 2121 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 4 }, 2122 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 7 }, 2123 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 4 
}, 2124 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 4 }, 2125 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 4 }, 2126 2127 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v16i8, 3 }, 2128 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v16i8, 3 }, 2129 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v16i8, 3 }, 2130 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v16i8, 3 }, 2131 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 3 }, 2132 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 3 }, 2133 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v8i16, 3 }, 2134 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v8i16, 3 }, 2135 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 3 }, 2136 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 3 }, 2137 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 3 }, 2138 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 3 }, 2139 2140 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 4 }, 2141 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 5 }, 2142 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 4 }, 2143 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i64, 9 }, 2144 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i64, 11 }, 2145 2146 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 6 }, 2147 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 6 }, 2148 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 }, // and+extract+packuswb 2149 { ISD::TRUNCATE, MVT::v16i8, MVT::v8i32, 5 }, 2150 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 }, 2151 { ISD::TRUNCATE, MVT::v16i8, MVT::v4i64, 5 }, 2152 { ISD::TRUNCATE, MVT::v8i16, MVT::v4i64, 3 }, // and+extract+2*packusdw 2153 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 2 }, 2154 2155 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 }, 2156 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i1, 3 }, 2157 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i1, 8 }, 2158 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v16i8, 4 }, 2159 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v16i8, 2 }, 2160 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 }, 2161 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v8i16, 2 }, 2162 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 2 }, 2163 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 2 }, 2164 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 4 }, 2165 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 5 }, 2166 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i64, 8 }, 2167 2168 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 7 }, 2169 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i1, 7 }, 2170 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i1, 6 }, 2171 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v16i8, 4 }, 2172 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v16i8, 2 }, 2173 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 }, 2174 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v8i16, 2 }, 2175 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 4 }, 2176 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 4 }, 2177 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 }, 2178 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 6 }, 2179 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 8 }, 2180 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 10 }, 2181 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 10 }, 2182 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 18 }, 2183 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 }, 2184 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 10 }, 2185 2186 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v8f32, 2 }, 2187 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v4f64, 2 }, 2188 { ISD::FP_TO_SINT, MVT::v32i8, MVT::v8f32, 2 }, 2189 { ISD::FP_TO_SINT, MVT::v32i8, MVT::v4f64, 2 }, 2190 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f32, 2 }, 2191 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v4f64, 2 }, 2192 { ISD::FP_TO_SINT, MVT::v16i16, MVT::v8f32, 2 }, 2193 { ISD::FP_TO_SINT, MVT::v16i16, MVT::v4f64, 2 }, 2194 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f64, 2 }, 2195 { ISD::FP_TO_SINT, 
MVT::v8i32, MVT::v8f32, 2 },
    { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f64, 5 },

    { ISD::FP_TO_UINT, MVT::v16i8, MVT::v8f32, 2 },
    { ISD::FP_TO_UINT, MVT::v16i8, MVT::v4f64, 2 },
    { ISD::FP_TO_UINT, MVT::v32i8, MVT::v8f32, 2 },
    { ISD::FP_TO_UINT, MVT::v32i8, MVT::v4f64, 2 },
    { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f32, 2 },
    { ISD::FP_TO_UINT, MVT::v8i16, MVT::v4f64, 2 },
    { ISD::FP_TO_UINT, MVT::v16i16, MVT::v8f32, 2 },
    { ISD::FP_TO_UINT, MVT::v16i16, MVT::v4f64, 2 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 3 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 4 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 6 },
    { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 7 },
    { ISD::FP_TO_UINT, MVT::v8i32, MVT::v4f64, 7 },

    { ISD::FP_EXTEND, MVT::v4f64, MVT::v4f32, 1 },
    { ISD::FP_ROUND, MVT::v4f32, MVT::v4f64, 1 },
  };

  static const TypeConversionCostTblEntry SSE41ConversionTbl[] = {
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v16i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v16i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v16i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v16i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v16i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v16i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v8i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v8i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v8i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v8i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v4i32, 1 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v4i32, 1 },

    // These truncates end up widening elements.
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 1 }, // PMOVZXBQ
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 1 }, // PMOVZXWQ
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 1 }, // PMOVZXBD

    { ISD::TRUNCATE, MVT::v16i8, MVT::v4i32, 2 },
    { ISD::TRUNCATE, MVT::v8i16, MVT::v4i32, 2 },
    { ISD::TRUNCATE, MVT::v16i8, MVT::v2i64, 2 },

    { ISD::SINT_TO_FP, MVT::f32, MVT::i32, 1 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i32, 1 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i64, 1 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i64, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 1 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 1 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 2 },

    { ISD::UINT_TO_FP, MVT::f32, MVT::i32, 1 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i32, 1 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 4 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 4 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 1 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 1 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 3 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 3 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 2 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 12 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 22 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 4 },

    { ISD::FP_TO_SINT, MVT::i32, MVT::f32, 1 },
    { ISD::FP_TO_SINT, MVT::i64, MVT::f32, 1 },
    { ISD::FP_TO_SINT, MVT::i32, MVT::f64, 1 },
    { ISD::FP_TO_SINT, MVT::i64, MVT::f64, 1 },
    { ISD::FP_TO_SINT,
MVT::v16i8, MVT::v4f32, 2 }, 2271 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v2f64, 2 }, 2272 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v4f32, 1 }, 2273 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v2f64, 1 }, 2274 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 }, 2275 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v2f64, 1 }, 2276 2277 { ISD::FP_TO_UINT, MVT::i32, MVT::f32, 1 }, 2278 { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 4 }, 2279 { ISD::FP_TO_UINT, MVT::i32, MVT::f64, 1 }, 2280 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 4 }, 2281 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v4f32, 2 }, 2282 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v2f64, 2 }, 2283 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v4f32, 1 }, 2284 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v2f64, 1 }, 2285 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 4 }, 2286 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 4 }, 2287 }; 2288 2289 static const TypeConversionCostTblEntry SSE2ConversionTbl[] = { 2290 // These are somewhat magic numbers justified by comparing the 2291 // output of llvm-mca for our various supported scheduler models 2292 // and basing it off the worst case scenario. 2293 { ISD::SINT_TO_FP, MVT::f32, MVT::i32, 3 }, 2294 { ISD::SINT_TO_FP, MVT::f64, MVT::i32, 3 }, 2295 { ISD::SINT_TO_FP, MVT::f32, MVT::i64, 3 }, 2296 { ISD::SINT_TO_FP, MVT::f64, MVT::i64, 3 }, 2297 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 3 }, 2298 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 4 }, 2299 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 3 }, 2300 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 4 }, 2301 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 3 }, 2302 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4 }, 2303 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 8 }, 2304 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 8 }, 2305 2306 { ISD::UINT_TO_FP, MVT::f32, MVT::i32, 3 }, 2307 { ISD::UINT_TO_FP, MVT::f64, MVT::i32, 3 }, 2308 { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 8 }, 2309 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 9 }, 2310 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 4 }, 2311 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 4 }, 2312 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 4 }, 2313 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 4 }, 2314 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 7 }, 2315 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 7 }, 2316 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 }, 2317 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 15 }, 2318 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 18 }, 2319 2320 { ISD::FP_TO_SINT, MVT::i32, MVT::f32, 4 }, 2321 { ISD::FP_TO_SINT, MVT::i64, MVT::f32, 4 }, 2322 { ISD::FP_TO_SINT, MVT::i32, MVT::f64, 4 }, 2323 { ISD::FP_TO_SINT, MVT::i64, MVT::f64, 4 }, 2324 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v4f32, 6 }, 2325 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v2f64, 6 }, 2326 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v4f32, 5 }, 2327 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v2f64, 5 }, 2328 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 4 }, 2329 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v2f64, 4 }, 2330 2331 { ISD::FP_TO_UINT, MVT::i32, MVT::f32, 4 }, 2332 { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 4 }, 2333 { ISD::FP_TO_UINT, MVT::i32, MVT::f64, 4 }, 2334 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 15 }, 2335 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v4f32, 6 }, 2336 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v2f64, 6 }, 2337 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v4f32, 5 }, 2338 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v2f64, 5 }, 2339 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 8 }, 2340 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 8 }, 2341 2342 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v16i8, 4 }, 2343 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v16i8, 4 }, 2344 { 
ISD::ZERO_EXTEND, MVT::v4i32, MVT::v16i8, 2 }, 2345 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v16i8, 3 }, 2346 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v16i8, 1 }, 2347 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v16i8, 2 }, 2348 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v8i16, 2 }, 2349 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v8i16, 3 }, 2350 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v8i16, 1 }, 2351 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v8i16, 2 }, 2352 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v4i32, 1 }, 2353 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v4i32, 2 }, 2354 2355 // These truncates are really widening elements. 2356 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 1 }, // PSHUFD 2357 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 }, // PUNPCKLWD+DQ 2358 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // PUNPCKLBW+WD+PSHUFD 2359 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 1 }, // PUNPCKLWD 2360 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 }, // PUNPCKLBW+WD 2361 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 1 }, // PUNPCKLBW 2362 2363 { ISD::TRUNCATE, MVT::v16i8, MVT::v8i16, 2 }, // PAND+PACKUSWB 2364 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 }, 2365 { ISD::TRUNCATE, MVT::v16i8, MVT::v4i32, 3 }, // PAND+2*PACKUSWB 2366 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 7 }, 2367 { ISD::TRUNCATE, MVT::v2i16, MVT::v2i32, 1 }, 2368 { ISD::TRUNCATE, MVT::v8i16, MVT::v4i32, 3 }, 2369 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 }, 2370 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32,10 }, 2371 { ISD::TRUNCATE, MVT::v16i8, MVT::v2i64, 4 }, // PAND+3*PACKUSWB 2372 { ISD::TRUNCATE, MVT::v8i16, MVT::v2i64, 2 }, // PSHUFD+PSHUFLW 2373 { ISD::TRUNCATE, MVT::v4i32, MVT::v2i64, 1 }, // PSHUFD 2374 }; 2375 2376 // Attempt to map directly to (simple) MVT types to let us match custom entries. 2377 EVT SrcTy = TLI->getValueType(DL, Src); 2378 EVT DstTy = TLI->getValueType(DL, Dst); 2379 2380 // The function getSimpleVT only handles simple value types. 
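  // Worked example (illustrative only): on an AVX2 target,
  //   %r = uitofp <8 x i32> %x to <8 x double>
  // maps to the simple pair (dst = v8f64, src = v8i32) and matches the
  // AVX2ConversionTbl entry { ISD::UINT_TO_FP, v8f64, v8i32, 4 } above, so a
  // throughput cost of 4 is returned without any legalization splitting.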
2381 if (SrcTy.isSimple() && DstTy.isSimple()) { 2382 MVT SimpleSrcTy = SrcTy.getSimpleVT(); 2383 MVT SimpleDstTy = DstTy.getSimpleVT(); 2384 2385 if (ST->useAVX512Regs()) { 2386 if (ST->hasBWI()) 2387 if (const auto *Entry = ConvertCostTableLookup( 2388 AVX512BWConversionTbl, ISD, SimpleDstTy, SimpleSrcTy)) 2389 return AdjustCost(Entry->Cost); 2390 2391 if (ST->hasDQI()) 2392 if (const auto *Entry = ConvertCostTableLookup( 2393 AVX512DQConversionTbl, ISD, SimpleDstTy, SimpleSrcTy)) 2394 return AdjustCost(Entry->Cost); 2395 2396 if (ST->hasAVX512()) 2397 if (const auto *Entry = ConvertCostTableLookup( 2398 AVX512FConversionTbl, ISD, SimpleDstTy, SimpleSrcTy)) 2399 return AdjustCost(Entry->Cost); 2400 } 2401 2402 if (ST->hasBWI()) 2403 if (const auto *Entry = ConvertCostTableLookup( 2404 AVX512BWVLConversionTbl, ISD, SimpleDstTy, SimpleSrcTy)) 2405 return AdjustCost(Entry->Cost); 2406 2407 if (ST->hasDQI()) 2408 if (const auto *Entry = ConvertCostTableLookup( 2409 AVX512DQVLConversionTbl, ISD, SimpleDstTy, SimpleSrcTy)) 2410 return AdjustCost(Entry->Cost); 2411 2412 if (ST->hasAVX512()) 2413 if (const auto *Entry = ConvertCostTableLookup(AVX512VLConversionTbl, ISD, 2414 SimpleDstTy, SimpleSrcTy)) 2415 return AdjustCost(Entry->Cost); 2416 2417 if (ST->hasAVX2()) { 2418 if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD, 2419 SimpleDstTy, SimpleSrcTy)) 2420 return AdjustCost(Entry->Cost); 2421 } 2422 2423 if (ST->hasAVX()) { 2424 if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD, 2425 SimpleDstTy, SimpleSrcTy)) 2426 return AdjustCost(Entry->Cost); 2427 } 2428 2429 if (ST->hasSSE41()) { 2430 if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD, 2431 SimpleDstTy, SimpleSrcTy)) 2432 return AdjustCost(Entry->Cost); 2433 } 2434 2435 if (ST->hasSSE2()) { 2436 if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD, 2437 SimpleDstTy, SimpleSrcTy)) 2438 return AdjustCost(Entry->Cost); 2439 } 2440 } 2441 2442 // Fall back to legalized types. 
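  // Worked example (a sketch of the scaling below): on an AVX-only target,
  //   %r = sitofp <8 x i16> %x to <8 x double>
  // has no simple-type table entry, so we fall through to legalized types:
  // v8f64 splits into 2 x v4f64 (LTDest.first == 2) while v8i16 is already
  // legal (LTSrc.first == 1). AVXConversionTbl gives the (v4f64 <- v8i16)
  // SINT_TO_FP pair a cost of 2, so the returned cost is max(1, 2) * 2 == 4.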
  std::pair<InstructionCost, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
  std::pair<InstructionCost, MVT> LTDest =
      TLI->getTypeLegalizationCost(DL, Dst);

  if (ST->useAVX512Regs()) {
    if (ST->hasBWI())
      if (const auto *Entry = ConvertCostTableLookup(
              AVX512BWConversionTbl, ISD, LTDest.second, LTSrc.second))
        return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

    if (ST->hasDQI())
      if (const auto *Entry = ConvertCostTableLookup(
              AVX512DQConversionTbl, ISD, LTDest.second, LTSrc.second))
        return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

    if (ST->hasAVX512())
      if (const auto *Entry = ConvertCostTableLookup(
              AVX512FConversionTbl, ISD, LTDest.second, LTSrc.second))
        return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
  }

  if (ST->hasBWI())
    if (const auto *Entry = ConvertCostTableLookup(AVX512BWVLConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

  if (ST->hasDQI())
    if (const auto *Entry = ConvertCostTableLookup(AVX512DQVLConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

  if (ST->hasAVX512())
    if (const auto *Entry = ConvertCostTableLookup(AVX512VLConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

  if (ST->hasAVX2())
    if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

  if (ST->hasAVX())
    if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

  if (ST->hasSSE41())
    if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

  if (ST->hasSSE2())
    if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

  // Fallback for i8/i16 sitofp/uitofp cases: extend the source to i32 first,
  // then convert with sitofp.
  if ((ISD == ISD::SINT_TO_FP || ISD == ISD::UINT_TO_FP) &&
      1 < Src->getScalarSizeInBits() && Src->getScalarSizeInBits() < 32) {
    Type *ExtSrc = Src->getWithNewBitWidth(32);
    unsigned ExtOpc =
        (ISD == ISD::SINT_TO_FP) ? Instruction::SExt : Instruction::ZExt;

    // For scalar loads the extend would be free.
    InstructionCost ExtCost = 0;
    if (!(Src->isIntegerTy() && I && isa<LoadInst>(I->getOperand(0))))
      ExtCost = getCastInstrCost(ExtOpc, ExtSrc, Src, CCH, CostKind);

    return ExtCost + getCastInstrCost(Instruction::SIToFP, Dst, ExtSrc,
                                      TTI::CastContextHint::None, CostKind);
  }

  // Fallback for i8/i16 fptosi/fptoui cases: convert to i32 (as fptosi) first,
  // then truncate the result down to the requested width.
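  // E.g. (illustrative): with SSE4.1, fptosi <4 x float> -> <4 x i8> is costed
  // as fptosi <4 x float> -> <4 x i32> plus trunc <4 x i32> -> <4 x i8>,
  // summing the two recursive getCastInstrCost queries.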
2518 if ((ISD == ISD::FP_TO_SINT || ISD == ISD::FP_TO_UINT) && 2519 1 < Dst->getScalarSizeInBits() && Dst->getScalarSizeInBits() < 32) { 2520 Type *TruncDst = Dst->getWithNewBitWidth(32); 2521 return getCastInstrCost(Instruction::FPToSI, TruncDst, Src, CCH, CostKind) + 2522 getCastInstrCost(Instruction::Trunc, Dst, TruncDst, 2523 TTI::CastContextHint::None, CostKind); 2524 } 2525 2526 return AdjustCost( 2527 BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I)); 2528 } 2529 2530 InstructionCost X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, 2531 Type *CondTy, 2532 CmpInst::Predicate VecPred, 2533 TTI::TargetCostKind CostKind, 2534 const Instruction *I) { 2535 // TODO: Handle other cost kinds. 2536 if (CostKind != TTI::TCK_RecipThroughput) 2537 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, 2538 I); 2539 2540 // Legalize the type. 2541 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy); 2542 2543 MVT MTy = LT.second; 2544 2545 int ISD = TLI->InstructionOpcodeToISD(Opcode); 2546 assert(ISD && "Invalid opcode"); 2547 2548 unsigned ExtraCost = 0; 2549 if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) { 2550 // Some vector comparison predicates cost extra instructions. 2551 // TODO: Should we invert this and assume worst case cmp costs 2552 // and reduce for particular predicates? 2553 if (MTy.isVector() && 2554 !((ST->hasXOP() && (!ST->hasAVX2() || MTy.is128BitVector())) || 2555 (ST->hasAVX512() && 32 <= MTy.getScalarSizeInBits()) || 2556 ST->hasBWI())) { 2557 // Fallback to I if a specific predicate wasn't specified. 2558 CmpInst::Predicate Pred = VecPred; 2559 if (I && (Pred == CmpInst::BAD_ICMP_PREDICATE || 2560 Pred == CmpInst::BAD_FCMP_PREDICATE)) 2561 Pred = cast<CmpInst>(I)->getPredicate(); 2562 2563 switch (Pred) { 2564 case CmpInst::Predicate::ICMP_NE: 2565 // xor(cmpeq(x,y),-1) 2566 ExtraCost = 1; 2567 break; 2568 case CmpInst::Predicate::ICMP_SGE: 2569 case CmpInst::Predicate::ICMP_SLE: 2570 // xor(cmpgt(x,y),-1) 2571 ExtraCost = 1; 2572 break; 2573 case CmpInst::Predicate::ICMP_ULT: 2574 case CmpInst::Predicate::ICMP_UGT: 2575 // cmpgt(xor(x,signbit),xor(y,signbit)) 2576 // xor(cmpeq(pmaxu(x,y),x),-1) 2577 ExtraCost = 2; 2578 break; 2579 case CmpInst::Predicate::ICMP_ULE: 2580 case CmpInst::Predicate::ICMP_UGE: 2581 if ((ST->hasSSE41() && MTy.getScalarSizeInBits() == 32) || 2582 (ST->hasSSE2() && MTy.getScalarSizeInBits() < 32)) { 2583 // cmpeq(psubus(x,y),0) 2584 // cmpeq(pminu(x,y),x) 2585 ExtraCost = 1; 2586 } else { 2587 // xor(cmpgt(xor(x,signbit),xor(y,signbit)),-1) 2588 ExtraCost = 3; 2589 } 2590 break; 2591 case CmpInst::Predicate::BAD_ICMP_PREDICATE: 2592 case CmpInst::Predicate::BAD_FCMP_PREDICATE: 2593 // Assume worst case scenario and add the maximum extra cost. 
2594 ExtraCost = 3; 2595 break; 2596 default: 2597 break; 2598 } 2599 } 2600 } 2601 2602 static const CostTblEntry SLMCostTbl[] = { 2603 // slm pcmpeq/pcmpgt throughput is 2 2604 { ISD::SETCC, MVT::v2i64, 2 }, 2605 }; 2606 2607 static const CostTblEntry AVX512BWCostTbl[] = { 2608 { ISD::SETCC, MVT::v32i16, 1 }, 2609 { ISD::SETCC, MVT::v64i8, 1 }, 2610 2611 { ISD::SELECT, MVT::v32i16, 1 }, 2612 { ISD::SELECT, MVT::v64i8, 1 }, 2613 }; 2614 2615 static const CostTblEntry AVX512CostTbl[] = { 2616 { ISD::SETCC, MVT::v8i64, 1 }, 2617 { ISD::SETCC, MVT::v16i32, 1 }, 2618 { ISD::SETCC, MVT::v8f64, 1 }, 2619 { ISD::SETCC, MVT::v16f32, 1 }, 2620 2621 { ISD::SELECT, MVT::v8i64, 1 }, 2622 { ISD::SELECT, MVT::v16i32, 1 }, 2623 { ISD::SELECT, MVT::v8f64, 1 }, 2624 { ISD::SELECT, MVT::v16f32, 1 }, 2625 2626 { ISD::SETCC, MVT::v32i16, 2 }, // FIXME: should probably be 4 2627 { ISD::SETCC, MVT::v64i8, 2 }, // FIXME: should probably be 4 2628 2629 { ISD::SELECT, MVT::v32i16, 2 }, // FIXME: should be 3 2630 { ISD::SELECT, MVT::v64i8, 2 }, // FIXME: should be 3 2631 }; 2632 2633 static const CostTblEntry AVX2CostTbl[] = { 2634 { ISD::SETCC, MVT::v4i64, 1 }, 2635 { ISD::SETCC, MVT::v8i32, 1 }, 2636 { ISD::SETCC, MVT::v16i16, 1 }, 2637 { ISD::SETCC, MVT::v32i8, 1 }, 2638 2639 { ISD::SELECT, MVT::v4i64, 1 }, // pblendvb 2640 { ISD::SELECT, MVT::v8i32, 1 }, // pblendvb 2641 { ISD::SELECT, MVT::v16i16, 1 }, // pblendvb 2642 { ISD::SELECT, MVT::v32i8, 1 }, // pblendvb 2643 }; 2644 2645 static const CostTblEntry AVX1CostTbl[] = { 2646 { ISD::SETCC, MVT::v4f64, 1 }, 2647 { ISD::SETCC, MVT::v8f32, 1 }, 2648 // AVX1 does not support 8-wide integer compare. 2649 { ISD::SETCC, MVT::v4i64, 4 }, 2650 { ISD::SETCC, MVT::v8i32, 4 }, 2651 { ISD::SETCC, MVT::v16i16, 4 }, 2652 { ISD::SETCC, MVT::v32i8, 4 }, 2653 2654 { ISD::SELECT, MVT::v4f64, 1 }, // vblendvpd 2655 { ISD::SELECT, MVT::v8f32, 1 }, // vblendvps 2656 { ISD::SELECT, MVT::v4i64, 1 }, // vblendvpd 2657 { ISD::SELECT, MVT::v8i32, 1 }, // vblendvps 2658 { ISD::SELECT, MVT::v16i16, 3 }, // vandps + vandnps + vorps 2659 { ISD::SELECT, MVT::v32i8, 3 }, // vandps + vandnps + vorps 2660 }; 2661 2662 static const CostTblEntry SSE42CostTbl[] = { 2663 { ISD::SETCC, MVT::v2f64, 1 }, 2664 { ISD::SETCC, MVT::v4f32, 1 }, 2665 { ISD::SETCC, MVT::v2i64, 1 }, 2666 }; 2667 2668 static const CostTblEntry SSE41CostTbl[] = { 2669 { ISD::SELECT, MVT::v2f64, 1 }, // blendvpd 2670 { ISD::SELECT, MVT::v4f32, 1 }, // blendvps 2671 { ISD::SELECT, MVT::v2i64, 1 }, // pblendvb 2672 { ISD::SELECT, MVT::v4i32, 1 }, // pblendvb 2673 { ISD::SELECT, MVT::v8i16, 1 }, // pblendvb 2674 { ISD::SELECT, MVT::v16i8, 1 }, // pblendvb 2675 }; 2676 2677 static const CostTblEntry SSE2CostTbl[] = { 2678 { ISD::SETCC, MVT::v2f64, 2 }, 2679 { ISD::SETCC, MVT::f64, 1 }, 2680 { ISD::SETCC, MVT::v2i64, 8 }, 2681 { ISD::SETCC, MVT::v4i32, 1 }, 2682 { ISD::SETCC, MVT::v8i16, 1 }, 2683 { ISD::SETCC, MVT::v16i8, 1 }, 2684 2685 { ISD::SELECT, MVT::v2f64, 3 }, // andpd + andnpd + orpd 2686 { ISD::SELECT, MVT::v2i64, 3 }, // pand + pandn + por 2687 { ISD::SELECT, MVT::v4i32, 3 }, // pand + pandn + por 2688 { ISD::SELECT, MVT::v8i16, 3 }, // pand + pandn + por 2689 { ISD::SELECT, MVT::v16i8, 3 }, // pand + pandn + por 2690 }; 2691 2692 static const CostTblEntry SSE1CostTbl[] = { 2693 { ISD::SETCC, MVT::v4f32, 2 }, 2694 { ISD::SETCC, MVT::f32, 1 }, 2695 2696 { ISD::SELECT, MVT::v4f32, 3 }, // andps + andnps + orps 2697 }; 2698 2699 if (ST->useSLMArithCosts()) 2700 if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, 
MTy)) 2701 return LT.first * (ExtraCost + Entry->Cost); 2702 2703 if (ST->hasBWI()) 2704 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy)) 2705 return LT.first * (ExtraCost + Entry->Cost); 2706 2707 if (ST->hasAVX512()) 2708 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy)) 2709 return LT.first * (ExtraCost + Entry->Cost); 2710 2711 if (ST->hasAVX2()) 2712 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy)) 2713 return LT.first * (ExtraCost + Entry->Cost); 2714 2715 if (ST->hasAVX()) 2716 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy)) 2717 return LT.first * (ExtraCost + Entry->Cost); 2718 2719 if (ST->hasSSE42()) 2720 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy)) 2721 return LT.first * (ExtraCost + Entry->Cost); 2722 2723 if (ST->hasSSE41()) 2724 if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy)) 2725 return LT.first * (ExtraCost + Entry->Cost); 2726 2727 if (ST->hasSSE2()) 2728 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy)) 2729 return LT.first * (ExtraCost + Entry->Cost); 2730 2731 if (ST->hasSSE1()) 2732 if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy)) 2733 return LT.first * (ExtraCost + Entry->Cost); 2734 2735 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I); 2736 } 2737 2738 unsigned X86TTIImpl::getAtomicMemIntrinsicMaxElementSize() const { return 16; } 2739 2740 InstructionCost 2741 X86TTIImpl::getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, 2742 TTI::TargetCostKind CostKind) { 2743 2744 // Costs should match the codegen from: 2745 // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll 2746 // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll 2747 // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll 2748 // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll 2749 // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll 2750 2751 // TODO: Overflow intrinsics (*ADDO, *SUBO, *MULO) with vector types are not 2752 // specialized in these tables yet. 
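  // The tables below are consulted from the most to the least specialized
  // feature set, and the first hit wins, so an entry only needs to appear in
  // the most specific table where its cost differs. E.g. a CTPOP of v32i16
  // hits the AVX512BW table (cost 9) but, without BWI, falls through to the
  // plain AVX512F table (cost 18).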
2753 static const CostTblEntry AVX512BITALGCostTbl[] = { 2754 { ISD::CTPOP, MVT::v32i16, 1 }, 2755 { ISD::CTPOP, MVT::v64i8, 1 }, 2756 { ISD::CTPOP, MVT::v16i16, 1 }, 2757 { ISD::CTPOP, MVT::v32i8, 1 }, 2758 { ISD::CTPOP, MVT::v8i16, 1 }, 2759 { ISD::CTPOP, MVT::v16i8, 1 }, 2760 }; 2761 static const CostTblEntry AVX512VPOPCNTDQCostTbl[] = { 2762 { ISD::CTPOP, MVT::v8i64, 1 }, 2763 { ISD::CTPOP, MVT::v16i32, 1 }, 2764 { ISD::CTPOP, MVT::v4i64, 1 }, 2765 { ISD::CTPOP, MVT::v8i32, 1 }, 2766 { ISD::CTPOP, MVT::v2i64, 1 }, 2767 { ISD::CTPOP, MVT::v4i32, 1 }, 2768 }; 2769 static const CostTblEntry AVX512CDCostTbl[] = { 2770 { ISD::CTLZ, MVT::v8i64, 1 }, 2771 { ISD::CTLZ, MVT::v16i32, 1 }, 2772 { ISD::CTLZ, MVT::v32i16, 8 }, 2773 { ISD::CTLZ, MVT::v64i8, 20 }, 2774 { ISD::CTLZ, MVT::v4i64, 1 }, 2775 { ISD::CTLZ, MVT::v8i32, 1 }, 2776 { ISD::CTLZ, MVT::v16i16, 4 }, 2777 { ISD::CTLZ, MVT::v32i8, 10 }, 2778 { ISD::CTLZ, MVT::v2i64, 1 }, 2779 { ISD::CTLZ, MVT::v4i32, 1 }, 2780 { ISD::CTLZ, MVT::v8i16, 4 }, 2781 { ISD::CTLZ, MVT::v16i8, 4 }, 2782 }; 2783 static const CostTblEntry AVX512BWCostTbl[] = { 2784 { ISD::ABS, MVT::v32i16, 1 }, 2785 { ISD::ABS, MVT::v64i8, 1 }, 2786 { ISD::BITREVERSE, MVT::v8i64, 3 }, 2787 { ISD::BITREVERSE, MVT::v16i32, 3 }, 2788 { ISD::BITREVERSE, MVT::v32i16, 3 }, 2789 { ISD::BITREVERSE, MVT::v64i8, 2 }, 2790 { ISD::BSWAP, MVT::v8i64, 1 }, 2791 { ISD::BSWAP, MVT::v16i32, 1 }, 2792 { ISD::BSWAP, MVT::v32i16, 1 }, 2793 { ISD::CTLZ, MVT::v8i64, 23 }, 2794 { ISD::CTLZ, MVT::v16i32, 22 }, 2795 { ISD::CTLZ, MVT::v32i16, 18 }, 2796 { ISD::CTLZ, MVT::v64i8, 17 }, 2797 { ISD::CTPOP, MVT::v8i64, 7 }, 2798 { ISD::CTPOP, MVT::v16i32, 11 }, 2799 { ISD::CTPOP, MVT::v32i16, 9 }, 2800 { ISD::CTPOP, MVT::v64i8, 6 }, 2801 { ISD::CTTZ, MVT::v8i64, 10 }, 2802 { ISD::CTTZ, MVT::v16i32, 14 }, 2803 { ISD::CTTZ, MVT::v32i16, 12 }, 2804 { ISD::CTTZ, MVT::v64i8, 9 }, 2805 { ISD::SADDSAT, MVT::v32i16, 1 }, 2806 { ISD::SADDSAT, MVT::v64i8, 1 }, 2807 { ISD::SMAX, MVT::v32i16, 1 }, 2808 { ISD::SMAX, MVT::v64i8, 1 }, 2809 { ISD::SMIN, MVT::v32i16, 1 }, 2810 { ISD::SMIN, MVT::v64i8, 1 }, 2811 { ISD::SSUBSAT, MVT::v32i16, 1 }, 2812 { ISD::SSUBSAT, MVT::v64i8, 1 }, 2813 { ISD::UADDSAT, MVT::v32i16, 1 }, 2814 { ISD::UADDSAT, MVT::v64i8, 1 }, 2815 { ISD::UMAX, MVT::v32i16, 1 }, 2816 { ISD::UMAX, MVT::v64i8, 1 }, 2817 { ISD::UMIN, MVT::v32i16, 1 }, 2818 { ISD::UMIN, MVT::v64i8, 1 }, 2819 { ISD::USUBSAT, MVT::v32i16, 1 }, 2820 { ISD::USUBSAT, MVT::v64i8, 1 }, 2821 }; 2822 static const CostTblEntry AVX512CostTbl[] = { 2823 { ISD::ABS, MVT::v8i64, 1 }, 2824 { ISD::ABS, MVT::v16i32, 1 }, 2825 { ISD::ABS, MVT::v32i16, 2 }, 2826 { ISD::ABS, MVT::v64i8, 2 }, 2827 { ISD::ABS, MVT::v4i64, 1 }, 2828 { ISD::ABS, MVT::v2i64, 1 }, 2829 { ISD::BITREVERSE, MVT::v8i64, 36 }, 2830 { ISD::BITREVERSE, MVT::v16i32, 24 }, 2831 { ISD::BITREVERSE, MVT::v32i16, 10 }, 2832 { ISD::BITREVERSE, MVT::v64i8, 10 }, 2833 { ISD::BSWAP, MVT::v8i64, 4 }, 2834 { ISD::BSWAP, MVT::v16i32, 4 }, 2835 { ISD::BSWAP, MVT::v32i16, 4 }, 2836 { ISD::CTLZ, MVT::v8i64, 29 }, 2837 { ISD::CTLZ, MVT::v16i32, 35 }, 2838 { ISD::CTLZ, MVT::v32i16, 28 }, 2839 { ISD::CTLZ, MVT::v64i8, 18 }, 2840 { ISD::CTPOP, MVT::v8i64, 16 }, 2841 { ISD::CTPOP, MVT::v16i32, 24 }, 2842 { ISD::CTPOP, MVT::v32i16, 18 }, 2843 { ISD::CTPOP, MVT::v64i8, 12 }, 2844 { ISD::CTTZ, MVT::v8i64, 20 }, 2845 { ISD::CTTZ, MVT::v16i32, 28 }, 2846 { ISD::CTTZ, MVT::v32i16, 24 }, 2847 { ISD::CTTZ, MVT::v64i8, 18 }, 2848 { ISD::SMAX, MVT::v8i64, 1 }, 2849 { ISD::SMAX, MVT::v16i32, 1 }, 2850 { 
ISD::SMAX, MVT::v32i16, 2 }, 2851 { ISD::SMAX, MVT::v64i8, 2 }, 2852 { ISD::SMAX, MVT::v4i64, 1 }, 2853 { ISD::SMAX, MVT::v2i64, 1 }, 2854 { ISD::SMIN, MVT::v8i64, 1 }, 2855 { ISD::SMIN, MVT::v16i32, 1 }, 2856 { ISD::SMIN, MVT::v32i16, 2 }, 2857 { ISD::SMIN, MVT::v64i8, 2 }, 2858 { ISD::SMIN, MVT::v4i64, 1 }, 2859 { ISD::SMIN, MVT::v2i64, 1 }, 2860 { ISD::UMAX, MVT::v8i64, 1 }, 2861 { ISD::UMAX, MVT::v16i32, 1 }, 2862 { ISD::UMAX, MVT::v32i16, 2 }, 2863 { ISD::UMAX, MVT::v64i8, 2 }, 2864 { ISD::UMAX, MVT::v4i64, 1 }, 2865 { ISD::UMAX, MVT::v2i64, 1 }, 2866 { ISD::UMIN, MVT::v8i64, 1 }, 2867 { ISD::UMIN, MVT::v16i32, 1 }, 2868 { ISD::UMIN, MVT::v32i16, 2 }, 2869 { ISD::UMIN, MVT::v64i8, 2 }, 2870 { ISD::UMIN, MVT::v4i64, 1 }, 2871 { ISD::UMIN, MVT::v2i64, 1 }, 2872 { ISD::USUBSAT, MVT::v16i32, 2 }, // pmaxud + psubd 2873 { ISD::USUBSAT, MVT::v2i64, 2 }, // pmaxuq + psubq 2874 { ISD::USUBSAT, MVT::v4i64, 2 }, // pmaxuq + psubq 2875 { ISD::USUBSAT, MVT::v8i64, 2 }, // pmaxuq + psubq 2876 { ISD::UADDSAT, MVT::v16i32, 3 }, // not + pminud + paddd 2877 { ISD::UADDSAT, MVT::v2i64, 3 }, // not + pminuq + paddq 2878 { ISD::UADDSAT, MVT::v4i64, 3 }, // not + pminuq + paddq 2879 { ISD::UADDSAT, MVT::v8i64, 3 }, // not + pminuq + paddq 2880 { ISD::SADDSAT, MVT::v32i16, 2 }, 2881 { ISD::SADDSAT, MVT::v64i8, 2 }, 2882 { ISD::SSUBSAT, MVT::v32i16, 2 }, 2883 { ISD::SSUBSAT, MVT::v64i8, 2 }, 2884 { ISD::UADDSAT, MVT::v32i16, 2 }, 2885 { ISD::UADDSAT, MVT::v64i8, 2 }, 2886 { ISD::USUBSAT, MVT::v32i16, 2 }, 2887 { ISD::USUBSAT, MVT::v64i8, 2 }, 2888 { ISD::FMAXNUM, MVT::f32, 2 }, 2889 { ISD::FMAXNUM, MVT::v4f32, 2 }, 2890 { ISD::FMAXNUM, MVT::v8f32, 2 }, 2891 { ISD::FMAXNUM, MVT::v16f32, 2 }, 2892 { ISD::FMAXNUM, MVT::f64, 2 }, 2893 { ISD::FMAXNUM, MVT::v2f64, 2 }, 2894 { ISD::FMAXNUM, MVT::v4f64, 2 }, 2895 { ISD::FMAXNUM, MVT::v8f64, 2 }, 2896 }; 2897 static const CostTblEntry XOPCostTbl[] = { 2898 { ISD::BITREVERSE, MVT::v4i64, 4 }, 2899 { ISD::BITREVERSE, MVT::v8i32, 4 }, 2900 { ISD::BITREVERSE, MVT::v16i16, 4 }, 2901 { ISD::BITREVERSE, MVT::v32i8, 4 }, 2902 { ISD::BITREVERSE, MVT::v2i64, 1 }, 2903 { ISD::BITREVERSE, MVT::v4i32, 1 }, 2904 { ISD::BITREVERSE, MVT::v8i16, 1 }, 2905 { ISD::BITREVERSE, MVT::v16i8, 1 }, 2906 { ISD::BITREVERSE, MVT::i64, 3 }, 2907 { ISD::BITREVERSE, MVT::i32, 3 }, 2908 { ISD::BITREVERSE, MVT::i16, 3 }, 2909 { ISD::BITREVERSE, MVT::i8, 3 } 2910 }; 2911 static const CostTblEntry AVX2CostTbl[] = { 2912 { ISD::ABS, MVT::v4i64, 2 }, // VBLENDVPD(X,VPSUBQ(0,X),X) 2913 { ISD::ABS, MVT::v8i32, 1 }, 2914 { ISD::ABS, MVT::v16i16, 1 }, 2915 { ISD::ABS, MVT::v32i8, 1 }, 2916 { ISD::BITREVERSE, MVT::v2i64, 3 }, 2917 { ISD::BITREVERSE, MVT::v4i64, 3 }, 2918 { ISD::BITREVERSE, MVT::v4i32, 3 }, 2919 { ISD::BITREVERSE, MVT::v8i32, 3 }, 2920 { ISD::BITREVERSE, MVT::v8i16, 3 }, 2921 { ISD::BITREVERSE, MVT::v16i16, 3 }, 2922 { ISD::BITREVERSE, MVT::v16i8, 3 }, 2923 { ISD::BITREVERSE, MVT::v32i8, 3 }, 2924 { ISD::BSWAP, MVT::v4i64, 1 }, 2925 { ISD::BSWAP, MVT::v8i32, 1 }, 2926 { ISD::BSWAP, MVT::v16i16, 1 }, 2927 { ISD::CTLZ, MVT::v2i64, 7 }, 2928 { ISD::CTLZ, MVT::v4i64, 7 }, 2929 { ISD::CTLZ, MVT::v4i32, 5 }, 2930 { ISD::CTLZ, MVT::v8i32, 5 }, 2931 { ISD::CTLZ, MVT::v8i16, 4 }, 2932 { ISD::CTLZ, MVT::v16i16, 4 }, 2933 { ISD::CTLZ, MVT::v16i8, 3 }, 2934 { ISD::CTLZ, MVT::v32i8, 3 }, 2935 { ISD::CTPOP, MVT::v2i64, 3 }, 2936 { ISD::CTPOP, MVT::v4i64, 3 }, 2937 { ISD::CTPOP, MVT::v4i32, 7 }, 2938 { ISD::CTPOP, MVT::v8i32, 7 }, 2939 { ISD::CTPOP, MVT::v8i16, 3 }, 2940 { ISD::CTPOP, MVT::v16i16, 3 }, 
2941 { ISD::CTPOP, MVT::v16i8, 2 }, 2942 { ISD::CTPOP, MVT::v32i8, 2 }, 2943 { ISD::CTTZ, MVT::v2i64, 4 }, 2944 { ISD::CTTZ, MVT::v4i64, 4 }, 2945 { ISD::CTTZ, MVT::v4i32, 7 }, 2946 { ISD::CTTZ, MVT::v8i32, 7 }, 2947 { ISD::CTTZ, MVT::v8i16, 4 }, 2948 { ISD::CTTZ, MVT::v16i16, 4 }, 2949 { ISD::CTTZ, MVT::v16i8, 3 }, 2950 { ISD::CTTZ, MVT::v32i8, 3 }, 2951 { ISD::SADDSAT, MVT::v16i16, 1 }, 2952 { ISD::SADDSAT, MVT::v32i8, 1 }, 2953 { ISD::SMAX, MVT::v8i32, 1 }, 2954 { ISD::SMAX, MVT::v16i16, 1 }, 2955 { ISD::SMAX, MVT::v32i8, 1 }, 2956 { ISD::SMIN, MVT::v8i32, 1 }, 2957 { ISD::SMIN, MVT::v16i16, 1 }, 2958 { ISD::SMIN, MVT::v32i8, 1 }, 2959 { ISD::SSUBSAT, MVT::v16i16, 1 }, 2960 { ISD::SSUBSAT, MVT::v32i8, 1 }, 2961 { ISD::UADDSAT, MVT::v16i16, 1 }, 2962 { ISD::UADDSAT, MVT::v32i8, 1 }, 2963 { ISD::UADDSAT, MVT::v8i32, 3 }, // not + pminud + paddd 2964 { ISD::UMAX, MVT::v8i32, 1 }, 2965 { ISD::UMAX, MVT::v16i16, 1 }, 2966 { ISD::UMAX, MVT::v32i8, 1 }, 2967 { ISD::UMIN, MVT::v8i32, 1 }, 2968 { ISD::UMIN, MVT::v16i16, 1 }, 2969 { ISD::UMIN, MVT::v32i8, 1 }, 2970 { ISD::USUBSAT, MVT::v16i16, 1 }, 2971 { ISD::USUBSAT, MVT::v32i8, 1 }, 2972 { ISD::USUBSAT, MVT::v8i32, 2 }, // pmaxud + psubd 2973 { ISD::FMAXNUM, MVT::v8f32, 3 }, // MAXPS + CMPUNORDPS + BLENDVPS 2974 { ISD::FMAXNUM, MVT::v4f64, 3 }, // MAXPD + CMPUNORDPD + BLENDVPD 2975 { ISD::FSQRT, MVT::f32, 7 }, // Haswell from http://www.agner.org/ 2976 { ISD::FSQRT, MVT::v4f32, 7 }, // Haswell from http://www.agner.org/ 2977 { ISD::FSQRT, MVT::v8f32, 14 }, // Haswell from http://www.agner.org/ 2978 { ISD::FSQRT, MVT::f64, 14 }, // Haswell from http://www.agner.org/ 2979 { ISD::FSQRT, MVT::v2f64, 14 }, // Haswell from http://www.agner.org/ 2980 { ISD::FSQRT, MVT::v4f64, 28 }, // Haswell from http://www.agner.org/ 2981 }; 2982 static const CostTblEntry AVX1CostTbl[] = { 2983 { ISD::ABS, MVT::v4i64, 5 }, // VBLENDVPD(X,VPSUBQ(0,X),X) 2984 { ISD::ABS, MVT::v8i32, 3 }, 2985 { ISD::ABS, MVT::v16i16, 3 }, 2986 { ISD::ABS, MVT::v32i8, 3 }, 2987 { ISD::BITREVERSE, MVT::v4i64, 12 }, // 2 x 128-bit Op + extract/insert 2988 { ISD::BITREVERSE, MVT::v8i32, 12 }, // 2 x 128-bit Op + extract/insert 2989 { ISD::BITREVERSE, MVT::v16i16, 12 }, // 2 x 128-bit Op + extract/insert 2990 { ISD::BITREVERSE, MVT::v32i8, 12 }, // 2 x 128-bit Op + extract/insert 2991 { ISD::BSWAP, MVT::v4i64, 4 }, 2992 { ISD::BSWAP, MVT::v8i32, 4 }, 2993 { ISD::BSWAP, MVT::v16i16, 4 }, 2994 { ISD::CTLZ, MVT::v4i64, 48 }, // 2 x 128-bit Op + extract/insert 2995 { ISD::CTLZ, MVT::v8i32, 38 }, // 2 x 128-bit Op + extract/insert 2996 { ISD::CTLZ, MVT::v16i16, 30 }, // 2 x 128-bit Op + extract/insert 2997 { ISD::CTLZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert 2998 { ISD::CTPOP, MVT::v4i64, 16 }, // 2 x 128-bit Op + extract/insert 2999 { ISD::CTPOP, MVT::v8i32, 24 }, // 2 x 128-bit Op + extract/insert 3000 { ISD::CTPOP, MVT::v16i16, 20 }, // 2 x 128-bit Op + extract/insert 3001 { ISD::CTPOP, MVT::v32i8, 14 }, // 2 x 128-bit Op + extract/insert 3002 { ISD::CTTZ, MVT::v4i64, 22 }, // 2 x 128-bit Op + extract/insert 3003 { ISD::CTTZ, MVT::v8i32, 30 }, // 2 x 128-bit Op + extract/insert 3004 { ISD::CTTZ, MVT::v16i16, 26 }, // 2 x 128-bit Op + extract/insert 3005 { ISD::CTTZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert 3006 { ISD::SADDSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert 3007 { ISD::SADDSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 3008 { ISD::SMAX, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert 3009 { ISD::SMAX, MVT::v16i16, 4 }, // 2 x 
128-bit Op + extract/insert 3010 { ISD::SMAX, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 3011 { ISD::SMIN, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert 3012 { ISD::SMIN, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert 3013 { ISD::SMIN, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 3014 { ISD::SSUBSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert 3015 { ISD::SSUBSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 3016 { ISD::UADDSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert 3017 { ISD::UADDSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 3018 { ISD::UADDSAT, MVT::v8i32, 8 }, // 2 x 128-bit Op + extract/insert 3019 { ISD::UMAX, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert 3020 { ISD::UMAX, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert 3021 { ISD::UMAX, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 3022 { ISD::UMIN, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert 3023 { ISD::UMIN, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert 3024 { ISD::UMIN, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 3025 { ISD::USUBSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert 3026 { ISD::USUBSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 3027 { ISD::USUBSAT, MVT::v8i32, 6 }, // 2 x 128-bit Op + extract/insert 3028 { ISD::FMAXNUM, MVT::f32, 3 }, // MAXSS + CMPUNORDSS + BLENDVPS 3029 { ISD::FMAXNUM, MVT::v4f32, 3 }, // MAXPS + CMPUNORDPS + BLENDVPS 3030 { ISD::FMAXNUM, MVT::v8f32, 5 }, // MAXPS + CMPUNORDPS + BLENDVPS + ? 3031 { ISD::FMAXNUM, MVT::f64, 3 }, // MAXSD + CMPUNORDSD + BLENDVPD 3032 { ISD::FMAXNUM, MVT::v2f64, 3 }, // MAXPD + CMPUNORDPD + BLENDVPD 3033 { ISD::FMAXNUM, MVT::v4f64, 5 }, // MAXPD + CMPUNORDPD + BLENDVPD + ? 3034 { ISD::FSQRT, MVT::f32, 14 }, // SNB from http://www.agner.org/ 3035 { ISD::FSQRT, MVT::v4f32, 14 }, // SNB from http://www.agner.org/ 3036 { ISD::FSQRT, MVT::v8f32, 28 }, // SNB from http://www.agner.org/ 3037 { ISD::FSQRT, MVT::f64, 21 }, // SNB from http://www.agner.org/ 3038 { ISD::FSQRT, MVT::v2f64, 21 }, // SNB from http://www.agner.org/ 3039 { ISD::FSQRT, MVT::v4f64, 43 }, // SNB from http://www.agner.org/ 3040 }; 3041 static const CostTblEntry GLMCostTbl[] = { 3042 { ISD::FSQRT, MVT::f32, 19 }, // sqrtss 3043 { ISD::FSQRT, MVT::v4f32, 37 }, // sqrtps 3044 { ISD::FSQRT, MVT::f64, 34 }, // sqrtsd 3045 { ISD::FSQRT, MVT::v2f64, 67 }, // sqrtpd 3046 }; 3047 static const CostTblEntry SLMCostTbl[] = { 3048 { ISD::FSQRT, MVT::f32, 20 }, // sqrtss 3049 { ISD::FSQRT, MVT::v4f32, 40 }, // sqrtps 3050 { ISD::FSQRT, MVT::f64, 35 }, // sqrtsd 3051 { ISD::FSQRT, MVT::v2f64, 70 }, // sqrtpd 3052 }; 3053 static const CostTblEntry SSE42CostTbl[] = { 3054 { ISD::USUBSAT, MVT::v4i32, 2 }, // pmaxud + psubd 3055 { ISD::UADDSAT, MVT::v4i32, 3 }, // not + pminud + paddd 3056 { ISD::FSQRT, MVT::f32, 18 }, // Nehalem from http://www.agner.org/ 3057 { ISD::FSQRT, MVT::v4f32, 18 }, // Nehalem from http://www.agner.org/ 3058 }; 3059 static const CostTblEntry SSE41CostTbl[] = { 3060 { ISD::ABS, MVT::v2i64, 2 }, // BLENDVPD(X,PSUBQ(0,X),X) 3061 { ISD::SMAX, MVT::v4i32, 1 }, 3062 { ISD::SMAX, MVT::v16i8, 1 }, 3063 { ISD::SMIN, MVT::v4i32, 1 }, 3064 { ISD::SMIN, MVT::v16i8, 1 }, 3065 { ISD::UMAX, MVT::v4i32, 1 }, 3066 { ISD::UMAX, MVT::v8i16, 1 }, 3067 { ISD::UMIN, MVT::v4i32, 1 }, 3068 { ISD::UMIN, MVT::v8i16, 1 }, 3069 }; 3070 static const CostTblEntry SSSE3CostTbl[] = { 3071 { ISD::ABS, MVT::v4i32, 1 }, 3072 { ISD::ABS, MVT::v8i16, 1 }, 3073 { ISD::ABS, MVT::v16i8, 1 }, 3074 { 
ISD::BITREVERSE, MVT::v2i64, 5 }, 3075 { ISD::BITREVERSE, MVT::v4i32, 5 }, 3076 { ISD::BITREVERSE, MVT::v8i16, 5 }, 3077 { ISD::BITREVERSE, MVT::v16i8, 5 }, 3078 { ISD::BSWAP, MVT::v2i64, 1 }, 3079 { ISD::BSWAP, MVT::v4i32, 1 }, 3080 { ISD::BSWAP, MVT::v8i16, 1 }, 3081 { ISD::CTLZ, MVT::v2i64, 23 }, 3082 { ISD::CTLZ, MVT::v4i32, 18 }, 3083 { ISD::CTLZ, MVT::v8i16, 14 }, 3084 { ISD::CTLZ, MVT::v16i8, 9 }, 3085 { ISD::CTPOP, MVT::v2i64, 7 }, 3086 { ISD::CTPOP, MVT::v4i32, 11 }, 3087 { ISD::CTPOP, MVT::v8i16, 9 }, 3088 { ISD::CTPOP, MVT::v16i8, 6 }, 3089 { ISD::CTTZ, MVT::v2i64, 10 }, 3090 { ISD::CTTZ, MVT::v4i32, 14 }, 3091 { ISD::CTTZ, MVT::v8i16, 12 }, 3092 { ISD::CTTZ, MVT::v16i8, 9 } 3093 }; 3094 static const CostTblEntry SSE2CostTbl[] = { 3095 { ISD::ABS, MVT::v2i64, 4 }, 3096 { ISD::ABS, MVT::v4i32, 3 }, 3097 { ISD::ABS, MVT::v8i16, 2 }, 3098 { ISD::ABS, MVT::v16i8, 2 }, 3099 { ISD::BITREVERSE, MVT::v2i64, 29 }, 3100 { ISD::BITREVERSE, MVT::v4i32, 27 }, 3101 { ISD::BITREVERSE, MVT::v8i16, 27 }, 3102 { ISD::BITREVERSE, MVT::v16i8, 20 }, 3103 { ISD::BSWAP, MVT::v2i64, 7 }, 3104 { ISD::BSWAP, MVT::v4i32, 7 }, 3105 { ISD::BSWAP, MVT::v8i16, 7 }, 3106 { ISD::CTLZ, MVT::v2i64, 25 }, 3107 { ISD::CTLZ, MVT::v4i32, 26 }, 3108 { ISD::CTLZ, MVT::v8i16, 20 }, 3109 { ISD::CTLZ, MVT::v16i8, 17 }, 3110 { ISD::CTPOP, MVT::v2i64, 12 }, 3111 { ISD::CTPOP, MVT::v4i32, 15 }, 3112 { ISD::CTPOP, MVT::v8i16, 13 }, 3113 { ISD::CTPOP, MVT::v16i8, 10 }, 3114 { ISD::CTTZ, MVT::v2i64, 14 }, 3115 { ISD::CTTZ, MVT::v4i32, 18 }, 3116 { ISD::CTTZ, MVT::v8i16, 16 }, 3117 { ISD::CTTZ, MVT::v16i8, 13 }, 3118 { ISD::SADDSAT, MVT::v8i16, 1 }, 3119 { ISD::SADDSAT, MVT::v16i8, 1 }, 3120 { ISD::SMAX, MVT::v8i16, 1 }, 3121 { ISD::SMIN, MVT::v8i16, 1 }, 3122 { ISD::SSUBSAT, MVT::v8i16, 1 }, 3123 { ISD::SSUBSAT, MVT::v16i8, 1 }, 3124 { ISD::UADDSAT, MVT::v8i16, 1 }, 3125 { ISD::UADDSAT, MVT::v16i8, 1 }, 3126 { ISD::UMAX, MVT::v8i16, 2 }, 3127 { ISD::UMAX, MVT::v16i8, 1 }, 3128 { ISD::UMIN, MVT::v8i16, 2 }, 3129 { ISD::UMIN, MVT::v16i8, 1 }, 3130 { ISD::USUBSAT, MVT::v8i16, 1 }, 3131 { ISD::USUBSAT, MVT::v16i8, 1 }, 3132 { ISD::FMAXNUM, MVT::f64, 4 }, 3133 { ISD::FMAXNUM, MVT::v2f64, 4 }, 3134 { ISD::FSQRT, MVT::f64, 32 }, // Nehalem from http://www.agner.org/ 3135 { ISD::FSQRT, MVT::v2f64, 32 }, // Nehalem from http://www.agner.org/ 3136 }; 3137 static const CostTblEntry SSE1CostTbl[] = { 3138 { ISD::FMAXNUM, MVT::f32, 4 }, 3139 { ISD::FMAXNUM, MVT::v4f32, 4 }, 3140 { ISD::FSQRT, MVT::f32, 28 }, // Pentium III from http://www.agner.org/ 3141 { ISD::FSQRT, MVT::v4f32, 56 }, // Pentium III from http://www.agner.org/ 3142 }; 3143 static const CostTblEntry BMI64CostTbl[] = { // 64-bit targets 3144 { ISD::CTTZ, MVT::i64, 1 }, 3145 }; 3146 static const CostTblEntry BMI32CostTbl[] = { // 32 or 64-bit targets 3147 { ISD::CTTZ, MVT::i32, 1 }, 3148 { ISD::CTTZ, MVT::i16, 1 }, 3149 { ISD::CTTZ, MVT::i8, 1 }, 3150 }; 3151 static const CostTblEntry LZCNT64CostTbl[] = { // 64-bit targets 3152 { ISD::CTLZ, MVT::i64, 1 }, 3153 }; 3154 static const CostTblEntry LZCNT32CostTbl[] = { // 32 or 64-bit targets 3155 { ISD::CTLZ, MVT::i32, 1 }, 3156 { ISD::CTLZ, MVT::i16, 1 }, 3157 { ISD::CTLZ, MVT::i8, 1 }, 3158 }; 3159 static const CostTblEntry POPCNT64CostTbl[] = { // 64-bit targets 3160 { ISD::CTPOP, MVT::i64, 1 }, 3161 }; 3162 static const CostTblEntry POPCNT32CostTbl[] = { // 32 or 64-bit targets 3163 { ISD::CTPOP, MVT::i32, 1 }, 3164 { ISD::CTPOP, MVT::i16, 1 }, 3165 { ISD::CTPOP, MVT::i8, 1 }, 3166 }; 3167 static const CostTblEntry 
X64CostTbl[] = { // 64-bit targets 3168 { ISD::ABS, MVT::i64, 2 }, // SUB+CMOV 3169 { ISD::BITREVERSE, MVT::i64, 14 }, 3170 { ISD::BSWAP, MVT::i64, 1 }, 3171 { ISD::CTLZ, MVT::i64, 4 }, // BSR+XOR or BSR+XOR+CMOV 3172 { ISD::CTTZ, MVT::i64, 3 }, // TEST+BSF+CMOV/BRANCH 3173 { ISD::CTPOP, MVT::i64, 10 }, 3174 { ISD::SADDO, MVT::i64, 1 }, 3175 { ISD::UADDO, MVT::i64, 1 }, 3176 { ISD::UMULO, MVT::i64, 2 }, // mulq + seto 3177 }; 3178 static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets 3179 { ISD::ABS, MVT::i32, 2 }, // SUB+CMOV 3180 { ISD::ABS, MVT::i16, 2 }, // SUB+CMOV 3181 { ISD::BITREVERSE, MVT::i32, 14 }, 3182 { ISD::BITREVERSE, MVT::i16, 14 }, 3183 { ISD::BITREVERSE, MVT::i8, 11 }, 3184 { ISD::BSWAP, MVT::i32, 1 }, 3185 { ISD::BSWAP, MVT::i16, 1 }, // ROL 3186 { ISD::CTLZ, MVT::i32, 4 }, // BSR+XOR or BSR+XOR+CMOV 3187 { ISD::CTLZ, MVT::i16, 4 }, // BSR+XOR or BSR+XOR+CMOV 3188 { ISD::CTLZ, MVT::i8, 4 }, // BSR+XOR or BSR+XOR+CMOV 3189 { ISD::CTTZ, MVT::i32, 3 }, // TEST+BSF+CMOV/BRANCH 3190 { ISD::CTTZ, MVT::i16, 3 }, // TEST+BSF+CMOV/BRANCH 3191 { ISD::CTTZ, MVT::i8, 3 }, // TEST+BSF+CMOV/BRANCH 3192 { ISD::CTPOP, MVT::i32, 8 }, 3193 { ISD::CTPOP, MVT::i16, 9 }, 3194 { ISD::CTPOP, MVT::i8, 7 }, 3195 { ISD::SADDO, MVT::i32, 1 }, 3196 { ISD::SADDO, MVT::i16, 1 }, 3197 { ISD::SADDO, MVT::i8, 1 }, 3198 { ISD::UADDO, MVT::i32, 1 }, 3199 { ISD::UADDO, MVT::i16, 1 }, 3200 { ISD::UADDO, MVT::i8, 1 }, 3201 { ISD::UMULO, MVT::i32, 2 }, // mul + seto 3202 { ISD::UMULO, MVT::i16, 2 }, 3203 { ISD::UMULO, MVT::i8, 2 }, 3204 }; 3205 3206 Type *RetTy = ICA.getReturnType(); 3207 Type *OpTy = RetTy; 3208 Intrinsic::ID IID = ICA.getID(); 3209 unsigned ISD = ISD::DELETED_NODE; 3210 switch (IID) { 3211 default: 3212 break; 3213 case Intrinsic::abs: 3214 ISD = ISD::ABS; 3215 break; 3216 case Intrinsic::bitreverse: 3217 ISD = ISD::BITREVERSE; 3218 break; 3219 case Intrinsic::bswap: 3220 ISD = ISD::BSWAP; 3221 break; 3222 case Intrinsic::ctlz: 3223 ISD = ISD::CTLZ; 3224 break; 3225 case Intrinsic::ctpop: 3226 ISD = ISD::CTPOP; 3227 break; 3228 case Intrinsic::cttz: 3229 ISD = ISD::CTTZ; 3230 break; 3231 case Intrinsic::maxnum: 3232 case Intrinsic::minnum: 3233 // FMINNUM has same costs so don't duplicate. 3234 ISD = ISD::FMAXNUM; 3235 break; 3236 case Intrinsic::sadd_sat: 3237 ISD = ISD::SADDSAT; 3238 break; 3239 case Intrinsic::smax: 3240 ISD = ISD::SMAX; 3241 break; 3242 case Intrinsic::smin: 3243 ISD = ISD::SMIN; 3244 break; 3245 case Intrinsic::ssub_sat: 3246 ISD = ISD::SSUBSAT; 3247 break; 3248 case Intrinsic::uadd_sat: 3249 ISD = ISD::UADDSAT; 3250 break; 3251 case Intrinsic::umax: 3252 ISD = ISD::UMAX; 3253 break; 3254 case Intrinsic::umin: 3255 ISD = ISD::UMIN; 3256 break; 3257 case Intrinsic::usub_sat: 3258 ISD = ISD::USUBSAT; 3259 break; 3260 case Intrinsic::sqrt: 3261 ISD = ISD::FSQRT; 3262 break; 3263 case Intrinsic::sadd_with_overflow: 3264 case Intrinsic::ssub_with_overflow: 3265 // SSUBO has same costs so don't duplicate. 3266 ISD = ISD::SADDO; 3267 OpTy = RetTy->getContainedType(0); 3268 break; 3269 case Intrinsic::uadd_with_overflow: 3270 case Intrinsic::usub_with_overflow: 3271 // USUBO has same costs so don't duplicate. 3272 ISD = ISD::UADDO; 3273 OpTy = RetTy->getContainedType(0); 3274 break; 3275 case Intrinsic::umul_with_overflow: 3276 case Intrinsic::smul_with_overflow: 3277 // SMULO has same costs so don't duplicate. 3278 ISD = ISD::UMULO; 3279 OpTy = RetTy->getContainedType(0); 3280 break; 3281 } 3282 3283 if (ISD != ISD::DELETED_NODE) { 3284 // Legalize the type. 
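    // e.g. with plain AVX2 a v8i64 operation is split into two v4i64 halves,
    // so LT.first is 2 and MTy is v4i64; adjustTableCost() below then scales
    // any table hit by that factor.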
    std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, OpTy);
    MVT MTy = LT.second;

    // Attempt to look up the cost.
    if (ISD == ISD::BITREVERSE && ST->hasGFNI() && ST->hasSSSE3() &&
        MTy.isVector()) {
      // With PSHUFB the code is very similar for all types. If we have integer
      // byte operations, we just need a GF2P8AFFINEQB for vXi8. For other types
      // we also need a PSHUFB.
      unsigned Cost = MTy.getVectorElementType() == MVT::i8 ? 1 : 2;

      // Without byte operations, we need twice as many GF2P8AFFINEQB and PSHUFB
      // instructions. We also need an extract and an insert.
      if (!(MTy.is128BitVector() || (ST->hasAVX2() && MTy.is256BitVector()) ||
            (ST->hasBWI() && MTy.is512BitVector())))
        Cost = Cost * 2 + 2;

      return LT.first * Cost;
    }

    auto adjustTableCost = [](const CostTblEntry &Entry,
                              InstructionCost LegalizationCost,
                              FastMathFlags FMF) {
      // If there are no NaNs to deal with, then these are reduced to a
      // single MIN** or MAX** instruction instead of the MIN/CMP/SELECT that we
      // assume is used in the non-fast case.
      if (Entry.ISD == ISD::FMAXNUM || Entry.ISD == ISD::FMINNUM) {
        if (FMF.noNaNs())
          return LegalizationCost * 1;
      }
      return LegalizationCost * (int)Entry.Cost;
    };

    if (ST->useGLMDivSqrtCosts())
      if (const auto *Entry = CostTableLookup(GLMCostTbl, ISD, MTy))
        return adjustTableCost(*Entry, LT.first, ICA.getFlags());

    if (ST->useSLMArithCosts())
      if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
        return adjustTableCost(*Entry, LT.first, ICA.getFlags());

    if (ST->hasBITALG())
      if (const auto *Entry = CostTableLookup(AVX512BITALGCostTbl, ISD, MTy))
        return adjustTableCost(*Entry, LT.first, ICA.getFlags());

    if (ST->hasVPOPCNTDQ())
      if (const auto *Entry = CostTableLookup(AVX512VPOPCNTDQCostTbl, ISD, MTy))
        return adjustTableCost(*Entry, LT.first, ICA.getFlags());

    if (ST->hasCDI())
      if (const auto *Entry = CostTableLookup(AVX512CDCostTbl, ISD, MTy))
        return adjustTableCost(*Entry, LT.first, ICA.getFlags());

    if (ST->hasBWI())
      if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
        return adjustTableCost(*Entry, LT.first, ICA.getFlags());

    if (ST->hasAVX512())
      if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
        return adjustTableCost(*Entry, LT.first, ICA.getFlags());

    if (ST->hasXOP())
      if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
        return adjustTableCost(*Entry, LT.first, ICA.getFlags());

    if (ST->hasAVX2())
      if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
        return adjustTableCost(*Entry, LT.first, ICA.getFlags());

    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
        return adjustTableCost(*Entry, LT.first, ICA.getFlags());

    if (ST->hasSSE42())
      if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
        return adjustTableCost(*Entry, LT.first, ICA.getFlags());

    if (ST->hasSSE41())
      if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
        return adjustTableCost(*Entry, LT.first, ICA.getFlags());

    if (ST->hasSSSE3())
      if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy))
        return adjustTableCost(*Entry, LT.first, ICA.getFlags());

    if (ST->hasSSE2())
      if (const auto *Entry =
CostTableLookup(SSE2CostTbl, ISD, MTy)) 3372 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3373 3374 if (ST->hasSSE1()) 3375 if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy)) 3376 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3377 3378 if (ST->hasBMI()) { 3379 if (ST->is64Bit()) 3380 if (const auto *Entry = CostTableLookup(BMI64CostTbl, ISD, MTy)) 3381 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3382 3383 if (const auto *Entry = CostTableLookup(BMI32CostTbl, ISD, MTy)) 3384 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3385 } 3386 3387 if (ST->hasLZCNT()) { 3388 if (ST->is64Bit()) 3389 if (const auto *Entry = CostTableLookup(LZCNT64CostTbl, ISD, MTy)) 3390 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3391 3392 if (const auto *Entry = CostTableLookup(LZCNT32CostTbl, ISD, MTy)) 3393 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3394 } 3395 3396 if (ST->hasPOPCNT()) { 3397 if (ST->is64Bit()) 3398 if (const auto *Entry = CostTableLookup(POPCNT64CostTbl, ISD, MTy)) 3399 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3400 3401 if (const auto *Entry = CostTableLookup(POPCNT32CostTbl, ISD, MTy)) 3402 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3403 } 3404 3405 if (ISD == ISD::BSWAP && ST->hasMOVBE() && ST->hasFastMOVBE()) { 3406 if (const Instruction *II = ICA.getInst()) { 3407 if (II->hasOneUse() && isa<StoreInst>(II->user_back())) 3408 return TTI::TCC_Free; 3409 if (auto *LI = dyn_cast<LoadInst>(II->getOperand(0))) { 3410 if (LI->hasOneUse()) 3411 return TTI::TCC_Free; 3412 } 3413 } 3414 } 3415 3416 if (ST->is64Bit()) 3417 if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy)) 3418 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3419 3420 if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy)) 3421 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3422 } 3423 3424 return BaseT::getIntrinsicInstrCost(ICA, CostKind); 3425 } 3426 3427 InstructionCost 3428 X86TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, 3429 TTI::TargetCostKind CostKind) { 3430 if (ICA.isTypeBasedOnly()) 3431 return getTypeBasedIntrinsicInstrCost(ICA, CostKind); 3432 3433 static const CostTblEntry AVX512CostTbl[] = { 3434 { ISD::ROTL, MVT::v8i64, 1 }, 3435 { ISD::ROTL, MVT::v4i64, 1 }, 3436 { ISD::ROTL, MVT::v2i64, 1 }, 3437 { ISD::ROTL, MVT::v16i32, 1 }, 3438 { ISD::ROTL, MVT::v8i32, 1 }, 3439 { ISD::ROTL, MVT::v4i32, 1 }, 3440 { ISD::ROTR, MVT::v8i64, 1 }, 3441 { ISD::ROTR, MVT::v4i64, 1 }, 3442 { ISD::ROTR, MVT::v2i64, 1 }, 3443 { ISD::ROTR, MVT::v16i32, 1 }, 3444 { ISD::ROTR, MVT::v8i32, 1 }, 3445 { ISD::ROTR, MVT::v4i32, 1 } 3446 }; 3447 // XOP: ROTL = VPROT(X,Y), ROTR = VPROT(X,SUB(0,Y)) 3448 static const CostTblEntry XOPCostTbl[] = { 3449 { ISD::ROTL, MVT::v4i64, 4 }, 3450 { ISD::ROTL, MVT::v8i32, 4 }, 3451 { ISD::ROTL, MVT::v16i16, 4 }, 3452 { ISD::ROTL, MVT::v32i8, 4 }, 3453 { ISD::ROTL, MVT::v2i64, 1 }, 3454 { ISD::ROTL, MVT::v4i32, 1 }, 3455 { ISD::ROTL, MVT::v8i16, 1 }, 3456 { ISD::ROTL, MVT::v16i8, 1 }, 3457 { ISD::ROTR, MVT::v4i64, 6 }, 3458 { ISD::ROTR, MVT::v8i32, 6 }, 3459 { ISD::ROTR, MVT::v16i16, 6 }, 3460 { ISD::ROTR, MVT::v32i8, 6 }, 3461 { ISD::ROTR, MVT::v2i64, 2 }, 3462 { ISD::ROTR, MVT::v4i32, 2 }, 3463 { ISD::ROTR, MVT::v8i16, 2 }, 3464 { ISD::ROTR, MVT::v16i8, 2 } 3465 }; 3466 static const CostTblEntry X64CostTbl[] = { // 64-bit targets 3467 { ISD::ROTL, MVT::i64, 1 }, 3468 { ISD::ROTR, MVT::i64, 1 }, 3469 { ISD::FSHL, MVT::i64, 4 } 3470 }; 
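  // For example, on a 64-bit target an @llvm.fshl.i64 whose two value
  // operands are the same SSA value is classified as ROTL below and costs 1
  // (a single rol), while a genuine i64 funnel shift costs 4.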
3471 static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets 3472 { ISD::ROTL, MVT::i32, 1 }, 3473 { ISD::ROTL, MVT::i16, 1 }, 3474 { ISD::ROTL, MVT::i8, 1 }, 3475 { ISD::ROTR, MVT::i32, 1 }, 3476 { ISD::ROTR, MVT::i16, 1 }, 3477 { ISD::ROTR, MVT::i8, 1 }, 3478 { ISD::FSHL, MVT::i32, 4 }, 3479 { ISD::FSHL, MVT::i16, 4 }, 3480 { ISD::FSHL, MVT::i8, 4 } 3481 }; 3482 3483 Intrinsic::ID IID = ICA.getID(); 3484 Type *RetTy = ICA.getReturnType(); 3485 const SmallVectorImpl<const Value *> &Args = ICA.getArgs(); 3486 unsigned ISD = ISD::DELETED_NODE; 3487 switch (IID) { 3488 default: 3489 break; 3490 case Intrinsic::fshl: 3491 ISD = ISD::FSHL; 3492 if (Args[0] == Args[1]) 3493 ISD = ISD::ROTL; 3494 break; 3495 case Intrinsic::fshr: 3496 // FSHR has same costs so don't duplicate. 3497 ISD = ISD::FSHL; 3498 if (Args[0] == Args[1]) 3499 ISD = ISD::ROTR; 3500 break; 3501 } 3502 3503 if (ISD != ISD::DELETED_NODE) { 3504 // Legalize the type. 3505 std::pair<InstructionCost, MVT> LT = 3506 TLI->getTypeLegalizationCost(DL, RetTy); 3507 MVT MTy = LT.second; 3508 3509 // Attempt to lookup cost. 3510 if (ST->hasAVX512()) 3511 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy)) 3512 return LT.first * Entry->Cost; 3513 3514 if (ST->hasXOP()) 3515 if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy)) 3516 return LT.first * Entry->Cost; 3517 3518 if (ST->is64Bit()) 3519 if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy)) 3520 return LT.first * Entry->Cost; 3521 3522 if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy)) 3523 return LT.first * Entry->Cost; 3524 } 3525 3526 return BaseT::getIntrinsicInstrCost(ICA, CostKind); 3527 } 3528 3529 InstructionCost X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, 3530 unsigned Index) { 3531 static const CostTblEntry SLMCostTbl[] = { 3532 { ISD::EXTRACT_VECTOR_ELT, MVT::i8, 4 }, 3533 { ISD::EXTRACT_VECTOR_ELT, MVT::i16, 4 }, 3534 { ISD::EXTRACT_VECTOR_ELT, MVT::i32, 4 }, 3535 { ISD::EXTRACT_VECTOR_ELT, MVT::i64, 7 } 3536 }; 3537 3538 assert(Val->isVectorTy() && "This must be a vector type"); 3539 Type *ScalarType = Val->getScalarType(); 3540 int RegisterFileMoveCost = 0; 3541 3542 // Non-immediate extraction/insertion can be handled as a sequence of 3543 // aliased loads+stores via the stack. 3544 if (Index == -1U && (Opcode == Instruction::ExtractElement || 3545 Opcode == Instruction::InsertElement)) { 3546 // TODO: On some SSE41+ targets, we expand to cmp+splat+select patterns: 3547 // inselt N0, N1, N2 --> select (SplatN2 == {0,1,2...}) ? SplatN1 : N0. 3548 3549 // TODO: Move this to BasicTTIImpl.h? We'd need better gep + index handling. 3550 assert(isa<FixedVectorType>(Val) && "Fixed vector type expected"); 3551 Align VecAlign = DL.getPrefTypeAlign(Val); 3552 Align SclAlign = DL.getPrefTypeAlign(ScalarType); 3553 3554 // Extract - store vector to stack, load scalar. 3555 if (Opcode == Instruction::ExtractElement) { 3556 return getMemoryOpCost(Instruction::Store, Val, VecAlign, 0, 3557 TTI::TargetCostKind::TCK_RecipThroughput) + 3558 getMemoryOpCost(Instruction::Load, ScalarType, SclAlign, 0, 3559 TTI::TargetCostKind::TCK_RecipThroughput); 3560 } 3561 // Insert - store vector to stack, store scalar, load vector. 
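    // e.g. inserting at a variable index into <4 x i32> is costed as a
    // 128-bit store of the vector, a 32-bit store of the scalar into the
    // stack slot, and a 128-bit reload of the updated vector.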
    if (Opcode == Instruction::InsertElement) {
      return getMemoryOpCost(Instruction::Store, Val, VecAlign, 0,
                             TTI::TargetCostKind::TCK_RecipThroughput) +
             getMemoryOpCost(Instruction::Store, ScalarType, SclAlign, 0,
                             TTI::TargetCostKind::TCK_RecipThroughput) +
             getMemoryOpCost(Instruction::Load, Val, VecAlign, 0,
                             TTI::TargetCostKind::TCK_RecipThroughput);
    }
  }

  if (Index != -1U && (Opcode == Instruction::ExtractElement ||
                       Opcode == Instruction::InsertElement)) {
    // Legalize the type.
    std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned NumElts = LT.second.getVectorNumElements();
    unsigned SubNumElts = NumElts;
    Index = Index % NumElts;

    // For >128-bit vectors, we need to extract higher 128-bit subvectors.
    // For inserts, we also need to insert the subvector back.
    if (LT.second.getSizeInBits() > 128) {
      assert((LT.second.getSizeInBits() % 128) == 0 && "Illegal vector");
      unsigned NumSubVecs = LT.second.getSizeInBits() / 128;
      SubNumElts = NumElts / NumSubVecs;
      if (SubNumElts <= Index) {
        RegisterFileMoveCost += (Opcode == Instruction::InsertElement ? 2 : 1);
        Index %= SubNumElts;
      }
    }

    if (Index == 0) {
      // Floating point scalars are already located in index #0.
      // Many insertions to #0 can fold away for scalar fp-ops, so let's assume
      // true for all.
      if (ScalarType->isFloatingPointTy())
        return RegisterFileMoveCost;

      // Assume movd/movq XMM -> GPR is relatively cheap on all targets.
      if (ScalarType->isIntegerTy() && Opcode == Instruction::ExtractElement)
        return 1 + RegisterFileMoveCost;
    }

    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Unexpected vector opcode");
    MVT MScalarTy = LT.second.getScalarType();
    if (ST->useSLMArithCosts())
      if (auto *Entry = CostTableLookup(SLMCostTbl, ISD, MScalarTy))
        return Entry->Cost + RegisterFileMoveCost;

    // Assume pinsr/pextr XMM <-> GPR is relatively cheap on all targets.
    if ((MScalarTy == MVT::i16 && ST->hasSSE2()) ||
        (MScalarTy.isInteger() && ST->hasSSE41()))
      return 1 + RegisterFileMoveCost;

    // Assume insertps is relatively cheap on all targets.
    if (MScalarTy == MVT::f32 && ST->hasSSE41() &&
        Opcode == Instruction::InsertElement)
      return 1 + RegisterFileMoveCost;

    // For extractions we just need to shuffle the element to index 0, which
    // should be very cheap (assume cost = 1). For insertions we need to
    // shuffle the element to its destination. In both cases we must handle
    // the subvector move(s).
    // If the vector type is already less than 128 bits then don't reduce it.
    // TODO: Under what circumstances should we shuffle using the full width?
    InstructionCost ShuffleCost = 1;
    if (Opcode == Instruction::InsertElement) {
      auto *SubTy = cast<VectorType>(Val);
      EVT VT = TLI->getValueType(DL, Val);
      if (VT.getScalarType() != MScalarTy || VT.getSizeInBits() >= 128)
        SubTy = FixedVectorType::get(ScalarType, SubNumElts);
      ShuffleCost =
          getShuffleCost(TTI::SK_PermuteTwoSrc, SubTy, None, 0, SubTy);
    }
    int IntOrFpCost = ScalarType->isFloatingPointTy() ? 0 : 1;
    return ShuffleCost + IntOrFpCost + RegisterFileMoveCost;
  }

  // Add to the base cost if we know that the extracted element of a vector is
  // destined to be moved to and used in the integer register file.
  if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy())
    RegisterFileMoveCost += 1;

  return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost;
}

InstructionCost X86TTIImpl::getScalarizationOverhead(VectorType *Ty,
                                                     const APInt &DemandedElts,
                                                     bool Insert,
                                                     bool Extract) {
  InstructionCost Cost = 0;

  // For insertions, an ISD::BUILD_VECTOR style vector initialization can be
  // much cheaper than an accumulation of ISD::INSERT_VECTOR_ELT.
  if (Insert) {
    std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
    MVT MScalarTy = LT.second.getScalarType();

    if ((MScalarTy == MVT::i16 && ST->hasSSE2()) ||
        (MScalarTy.isInteger() && ST->hasSSE41()) ||
        (MScalarTy == MVT::f32 && ST->hasSSE41())) {
      // For types we can insert directly, insertion into 128-bit subvectors is
      // cheap, followed by a cheap chain of concatenations.
      if (LT.second.getSizeInBits() <= 128) {
        Cost +=
            BaseT::getScalarizationOverhead(Ty, DemandedElts, Insert, false);
      } else {
        // In each 128-bit lane, if at least one but not all indices are
        // demanded and the lane is not the first 128-bit lane of the
        // legalized vector, that lane needs an extracti128; every lane with
        // at least one demanded index also needs an inserti128.

        // The following cases help build an intuition. Assume we insert
        // several elements into a v8i32 vector on AVX2:
        // Case#1: inserting into the 1st index needs vpinsrd + inserti128.
        // Case#2: inserting into the 5th index needs extracti128 + vpinsrd +
        // inserti128.
        // Case#3: inserting into indices 4,5,6,7 needs 4*vpinsrd + inserti128.
        const int CostValue = *LT.first.getValue();
        assert(CostValue >= 0 && "Negative cost!");
        unsigned Num128Lanes = LT.second.getSizeInBits() / 128 * CostValue;
        unsigned NumElts = LT.second.getVectorNumElements() * CostValue;
        APInt WidenedDemandedElts = DemandedElts.zextOrSelf(NumElts);
        unsigned Scale = NumElts / Num128Lanes;
        // We iterate over each 128-bit lane and check whether it needs an
        // extracti128/inserti128.
        for (unsigned I = 0; I < NumElts; I += Scale) {
          APInt Mask = WidenedDemandedElts.getBitsSet(NumElts, I, I + Scale);
          APInt MaskedDE = Mask & WidenedDemandedElts;
          unsigned Population = MaskedDE.countPopulation();
          Cost += (Population > 0 && Population != Scale &&
                   I % LT.second.getVectorNumElements() != 0);
          Cost += Population > 0;
        }
        Cost += DemandedElts.countPopulation();

        // For vXf32 cases, insertion into the 0'th index in each v4f32
        // 128-bit vector is free.
        // NOTE: This assumes legalization widens vXf32 vectors.
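        // e.g. for a fully-demanded v8f32 build on AVX, lanes 0 and 4 each
        // start a fresh 128-bit subvector, so those two inserts are
        // subtracted back off as free.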
3708 if (MScalarTy == MVT::f32) 3709 for (unsigned i = 0, e = cast<FixedVectorType>(Ty)->getNumElements(); 3710 i < e; i += 4) 3711 if (DemandedElts[i]) 3712 Cost--; 3713 } 3714 } else if (LT.second.isVector()) { 3715 // Without fast insertion, we need to use MOVD/MOVQ to pass each demanded 3716 // integer element as a SCALAR_TO_VECTOR, then we build the vector as a 3717 // series of UNPCK followed by CONCAT_VECTORS - all of these can be 3718 // considered cheap. 3719 if (Ty->isIntOrIntVectorTy()) 3720 Cost += DemandedElts.countPopulation(); 3721 3722 // Get the smaller of the legalized or original pow2-extended number of 3723 // vector elements, which represents the number of unpacks we'll end up 3724 // performing. 3725 unsigned NumElts = LT.second.getVectorNumElements(); 3726 unsigned Pow2Elts = 3727 PowerOf2Ceil(cast<FixedVectorType>(Ty)->getNumElements()); 3728 Cost += (std::min<unsigned>(NumElts, Pow2Elts) - 1) * LT.first; 3729 } 3730 } 3731 3732 // TODO: Use default extraction for now, but we should investigate extending this 3733 // to handle repeated subvector extraction. 3734 if (Extract) 3735 Cost += BaseT::getScalarizationOverhead(Ty, DemandedElts, false, Extract); 3736 3737 return Cost; 3738 } 3739 3740 InstructionCost 3741 X86TTIImpl::getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, 3742 int VF, const APInt &DemandedDstElts, 3743 TTI::TargetCostKind CostKind) { 3744 const unsigned EltTyBits = DL.getTypeSizeInBits(EltTy); 3745 // We don't differentiate element types here, only element bit width. 3746 EltTy = IntegerType::getIntNTy(EltTy->getContext(), EltTyBits); 3747 3748 auto bailout = [&]() { 3749 return BaseT::getReplicationShuffleCost(EltTy, ReplicationFactor, VF, 3750 DemandedDstElts, CostKind); 3751 }; 3752 3753 // For now, only deal with AVX512 cases. 3754 if (!ST->hasAVX512()) 3755 return bailout(); 3756 3757 // Do we have a native shuffle for this element type, or should we promote? 3758 unsigned PromEltTyBits = EltTyBits; 3759 switch (EltTyBits) { 3760 case 32: 3761 case 64: 3762 break; // AVX512F. 3763 case 16: 3764 if (!ST->hasBWI()) 3765 PromEltTyBits = 32; // promote to i32, AVX512F. 3766 break; // AVX512BW 3767 case 8: 3768 if (!ST->hasVBMI()) 3769 PromEltTyBits = 32; // promote to i32, AVX512F. 3770 break; // AVX512VBMI 3771 case 1: 3772 // There is no support for shuffling i1 elements. We *must* promote. 3773 if (ST->hasBWI()) { 3774 if (ST->hasVBMI()) 3775 PromEltTyBits = 8; // promote to i8, AVX512VBMI. 3776 else 3777 PromEltTyBits = 16; // promote to i16, AVX512BW. 3778 break; 3779 } 3780 if (ST->hasDQI()) { 3781 PromEltTyBits = 32; // promote to i32, AVX512F. 3782 break; 3783 } 3784 return bailout(); 3785 default: 3786 return bailout(); 3787 } 3788 auto *PromEltTy = IntegerType::getIntNTy(EltTy->getContext(), PromEltTyBits); 3789 3790 auto *SrcVecTy = FixedVectorType::get(EltTy, VF); 3791 auto *PromSrcVecTy = FixedVectorType::get(PromEltTy, VF); 3792 3793 int NumDstElements = VF * ReplicationFactor; 3794 auto *PromDstVecTy = FixedVectorType::get(PromEltTy, NumDstElements); 3795 auto *DstVecTy = FixedVectorType::get(EltTy, NumDstElements); 3796 3797 // Legalize the types. 3798 MVT LegalSrcVecTy = TLI->getTypeLegalizationCost(DL, SrcVecTy).second; 3799 MVT LegalPromSrcVecTy = TLI->getTypeLegalizationCost(DL, PromSrcVecTy).second; 3800 MVT LegalPromDstVecTy = TLI->getTypeLegalizationCost(DL, PromDstVecTy).second; 3801 MVT LegalDstVecTy = TLI->getTypeLegalizationCost(DL, DstVecTy).second; 3802 // They should have legalized into vector types. 
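  // If any of them didn't legalize to a vector, the per-subvector accounting
  // below doesn't apply, so we just fall back to the scalarized base-class
  // estimate.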
  if (!LegalSrcVecTy.isVector() || !LegalPromSrcVecTy.isVector() ||
      !LegalPromDstVecTy.isVector() || !LegalDstVecTy.isVector())
    return bailout();

  if (PromEltTyBits != EltTyBits) {
    // If we have to perform the shuffle with a wider element type than our
    // data type, then we will first need to anyext (we don't care about the
    // new bits) the source elements, and then truncate the Dst elements.
    InstructionCost PromotionCost;
    PromotionCost += getCastInstrCost(
        Instruction::SExt, /*Dst=*/PromSrcVecTy, /*Src=*/SrcVecTy,
        TargetTransformInfo::CastContextHint::None, CostKind);
    PromotionCost +=
        getCastInstrCost(Instruction::Trunc, /*Dst=*/DstVecTy,
                         /*Src=*/PromDstVecTy,
                         TargetTransformInfo::CastContextHint::None, CostKind);
    return PromotionCost + getReplicationShuffleCost(PromEltTy,
                                                     ReplicationFactor, VF,
                                                     DemandedDstElts, CostKind);
  }

  assert(LegalSrcVecTy.getScalarSizeInBits() == EltTyBits &&
         LegalSrcVecTy.getScalarType() == LegalDstVecTy.getScalarType() &&
         "We expect that the legalization doesn't affect the element width, "
         "doesn't coalesce/split elements.");

  unsigned NumEltsPerDstVec = LegalDstVecTy.getVectorNumElements();
  unsigned NumDstVectors =
      divideCeil(DstVecTy->getNumElements(), NumEltsPerDstVec);

  auto *SingleDstVecTy = FixedVectorType::get(EltTy, NumEltsPerDstVec);

  // Not all of the produced Dst elements may be demanded. Since a single Dst
  // vector is formed by a single shuffle, if none of the elements that would
  // form a given Dst vector are demanded, we don't need that shuffle at all,
  // so adjust the cost accordingly.
  APInt DemandedDstVectors = APIntOps::ScaleBitMask(
      DemandedDstElts.zextOrSelf(NumDstVectors * NumEltsPerDstVec),
      NumDstVectors);
  unsigned NumDstVectorsDemanded = DemandedDstVectors.countPopulation();

  InstructionCost SingleShuffleCost =
      getShuffleCost(TTI::SK_PermuteSingleSrc, SingleDstVecTy,
                     /*Mask=*/None, /*Index=*/0, /*SubTp=*/nullptr);
  return NumDstVectorsDemanded * SingleShuffleCost;
}

InstructionCost X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                            MaybeAlign Alignment,
                                            unsigned AddressSpace,
                                            TTI::TargetCostKind CostKind,
                                            const Instruction *I) {
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput) {
    if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
      // A store instruction with index and scale costs 2 uops.
      // Check the preceding GEP to identify non-const indices.
      if (auto *GEP = dyn_cast<GetElementPtrInst>(SI->getPointerOperand())) {
        if (!all_of(GEP->indices(), [](Value *V) { return isa<Constant>(V); }))
          return TTI::TCC_Basic * 2;
      }
    }
    return TTI::TCC_Basic;
  }

  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");
  // Type legalization can't handle structs.
  if (TLI->getValueType(DL, Src, true) == MVT::Other)
    return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                  CostKind);

  // Legalize the type.
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);

  auto *VTy = dyn_cast<FixedVectorType>(Src);

  // Handle the simple case of non-vectors.
  // NOTE: this assumes that legalization never creates vector from scalars!
  if (!VTy || !LT.second.isVector())
    // Each load/store unit costs 1.
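    // e.g. an i256 scalar load on x86-64 legalizes to four i64 loads, giving
    // LT.first == 4 and thus a cost of 4.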
    return LT.first * 1;

  bool IsLoad = Opcode == Instruction::Load;

  Type *EltTy = VTy->getElementType();

  const int EltTyBits = DL.getTypeSizeInBits(EltTy);

  InstructionCost Cost = 0;

  // Source of truth: how many elements were there in the original IR vector?
  const unsigned SrcNumElt = VTy->getNumElements();

  // How far have we gotten?
  int NumEltRemaining = SrcNumElt;
  // Note that we intentionally capture by-reference, NumEltRemaining changes.
  auto NumEltDone = [&]() { return SrcNumElt - NumEltRemaining; };

  const int MaxLegalOpSizeBytes = divideCeil(LT.second.getSizeInBits(), 8);

  // Note that even if we can store 64 bits of an XMM, we still operate on XMM.
  const unsigned XMMBits = 128;
  if (XMMBits % EltTyBits != 0)
    // Vector size must be a multiple of the element size. I.e. no padding.
    return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                  CostKind);
  const int NumEltPerXMM = XMMBits / EltTyBits;

  auto *XMMVecTy = FixedVectorType::get(EltTy, NumEltPerXMM);

  for (int CurrOpSizeBytes = MaxLegalOpSizeBytes, SubVecEltsLeft = 0;
       NumEltRemaining > 0; CurrOpSizeBytes /= 2) {
    // How many elements would a single op deal with at once?
    if ((8 * CurrOpSizeBytes) % EltTyBits != 0)
      // Vector size must be a multiple of the element size. I.e. no padding.
      return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                    CostKind);
    int CurrNumEltPerOp = (8 * CurrOpSizeBytes) / EltTyBits;

    assert(CurrOpSizeBytes > 0 && CurrNumEltPerOp > 0 && "How'd we get here?");
    assert((((NumEltRemaining * EltTyBits) < (2 * 8 * CurrOpSizeBytes)) ||
            (CurrOpSizeBytes == MaxLegalOpSizeBytes)) &&
           "Unless we haven't halved the op size yet, "
           "we have less than two op's sized units of work left.");

    auto *CurrVecTy = CurrNumEltPerOp > NumEltPerXMM
                          ? FixedVectorType::get(EltTy, CurrNumEltPerOp)
                          : XMMVecTy;

    assert(CurrVecTy->getNumElements() % CurrNumEltPerOp == 0 &&
           "After halving sizes, the vector elt count is no longer a multiple "
           "of number of elements per operation?");
    auto *CoalescedVecTy =
        CurrNumEltPerOp == 1
            ? CurrVecTy
            : FixedVectorType::get(
                  IntegerType::get(Src->getContext(),
                                   EltTyBits * CurrNumEltPerOp),
                  CurrVecTy->getNumElements() / CurrNumEltPerOp);
    assert(DL.getTypeSizeInBits(CoalescedVecTy) ==
               DL.getTypeSizeInBits(CurrVecTy) &&
           "Coalescing elements doesn't change vector width.");

    while (NumEltRemaining > 0) {
      assert(SubVecEltsLeft >= 0 && "Subreg element count overconsumption?");

      // Can we use this vector size, as per the remaining element count?
      // Iff the vector is naturally aligned, we can do a wide load regardless.
      if (NumEltRemaining < CurrNumEltPerOp &&
          (!IsLoad || Alignment.valueOrOne() < CurrOpSizeBytes) &&
          CurrOpSizeBytes != 1)
        break; // Try smaller vector size.

      bool Is0thSubVec = (NumEltDone() % LT.second.getVectorNumElements()) == 0;

      // If we have fully processed the previous reg, we need to replenish it.
      if (SubVecEltsLeft == 0) {
        SubVecEltsLeft += CurrVecTy->getNumElements();
        // And that's free only for the 0'th subvector of a legalized vector.
        if (!Is0thSubVec)
          Cost += getShuffleCost(IsLoad ? TTI::ShuffleKind::SK_InsertSubvector
                                        : TTI::ShuffleKind::SK_ExtractSubvector,
                                 VTy, None, NumEltDone(), CurrVecTy);
      }

      // While we can directly load/store ZMM, YMM, and 64-bit halves of XMM,
      // for smaller widths (32/16/8) we have to insert/extract them separately.
      // Again, it's free for the 0'th subreg (if op is 32/64 bit wide,
      // but let's pretend that it is also true for 16/8 bit wide ops...)
      if (CurrOpSizeBytes <= 32 / 8 && !Is0thSubVec) {
        int NumEltDoneInCurrXMM = NumEltDone() % NumEltPerXMM;
        assert(NumEltDoneInCurrXMM % CurrNumEltPerOp == 0 && "");
        int CoalescedVecEltIdx = NumEltDoneInCurrXMM / CurrNumEltPerOp;
        APInt DemandedElts =
            APInt::getBitsSet(CoalescedVecTy->getNumElements(),
                              CoalescedVecEltIdx, CoalescedVecEltIdx + 1);
        assert(DemandedElts.countPopulation() == 1 && "Inserting single value");
        Cost += getScalarizationOverhead(CoalescedVecTy, DemandedElts, IsLoad,
                                         !IsLoad);
      }

      // This isn't exactly right. We're using slow unaligned 32-byte accesses
      // as a proxy for a double-pumped AVX memory interface such as on
      // Sandybridge.
      if (CurrOpSizeBytes == 32 && ST->isUnalignedMem32Slow())
        Cost += 2;
      else
        Cost += 1;

      SubVecEltsLeft -= CurrNumEltPerOp;
      NumEltRemaining -= CurrNumEltPerOp;
      Alignment = commonAlignment(Alignment.valueOrOne(), CurrOpSizeBytes);
    }
  }

  assert(NumEltRemaining <= 0 && "Should have processed all the elements.");

  return Cost;
}

InstructionCost
X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy, Align Alignment,
                                  unsigned AddressSpace,
                                  TTI::TargetCostKind CostKind) {
  bool IsLoad = (Instruction::Load == Opcode);
  bool IsStore = (Instruction::Store == Opcode);

  auto *SrcVTy = dyn_cast<FixedVectorType>(SrcTy);
  if (!SrcVTy)
    // For a scalar, take the regular cost, without a mask.
    return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace, CostKind);

  unsigned NumElem = SrcVTy->getNumElements();
  auto *MaskTy =
      FixedVectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem);
  if ((IsLoad && !isLegalMaskedLoad(SrcVTy, Alignment)) ||
      (IsStore && !isLegalMaskedStore(SrcVTy, Alignment))) {
    // Scalarization
    APInt DemandedElts = APInt::getAllOnes(NumElem);
    InstructionCost MaskSplitCost =
        getScalarizationOverhead(MaskTy, DemandedElts, false, true);
    InstructionCost ScalarCompareCost = getCmpSelInstrCost(
        Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr,
        CmpInst::BAD_ICMP_PREDICATE, CostKind);
    InstructionCost BranchCost = getCFInstrCost(Instruction::Br, CostKind);
    InstructionCost MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);
    InstructionCost ValueSplitCost =
        getScalarizationOverhead(SrcVTy, DemandedElts, IsLoad, IsStore);
    InstructionCost MemopCost =
        NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                         Alignment, AddressSpace, CostKind);
    return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
  }

  // Legalize the type.
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
  auto VT = TLI->getValueType(DL, SrcVTy);
  InstructionCost Cost = 0;
  if (VT.isSimple() && LT.second != VT.getSimpleVT() &&
      LT.second.getVectorNumElements() == NumElem)
    // Promotion requires an extend/truncate for the data and a shuffle for
    // the mask.
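    // For instance (a sketch): a masked op on <4 x i16> whose type gets
    // promoted to v4i32 keeps the element count but changes the type, so we
    // pay the data extend/truncate plus a mask rearrangement, modeled below
    // as two two-source permutes.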
    Cost += getShuffleCost(TTI::SK_PermuteTwoSrc, SrcVTy, None, 0, nullptr) +
            getShuffleCost(TTI::SK_PermuteTwoSrc, MaskTy, None, 0, nullptr);

  else if (LT.first * LT.second.getVectorNumElements() > NumElem) {
    auto *NewMaskTy = FixedVectorType::get(MaskTy->getElementType(),
                                           LT.second.getVectorNumElements());
    // Expanding requires filling the mask with zeroes.
    Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, None, 0, MaskTy);
  }

  // Pre-AVX512 - each maskmov load costs 2 + store costs ~8.
  if (!ST->hasAVX512())
    return Cost + LT.first * (IsLoad ? 2 : 8);

  // AVX-512 masked load/store is cheaper.
  return Cost + LT.first;
}

InstructionCost X86TTIImpl::getAddressComputationCost(Type *Ty,
                                                      ScalarEvolution *SE,
                                                      const SCEV *Ptr) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where
  // the computation can more often be merged into the index mode. The
  // resulting extra micro-ops can significantly decrease throughput.
  const unsigned NumVectorInstToHideOverhead = 10;

  // Cost modeling of Strided Access Computation is hidden by the indexing
  // modes of X86 regardless of the stride value. We don't believe that there
  // is a difference between constant strided access in general and a constant
  // stride whose value is less than or equal to 64.
  // Even in the case of a (loop invariant) stride whose value is not known at
  // compile time, the address computation will not incur more than one extra
  // ADD instruction.
  if (Ty->isVectorTy() && SE && !ST->hasAVX2()) {
    // TODO: AVX2 is the current cut-off because we don't have correct
    // interleaving costs for prior ISAs.
    if (!BaseT::isStridedAccess(Ptr))
      return NumVectorInstToHideOverhead;
    if (!BaseT::getConstantStrideStep(SE, Ptr))
      return 1;
  }

  return BaseT::getAddressComputationCost(Ty, SE, Ptr);
}

InstructionCost
X86TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
                                       Optional<FastMathFlags> FMF,
                                       TTI::TargetCostKind CostKind) {
  if (TTI::requiresOrderedReduction(FMF))
    return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use it as the cost.

  static const CostTblEntry SLMCostTblNoPairWise[] = {
    { ISD::FADD, MVT::v2f64, 3 },
    { ISD::ADD,  MVT::v2i64, 5 },
  };

  static const CostTblEntry SSE2CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v2f32, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD,  MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,  MVT::v2i32, 2 }, // FIXME: chosen to be less than v4i32
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3".
    { ISD::ADD,  MVT::v2i16, 2 }, // The data reported by the IACA tool is "4.3".
    { ISD::ADD,  MVT::v4i16, 3 }, // The data reported by the IACA tool is "4.3".
    { ISD::ADD,  MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3".
    { ISD::ADD,  MVT::v2i8,  2 },
    { ISD::ADD,  MVT::v4i8,  2 },
    { ISD::ADD,  MVT::v8i8,  2 },
    { ISD::ADD,  MVT::v16i8, 3 },
  };

  static const CostTblEntry AVX1CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v4f64, 3 },
    { ISD::FADD, MVT::v4f32, 3 },
    { ISD::FADD, MVT::v8f32, 4 },
    { ISD::ADD,  MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD,  MVT::v4i64, 3 },
    { ISD::ADD,  MVT::v8i32, 5 },
    { ISD::ADD,  MVT::v16i16, 5 },
    { ISD::ADD,  MVT::v32i8, 4 },
  };

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // Before legalizing the type, give a chance to look up illegal narrow types
  // in the table.
  // FIXME: Is there a better way to do this?
  EVT VT = TLI->getValueType(DL, ValTy);
  if (VT.isSimple()) {
    MVT MTy = VT.getSimpleVT();
    if (ST->useSLMArithCosts())
      if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy))
        return Entry->Cost;

    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
        return Entry->Cost;

    if (ST->hasSSE2())
      if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
        return Entry->Cost;
  }

  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  auto *ValVTy = cast<FixedVectorType>(ValTy);

  // Special case: vXi8 mul reductions are performed as vXi16.
  if (ISD == ISD::MUL && MTy.getScalarType() == MVT::i8) {
    auto *WideSclTy = IntegerType::get(ValVTy->getContext(), 16);
    auto *WideVecTy = FixedVectorType::get(WideSclTy, ValVTy->getNumElements());
    return getCastInstrCost(Instruction::ZExt, WideVecTy, ValTy,
                            TargetTransformInfo::CastContextHint::None,
                            CostKind) +
           getArithmeticReductionCost(Opcode, WideVecTy, FMF, CostKind);
  }

  InstructionCost ArithmeticCost = 0;
  if (LT.first != 1 && MTy.isVector() &&
      MTy.getVectorNumElements() < ValVTy->getNumElements()) {
    // Type needs to be split. We need LT.first - 1 arithmetic ops.
    auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(),
                                            MTy.getVectorNumElements());
    ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind);
    ArithmeticCost *= LT.first - 1;
  }

  if (ST->useSLMArithCosts())
    if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy))
      return ArithmeticCost + Entry->Cost;

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
      return ArithmeticCost + Entry->Cost;

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
      return ArithmeticCost + Entry->Cost;

  // FIXME: These assume a naive kshift+binop lowering, which is probably
  // conservative in most cases.
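  // As a sanity check on the table below: a naive lowering of a vNi1
  // and/or reduction performs log2(N) kshift+binop stages plus a final move
  // out of the mask register, i.e. roughly 2 * log2(N) + 1 ops. For example
  // (illustrative only), v16i1 gives 2 * 4 + 1 = 9, matching the table entry.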
  static const CostTblEntry AVX512BoolReduction[] = {
    { ISD::AND, MVT::v2i1,   3 },
    { ISD::AND, MVT::v4i1,   5 },
    { ISD::AND, MVT::v8i1,   7 },
    { ISD::AND, MVT::v16i1,  9 },
    { ISD::AND, MVT::v32i1, 11 },
    { ISD::AND, MVT::v64i1, 13 },
    { ISD::OR,  MVT::v2i1,   3 },
    { ISD::OR,  MVT::v4i1,   5 },
    { ISD::OR,  MVT::v8i1,   7 },
    { ISD::OR,  MVT::v16i1,  9 },
    { ISD::OR,  MVT::v32i1, 11 },
    { ISD::OR,  MVT::v64i1, 13 },
  };

  static const CostTblEntry AVX2BoolReduction[] = {
    { ISD::AND, MVT::v16i16, 2 }, // vpmovmskb + cmp
    { ISD::AND, MVT::v32i8,  2 }, // vpmovmskb + cmp
    { ISD::OR,  MVT::v16i16, 2 }, // vpmovmskb + cmp
    { ISD::OR,  MVT::v32i8,  2 }, // vpmovmskb + cmp
  };

  static const CostTblEntry AVX1BoolReduction[] = {
    { ISD::AND, MVT::v4i64,  2 }, // vmovmskpd + cmp
    { ISD::AND, MVT::v8i32,  2 }, // vmovmskps + cmp
    { ISD::AND, MVT::v16i16, 4 }, // vextractf128 + vpand + vpmovmskb + cmp
    { ISD::AND, MVT::v32i8,  4 }, // vextractf128 + vpand + vpmovmskb + cmp
    { ISD::OR,  MVT::v4i64,  2 }, // vmovmskpd + cmp
    { ISD::OR,  MVT::v8i32,  2 }, // vmovmskps + cmp
    { ISD::OR,  MVT::v16i16, 4 }, // vextractf128 + vpor + vpmovmskb + cmp
    { ISD::OR,  MVT::v32i8,  4 }, // vextractf128 + vpor + vpmovmskb + cmp
  };

  static const CostTblEntry SSE2BoolReduction[] = {
    { ISD::AND, MVT::v2i64, 2 }, // movmskpd + cmp
    { ISD::AND, MVT::v4i32, 2 }, // movmskps + cmp
    { ISD::AND, MVT::v8i16, 2 }, // pmovmskb + cmp
    { ISD::AND, MVT::v16i8, 2 }, // pmovmskb + cmp
    { ISD::OR,  MVT::v2i64, 2 }, // movmskpd + cmp
    { ISD::OR,  MVT::v4i32, 2 }, // movmskps + cmp
    { ISD::OR,  MVT::v8i16, 2 }, // pmovmskb + cmp
    { ISD::OR,  MVT::v16i8, 2 }, // pmovmskb + cmp
  };

  // Handle bool allof/anyof patterns.
  if (ValVTy->getElementType()->isIntegerTy(1)) {
    InstructionCost ArithmeticCost = 0;
    if (LT.first != 1 && MTy.isVector() &&
        MTy.getVectorNumElements() < ValVTy->getNumElements()) {
      // Type needs to be split. We need LT.first - 1 arithmetic ops.
      auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(),
                                              MTy.getVectorNumElements());
      ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind);
      ArithmeticCost *= LT.first - 1;
    }

    if (ST->hasAVX512())
      if (const auto *Entry = CostTableLookup(AVX512BoolReduction, ISD, MTy))
        return ArithmeticCost + Entry->Cost;
    if (ST->hasAVX2())
      if (const auto *Entry = CostTableLookup(AVX2BoolReduction, ISD, MTy))
        return ArithmeticCost + Entry->Cost;
    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1BoolReduction, ISD, MTy))
        return ArithmeticCost + Entry->Cost;
    if (ST->hasSSE2())
      if (const auto *Entry = CostTableLookup(SSE2BoolReduction, ISD, MTy))
        return ArithmeticCost + Entry->Cost;

    return BaseT::getArithmeticReductionCost(Opcode, ValVTy, FMF, CostKind);
  }

  unsigned NumVecElts = ValVTy->getNumElements();
  unsigned ScalarSize = ValVTy->getScalarSizeInBits();

  // Special case power of 2 reductions where the scalar type isn't changed
  // by type legalization.
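  // A worked example of the ladder below (illustrative; the exact shuffles
  // are chosen by codegen): an fadd reduction of v8f32 performs
  //   extract upper 128 bits + fadd v4f32   (8 -> 4 elements)
  //   128-bit permute        + fadd         (4 -> 2)
  //   64-bit shuffle         + fadd         (2 -> 1)
  // followed by a single extractelement of lane 0.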
  if (!isPowerOf2_32(NumVecElts) || ScalarSize != MTy.getScalarSizeInBits())
    return BaseT::getArithmeticReductionCost(Opcode, ValVTy, FMF, CostKind);

  InstructionCost ReductionCost = 0;

  auto *Ty = ValVTy;
  if (LT.first != 1 && MTy.isVector() &&
      MTy.getVectorNumElements() < ValVTy->getNumElements()) {
    // Type needs to be split. We need LT.first - 1 arithmetic ops.
    Ty = FixedVectorType::get(ValVTy->getElementType(),
                              MTy.getVectorNumElements());
    ReductionCost = getArithmeticInstrCost(Opcode, Ty, CostKind);
    ReductionCost *= LT.first - 1;
    NumVecElts = MTy.getVectorNumElements();
  }

  // Now handle reduction with the legal type, taking into account size changes
  // at each level.
  while (NumVecElts > 1) {
    // Determine the size of the remaining vector we need to reduce.
    unsigned Size = NumVecElts * ScalarSize;
    NumVecElts /= 2;
    // If we're reducing from 256/512 bits, use an extract_subvector.
    if (Size > 128) {
      auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts);
      ReductionCost +=
          getShuffleCost(TTI::SK_ExtractSubvector, Ty, None, NumVecElts, SubTy);
      Ty = SubTy;
    } else if (Size == 128) {
      // Reducing from 128 bits is a permute of v2f64/v2i64.
      FixedVectorType *ShufTy;
      if (ValVTy->isFloatingPointTy())
        ShufTy =
            FixedVectorType::get(Type::getDoubleTy(ValVTy->getContext()), 2);
      else
        ShufTy =
            FixedVectorType::get(Type::getInt64Ty(ValVTy->getContext()), 2);
      ReductionCost +=
          getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
    } else if (Size == 64) {
      // Reducing from 64 bits is a shuffle of v4f32/v4i32.
      FixedVectorType *ShufTy;
      if (ValVTy->isFloatingPointTy())
        ShufTy =
            FixedVectorType::get(Type::getFloatTy(ValVTy->getContext()), 4);
      else
        ShufTy =
            FixedVectorType::get(Type::getInt32Ty(ValVTy->getContext()), 4);
      ReductionCost +=
          getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
    } else {
      // Reducing from a smaller size is a shift by immediate.
      auto *ShiftTy = FixedVectorType::get(
          Type::getIntNTy(ValVTy->getContext(), Size), 128 / Size);
      ReductionCost += getArithmeticInstrCost(
          Instruction::LShr, ShiftTy, CostKind,
          TargetTransformInfo::OK_AnyValue,
          TargetTransformInfo::OK_UniformConstantValue,
          TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
    }

    // Add the arithmetic op for this level.
    ReductionCost += getArithmeticInstrCost(Opcode, Ty, CostKind);
  }

  // Add the final extract element to the cost.
  return ReductionCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
}

InstructionCost X86TTIImpl::getMinMaxCost(Type *Ty, Type *CondTy,
                                          bool IsUnsigned) {
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  MVT MTy = LT.second;

  int ISD;
  if (Ty->isIntOrIntVectorTy()) {
    ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
  } else {
    assert(Ty->isFPOrFPVectorTy() &&
           "Expected floating point or integer vector type.");
    ISD = ISD::FMINNUM;
  }

  static const CostTblEntry SSE1CostTbl[] = {
      {ISD::FMINNUM, MVT::v4f32, 1},
  };

  static const CostTblEntry SSE2CostTbl[] = {
      {ISD::FMINNUM, MVT::v2f64, 1},
      {ISD::SMIN, MVT::v8i16, 1},
      {ISD::UMIN, MVT::v16i8, 1},
  };

  static const CostTblEntry SSE41CostTbl[] = {
      {ISD::SMIN, MVT::v4i32, 1},
      {ISD::UMIN, MVT::v4i32, 1},
      {ISD::UMIN, MVT::v8i16, 1},
      {ISD::SMIN, MVT::v16i8, 1},
  };

  static const CostTblEntry SSE42CostTbl[] = {
      {ISD::UMIN, MVT::v2i64, 3}, // xor+pcmpgtq+blendvpd
  };

  static const CostTblEntry AVX1CostTbl[] = {
      {ISD::FMINNUM, MVT::v8f32, 1},
      {ISD::FMINNUM, MVT::v4f64, 1},
      {ISD::SMIN, MVT::v8i32, 3},
      {ISD::UMIN, MVT::v8i32, 3},
      {ISD::SMIN, MVT::v16i16, 3},
      {ISD::UMIN, MVT::v16i16, 3},
      {ISD::SMIN, MVT::v32i8, 3},
      {ISD::UMIN, MVT::v32i8, 3},
  };

  static const CostTblEntry AVX2CostTbl[] = {
      {ISD::SMIN, MVT::v8i32, 1},
      {ISD::UMIN, MVT::v8i32, 1},
      {ISD::SMIN, MVT::v16i16, 1},
      {ISD::UMIN, MVT::v16i16, 1},
      {ISD::SMIN, MVT::v32i8, 1},
      {ISD::UMIN, MVT::v32i8, 1},
  };

  static const CostTblEntry AVX512CostTbl[] = {
      {ISD::FMINNUM, MVT::v16f32, 1},
      {ISD::FMINNUM, MVT::v8f64, 1},
      {ISD::SMIN, MVT::v2i64, 1},
      {ISD::UMIN, MVT::v2i64, 1},
      {ISD::SMIN, MVT::v4i64, 1},
      {ISD::UMIN, MVT::v4i64, 1},
      {ISD::SMIN, MVT::v8i64, 1},
      {ISD::UMIN, MVT::v8i64, 1},
      {ISD::SMIN, MVT::v16i32, 1},
      {ISD::UMIN, MVT::v16i32, 1},
  };

  static const CostTblEntry AVX512BWCostTbl[] = {
      {ISD::SMIN, MVT::v32i16, 1},
      {ISD::UMIN, MVT::v32i16, 1},
      {ISD::SMIN, MVT::v64i8, 1},
      {ISD::UMIN, MVT::v64i8, 1},
  };

  // If we have a native MIN/MAX instruction for this type, use it.
  if (ST->hasBWI())
    if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  unsigned CmpOpcode;
  if (Ty->isFPOrFPVectorTy()) {
    CmpOpcode = Instruction::FCmp;
  } else {
    assert(Ty->isIntOrIntVectorTy() &&
           "expecting floating point or integer type for min/max reduction");
    CmpOpcode = Instruction::ICmp;
  }

  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
  // Otherwise fall back to cmp+select.
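  // e.g. for a vector smin, this fallback models the plain IR idiom
  //   %c = icmp slt <N x iM> %a, %b
  //   %r = select <N x i1> %c, <N x iM> %a, <N x iM> %b
  // and charges one compare plus one select.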
  InstructionCost Result =
      getCmpSelInstrCost(CmpOpcode, Ty, CondTy, CmpInst::BAD_ICMP_PREDICATE,
                         CostKind) +
      getCmpSelInstrCost(Instruction::Select, Ty, CondTy,
                         CmpInst::BAD_ICMP_PREDICATE, CostKind);
  return Result;
}

InstructionCost
X86TTIImpl::getMinMaxReductionCost(VectorType *ValTy, VectorType *CondTy,
                                   bool IsUnsigned,
                                   TTI::TargetCostKind CostKind) {
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD;
  if (ValTy->isIntOrIntVectorTy()) {
    ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
  } else {
    assert(ValTy->isFPOrFPVectorTy() &&
           "Expected floating point or integer vector type.");
    ISD = ISD::FMINNUM;
  }

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use it as the cost.

  static const CostTblEntry SSE2CostTblNoPairWise[] = {
      {ISD::UMIN, MVT::v2i16, 5}, // need pxors to use pminsw/pmaxsw
      {ISD::UMIN, MVT::v4i16, 7}, // need pxors to use pminsw/pmaxsw
      {ISD::UMIN, MVT::v8i16, 9}, // need pxors to use pminsw/pmaxsw
  };

  static const CostTblEntry SSE41CostTblNoPairWise[] = {
      {ISD::SMIN, MVT::v2i16, 3}, // same as sse2
      {ISD::SMIN, MVT::v4i16, 5}, // same as sse2
      {ISD::UMIN, MVT::v2i16, 5}, // same as sse2
      {ISD::UMIN, MVT::v4i16, 7}, // same as sse2
      {ISD::SMIN, MVT::v8i16, 4}, // phminposuw+xor
      {ISD::UMIN, MVT::v8i16, 4}, // FIXME: umin is cheaper than umax
      {ISD::SMIN, MVT::v2i8, 3},  // pminsb
      {ISD::SMIN, MVT::v4i8, 5},  // pminsb
      {ISD::SMIN, MVT::v8i8, 7},  // pminsb
      {ISD::SMIN, MVT::v16i8, 6},
      {ISD::UMIN, MVT::v2i8, 3},  // same as sse2
      {ISD::UMIN, MVT::v4i8, 5},  // same as sse2
      {ISD::UMIN, MVT::v8i8, 7},  // same as sse2
      {ISD::UMIN, MVT::v16i8, 6}, // FIXME: umin is cheaper than umax
  };

  static const CostTblEntry AVX1CostTblNoPairWise[] = {
      {ISD::SMIN, MVT::v16i16, 6},
      {ISD::UMIN, MVT::v16i16, 6}, // FIXME: umin is cheaper than umax
      {ISD::SMIN, MVT::v32i8, 8},
      {ISD::UMIN, MVT::v32i8, 8},
  };

  static const CostTblEntry AVX512BWCostTblNoPairWise[] = {
      {ISD::SMIN, MVT::v32i16, 8},
      {ISD::UMIN, MVT::v32i16, 8}, // FIXME: umin is cheaper than umax
      {ISD::SMIN, MVT::v64i8, 10},
      {ISD::UMIN, MVT::v64i8, 10},
  };

  // Before legalizing the type, give a chance to look up illegal narrow types
  // in the table.
  // FIXME: Is there a better way to do this?
  EVT VT = TLI->getValueType(DL, ValTy);
  if (VT.isSimple()) {
    MVT MTy = VT.getSimpleVT();
    if (ST->hasBWI())
      if (const auto *Entry =
              CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy))
        return Entry->Cost;

    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
        return Entry->Cost;

    if (ST->hasSSE41())
      if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy))
        return Entry->Cost;

    if (ST->hasSSE2())
      if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
        return Entry->Cost;
  }

  auto *ValVTy = cast<FixedVectorType>(ValTy);
  unsigned NumVecElts = ValVTy->getNumElements();

  auto *Ty = ValVTy;
  InstructionCost MinMaxCost = 0;
  if (LT.first != 1 && MTy.isVector() &&
      MTy.getVectorNumElements() < ValVTy->getNumElements()) {
    // Type needs to be split. We need LT.first - 1 min/max ops.
    Ty = FixedVectorType::get(ValVTy->getElementType(),
                              MTy.getVectorNumElements());
    auto *SubCondTy = FixedVectorType::get(CondTy->getElementType(),
                                           MTy.getVectorNumElements());
    MinMaxCost = getMinMaxCost(Ty, SubCondTy, IsUnsigned);
    MinMaxCost *= LT.first - 1;
    NumVecElts = MTy.getVectorNumElements();
  }

  if (ST->hasBWI())
    if (const auto *Entry =
            CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy))
      return MinMaxCost + Entry->Cost;

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
      return MinMaxCost + Entry->Cost;

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy))
      return MinMaxCost + Entry->Cost;

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
      return MinMaxCost + Entry->Cost;

  unsigned ScalarSize = ValTy->getScalarSizeInBits();

  // Special case power of 2 reductions where the scalar type isn't changed
  // by type legalization.
  if (!isPowerOf2_32(ValVTy->getNumElements()) ||
      ScalarSize != MTy.getScalarSizeInBits())
    return BaseT::getMinMaxReductionCost(ValTy, CondTy, IsUnsigned, CostKind);

  // Now handle reduction with the legal type, taking into account size changes
  // at each level.
  while (NumVecElts > 1) {
    // Determine the size of the remaining vector we need to reduce.
    unsigned Size = NumVecElts * ScalarSize;
    NumVecElts /= 2;
    // If we're reducing from 256/512 bits, use an extract_subvector.
    if (Size > 128) {
      auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts);
      MinMaxCost +=
          getShuffleCost(TTI::SK_ExtractSubvector, Ty, None, NumVecElts, SubTy);
      Ty = SubTy;
    } else if (Size == 128) {
      // Reducing from 128 bits is a permute of v2f64/v2i64.
      VectorType *ShufTy;
      if (ValTy->isFloatingPointTy())
        ShufTy =
            FixedVectorType::get(Type::getDoubleTy(ValTy->getContext()), 2);
      else
        ShufTy = FixedVectorType::get(Type::getInt64Ty(ValTy->getContext()), 2);
      MinMaxCost +=
          getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
    } else if (Size == 64) {
      // Reducing from 64 bits is a shuffle of v4f32/v4i32.
      FixedVectorType *ShufTy;
      if (ValTy->isFloatingPointTy())
        ShufTy = FixedVectorType::get(Type::getFloatTy(ValTy->getContext()), 4);
      else
        ShufTy = FixedVectorType::get(Type::getInt32Ty(ValTy->getContext()), 4);
      MinMaxCost +=
          getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
    } else {
      // Reducing from a smaller size is a shift by immediate.
      auto *ShiftTy = FixedVectorType::get(
          Type::getIntNTy(ValTy->getContext(), Size), 128 / Size);
      MinMaxCost += getArithmeticInstrCost(
          Instruction::LShr, ShiftTy, TTI::TCK_RecipThroughput,
          TargetTransformInfo::OK_AnyValue,
          TargetTransformInfo::OK_UniformConstantValue,
          TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
    }

    // Add the min/max op for this level.
    auto *SubCondTy =
        FixedVectorType::get(CondTy->getElementType(), Ty->getNumElements());
    MinMaxCost += getMinMaxCost(Ty, SubCondTy, IsUnsigned);
  }

  // Add the final extract element to the cost.
  return MinMaxCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
}

/// Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
InstructionCost X86TTIImpl::getIntImmCost(int64_t Val) {
  if (Val == 0)
    return TTI::TCC_Free;

  if (isInt<32>(Val))
    return TTI::TCC_Basic;

  return 2 * TTI::TCC_Basic;
}

InstructionCost X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
                                          TTI::TargetCostKind CostKind) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Never hoist constants larger than 128 bits, because this might lead to
  // incorrect code generation or assertions in codegen.
  // FIXME: Create a cost model for types larger than i128 once the codegen
  // issues have been fixed.
  if (BitSize > 128)
    return TTI::TCC_Free;

  if (Imm == 0)
    return TTI::TCC_Free;

  // Sign-extend all constants to a multiple of 64 bits.
  APInt ImmVal = Imm;
  if (BitSize % 64 != 0)
    ImmVal = Imm.sext(alignTo(BitSize, 64));

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  InstructionCost Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max<InstructionCost>(1, Cost);
}

InstructionCost X86TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                              const APInt &Imm, Type *Ty,
                                              TTI::TargetCostKind CostKind,
                                              Instruction *Inst) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::ICmp:
    // This is an imperfect hack to prevent constant hoisting of
    // compares that might be trying to check if a 64-bit value fits in
    // 32-bits. The backend can optimize these cases using a right shift by 32.
    // Ideally we would check the compare predicate here. There are also other
    // similar immediates the backend can use shifts for.
    if (Idx == 1 && Imm.getBitWidth() == 64) {
      uint64_t ImmVal = Imm.getZExtValue();
      if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff)
        return TTI::TCC_Free;
    }
    ImmIdx = 1;
    break;
  case Instruction::And:
    // We support 64-bit ANDs with immediates with 32-bits of leading zeroes
    // by using a 32-bit operation with implicit zero extension. Detect such
    // immediates here as the normal path expects bit 31 to be sign extended.
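    // For example (illustrative), `and i64 %x, 4294901760` (0xFFFF0000) is
    // not a sign-extended 32-bit value, but because 32-bit x86 operations
    // implicitly zero-extend into the full 64-bit register it can still be
    // encoded with a 32-bit immediate, so we treat the immediate as free.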
    if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
      return TTI::TCC_Free;
    ImmIdx = 1;
    break;
  case Instruction::Add:
  case Instruction::Sub:
    // For add/sub, we can use the opposite instruction for INT32_MIN.
    if (Idx == 1 && Imm.getBitWidth() == 64 && Imm.getZExtValue() == 0x80000000)
      return TTI::TCC_Free;
    ImmIdx = 1;
    break;
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
    // Division by constant is typically expanded later into a different
    // instruction sequence. This completely changes the constants.
    // Report them as "free" to stop ConstantHoist from marking them as opaque.
    return TTI::TCC_Free;
  case Instruction::Mul:
  case Instruction::Or:
  case Instruction::Xor:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    int NumConstants = divideCeil(BitSize, 64);
    InstructionCost Cost = X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<int>(TTI::TCC_Free)
               : Cost;
  }

  return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
}

InstructionCost X86TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                                const APInt &Imm, Type *Ty,
                                                TTI::TargetCostKind CostKind) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
}

InstructionCost X86TTIImpl::getCFInstrCost(unsigned Opcode,
                                           TTI::TargetCostKind CostKind,
                                           const Instruction *I) {
  if (CostKind != TTI::TCK_RecipThroughput)
    return Opcode == Instruction::PHI ? 0 : 1;
  // Branches are assumed to be predicted.
  return 0;
}

int X86TTIImpl::getGatherOverhead() const {
  // Some CPUs have more overhead for gather. The specified overhead is
  // relative to the Load operation. "2" is the number provided by Intel
  // architects. This parameter is used for cost estimation of the Gather Op
  // and comparison with other alternatives.
  // TODO: Remove the explicit ST->hasAVX512()?; that would mean we would only
  // enable gather with a -march.
  if (ST->hasAVX512() || (ST->hasAVX2() && ST->hasFastGather()))
    return 2;

  return 1024;
}

int X86TTIImpl::getScatterOverhead() const {
  if (ST->hasAVX512())
    return 2;

  return 1024;
}

// Return the average cost of a Gather / Scatter instruction; may be improved
// later.
// FIXME: Add TargetCostKind support.
InstructionCost X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy,
                                            const Value *Ptr, Align Alignment,
                                            unsigned AddressSpace) {

  assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost");
  unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();

  // Try to reduce the index size from 64 bit (default for GEP) to 32. It is
  // essential for VF 16. If the index can't be reduced to 32, the operation
  // will use 16 x 64 indices which do not fit in a zmm and need to be split.
  // Also check that the base pointer is the same for all lanes, and that
  // there's at most one variable index.
  auto getIndexSizeInBits = [](const Value *Ptr, const DataLayout &DL) {
    unsigned IndexSize = DL.getPointerSizeInBits();
    const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
    if (IndexSize < 64 || !GEP)
      return IndexSize;

    unsigned NumOfVarIndices = 0;
    const Value *Ptrs = GEP->getPointerOperand();
    if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs))
      return IndexSize;
    for (unsigned i = 1; i < GEP->getNumOperands(); ++i) {
      if (isa<Constant>(GEP->getOperand(i)))
        continue;
      Type *IndxTy = GEP->getOperand(i)->getType();
      if (auto *IndexVTy = dyn_cast<VectorType>(IndxTy))
        IndxTy = IndexVTy->getElementType();
      if ((IndxTy->getPrimitiveSizeInBits() == 64 &&
           !isa<SExtInst>(GEP->getOperand(i))) ||
          ++NumOfVarIndices > 1)
        return IndexSize; // 64
    }
    return (unsigned)32;
  };

  // Try to reduce IndexSize to 32 bits for VF 16.
  // By default the IndexSize is equal to the pointer size.
  unsigned IndexSize = (ST->hasAVX512() && VF >= 16)
                           ? getIndexSizeInBits(Ptr, DL)
                           : DL.getPointerSizeInBits();

  auto *IndexVTy = FixedVectorType::get(
      IntegerType::get(SrcVTy->getContext(), IndexSize), VF);
  std::pair<InstructionCost, MVT> IdxsLT =
      TLI->getTypeLegalizationCost(DL, IndexVTy);
  std::pair<InstructionCost, MVT> SrcLT =
      TLI->getTypeLegalizationCost(DL, SrcVTy);
  InstructionCost::CostType SplitFactor =
      *std::max(IdxsLT.first, SrcLT.first).getValue();
  if (SplitFactor > 1) {
    // Handle splitting of vector of pointers.
    auto *SplitSrcTy =
        FixedVectorType::get(SrcVTy->getScalarType(), VF / SplitFactor);
    return SplitFactor * getGSVectorCost(Opcode, SplitSrcTy, Ptr, Alignment,
                                         AddressSpace);
  }

  // The gather / scatter cost is given by Intel architects. It is a rough
  // number since we are looking at one instruction at a time.
  const int GSOverhead = (Opcode == Instruction::Load) ? getGatherOverhead()
                                                       : getScatterOverhead();
  return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                           MaybeAlign(Alignment), AddressSpace,
                                           TTI::TCK_RecipThroughput);
}

/// Return the cost of fully scalarizing a gather / scatter operation.
///
/// Opcode - Load or Store instruction.
/// SrcVTy - The type of the data vector that should be gathered or scattered.
/// VariableMask - The mask is non-constant at compile time.
/// Alignment - Alignment for one element.
/// AddressSpace - The address space of the pointer(s).
///
/// FIXME: Add TargetCostKind support.
InstructionCost X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy,
                                            bool VariableMask, Align Alignment,
                                            unsigned AddressSpace) {
  Type *ScalarTy = SrcVTy->getScalarType();
  unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();
  APInt DemandedElts = APInt::getAllOnes(VF);
  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;

  InstructionCost MaskUnpackCost = 0;
  if (VariableMask) {
    auto *MaskTy =
        FixedVectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF);
    MaskUnpackCost = getScalarizationOverhead(
        MaskTy, DemandedElts, /*Insert=*/false, /*Extract=*/true);
    InstructionCost ScalarCompareCost = getCmpSelInstrCost(
        Instruction::ICmp, Type::getInt1Ty(SrcVTy->getContext()), nullptr,
        CmpInst::BAD_ICMP_PREDICATE, CostKind);
    InstructionCost BranchCost = getCFInstrCost(Instruction::Br, CostKind);
    MaskUnpackCost += VF * (BranchCost + ScalarCompareCost);
  }

  InstructionCost AddressUnpackCost = getScalarizationOverhead(
      FixedVectorType::get(ScalarTy->getPointerTo(), VF), DemandedElts,
      /*Insert=*/false, /*Extract=*/true);

  // The cost of the scalar loads/stores.
  InstructionCost MemoryOpCost =
      VF * getMemoryOpCost(Opcode, ScalarTy, MaybeAlign(Alignment),
                           AddressSpace, CostKind);
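  // Illustrative total for a VF = 4 variable-mask load under this model:
  // 4 mask extracts + 4 * (branch + compare) + 4 pointer extracts +
  // 4 scalar loads + 4 inserts into the result vector -- every term in the
  // sum returned below scales linearly with VF.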
  // The cost of forming the vector from loaded scalars /
  // scalarizing the vector to perform scalar stores.
  InstructionCost InsertExtractCost =
      getScalarizationOverhead(cast<FixedVectorType>(SrcVTy), DemandedElts,
                               /*Insert=*/Opcode == Instruction::Load,
                               /*Extract=*/Opcode == Instruction::Store);

  return AddressUnpackCost + MemoryOpCost + MaskUnpackCost + InsertExtractCost;
}

/// Calculate the cost of a Gather / Scatter operation.
InstructionCost X86TTIImpl::getGatherScatterOpCost(
    unsigned Opcode, Type *SrcVTy, const Value *Ptr, bool VariableMask,
    Align Alignment, TTI::TargetCostKind CostKind,
    const Instruction *I = nullptr) {
  if (CostKind != TTI::TCK_RecipThroughput) {
    if ((Opcode == Instruction::Load &&
         isLegalMaskedGather(SrcVTy, Align(Alignment))) ||
        (Opcode == Instruction::Store &&
         isLegalMaskedScatter(SrcVTy, Align(Alignment))))
      return 1;
    return BaseT::getGatherScatterOpCost(Opcode, SrcVTy, Ptr, VariableMask,
                                         Alignment, CostKind, I);
  }

  assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter");
  PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
  if (!PtrTy && Ptr->getType()->isVectorTy())
    PtrTy = dyn_cast<PointerType>(
        cast<VectorType>(Ptr->getType())->getElementType());
  assert(PtrTy && "Unexpected type for Ptr argument");
  unsigned AddressSpace = PtrTy->getAddressSpace();

  if ((Opcode == Instruction::Load &&
       !isLegalMaskedGather(SrcVTy, Align(Alignment))) ||
      (Opcode == Instruction::Store &&
       !isLegalMaskedScatter(SrcVTy, Align(Alignment))))
    return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment,
                           AddressSpace);

  return getGSVectorCost(Opcode, SrcVTy, Ptr, Alignment, AddressSpace);
}

bool X86TTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                               TargetTransformInfo::LSRCost &C2) {
  // X86-specific here: the instruction count gets first priority.
  return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost, C1.NumIVMuls,
                  C1.NumBaseAdds, C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
         std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost, C2.NumIVMuls,
                  C2.NumBaseAdds, C2.ScaleCost, C2.ImmCost, C2.SetupCost);
}

bool X86TTIImpl::canMacroFuseCmp() {
  return ST->hasMacroFusion() || ST->hasBranchFusion();
}

bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) {
  if (!ST->hasAVX())
    return false;

  // The backend can't handle a single element vector.
  if (isa<VectorType>(DataTy) &&
      cast<FixedVectorType>(DataTy)->getNumElements() == 1)
    return false;
  Type *ScalarTy = DataTy->getScalarType();

  if (ScalarTy->isPointerTy())
    return true;

  if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
    return true;

  if (ScalarTy->isHalfTy() && ST->hasBWI() && ST->hasFP16())
    return true;

  if (!ScalarTy->isIntegerTy())
    return false;

  unsigned IntWidth = ScalarTy->getIntegerBitWidth();
  return IntWidth == 32 || IntWidth == 64 ||
         ((IntWidth == 8 || IntWidth == 16) && ST->hasBWI());
}

bool X86TTIImpl::isLegalMaskedStore(Type *DataType, Align Alignment) {
  return isLegalMaskedLoad(DataType, Alignment);
}

bool X86TTIImpl::isLegalNTLoad(Type *DataType, Align Alignment) {
  unsigned DataSize = DL.getTypeStoreSize(DataType);
  // The only supported nontemporal loads are for aligned vectors of 16 or 32
  // bytes.
  // Note that 32-byte nontemporal vector loads are supported by AVX2
  // (the equivalent stores only require AVX).
  if (Alignment >= DataSize && (DataSize == 16 || DataSize == 32))
    return DataSize == 16 ? ST->hasSSE1() : ST->hasAVX2();

  return false;
}

bool X86TTIImpl::isLegalNTStore(Type *DataType, Align Alignment) {
  unsigned DataSize = DL.getTypeStoreSize(DataType);

  // SSE4A supports nontemporal stores of float and double at arbitrary
  // alignment.
  if (ST->hasSSE4A() && (DataType->isFloatTy() || DataType->isDoubleTy()))
    return true;

  // Besides the SSE4A subtarget exception above, only aligned stores are
  // available nontemporally on any other subtarget, and only stores with a
  // size of 4..32 bytes (powers of 2 only) are permitted.
  if (Alignment < DataSize || DataSize < 4 || DataSize > 32 ||
      !isPowerOf2_32(DataSize))
    return false;

  // 32-byte vector nontemporal stores are supported by AVX (the equivalent
  // loads require AVX2).
  if (DataSize == 32)
    return ST->hasAVX();
  if (DataSize == 16)
    return ST->hasSSE1();
  return true;
}

bool X86TTIImpl::isLegalMaskedExpandLoad(Type *DataTy) {
  if (!isa<VectorType>(DataTy))
    return false;

  if (!ST->hasAVX512())
    return false;

  // The backend can't handle a single element vector.
  if (cast<FixedVectorType>(DataTy)->getNumElements() == 1)
    return false;

  Type *ScalarTy = cast<VectorType>(DataTy)->getElementType();

  if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
    return true;

  if (!ScalarTy->isIntegerTy())
    return false;

  unsigned IntWidth = ScalarTy->getIntegerBitWidth();
  return IntWidth == 32 || IntWidth == 64 ||
         ((IntWidth == 8 || IntWidth == 16) && ST->hasVBMI2());
}

bool X86TTIImpl::isLegalMaskedCompressStore(Type *DataTy) {
  return isLegalMaskedExpandLoad(DataTy);
}

bool X86TTIImpl::supportsGather() const {
  // Some CPUs have better gather performance than others.
  // TODO: Remove the explicit ST->hasAVX512()?; that would mean we would only
  // enable gather with a -march.
  return ST->hasAVX512() || (ST->hasFastGather() && ST->hasAVX2());
}

bool X86TTIImpl::isLegalMaskedGather(Type *DataTy, Align Alignment) {
  if (!supportsGather())
    return false;

  // This function is currently called in two cases: from the Loop Vectorizer
  // and from the Scalarizer.
  // When the Loop Vectorizer asks about the legality of the feature, the
  // vectorization factor is not calculated yet. The Loop Vectorizer sends a
  // scalar type and the decision is based on the width of the scalar element.
  // Later on, the cost model will estimate the usage of this intrinsic based
  // on the vector type.
  // The Scalarizer asks again about legality. It sends a vector type.
  // In this case we can reject non-power-of-2 vectors.
  // We also reject single element vectors as the type legalizer can't
  // scalarize them.
  if (auto *DataVTy = dyn_cast<FixedVectorType>(DataTy)) {
    unsigned NumElts = DataVTy->getNumElements();
    if (NumElts == 1)
      return false;
    // Gather / Scatter for vector 2 is not profitable on KNL / SKX.
    // Vector-4 of gather/scatter instruction does not exist on KNL.
    // We can extend it to 8 elements, but zeroing upper bits of
    // the mask vector will add more instructions.
    // Right now we give the scalar cost of vector-4 for KNL.
    // TODO: Check, maybe the gather/scatter instruction is better in the
    // VariableMask case.
    if (ST->hasAVX512() && (NumElts == 2 || (NumElts == 4 && !ST->hasVLX())))
      return false;
  }
  Type *ScalarTy = DataTy->getScalarType();
  if (ScalarTy->isPointerTy())
    return true;

  if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
    return true;

  if (!ScalarTy->isIntegerTy())
    return false;

  unsigned IntWidth = ScalarTy->getIntegerBitWidth();
  return IntWidth == 32 || IntWidth == 64;
}

bool X86TTIImpl::isLegalMaskedScatter(Type *DataType, Align Alignment) {
  // AVX2 doesn't support scatter.
  if (!ST->hasAVX512())
    return false;
  return isLegalMaskedGather(DataType, Alignment);
}

bool X86TTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
  EVT VT = TLI->getValueType(DL, DataType);
  return TLI->isOperationLegal(IsSigned ? ISD::SDIVREM : ISD::UDIVREM, VT);
}

bool X86TTIImpl::isFCmpOrdCheaperThanFCmpZero(Type *Ty) {
  return false;
}

bool X86TTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();

  // Model this as a subset check over subtarget features.
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // Check whether the features are the same (apart from the ignore list).
  FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
  FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
  if (RealCallerBits == RealCalleeBits)
    return true;

  // If the features are a subset, we need to additionally check for calls
  // that may become ABI-incompatible as a result of inlining.
  if ((RealCallerBits & RealCalleeBits) != RealCalleeBits)
    return false;

  for (const Instruction &I : instructions(Callee)) {
    if (const auto *CB = dyn_cast<CallBase>(&I)) {
      SmallVector<Type *, 8> Types;
      for (Value *Arg : CB->args())
        Types.push_back(Arg->getType());
      if (!CB->getType()->isVoidTy())
        Types.push_back(CB->getType());

      // Simple types are always ABI compatible.
      auto IsSimpleTy = [](Type *Ty) {
        return !Ty->isVectorTy() && !Ty->isAggregateType();
      };
      if (all_of(Types, IsSimpleTy))
        continue;

      if (Function *NestedCallee = CB->getCalledFunction()) {
        // Assume that intrinsics are always ABI compatible.
        if (NestedCallee->isIntrinsic())
          continue;

        // Do a precise compatibility check.
        if (!areTypesABICompatible(Caller, NestedCallee, Types))
          return false;
      } else {
        // We don't know the target features of the callee;
        // assume it is incompatible.
        return false;
      }
    }
  }
  return true;
}

bool X86TTIImpl::areTypesABICompatible(const Function *Caller,
                                       const Function *Callee,
                                       const ArrayRef<Type *> &Types) const {
  if (!BaseT::areTypesABICompatible(Caller, Callee, Types))
    return false;

  // If we get here, we know the target features match. If one function
  // considers 512-bit vectors legal and the other does not, consider them
  // incompatible.
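  // For example (illustrative): if one function considers 512-bit registers
  // usable and the other carries "prefer-vector-width"="256", a <16 x float>
  // value could be passed differently on each side (e.g. in a single ZMM
  // register vs. split across YMM registers), so the two are treated as
  // incompatible.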
  const TargetMachine &TM = getTLI()->getTargetMachine();

  if (TM.getSubtarget<X86Subtarget>(*Caller).useAVX512Regs() ==
      TM.getSubtarget<X86Subtarget>(*Callee).useAVX512Regs())
    return true;

  // Consider the arguments compatible if they aren't vectors or aggregates.
  // FIXME: Look at the size of vectors.
  // FIXME: Look at the element types of aggregates to see if there are vectors.
  return llvm::none_of(Types,
      [](Type *T) { return T->isVectorTy() || T->isAggregateType(); });
}

X86TTIImpl::TTI::MemCmpExpansionOptions
X86TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  TTI::MemCmpExpansionOptions Options;
  Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
  Options.NumLoadsPerBlock = 2;
  // All GPR and vector loads can be unaligned.
  Options.AllowOverlappingLoads = true;
  if (IsZeroCmp) {
    // Only enable vector loads for equality comparison. Right now the vector
    // version is not as fast for three way compare (see #33329).
    const unsigned PreferredWidth = ST->getPreferVectorWidth();
    if (PreferredWidth >= 512 && ST->hasAVX512())
      Options.LoadSizes.push_back(64);
    if (PreferredWidth >= 256 && ST->hasAVX())
      Options.LoadSizes.push_back(32);
    if (PreferredWidth >= 128 && ST->hasSSE2())
      Options.LoadSizes.push_back(16);
  }
  if (ST->is64Bit()) {
    Options.LoadSizes.push_back(8);
  }
  Options.LoadSizes.push_back(4);
  Options.LoadSizes.push_back(2);
  Options.LoadSizes.push_back(1);
  return Options;
}

bool X86TTIImpl::prefersVectorizedAddressing() const {
  return supportsGather();
}

bool X86TTIImpl::supportsEfficientVectorElementLoadStore() const {
  return false;
}

bool X86TTIImpl::enableInterleavedAccessVectorization() {
  // TODO: We expect this to be beneficial regardless of arch,
  // but there are currently some unexplained performance artifacts on Atom.
  // As a temporary solution, disable on Atom.
  return !(ST->isAtom());
}

// Get the estimated cost for interleaved load/store operations and strided
// loads.
// \p Indices contains the indices for a strided load.
// \p Factor - the factor of interleaving.
// AVX-512 provides 3-src shuffles that significantly reduce the cost.
InstructionCost X86TTIImpl::getInterleavedMemoryOpCostAVX512(
    unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
    ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
    TTI::TargetCostKind CostKind, bool UseMaskForCond, bool UseMaskForGaps) {
  // VecTy for an interleaved memop is <VF*Factor x Elt>.
  // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
  // VecTy = <12 x i32>.

  // Calculate the number of memory operations (NumOfMemOps) required
  // to load/store the VecTy.
  MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
  unsigned VecTySize = DL.getTypeStoreSize(VecTy);
  unsigned LegalVTSize = LegalVT.getStoreSize();
  unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;
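  // Illustrative arithmetic: for VF = 4, Factor = 3 with i32 elements,
  // VecTy = <12 x i32> stores 48 bytes; with a hypothetical 32-byte legal
  // vector type this gives NumOfMemOps = ceil(48 / 32) = 2.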
  // Get the cost of one memory operation.
  auto *SingleMemOpTy = FixedVectorType::get(VecTy->getElementType(),
                                             LegalVT.getVectorNumElements());
  InstructionCost MemOpCost;
  bool UseMaskedMemOp = UseMaskForCond || UseMaskForGaps;
  if (UseMaskedMemOp)
    MemOpCost = getMaskedMemoryOpCost(Opcode, SingleMemOpTy, Alignment,
                                      AddressSpace, CostKind);
  else
    MemOpCost = getMemoryOpCost(Opcode, SingleMemOpTy, MaybeAlign(Alignment),
                                AddressSpace, CostKind);

  unsigned VF = VecTy->getNumElements() / Factor;
  MVT VT = MVT::getVectorVT(MVT::getVT(VecTy->getScalarType()), VF);

  InstructionCost MaskCost;
  if (UseMaskedMemOp) {
    APInt DemandedLoadStoreElts = APInt::getZero(VecTy->getNumElements());
    for (unsigned Index : Indices) {
      assert(Index < Factor && "Invalid index for interleaved memory op");
      for (unsigned Elm = 0; Elm < VF; Elm++)
        DemandedLoadStoreElts.setBit(Index + Elm * Factor);
    }

    Type *I1Type = Type::getInt1Ty(VecTy->getContext());

    MaskCost = getReplicationShuffleCost(
        I1Type, Factor, VF,
        UseMaskForGaps ? DemandedLoadStoreElts
                       : APInt::getAllOnes(VecTy->getNumElements()),
        CostKind);

    // The Gaps mask is invariant and created outside the loop; therefore the
    // cost of creating it is not accounted for here. However, if we have both
    // a MaskForGaps and some other mask that guards the execution of the
    // memory access, we need to account for the cost of And-ing the two masks
    // inside the loop.
    if (UseMaskForGaps) {
      auto *MaskVT = FixedVectorType::get(I1Type, VecTy->getNumElements());
      MaskCost += getArithmeticInstrCost(BinaryOperator::And, MaskVT, CostKind);
    }
  }

  if (Opcode == Instruction::Load) {
    // The tables (AVX512InterleavedLoadTbl and AVX512InterleavedStoreTbl)
    // contain the cost of the optimized shuffle sequence that the
    // X86InterleavedAccess pass will generate.
    // The cost of loads and stores is computed separately from the table.

    // X86InterleavedAccess supports only the following interleaved-access
    // groups.
    static const CostTblEntry AVX512InterleavedLoadTbl[] = {
        {3, MVT::v16i8, 12}, // (load 48i8 and) deinterleave into 3 x 16i8
        {3, MVT::v32i8, 14}, // (load 96i8 and) deinterleave into 3 x 32i8
        {3, MVT::v64i8, 22}, // (load 192i8 and) deinterleave into 3 x 64i8
    };

    if (const auto *Entry =
            CostTableLookup(AVX512InterleavedLoadTbl, Factor, VT))
      return MaskCost + NumOfMemOps * MemOpCost + Entry->Cost;
    // If an entry does not exist, fall back to the default implementation.

    // The kind of shuffle depends on the number of loaded values.
    // If we load the entire data in one register, we can use a 1-src shuffle.
    // Otherwise, we'll merge 2 sources in each operation.
    TTI::ShuffleKind ShuffleKind =
        (NumOfMemOps > 1) ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc;

    InstructionCost ShuffleCost =
        getShuffleCost(ShuffleKind, SingleMemOpTy, None, 0, nullptr);

    unsigned NumOfLoadsInInterleaveGrp =
        Indices.size() ? Indices.size() : Factor;
    auto *ResultTy = FixedVectorType::get(VecTy->getElementType(),
                                          VecTy->getNumElements() / Factor);
    InstructionCost NumOfResults =
        getTLI()->getTypeLegalizationCost(DL, ResultTy).first *
        NumOfLoadsInInterleaveGrp;

    // About half of the loads may be folded into shuffles when we have only
    // one result.
    // If we have more than one result, or the loads are masked,
    // we do not fold loads at all.
    unsigned NumOfUnfoldedLoads =
        UseMaskedMemOp || NumOfResults > 1 ? NumOfMemOps : NumOfMemOps / 2;

    // Get the number of shuffle operations per result.
    unsigned NumOfShufflesPerResult =
        std::max((unsigned)1, (unsigned)(NumOfMemOps - 1));

    // The SK_PermuteTwoSrc shuffle clobbers one of its source operands.
    // When we have more than one destination, we need additional instructions
    // to keep the sources.
    InstructionCost NumOfMoves = 0;
    if (NumOfResults > 1 && ShuffleKind == TTI::SK_PermuteTwoSrc)
      NumOfMoves = NumOfResults * NumOfShufflesPerResult / 2;

    InstructionCost Cost = NumOfResults * NumOfShufflesPerResult * ShuffleCost +
                           MaskCost + NumOfUnfoldedLoads * MemOpCost +
                           NumOfMoves;

    return Cost;
  }

  // Store.
  assert(Opcode == Instruction::Store &&
         "Expected Store Instruction at this point");
  // X86InterleavedAccess supports only the following interleaved-access
  // groups.
  static const CostTblEntry AVX512InterleavedStoreTbl[] = {
      {3, MVT::v16i8, 12}, // interleave 3 x 16i8 into 48i8 (and store)
      {3, MVT::v32i8, 14}, // interleave 3 x 32i8 into 96i8 (and store)
      {3, MVT::v64i8, 26}, // interleave 3 x 64i8 into 192i8 (and store)

      {4, MVT::v8i8, 10},  // interleave 4 x 8i8 into 32i8 (and store)
      {4, MVT::v16i8, 11}, // interleave 4 x 16i8 into 64i8 (and store)
      {4, MVT::v32i8, 14}, // interleave 4 x 32i8 into 128i8 (and store)
      {4, MVT::v64i8, 24}  // interleave 4 x 64i8 into 256i8 (and store)
  };

  if (const auto *Entry =
          CostTableLookup(AVX512InterleavedStoreTbl, Factor, VT))
    return MaskCost + NumOfMemOps * MemOpCost + Entry->Cost;
  // If an entry does not exist, fall back to the default implementation.

  // There are no strided stores at the moment, and a store can't be folded
  // into a shuffle.
  unsigned NumOfSources = Factor; // The number of values to be merged.
  InstructionCost ShuffleCost =
      getShuffleCost(TTI::SK_PermuteTwoSrc, SingleMemOpTy, None, 0, nullptr);
  unsigned NumOfShufflesPerStore = NumOfSources - 1;
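  // Putting the pieces together (illustrative): for Factor = 3 the generic
  // estimate computed below is
  //   Cost = MaskCost + NumOfMemOps * (MemOpCost + 2 * ShuffleCost) + NumOfMoves
  // since each stored vector is merged with NumOfSources - 1 = 2 two-source
  // shuffles.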
  // The SK_PermuteTwoSrc shuffle clobbers one of its source operands.
  // We need additional instructions to keep the sources.
  unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2;
  InstructionCost Cost =
      MaskCost +
      NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) +
      NumOfMoves;
  return Cost;
}

InstructionCost X86TTIImpl::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *BaseTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) {
  auto *VecTy = cast<FixedVectorType>(BaseTy);

  auto isSupportedOnAVX512 = [&](Type *VecTy, bool HasBW) {
    Type *EltTy = cast<VectorType>(VecTy)->getElementType();
    if (EltTy->isFloatTy() || EltTy->isDoubleTy() || EltTy->isIntegerTy(64) ||
        EltTy->isIntegerTy(32) || EltTy->isPointerTy())
      return true;
    if (EltTy->isIntegerTy(16) || EltTy->isIntegerTy(8) ||
        (!ST->useSoftFloat() && ST->hasFP16() && EltTy->isHalfTy()))
      return HasBW;
    return false;
  };
  if (ST->hasAVX512() && isSupportedOnAVX512(VecTy, ST->hasBWI()))
    return getInterleavedMemoryOpCostAVX512(
        Opcode, VecTy, Factor, Indices, Alignment,
        AddressSpace, CostKind, UseMaskForCond, UseMaskForGaps);

  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind,
                                             UseMaskForCond, UseMaskForGaps);

  // Get the cost estimate for interleaved load/store operations on SSE-AVX2.
  // As opposed to AVX-512, SSE-AVX2 do not have generic shuffles that allow
  // computing the cost using a generic formula as a function of generic
  // shuffles. We therefore use a lookup table instead, filled according to
  // the instruction sequences that codegen currently generates.

  // VecTy for an interleaved memop is <VF*Factor x Elt>.
  // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
  // VecTy = <12 x i32>.
  MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;

  // This function can be called with VecTy=<6xi128>, Factor=3, in which case
  // the VF=2, while v2i128 is an unsupported MVT vector type
  // (see MachineValueType.h::getVectorVT()).
  if (!LegalVT.isVector())
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind);

  unsigned VF = VecTy->getNumElements() / Factor;
  Type *ScalarTy = VecTy->getElementType();
  // To deduplicate entries, model floats/pointers as appropriately-sized
  // integers.
  if (!ScalarTy->isIntegerTy())
    ScalarTy =
        Type::getIntNTy(ScalarTy->getContext(), DL.getTypeSizeInBits(ScalarTy));

  // Get the cost of all the memory operations.
  // FIXME: discount dead loads.
  InstructionCost MemOpCosts = getMemoryOpCost(
      Opcode, VecTy, MaybeAlign(Alignment), AddressSpace, CostKind);

  auto *VT = FixedVectorType::get(ScalarTy, VF);
  EVT ETy = TLI->getValueType(DL, VT);
  if (!ETy.isSimple())
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind);

  // TODO: Complete for other data-types and strides.
  // Each combination of Stride, element bit width and VF results in a
  // different sequence; the cost tables are therefore accessed with:
  // Factor (stride) and VectorType=VFxiN.
  // The Cost accounts only for the shuffle sequence;
  // the cost of the loads/stores is accounted for separately.
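  // For reference, a Factor = 2 interleaved load is the IR idiom (sketch):
  //   %wide = load <8 x i32>, <8 x i32>* %ptr
  //   %even = shufflevector <8 x i32> %wide, <8 x i32> undef,
  //                         <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  //   %odd  = shufflevector <8 x i32> %wide, <8 x i32> undef,
  //                         <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  // and the Factor = 2 entries below price exactly that shuffle sequence.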
  static const CostTblEntry AVX2InterleavedLoadTbl[] = {
      {2, MVT::v2i8, 2},  // (load 4i8 and) deinterleave into 2 x 2i8
      {2, MVT::v4i8, 2},  // (load 8i8 and) deinterleave into 2 x 4i8
      {2, MVT::v8i8, 2},  // (load 16i8 and) deinterleave into 2 x 8i8
      {2, MVT::v16i8, 4}, // (load 32i8 and) deinterleave into 2 x 16i8
      {2, MVT::v32i8, 6}, // (load 64i8 and) deinterleave into 2 x 32i8

      {2, MVT::v8i16, 6},   // (load 16i16 and) deinterleave into 2 x 8i16
      {2, MVT::v16i16, 9},  // (load 32i16 and) deinterleave into 2 x 16i16
      {2, MVT::v32i16, 18}, // (load 64i16 and) deinterleave into 2 x 32i16

      {2, MVT::v8i32, 4},   // (load 16i32 and) deinterleave into 2 x 8i32
      {2, MVT::v16i32, 8},  // (load 32i32 and) deinterleave into 2 x 16i32
      {2, MVT::v32i32, 16}, // (load 64i32 and) deinterleave into 2 x 32i32

      {2, MVT::v4i64, 4},   // (load 8i64 and) deinterleave into 2 x 4i64
      {2, MVT::v8i64, 8},   // (load 16i64 and) deinterleave into 2 x 8i64
      {2, MVT::v16i64, 16}, // (load 32i64 and) deinterleave into 2 x 16i64
      {2, MVT::v32i64, 32}, // (load 64i64 and) deinterleave into 2 x 32i64

      {3, MVT::v2i8, 3},   // (load 6i8 and) deinterleave into 3 x 2i8
      {3, MVT::v4i8, 3},   // (load 12i8 and) deinterleave into 3 x 4i8
      {3, MVT::v8i8, 6},   // (load 24i8 and) deinterleave into 3 x 8i8
      {3, MVT::v16i8, 11}, // (load 48i8 and) deinterleave into 3 x 16i8
      {3, MVT::v32i8, 14}, // (load 96i8 and) deinterleave into 3 x 32i8

      {3, MVT::v2i16, 5},   // (load 6i16 and) deinterleave into 3 x 2i16
      {3, MVT::v4i16, 7},   // (load 12i16 and) deinterleave into 3 x 4i16
      {3, MVT::v8i16, 9},   // (load 24i16 and) deinterleave into 3 x 8i16
      {3, MVT::v16i16, 28}, // (load 48i16 and) deinterleave into 3 x 16i16
      {3, MVT::v32i16, 56}, // (load 96i16 and) deinterleave into 3 x 32i16

      {3, MVT::v2i32, 3},   // (load 6i32 and) deinterleave into 3 x 2i32
      {3, MVT::v4i32, 3},   // (load 12i32 and) deinterleave into 3 x 4i32
      {3, MVT::v8i32, 7},   // (load 24i32 and) deinterleave into 3 x 8i32
      {3, MVT::v16i32, 14}, // (load 48i32 and) deinterleave into 3 x 16i32
      {3, MVT::v32i32, 32}, // (load 96i32 and) deinterleave into 3 x 32i32

      {3, MVT::v2i64, 1},   // (load 6i64 and) deinterleave into 3 x 2i64
      {3, MVT::v4i64, 5},   // (load 12i64 and) deinterleave into 3 x 4i64
      {3, MVT::v8i64, 10},  // (load 24i64 and) deinterleave into 3 x 8i64
      {3, MVT::v16i64, 20}, // (load 48i64 and) deinterleave into 3 x 16i64

      {4, MVT::v2i8, 4},   // (load 8i8 and) deinterleave into 4 x 2i8
      {4, MVT::v4i8, 4},   // (load 16i8 and) deinterleave into 4 x 4i8
      {4, MVT::v8i8, 12},  // (load 32i8 and) deinterleave into 4 x 8i8
      {4, MVT::v16i8, 24}, // (load 64i8 and) deinterleave into 4 x 16i8
      {4, MVT::v32i8, 56}, // (load 128i8 and) deinterleave into 4 x 32i8

      {4, MVT::v2i16, 6},    // (load 8i16 and) deinterleave into 4 x 2i16
      {4, MVT::v4i16, 17},   // (load 16i16 and) deinterleave into 4 x 4i16
      {4, MVT::v8i16, 33},   // (load 32i16 and) deinterleave into 4 x 8i16
      {4, MVT::v16i16, 75},  // (load 64i16 and) deinterleave into 4 x 16i16
      {4, MVT::v32i16, 150}, // (load 128i16 and) deinterleave into 4 x 32i16

      {4, MVT::v2i32, 4},   // (load 8i32 and) deinterleave into 4 x 2i32
      {4, MVT::v4i32, 8},   // (load 16i32 and) deinterleave into 4 x 4i32
      {4, MVT::v8i32, 16},  // (load 32i32 and) deinterleave into 4 x 8i32
      {4, MVT::v16i32, 32}, // (load 64i32 and) deinterleave into 4 x 16i32
      {4, MVT::v32i32, 68}, // (load 128i32 and) deinterleave into 4 x 32i32

      {4, MVT::v2i64, 6},   // (load 8i64 and) deinterleave into 4 x 2i64
      {4, MVT::v4i64, 8},   // (load 16i64 and) deinterleave into 4 x 4i64
      {4, MVT::v8i64, 20},  // (load 32i64 and) deinterleave into 4 x 8i64
      {4, MVT::v16i64, 40}, // (load 64i64 and) deinterleave into 4 x 16i64

      {6, MVT::v2i8, 6},   // (load 12i8 and) deinterleave into 6 x 2i8
      {6, MVT::v4i8, 14},  // (load 24i8 and) deinterleave into 6 x 4i8
      {6, MVT::v8i8, 18},  // (load 48i8 and) deinterleave into 6 x 8i8
      {6, MVT::v16i8, 43}, // (load 96i8 and) deinterleave into 6 x 16i8
      {6, MVT::v32i8, 82}, // (load 192i8 and) deinterleave into 6 x 32i8

      {6, MVT::v2i16, 13},   // (load 12i16 and) deinterleave into 6 x 2i16
      {6, MVT::v4i16, 9},    // (load 24i16 and) deinterleave into 6 x 4i16
      {6, MVT::v8i16, 39},   // (load 48i16 and) deinterleave into 6 x 8i16
      {6, MVT::v16i16, 106}, // (load 96i16 and) deinterleave into 6 x 16i16
      {6, MVT::v32i16, 212}, // (load 192i16 and) deinterleave into 6 x 32i16

      {6, MVT::v2i32, 6},   // (load 12i32 and) deinterleave into 6 x 2i32
      {6, MVT::v4i32, 15},  // (load 24i32 and) deinterleave into 6 x 4i32
      {6, MVT::v8i32, 31},  // (load 48i32 and) deinterleave into 6 x 8i32
      {6, MVT::v16i32, 64}, // (load 96i32 and) deinterleave into 6 x 16i32

      {6, MVT::v2i64, 6},  // (load 12i64 and) deinterleave into 6 x 2i64
      {6, MVT::v4i64, 18}, // (load 24i64 and) deinterleave into 6 x 4i64
      {6, MVT::v8i64, 36}, // (load 48i64 and) deinterleave into 6 x 8i64

      {8, MVT::v8i32, 40} // (load 64i32 and) deinterleave into 8 x 8i32
  };

  static const CostTblEntry SSSE3InterleavedLoadTbl[] = {
      {2, MVT::v4i16, 2}, // (load 8i16 and) deinterleave into 2 x 4i16
  };

  static const CostTblEntry SSE2InterleavedLoadTbl[] = {
      {2, MVT::v2i16, 2}, // (load 4i16 and) deinterleave into 2 x 2i16
      {2, MVT::v4i16, 7}, // (load 8i16 and) deinterleave into 2 x 4i16

      {2, MVT::v2i32, 2}, // (load 4i32 and) deinterleave into 2 x 2i32
      {2, MVT::v4i32, 2}, // (load 8i32 and) deinterleave into 2 x 4i32

      {2, MVT::v2i64, 2}, // (load 4i64 and) deinterleave into 2 x 2i64
  };

  static const CostTblEntry AVX2InterleavedStoreTbl[] = {
      {2, MVT::v16i8, 3}, // interleave 2 x 16i8 into 32i8 (and store)
      {2, MVT::v32i8, 4}, // interleave 2 x 32i8 into 64i8 (and store)

      {2, MVT::v8i16, 3},  // interleave 2 x 8i16 into 16i16 (and store)
      {2, MVT::v16i16, 4}, // interleave 2 x 16i16 into 32i16 (and store)
      {2, MVT::v32i16, 8}, // interleave 2 x 32i16 into 64i16 (and store)

      {2, MVT::v4i32, 2},   // interleave 2 x 4i32 into 8i32 (and store)
      {2, MVT::v8i32, 4},   // interleave 2 x 8i32 into 16i32 (and store)
      {2, MVT::v16i32, 8},  // interleave 2 x 16i32 into 32i32 (and store)
      {2, MVT::v32i32, 16}, // interleave 2 x 32i32 into 64i32 (and store)

      {2, MVT::v2i64, 2},   // interleave 2 x 2i64 into 4i64 (and store)
      {2, MVT::v4i64, 4},   // interleave 2 x 4i64 into 8i64 (and store)
      {2, MVT::v8i64, 8},   // interleave 2 x 8i64 into 16i64 (and store)
      {2, MVT::v16i64, 16}, // interleave 2 x 16i64 into 32i64 (and store)
      {2, MVT::v32i64, 32}, // interleave 2 x 32i64 into 64i64 (and store)

      {3, MVT::v2i8, 4}, // interleave 3 x 2i8 into 6i8 (and store)
      {3, MVT::v4i8, 4}, // interleave 3 x 4i8 into 12i8 (and store)
      {3, MVT::v8i8, 6},   // interleave 3 x 8i8 into 24i8 (and store)
      {3, MVT::v16i8, 11}, // interleave 3 x 16i8 into 48i8 (and store)
      {3, MVT::v32i8, 13}, // interleave 3 x 32i8 into 96i8 (and store)

      {3, MVT::v2i16, 4},   // interleave 3 x 2i16 into 6i16 (and store)
      {3, MVT::v4i16, 6},   // interleave 3 x 4i16 into 12i16 (and store)
      {3, MVT::v8i16, 12},  // interleave 3 x 8i16 into 24i16 (and store)
      {3, MVT::v16i16, 27}, // interleave 3 x 16i16 into 48i16 (and store)
      {3, MVT::v32i16, 54}, // interleave 3 x 32i16 into 96i16 (and store)

      {3, MVT::v2i32, 4},   // interleave 3 x 2i32 into 6i32 (and store)
      {3, MVT::v4i32, 5},   // interleave 3 x 4i32 into 12i32 (and store)
      {3, MVT::v8i32, 11},  // interleave 3 x 8i32 into 24i32 (and store)
      {3, MVT::v16i32, 22}, // interleave 3 x 16i32 into 48i32 (and store)
      {3, MVT::v32i32, 48}, // interleave 3 x 32i32 into 96i32 (and store)

      {3, MVT::v2i64, 4},   // interleave 3 x 2i64 into 6i64 (and store)
      {3, MVT::v4i64, 6},   // interleave 3 x 4i64 into 12i64 (and store)
      {3, MVT::v8i64, 12},  // interleave 3 x 8i64 into 24i64 (and store)
      {3, MVT::v16i64, 24}, // interleave 3 x 16i64 into 48i64 (and store)

      {4, MVT::v2i8, 4},   // interleave 4 x 2i8 into 8i8 (and store)
      {4, MVT::v4i8, 4},   // interleave 4 x 4i8 into 16i8 (and store)
      {4, MVT::v8i8, 4},   // interleave 4 x 8i8 into 32i8 (and store)
      {4, MVT::v16i8, 8},  // interleave 4 x 16i8 into 64i8 (and store)
      {4, MVT::v32i8, 12}, // interleave 4 x 32i8 into 128i8 (and store)

      {4, MVT::v2i16, 2},   // interleave 4 x 2i16 into 8i16 (and store)
      {4, MVT::v4i16, 6},   // interleave 4 x 4i16 into 16i16 (and store)
      {4, MVT::v8i16, 10},  // interleave 4 x 8i16 into 32i16 (and store)
      {4, MVT::v16i16, 32}, // interleave 4 x 16i16 into 64i16 (and store)
      {4, MVT::v32i16, 64}, // interleave 4 x 32i16 into 128i16 (and store)

      {4, MVT::v2i32, 5},   // interleave 4 x 2i32 into 8i32 (and store)
      {4, MVT::v4i32, 6},   // interleave 4 x 4i32 into 16i32 (and store)
      {4, MVT::v8i32, 16},  // interleave 4 x 8i32 into 32i32 (and store)
      {4, MVT::v16i32, 32}, // interleave 4 x 16i32 into 64i32 (and store)
      {4, MVT::v32i32, 64}, // interleave 4 x 32i32 into 128i32 (and store)

      {4, MVT::v2i64, 6},   // interleave 4 x 2i64 into 8i64 (and store)
      {4, MVT::v4i64, 8},   // interleave 4 x 4i64 into 16i64 (and store)
      {4, MVT::v8i64, 20},  // interleave 4 x 8i64 into 32i64 (and store)
      {4, MVT::v16i64, 40}, // interleave 4 x 16i64 into 64i64 (and store)

      {6, MVT::v2i8, 7},   // interleave 6 x 2i8 into 12i8 (and store)
      {6, MVT::v4i8, 9},   // interleave 6 x 4i8 into 24i8 (and store)
      {6, MVT::v8i8, 16},  // interleave 6 x 8i8 into 48i8 (and store)
      {6, MVT::v16i8, 27}, // interleave 6 x 16i8 into 96i8 (and store)
      {6, MVT::v32i8, 90}, // interleave 6 x 32i8 into 192i8 (and store)

      {6, MVT::v2i16, 10},  // interleave 6 x 2i16 into 12i16 (and store)
      {6, MVT::v4i16, 15},  // interleave 6 x 4i16 into 24i16 (and store)
      {6, MVT::v8i16, 21},  // interleave 6 x 8i16 into 48i16 (and store)
      {6, MVT::v16i16, 58}, // interleave 6 x 16i16 into 96i16 (and store)
      {6, MVT::v32i16, 90}, // interleave 6 x 32i16 into 192i16 (and store)

      {6, MVT::v2i32, 9},  // interleave 6 x 2i32 into 12i32 (and store)
      {6, MVT::v4i32, 12}, // interleave 6 x 4i32 into 24i32 (and store)
      {6, MVT::v8i32, 33}, // interleave 6 x 8i32 into 48i32 (and store)
      {6, MVT::v16i32, 66}, // interleave 6 x 16i32 into 96i32 (and store)

      {6, MVT::v2i64, 8},  // interleave 6 x 2i64 into 12i64 (and store)
      {6, MVT::v4i64, 15}, // interleave 6 x 4i64 into 24i64 (and store)
      {6, MVT::v8i64, 30}, // interleave 6 x 8i64 into 48i64 (and store)
  };

  static const CostTblEntry SSE2InterleavedStoreTbl[] = {
      {2, MVT::v2i8, 1}, // interleave 2 x 2i8 into 4i8 (and store)
      {2, MVT::v4i8, 1}, // interleave 2 x 4i8 into 8i8 (and store)
      {2, MVT::v8i8, 1}, // interleave 2 x 8i8 into 16i8 (and store)

      {2, MVT::v2i16, 1}, // interleave 2 x 2i16 into 4i16 (and store)
      {2, MVT::v4i16, 1}, // interleave 2 x 4i16 into 8i16 (and store)

      {2, MVT::v2i32, 1}, // interleave 2 x 2i32 into 4i32 (and store)
  };

  if (Opcode == Instruction::Load) {
    auto GetDiscountedCost = [Factor, NumMembers = Indices.size(),
                              MemOpCosts](const CostTblEntry *Entry) {
      // NOTE: this is just an approximation!
      //       It can over- or under-estimate the cost!
      return MemOpCosts + divideCeil(NumMembers * Entry->Cost, Factor);
    };

    if (ST->hasAVX2())
      if (const auto *Entry = CostTableLookup(AVX2InterleavedLoadTbl, Factor,
                                              ETy.getSimpleVT()))
        return GetDiscountedCost(Entry);

    if (ST->hasSSSE3())
      if (const auto *Entry = CostTableLookup(SSSE3InterleavedLoadTbl, Factor,
                                              ETy.getSimpleVT()))
        return GetDiscountedCost(Entry);

    if (ST->hasSSE2())
      if (const auto *Entry = CostTableLookup(SSE2InterleavedLoadTbl, Factor,
                                              ETy.getSimpleVT()))
        return GetDiscountedCost(Entry);
  } else {
    assert(Opcode == Instruction::Store &&
           "Expected Store Instruction at this point");
    assert((!Indices.size() || Indices.size() == Factor) &&
           "Interleaved store only supports fully-interleaved groups.");
    if (ST->hasAVX2())
      if (const auto *Entry = CostTableLookup(AVX2InterleavedStoreTbl, Factor,
                                              ETy.getSimpleVT()))
        return MemOpCosts + Entry->Cost;

    if (ST->hasSSE2())
      if (const auto *Entry = CostTableLookup(SSE2InterleavedStoreTbl, Factor,
                                              ETy.getSimpleVT()))
        return MemOpCosts + Entry->Cost;
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace, CostKind,
                                           UseMaskForCond, UseMaskForGaps);
}
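// Illustration of GetDiscountedCost above (a hypothetical query, assuming an
// AVX2 target): for a stride-3 load keyed as {3, MVT::v8i32, 7} where only
// 2 of the 3 members are requested (NumMembers = 2), the returned cost is
// MemOpCosts + divideCeil(2 * 7, 3) = MemOpCosts + 5, i.e. roughly two
// thirds of the full shuffle-sequence cost, in line with the approximation
// note.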