//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//
/// A note about the cost model numbers used below: the numbers correspond to
/// some "generic" X86 CPU instead of a concrete CPU model. Usually the
/// numbers correspond to the CPU where the feature first appeared. For
/// example, if we do Subtarget.hasSSE42() in the lookups below, the cost is
/// based on Nehalem as that was the first CPU to support that feature level
/// and thus has most likely the worst case cost.
/// Some examples of other technologies/CPUs:
///   SSE 3   - Pentium4 / Athlon64
///   SSE 4.1 - Penryn
///   SSE 4.2 - Nehalem
///   AVX     - Sandy Bridge
///   AVX2    - Haswell
///   AVX-512 - Xeon Phi / Skylake
/// And some examples of instruction target dependent costs (latency)
///                    divss     sqrtss      rsqrtss
///   AMD K7           11-16     19          3
///   Piledriver       9-24      13-15       5
///   Jaguar           14        16          2
///   Pentium II,III   18        30          2
///   Nehalem          7-14      7-18        3
///   Haswell          10-13     11          5
/// TODO: Develop and implement the target dependent cost model and
/// specialize cost numbers for different Cost Model Targets such as
/// throughput, code size, latency and uop count.
//===----------------------------------------------------------------------===//

#include "X86TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

#define DEBUG_TYPE "x86tti"

//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  //   instructions is inefficient. Once the problem is fixed, we should
  //   call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
}

llvm::Optional<unsigned> X86TTIImpl::getCacheSize(
    TargetTransformInfo::CacheLevel Level) const {
  switch (Level) {
  case TargetTransformInfo::CacheLevel::L1D:
    //   - Penryn
    //   - Nehalem
    //   - Westmere
    //   - Sandy Bridge
    //   - Ivy Bridge
    //   - Haswell
    //   - Broadwell
    //   - Skylake
    //   - Kabylake
    return 32 * 1024; // 32 KByte
  case TargetTransformInfo::CacheLevel::L2D:
    //   - Penryn
    //   - Nehalem
    //   - Westmere
    //   - Sandy Bridge
    //   - Ivy Bridge
    //   - Haswell
    //   - Broadwell
    //   - Skylake
    //   - Kabylake
    return 256 * 1024; // 256 KByte
  }

  llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
}

llvm::Optional<unsigned> X86TTIImpl::getCacheAssociativity(
    TargetTransformInfo::CacheLevel Level) const {
  //   - Penryn
  //   - Nehalem
  //   - Westmere
  //   - Sandy Bridge
  //   - Ivy Bridge
  //   - Haswell
  //   - Broadwell
  //   - Skylake
  //   - Kabylake
  switch (Level) {
  case TargetTransformInfo::CacheLevel::L1D:
    LLVM_FALLTHROUGH;
  case TargetTransformInfo::CacheLevel::L2D:
    return 8;
  }

  llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
}

unsigned X86TTIImpl::getNumberOfRegisters(unsigned ClassID) const {
  bool Vector = (ClassID == 1);
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit()) {
    if (Vector && ST->hasAVX512())
      return 32;
    return 16;
  }
  return 8;
}

TypeSize
X86TTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
  unsigned PreferVectorWidth = ST->getPreferVectorWidth();
  switch (K) {
  case TargetTransformInfo::RGK_Scalar:
    return TypeSize::getFixed(ST->is64Bit() ? 64 : 32);
  case TargetTransformInfo::RGK_FixedWidthVector:
    if (ST->hasAVX512() && PreferVectorWidth >= 512)
      return TypeSize::getFixed(512);
    if (ST->hasAVX() && PreferVectorWidth >= 256)
      return TypeSize::getFixed(256);
    if (ST->hasSSE1() && PreferVectorWidth >= 128)
      return TypeSize::getFixed(128);
    return TypeSize::getFixed(0);
  case TargetTransformInfo::RGK_ScalableVector:
    return TypeSize::getScalable(0);
  }

  llvm_unreachable("Unsupported register kind");
}

unsigned X86TTIImpl::getLoadStoreVecRegBitWidth(unsigned) const {
  return getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
      .getFixedSize();
}

unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // If the loop will not be vectorized, don't interleave the loop.
  // Let the regular unroller unroll the loop instead, which saves the
  // overflow check and memory check cost.
  if (VF == 1)
    return 1;

  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}

InstructionCost X86TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
    TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
    const Instruction *CxtI) {
  // TODO: Handle more cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                         Op2Info, Opd1PropInfo, Opd2PropInfo,
                                         Args, CxtI);

  // vXi8 multiplications are always promoted to vXi16.
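  // Illustrative example (a sketch, not tied to any specific CPU): a
  // mul <16 x i8> is costed below as a zext to <16 x i16>, plus a
  // <16 x i16> multiply, plus a trunc back to <16 x i8>, mirroring the
  // promotion that the lowering actually performs.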
  if (Opcode == Instruction::Mul && Ty->isVectorTy() &&
      Ty->getScalarSizeInBits() == 8) {
    Type *WideVecTy =
        VectorType::getExtendedElementVectorType(cast<VectorType>(Ty));
    return getCastInstrCost(Instruction::ZExt, WideVecTy, Ty,
                            TargetTransformInfo::CastContextHint::None,
                            CostKind) +
           getCastInstrCost(Instruction::Trunc, Ty, WideVecTy,
                            TargetTransformInfo::CastContextHint::None,
                            CostKind) +
           getArithmeticInstrCost(Opcode, WideVecTy, CostKind, Op1Info, Op2Info,
                                  Opd1PropInfo, Opd2PropInfo);
  }

  // Legalize the type.
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  if (ISD == ISD::MUL && Args.size() == 2 && LT.second.isVector() &&
      LT.second.getScalarType() == MVT::i32) {
    // Check if the operands can be represented as a smaller datatype.
    bool Op1Signed = false, Op2Signed = false;
    unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
    unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);
    unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);

    // If both operands are representable as i15 and at least one is constant,
    // zero-extended, or sign-extended from vXi16 (or less pre-SSE41) then we
    // can treat this as PMADDWD which has the same costs as a vXi16 multiply.
    if (OpMinSize <= 15 && !ST->isPMADDWDSlow()) {
      bool Op1Constant =
          isa<ConstantDataVector>(Args[0]) || isa<ConstantVector>(Args[0]);
      bool Op2Constant =
          isa<ConstantDataVector>(Args[1]) || isa<ConstantVector>(Args[1]);
      bool Op1Sext = isa<SExtInst>(Args[0]) &&
                     (Op1MinSize == 15 || (Op1MinSize < 15 && !ST->hasSSE41()));
      bool Op2Sext = isa<SExtInst>(Args[1]) &&
                     (Op2MinSize == 15 || (Op2MinSize < 15 && !ST->hasSSE41()));

      bool IsZeroExtended = !Op1Signed || !Op2Signed;
      bool IsConstant = Op1Constant || Op2Constant;
      bool IsSext = Op1Sext || Op2Sext;
      if (IsConstant || IsZeroExtended || IsSext)
        LT.second =
            MVT::getVectorVT(MVT::i16, 2 * LT.second.getVectorNumElements());
    }
  }

  if ((ISD == ISD::MUL || ISD == ISD::SDIV || ISD == ISD::SREM ||
       ISD == ISD::UDIV || ISD == ISD::UREM) &&
      (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    // Vector multiply by pow2 will be simplified to shifts.
    if (ISD == ISD::MUL) {
      InstructionCost Cost = getArithmeticInstrCost(
          Instruction::Shl, Ty, CostKind, Op1Info, Op2Info,
          TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
      return Cost;
    }

    if (ISD == ISD::SDIV || ISD == ISD::SREM) {
      // On X86, vector signed division by a power-of-two constant is
      // normally expanded to the sequence SRA + SRL + ADD + SRA.
      // The OperandValue properties may not be the same as that of the
      // previous operation; conservatively assume OP_None.
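      // Worked example (an illustrative sketch for i32 X sdiv 4, i.e. C == 4):
      //   %sign = ashr %X, 31     ; broadcast the sign bit
      //   %bias = lshr %sign, 30  ; C - 1 for negative X, 0 otherwise
      //   %tmp  = add  %X, %bias
      //   %res  = ashr %tmp, 2
      // hence the 2 * AShr + LShr + Add costs charged below.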
      InstructionCost Cost =
          2 * getArithmeticInstrCost(Instruction::AShr, Ty, CostKind, Op1Info,
                                     Op2Info, TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      Cost += getArithmeticInstrCost(Instruction::LShr, Ty, CostKind, Op1Info,
                                     Op2Info, TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      Cost += getArithmeticInstrCost(Instruction::Add, Ty, CostKind, Op1Info,
                                     Op2Info, TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);

      if (ISD == ISD::SREM) {
        // For SREM: (X % C) is the equivalent of (X - (X/C)*C)
        Cost += getArithmeticInstrCost(Instruction::Mul, Ty, CostKind, Op1Info,
                                       Op2Info);
        Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind, Op1Info,
                                       Op2Info);
      }

      return Cost;
    }

    // Vector unsigned division/remainder will be simplified to shifts/masks.
    if (ISD == ISD::UDIV)
      return getArithmeticInstrCost(Instruction::LShr, Ty, CostKind, Op1Info,
                                    Op2Info, TargetTransformInfo::OP_None,
                                    TargetTransformInfo::OP_None);
    // UREM
    return getArithmeticInstrCost(Instruction::And, Ty, CostKind, Op1Info,
                                  Op2Info, TargetTransformInfo::OP_None,
                                  TargetTransformInfo::OP_None);
  }

  static const CostTblEntry GLMCostTable[] = {
    { ISD::FDIV, MVT::f32,   18 }, // divss
    { ISD::FDIV, MVT::v4f32, 35 }, // divps
    { ISD::FDIV, MVT::f64,   33 }, // divsd
    { ISD::FDIV, MVT::v2f64, 65 }, // divpd
  };

  if (ST->useGLMDivSqrtCosts())
    if (const auto *Entry = CostTableLookup(GLMCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SLMCostTable[] = {
    { ISD::MUL,  MVT::v4i32, 11 }, // pmulld
    { ISD::MUL,  MVT::v8i16, 2  }, // pmullw
    { ISD::FMUL, MVT::f64,   2  }, // mulsd
    { ISD::FMUL, MVT::v2f64, 4  }, // mulpd
    { ISD::FMUL, MVT::v4f32, 2  }, // mulps
    { ISD::FDIV, MVT::f32,   17 }, // divss
    { ISD::FDIV, MVT::v4f32, 39 }, // divps
    { ISD::FDIV, MVT::f64,   32 }, // divsd
    { ISD::FDIV, MVT::v2f64, 69 }, // divpd
    { ISD::FADD, MVT::v2f64, 2  }, // addpd
    { ISD::FSUB, MVT::v2f64, 2  }, // subpd
    // v2i64/v4i64 mul is custom lowered as a series of long:
    // multiplies(3), shifts(3) and adds(2)
    // slm muldq version throughput is 2 and addq throughput 4
    // thus: 3X2 (muldq throughput) + 3X1 (shift throughput) +
    // 3X4 (addq throughput) = 17
    { ISD::MUL,  MVT::v2i64, 17 },
    // slm addq\subq throughput is 4
    { ISD::ADD,  MVT::v2i64, 4  },
    { ISD::SUB,  MVT::v2i64, 4  },
  };

  if (ST->useSLMArithCosts()) {
    if (Args.size() == 2 && ISD == ISD::MUL && LT.second == MVT::v4i32) {
      // Check if the operands can be shrunk into a smaller datatype.
      // TODO: Merge this into the generic vXi32 MUL patterns above.
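      // Illustrative sketch (an assumption about the SLM lowering reflected
      // in the returns below): when both v4i32 operands are known to fit in
      // 16 bits, the multiply can be done with pmullw (plus pmulhw/pshuf to
      // rebuild the high halves when needed) instead of the slow SLM pmulld.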
      bool Op1Signed = false;
      unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
      bool Op2Signed = false;
      unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);

      bool SignedMode = Op1Signed || Op2Signed;
      unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);

      if (OpMinSize <= 7)
        return LT.first * 3; // pmullw/sext
      if (!SignedMode && OpMinSize <= 8)
        return LT.first * 3; // pmullw/zext
      if (OpMinSize <= 15)
        return LT.first * 5; // pmullw/pmulhw/pshuf
      if (!SignedMode && OpMinSize <= 16)
        return LT.first * 5; // pmullw/pmulhw/pshuf
    }

    if (const auto *Entry = CostTableLookup(SLMCostTable, ISD, LT.second)) {
      return LT.first * Entry->Cost;
    }
  }

  static const CostTblEntry AVX512BWUniformConstCostTable[] = {
    { ISD::SHL,  MVT::v64i8,   2 }, // psllw + pand.
    { ISD::SRL,  MVT::v64i8,   2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v64i8,   4 }, // psrlw, pand, pxor, psubb.
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasBWI()) {
    if (const auto *Entry = CostTableLookup(AVX512BWUniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512UniformConstCostTable[] = {
    { ISD::SRA,  MVT::v2i64,   1 },
    { ISD::SRA,  MVT::v4i64,   1 },
    { ISD::SRA,  MVT::v8i64,   1 },

    { ISD::SHL,  MVT::v64i8,   4 }, // psllw + pand.
    { ISD::SRL,  MVT::v64i8,   4 }, // psrlw + pand.
    { ISD::SRA,  MVT::v64i8,   8 }, // psrlw, pand, pxor, psubb.

    { ISD::SDIV, MVT::v16i32,  6 }, // pmuludq sequence
    { ISD::SREM, MVT::v16i32,  8 }, // pmuludq+mul+sub sequence
    { ISD::UDIV, MVT::v16i32,  5 }, // pmuludq sequence
    { ISD::UREM, MVT::v16i32,  7 }, // pmuludq+mul+sub sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX512()) {
    if (const auto *Entry = CostTableLookup(AVX512UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX2UniformConstCostTable[] = {
    { ISD::SHL,  MVT::v32i8,   2 }, // psllw + pand.
    { ISD::SRL,  MVT::v32i8,   2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v32i8,   4 }, // psrlw, pand, pxor, psubb.

    { ISD::SRA,  MVT::v4i64,   4 }, // 2 x psrad + shuffle.

    { ISD::SDIV, MVT::v8i32,   6 }, // pmuludq sequence
    { ISD::SREM, MVT::v8i32,   8 }, // pmuludq+mul+sub sequence
    { ISD::UDIV, MVT::v8i32,   5 }, // pmuludq sequence
    { ISD::UREM, MVT::v8i32,   7 }, // pmuludq+mul+sub sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX2()) {
    if (const auto *Entry = CostTableLookup(AVX2UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformConstCostTable[] = {
    { ISD::SHL,  MVT::v16i8,     2 }, // psllw + pand.
    { ISD::SRL,  MVT::v16i8,     2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v16i8,     4 }, // psrlw, pand, pxor, psubb.

    { ISD::SHL,  MVT::v32i8,   4+2 }, // 2*(psllw + pand) + split.
    { ISD::SRL,  MVT::v32i8,   4+2 }, // 2*(psrlw + pand) + split.
    { ISD::SRA,  MVT::v32i8,   8+2 }, // 2*(psrlw, pand, pxor, psubb) + split.

    { ISD::SDIV, MVT::v8i32,  12+2 }, // 2*pmuludq sequence + split.
    { ISD::SREM, MVT::v8i32,  16+2 }, // 2*pmuludq+mul+sub sequence + split.
    { ISD::SDIV, MVT::v4i32,     6 }, // pmuludq sequence
    { ISD::SREM, MVT::v4i32,     8 }, // pmuludq+mul+sub sequence
    { ISD::UDIV, MVT::v8i32,  10+2 }, // 2*pmuludq sequence + split.
    { ISD::UREM, MVT::v8i32,  14+2 }, // 2*pmuludq+mul+sub sequence + split.
    { ISD::UDIV, MVT::v4i32,     5 }, // pmuludq sequence
    { ISD::UREM, MVT::v4i32,     7 }, // pmuludq+mul+sub sequence
  };

  // XOP has faster vXi8 shifts.
  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2() && !ST->hasXOP()) {
    if (const auto *Entry =
            CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512BWConstCostTable[] = {
    { ISD::SDIV, MVT::v64i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::SREM, MVT::v64i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v64i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::UREM, MVT::v64i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v32i16,  6 }, // vpmulhw sequence
    { ISD::SREM, MVT::v32i16,  8 }, // vpmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i16,  6 }, // vpmulhuw sequence
    { ISD::UREM, MVT::v32i16,  8 }, // vpmulhuw+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasBWI()) {
    if (const auto *Entry =
            CostTableLookup(AVX512BWConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512ConstCostTable[] = {
    { ISD::SDIV, MVT::v16i32, 15 }, // vpmuldq sequence
    { ISD::SREM, MVT::v16i32, 17 }, // vpmuldq+mul+sub sequence
    { ISD::UDIV, MVT::v16i32, 15 }, // vpmuludq sequence
    { ISD::UREM, MVT::v16i32, 17 }, // vpmuludq+mul+sub sequence
    { ISD::SDIV, MVT::v64i8,  28 }, // 4*ext+4*pmulhw sequence
    { ISD::SREM, MVT::v64i8,  32 }, // 4*ext+4*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v64i8,  28 }, // 4*ext+4*pmulhw sequence
    { ISD::UREM, MVT::v64i8,  32 }, // 4*ext+4*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v32i16, 12 }, // 2*vpmulhw sequence
    { ISD::SREM, MVT::v32i16, 16 }, // 2*vpmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i16, 12 }, // 2*vpmulhuw sequence
    { ISD::UREM, MVT::v32i16, 16 }, // 2*vpmulhuw+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasAVX512()) {
    if (const auto *Entry =
            CostTableLookup(AVX512ConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX2ConstCostTable[] = {
    { ISD::SDIV, MVT::v32i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::SREM, MVT::v32i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::UREM, MVT::v32i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v16i16,  6 }, // vpmulhw sequence
    { ISD::SREM, MVT::v16i16,  8 }, // vpmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v16i16,  6 }, // vpmulhuw sequence
    { ISD::UREM, MVT::v16i16,  8 }, // vpmulhuw+mul+sub sequence
    { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
    { ISD::SREM, MVT::v8i32,  19 }, // vpmuldq+mul+sub sequence
    { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
    { ISD::UREM, MVT::v8i32,  19 }, // vpmuludq+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info ==
           TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasAVX2()) {
    if (const auto *Entry = CostTableLookup(AVX2ConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2ConstCostTable[] = {
    { ISD::SDIV, MVT::v32i8,  28+2 }, // 4*ext+4*pmulhw sequence + split.
    { ISD::SREM, MVT::v32i8,  32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
    { ISD::SDIV, MVT::v16i8,    14 }, // 2*ext+2*pmulhw sequence
    { ISD::SREM, MVT::v16i8,    16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i8,  28+2 }, // 4*ext+4*pmulhw sequence + split.
    { ISD::UREM, MVT::v32i8,  32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
    { ISD::UDIV, MVT::v16i8,    14 }, // 2*ext+2*pmulhw sequence
    { ISD::UREM, MVT::v16i8,    16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v16i16, 12+2 }, // 2*pmulhw sequence + split.
    { ISD::SREM, MVT::v16i16, 16+2 }, // 2*pmulhw+mul+sub sequence + split.
    { ISD::SDIV, MVT::v8i16,     6 }, // pmulhw sequence
    { ISD::SREM, MVT::v8i16,     8 }, // pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v16i16, 12+2 }, // 2*pmulhuw sequence + split.
    { ISD::UREM, MVT::v16i16, 16+2 }, // 2*pmulhuw+mul+sub sequence + split.
    { ISD::UDIV, MVT::v8i16,     6 }, // pmulhuw sequence
    { ISD::UREM, MVT::v8i16,     8 }, // pmulhuw+mul+sub sequence
    { ISD::SDIV, MVT::v8i32,  38+2 }, // 2*pmuludq sequence + split.
    { ISD::SREM, MVT::v8i32,  48+2 }, // 2*pmuludq+mul+sub sequence + split.
    { ISD::SDIV, MVT::v4i32,    19 }, // pmuludq sequence
    { ISD::SREM, MVT::v4i32,    24 }, // pmuludq+mul+sub sequence
    { ISD::UDIV, MVT::v8i32,  30+2 }, // 2*pmuludq sequence + split.
    { ISD::UREM, MVT::v8i32,  40+2 }, // 2*pmuludq+mul+sub sequence + split.
    { ISD::UDIV, MVT::v4i32,    15 }, // pmuludq sequence
    { ISD::UREM, MVT::v4i32,    20 }, // pmuludq+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasSSE2()) {
    // pmuldq sequence.
    if (ISD == ISD::SDIV && LT.second == MVT::v8i32 && ST->hasAVX())
      return LT.first * 32;
    if (ISD == ISD::SREM && LT.second == MVT::v8i32 && ST->hasAVX())
      return LT.first * 38;
    if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 15;
    if (ISD == ISD::SREM && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 20;

    if (const auto *Entry = CostTableLookup(SSE2ConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512BWShiftCostTable[] = {
    { ISD::SHL,   MVT::v16i8,   4 }, // extend/vpsllvw/pack sequence.
    { ISD::SRL,   MVT::v16i8,   4 }, // extend/vpsrlvw/pack sequence.
    { ISD::SRA,   MVT::v16i8,   4 }, // extend/vpsravw/pack sequence.
    { ISD::SHL,   MVT::v32i8,   4 }, // extend/vpsllvw/pack sequence.
    { ISD::SRL,   MVT::v32i8,   4 }, // extend/vpsrlvw/pack sequence.
    { ISD::SRA,   MVT::v32i8,   6 }, // extend/vpsravw/pack sequence.
    { ISD::SHL,   MVT::v64i8,   6 }, // extend/vpsllvw/pack sequence.
    { ISD::SRL,   MVT::v64i8,   7 }, // extend/vpsrlvw/pack sequence.
    { ISD::SRA,   MVT::v64i8,  15 }, // extend/vpsravw/pack sequence.

    { ISD::SHL,   MVT::v8i16,   1 }, // vpsllvw
    { ISD::SRL,   MVT::v8i16,   1 }, // vpsrlvw
    { ISD::SRA,   MVT::v8i16,   1 }, // vpsravw
    { ISD::SHL,   MVT::v16i16,  1 }, // vpsllvw
    { ISD::SRL,   MVT::v16i16,  1 }, // vpsrlvw
    { ISD::SRA,   MVT::v16i16,  1 }, // vpsravw
    { ISD::SHL,   MVT::v32i16,  1 }, // vpsllvw
    { ISD::SRL,   MVT::v32i16,  1 }, // vpsrlvw
    { ISD::SRA,   MVT::v32i16,  1 }, // vpsravw
  };

  if (ST->hasBWI())
    if (const auto *Entry =
            CostTableLookup(AVX512BWShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX2UniformCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL, MVT::v16i16, 1 }, // psllw.
    { ISD::SRL, MVT::v16i16, 1 }, // psrlw.
    { ISD::SRA, MVT::v16i16, 1 }, // psraw.
    { ISD::SHL, MVT::v32i16, 2 }, // 2*psllw.
    { ISD::SRL, MVT::v32i16, 2 }, // 2*psrlw.
    { ISD::SRA, MVT::v32i16, 2 }, // 2*psraw.

    { ISD::SHL, MVT::v8i32,  1 }, // pslld
    { ISD::SRL, MVT::v8i32,  1 }, // psrld
    { ISD::SRA, MVT::v8i32,  1 }, // psrad
    { ISD::SHL, MVT::v4i64,  1 }, // psllq
    { ISD::SRL, MVT::v4i64,  1 }, // psrlq
  };

  if (ST->hasAVX2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(AVX2UniformCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL, MVT::v8i16, 1 }, // psllw.
    { ISD::SHL, MVT::v4i32, 1 }, // pslld
    { ISD::SHL, MVT::v2i64, 1 }, // psllq.

    { ISD::SRL, MVT::v8i16, 1 }, // psrlw.
    { ISD::SRL, MVT::v4i32, 1 }, // psrld.
    { ISD::SRL, MVT::v2i64, 1 }, // psrlq.

    { ISD::SRA, MVT::v8i16, 1 }, // psraw.
    { ISD::SRA, MVT::v4i32, 1 }, // psrad.
  };

  if (ST->hasSSE2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(SSE2UniformCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512DQCostTable[] = {
    { ISD::MUL, MVT::v2i64, 2 }, // pmullq
    { ISD::MUL, MVT::v4i64, 2 }, // pmullq
    { ISD::MUL, MVT::v8i64, 2 }  // pmullq
  };

  // Look for AVX512DQ lowering tricks for custom cases.
  if (ST->hasDQI())
    if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512BWCostTable[] = {
    { ISD::SHL, MVT::v64i8, 11 }, // vpblendvb sequence.
    { ISD::SRL, MVT::v64i8, 11 }, // vpblendvb sequence.
    { ISD::SRA, MVT::v64i8, 24 }, // vpblendvb sequence.
  };

  // Look for AVX512BW lowering tricks for custom cases.
  if (ST->hasBWI())
    if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512CostTable[] = {
    { ISD::SHL,  MVT::v4i32,   1 },
    { ISD::SRL,  MVT::v4i32,   1 },
    { ISD::SRA,  MVT::v4i32,   1 },
    { ISD::SHL,  MVT::v8i32,   1 },
    { ISD::SRL,  MVT::v8i32,   1 },
    { ISD::SRA,  MVT::v8i32,   1 },
    { ISD::SHL,  MVT::v16i32,  1 },
    { ISD::SRL,  MVT::v16i32,  1 },
    { ISD::SRA,  MVT::v16i32,  1 },

    { ISD::SHL,  MVT::v2i64,   1 },
    { ISD::SRL,  MVT::v2i64,   1 },
    { ISD::SHL,  MVT::v4i64,   1 },
    { ISD::SRL,  MVT::v4i64,   1 },
    { ISD::SHL,  MVT::v8i64,   1 },
    { ISD::SRL,  MVT::v8i64,   1 },

    { ISD::SRA,  MVT::v2i64,   1 },
    { ISD::SRA,  MVT::v4i64,   1 },
    { ISD::SRA,  MVT::v8i64,   1 },

    { ISD::MUL,  MVT::v16i32,  1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,  MVT::v8i32,   1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,  MVT::v4i32,   1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,  MVT::v8i64,   6 }, // 3*pmuludq/3*shift/2*add

    { ISD::FNEG, MVT::v8f64,   1 }, // Skylake from http://www.agner.org/
    { ISD::FADD, MVT::v8f64,   1 }, // Skylake from http://www.agner.org/
    { ISD::FSUB, MVT::v8f64,   1 }, // Skylake from http://www.agner.org/
    { ISD::FMUL, MVT::v8f64,   1 }, // Skylake from http://www.agner.org/
    { ISD::FDIV, MVT::f64,     4 }, // Skylake from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,   4 }, // Skylake from http://www.agner.org/
    { ISD::FDIV, MVT::v4f64,   8 }, // Skylake from http://www.agner.org/
    { ISD::FDIV, MVT::v8f64,  16 }, // Skylake from http://www.agner.org/

    { ISD::FNEG, MVT::v16f32,  1 }, // Skylake from http://www.agner.org/
    { ISD::FADD, MVT::v16f32,  1 }, // Skylake from http://www.agner.org/
    { ISD::FSUB, MVT::v16f32,  1 }, // Skylake from http://www.agner.org/
    { ISD::FMUL, MVT::v16f32,  1 }, // Skylake from http://www.agner.org/
    { ISD::FDIV, MVT::f32,     3 }, // Skylake from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,   3 }, // Skylake from http://www.agner.org/
    { ISD::FDIV, MVT::v8f32,   5 }, // Skylake from http://www.agner.org/
    { ISD::FDIV, MVT::v16f32, 10 }, // Skylake from http://www.agner.org/
  };

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX2ShiftCostTable[] = {
    // Shifts on vXi64/vXi32 on AVX2 are legal even though we declare them
    // as custom in order to detect cases where the shift amount is a scalar.
    { ISD::SHL, MVT::v4i32, 2 }, // vpsllvd (Haswell from agner.org)
    { ISD::SRL, MVT::v4i32, 2 }, // vpsrlvd (Haswell from agner.org)
    { ISD::SRA, MVT::v4i32, 2 }, // vpsravd (Haswell from agner.org)
    { ISD::SHL, MVT::v8i32, 2 }, // vpsllvd (Haswell from agner.org)
    { ISD::SRL, MVT::v8i32, 2 }, // vpsrlvd (Haswell from agner.org)
    { ISD::SRA, MVT::v8i32, 2 }, // vpsravd (Haswell from agner.org)
    { ISD::SHL, MVT::v2i64, 1 }, // vpsllvq (Haswell from agner.org)
    { ISD::SRL, MVT::v2i64, 1 }, // vpsrlvq (Haswell from agner.org)
    { ISD::SHL, MVT::v4i64, 1 }, // vpsllvq (Haswell from agner.org)
    { ISD::SRL, MVT::v4i64, 1 }, // vpsrlvq (Haswell from agner.org)
  };

  if (ST->hasAVX512()) {
    if (ISD == ISD::SHL && LT.second == MVT::v32i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX512, a packed v32i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
                                    Op1Info, Op2Info,
                                    TargetTransformInfo::OP_None,
                                    TargetTransformInfo::OP_None);
  }

  // Look for AVX2 lowering tricks (XOP is always better at v4i32 shifts).
  if (ST->hasAVX2() && !(ST->hasXOP() && LT.second == MVT::v4i32)) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
                                    Op1Info, Op2Info,
                                    TargetTransformInfo::OP_None,
                                    TargetTransformInfo::OP_None);

    if (const auto *Entry = CostTableLookup(AVX2ShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry XOPShiftCostTable[] = {
    // 128bit shifts take 1cy, but right shifts require negation beforehand.
    { ISD::SHL, MVT::v16i8,    1 },
    { ISD::SRL, MVT::v16i8,    2 },
    { ISD::SRA, MVT::v16i8,    2 },
    { ISD::SHL, MVT::v8i16,    1 },
    { ISD::SRL, MVT::v8i16,    2 },
    { ISD::SRA, MVT::v8i16,    2 },
    { ISD::SHL, MVT::v4i32,    1 },
    { ISD::SRL, MVT::v4i32,    2 },
    { ISD::SRA, MVT::v4i32,    2 },
    { ISD::SHL, MVT::v2i64,    1 },
    { ISD::SRL, MVT::v2i64,    2 },
    { ISD::SRA, MVT::v2i64,    2 },
    // 256bit shifts require splitting if AVX2 didn't catch them above.
    { ISD::SHL, MVT::v32i8,  2+2 },
    { ISD::SRL, MVT::v32i8,  4+2 },
    { ISD::SRA, MVT::v32i8,  4+2 },
    { ISD::SHL, MVT::v16i16, 2+2 },
    { ISD::SRL, MVT::v16i16, 4+2 },
    { ISD::SRA, MVT::v16i16, 4+2 },
    { ISD::SHL, MVT::v8i32,  2+2 },
    { ISD::SRL, MVT::v8i32,  4+2 },
    { ISD::SRA, MVT::v8i32,  4+2 },
    { ISD::SHL, MVT::v4i64,  2+2 },
    { ISD::SRL, MVT::v4i64,  4+2 },
    { ISD::SRA, MVT::v4i64,  4+2 },
  };

  // Look for XOP lowering tricks.
  if (ST->hasXOP()) {
    // If the right shift is constant then we'll fold the negation so
    // it's as cheap as a left shift.
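    // e.g. (an illustrative sketch of the XOP lowering): a constant
    // right shift such as x >>s 5 on v8i16 can be emitted as vpshaw with a
    // splatted count of -5, so it is costed below like the SHL case.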
    int ShiftISD = ISD;
    if ((ShiftISD == ISD::SRL || ShiftISD == ISD::SRA) &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      ShiftISD = ISD::SHL;
    if (const auto *Entry =
            CostTableLookup(XOPShiftCostTable, ShiftISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformShiftCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL, MVT::v16i16, 2+2 }, // 2*psllw + split.
    { ISD::SHL, MVT::v8i32,  2+2 }, // 2*pslld + split.
    { ISD::SHL, MVT::v4i64,  2+2 }, // 2*psllq + split.

    { ISD::SRL, MVT::v16i16, 2+2 }, // 2*psrlw + split.
    { ISD::SRL, MVT::v8i32,  2+2 }, // 2*psrld + split.
    { ISD::SRL, MVT::v4i64,  2+2 }, // 2*psrlq + split.

    { ISD::SRA, MVT::v16i16, 2+2 }, // 2*psraw + split.
    { ISD::SRA, MVT::v8i32,  2+2 }, // 2*psrad + split.
    { ISD::SRA, MVT::v2i64,    4 }, // 2*psrad + shuffle.
    { ISD::SRA, MVT::v4i64,  8+2 }, // 2*(2*psrad + shuffle) + split.
  };

  if (ST->hasSSE2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {

    // Handle AVX2 uniform v4i64 ISD::SRA, it's not worth a table.
    if (ISD == ISD::SRA && LT.second == MVT::v4i64 && ST->hasAVX2())
      return LT.first * 4; // 2*psrad + shuffle.

    if (const auto *Entry =
            CostTableLookup(SSE2UniformShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  if (ISD == ISD::SHL &&
      Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
    MVT VT = LT.second;
    // Vector shift left by a non-uniform constant can be lowered into a
    // vector multiply.
    if (((VT == MVT::v8i16 || VT == MVT::v4i32) && ST->hasSSE2()) ||
        ((VT == MVT::v16i16 || VT == MVT::v8i32) && ST->hasAVX()))
      ISD = ISD::MUL;
  }

  static const CostTblEntry AVX2CostTable[] = {
    { ISD::SHL,  MVT::v16i8,   6 }, // vpblendvb sequence.
    { ISD::SHL,  MVT::v32i8,   6 }, // vpblendvb sequence.
    { ISD::SHL,  MVT::v64i8,  12 }, // 2*vpblendvb sequence.
    { ISD::SHL,  MVT::v8i16,   5 }, // extend/vpsrlvd/pack sequence.
    { ISD::SHL,  MVT::v16i16,  7 }, // extend/vpsrlvd/pack sequence.
    { ISD::SHL,  MVT::v32i16, 14 }, // 2*extend/vpsrlvd/pack sequence.

    { ISD::SRL,  MVT::v16i8,   6 }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v32i8,   6 }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v64i8,  12 }, // 2*vpblendvb sequence.
    { ISD::SRL,  MVT::v8i16,   5 }, // extend/vpsrlvd/pack sequence.
    { ISD::SRL,  MVT::v16i16,  7 }, // extend/vpsrlvd/pack sequence.
    { ISD::SRL,  MVT::v32i16, 14 }, // 2*extend/vpsrlvd/pack sequence.

    { ISD::SRA,  MVT::v16i8,  17 }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v32i8,  17 }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v64i8,  34 }, // 2*vpblendvb sequence.
    { ISD::SRA,  MVT::v8i16,   5 }, // extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v16i16,  7 }, // extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v32i16, 14 }, // 2*extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v2i64,   2 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,   2 }, // srl/xor/sub sequence.

    { ISD::SUB,  MVT::v32i8,   1 }, // psubb
    { ISD::ADD,  MVT::v32i8,   1 }, // paddb
    { ISD::SUB,  MVT::v16i16,  1 }, // psubw
    { ISD::ADD,  MVT::v16i16,  1 }, // paddw
    { ISD::SUB,  MVT::v8i32,   1 }, // psubd
    { ISD::ADD,  MVT::v8i32,   1 }, // paddd
    { ISD::SUB,  MVT::v4i64,   1 }, // psubq
    { ISD::ADD,  MVT::v4i64,   1 }, // paddq

    { ISD::MUL,  MVT::v16i16,  1 }, // pmullw
    { ISD::MUL,  MVT::v8i32,   2 }, // pmulld (Haswell from agner.org)
    { ISD::MUL,  MVT::v4i64,   6 }, // 3*pmuludq/3*shift/2*add

    { ISD::FNEG, MVT::v4f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FNEG, MVT::v8f32,   1 }, // Haswell from http://www.agner.org/
    { ISD::FADD, MVT::v4f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FADD, MVT::v8f32,   1 }, // Haswell from http://www.agner.org/
    { ISD::FSUB, MVT::v4f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FSUB, MVT::v8f32,   1 }, // Haswell from http://www.agner.org/
    { ISD::FMUL, MVT::f64,     1 }, // Haswell from http://www.agner.org/
    { ISD::FMUL, MVT::v2f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FMUL, MVT::v4f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FMUL, MVT::v8f32,   1 }, // Haswell from http://www.agner.org/

    { ISD::FDIV, MVT::f32,     7 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,   7 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v8f32,  14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::f64,    14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,  14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v4f64,  28 }, // Haswell from http://www.agner.org/
  };

  // Look for AVX2 lowering tricks for custom cases.
  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL,  MVT::v16i16,  4 },
    { ISD::MUL,  MVT::v8i32,   5 }, // BTVER2 from http://www.agner.org/
    { ISD::MUL,  MVT::v4i64,  12 },

    { ISD::SUB,  MVT::v32i8,   4 },
    { ISD::ADD,  MVT::v32i8,   4 },
    { ISD::SUB,  MVT::v16i16,  4 },
    { ISD::ADD,  MVT::v16i16,  4 },
    { ISD::SUB,  MVT::v8i32,   4 },
    { ISD::ADD,  MVT::v8i32,   4 },
    { ISD::SUB,  MVT::v4i64,   4 },
    { ISD::ADD,  MVT::v4i64,   4 },

    { ISD::SHL,  MVT::v32i8,  22 }, // pblendvb sequence + split.
    { ISD::SHL,  MVT::v8i16,   6 }, // pblendvb sequence.
    { ISD::SHL,  MVT::v16i16, 13 }, // pblendvb sequence + split.
    { ISD::SHL,  MVT::v4i32,   3 }, // pslld/paddd/cvttps2dq/pmulld
    { ISD::SHL,  MVT::v8i32,   9 }, // pslld/paddd/cvttps2dq/pmulld + split
    { ISD::SHL,  MVT::v2i64,   2 }, // Shift each lane + blend.
    { ISD::SHL,  MVT::v4i64,   6 }, // Shift each lane + blend + split.

    { ISD::SRL,  MVT::v32i8,  23 }, // pblendvb sequence + split.
    { ISD::SRL,  MVT::v16i16, 28 }, // pblendvb sequence + split.
    { ISD::SRL,  MVT::v4i32,   6 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v8i32,  14 }, // Shift each lane + blend + split.
    { ISD::SRL,  MVT::v2i64,   2 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v4i64,   6 }, // Shift each lane + blend + split.

    { ISD::SRA,  MVT::v32i8,  44 }, // pblendvb sequence + split.
    { ISD::SRA,  MVT::v16i16, 28 }, // pblendvb sequence + split.
    { ISD::SRA,  MVT::v4i32,   6 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v8i32,  14 }, // Shift each lane + blend + split.
    { ISD::SRA,  MVT::v2i64,   5 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v4i64,  12 }, // Shift each lane + blend + split.

    { ISD::FNEG, MVT::v4f64,   2 }, // BTVER2 from http://www.agner.org/
    { ISD::FNEG, MVT::v8f32,   2 }, // BTVER2 from http://www.agner.org/

    { ISD::FMUL, MVT::f64,     2 }, // BTVER2 from http://www.agner.org/
    { ISD::FMUL, MVT::v2f64,   2 }, // BTVER2 from http://www.agner.org/
    { ISD::FMUL, MVT::v4f64,   4 }, // BTVER2 from http://www.agner.org/

    { ISD::FDIV, MVT::f32,    14 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,  14 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v8f32,  28 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::f64,    22 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,  22 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v4f64,  44 }, // SNB from http://www.agner.org/
  };

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE42CostTable[] = {
    { ISD::FADD, MVT::f64,     1 }, // Nehalem from http://www.agner.org/
    { ISD::FADD, MVT::f32,     1 }, // Nehalem from http://www.agner.org/
    { ISD::FADD, MVT::v2f64,   1 }, // Nehalem from http://www.agner.org/
    { ISD::FADD, MVT::v4f32,   1 }, // Nehalem from http://www.agner.org/

    { ISD::FSUB, MVT::f64,     1 }, // Nehalem from http://www.agner.org/
    { ISD::FSUB, MVT::f32,     1 }, // Nehalem from http://www.agner.org/
    { ISD::FSUB, MVT::v2f64,   1 }, // Nehalem from http://www.agner.org/
    { ISD::FSUB, MVT::v4f32,   1 }, // Nehalem from http://www.agner.org/

    { ISD::FMUL, MVT::f64,     1 }, // Nehalem from http://www.agner.org/
    { ISD::FMUL, MVT::f32,     1 }, // Nehalem from http://www.agner.org/
    { ISD::FMUL, MVT::v2f64,   1 }, // Nehalem from http://www.agner.org/
    { ISD::FMUL, MVT::v4f32,   1 }, // Nehalem from http://www.agner.org/

    { ISD::FDIV, MVT::f32,    14 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,  14 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::f64,    22 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,  22 }, // Nehalem from http://www.agner.org/

    { ISD::MUL,  MVT::v2i64,   6 }  // 3*pmuludq/3*shift/2*add
  };

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE41CostTable[] = {
    { ISD::SHL,  MVT::v16i8,  10 }, // pblendvb sequence.
    { ISD::SHL,  MVT::v8i16,  11 }, // pblendvb sequence.
    { ISD::SHL,  MVT::v4i32,   4 }, // pslld/paddd/cvttps2dq/pmulld

    { ISD::SRL,  MVT::v16i8,  11 }, // pblendvb sequence.
    { ISD::SRL,  MVT::v8i16,  13 }, // pblendvb sequence.
    { ISD::SRL,  MVT::v4i32,  16 }, // Shift each lane + blend.

    { ISD::SRA,  MVT::v16i8,  21 }, // pblendvb sequence.
    { ISD::SRA,  MVT::v8i16,  13 }, // pblendvb sequence.

    { ISD::MUL,  MVT::v4i32,   2 }  // pmulld (Nehalem from agner.org)
  };

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    { ISD::SHL,  MVT::v16i8,  13 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v8i16,  25 }, // cmpgtw sequence.
    { ISD::SHL,  MVT::v4i32,  16 }, // pslld/paddd/cvttps2dq/pmuludq.
    { ISD::SHL,  MVT::v2i64,   4 }, // splat+shuffle sequence.

    { ISD::SRL,  MVT::v16i8,  14 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v8i16,  16 }, // cmpgtw sequence.
    { ISD::SRL,  MVT::v4i32,  12 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v2i64,   4 }, // splat+shuffle sequence.

    { ISD::SRA,  MVT::v16i8,  27 }, // unpacked cmpgtb sequence.
    { ISD::SRA,  MVT::v8i16,  16 }, // cmpgtw sequence.
    { ISD::SRA,  MVT::v4i32,  12 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v2i64,   8 }, // srl/xor/sub splat+shuffle sequence.

    { ISD::MUL,  MVT::v8i16,   1 }, // pmullw
    { ISD::MUL,  MVT::v4i32,   6 }, // 3*pmuludq/4*shuffle
    { ISD::MUL,  MVT::v2i64,   8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FDIV, MVT::f32,    23 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,  39 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::f64,    38 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,  69 }, // Pentium IV from http://www.agner.org/

    { ISD::FNEG, MVT::f32,     1 }, // Pentium IV from http://www.agner.org/
    { ISD::FNEG, MVT::f64,     1 }, // Pentium IV from http://www.agner.org/
    { ISD::FNEG, MVT::v4f32,   1 }, // Pentium IV from http://www.agner.org/
    { ISD::FNEG, MVT::v2f64,   1 }, // Pentium IV from http://www.agner.org/

    { ISD::FADD, MVT::f32,     2 }, // Pentium IV from http://www.agner.org/
    { ISD::FADD, MVT::f64,     2 }, // Pentium IV from http://www.agner.org/

    { ISD::FSUB, MVT::f32,     2 }, // Pentium IV from http://www.agner.org/
    { ISD::FSUB, MVT::f64,     2 }, // Pentium IV from http://www.agner.org/
  };

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE1CostTable[] = {
    { ISD::FDIV, MVT::f32,    17 }, // Pentium III from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,  34 }, // Pentium III from http://www.agner.org/

    { ISD::FNEG, MVT::f32,     2 }, // Pentium III from http://www.agner.org/
    { ISD::FNEG, MVT::v4f32,   2 }, // Pentium III from http://www.agner.org/

    { ISD::FADD, MVT::f32,     1 }, // Pentium III from http://www.agner.org/
    { ISD::FADD, MVT::v4f32,   2 }, // Pentium III from http://www.agner.org/

    { ISD::FSUB, MVT::f32,     1 }, // Pentium III from http://www.agner.org/
    { ISD::FSUB, MVT::v4f32,   2 }, // Pentium III from http://www.agner.org/
  };

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry X64CostTbl[] = { // 64-bit targets
    { ISD::ADD,  MVT::i64,     1 }, // Core (Merom) from http://www.agner.org/
    { ISD::SUB,  MVT::i64,     1 }, // Core (Merom) from http://www.agner.org/
    { ISD::MUL,  MVT::i64,     2 }, // Nehalem from http://www.agner.org/
  };

  if (ST->is64Bit())
    if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
    { ISD::ADD,  MVT::i8,      1 }, // Pentium III from http://www.agner.org/
    { ISD::ADD,  MVT::i16,     1 }, // Pentium III from http://www.agner.org/
    { ISD::ADD,  MVT::i32,     1 }, // Pentium III from http://www.agner.org/

    { ISD::SUB,  MVT::i8,      1 }, // Pentium III from http://www.agner.org/
    { ISD::SUB,  MVT::i16,     1 }, // Pentium III from http://www.agner.org/
    { ISD::SUB,  MVT::i32,     1 }, // Pentium III from http://www.agner.org/
  };

  if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, LT.second))
    return LT.first * Entry->Cost;

  // It is not a good idea to vectorize division. We have to scalarize it and
  // in the process we will often end up spilling regular registers. The
  // overhead of division is going to dominate most kernels anyway, so try
  // hard to prevent vectorization of division - it is generally a bad idea.
  // Assume somewhat arbitrarily that we have to be able to hide "20 cycles"
  // for each lane.
  if (LT.second.isVector() && (ISD == ISD::SDIV || ISD == ISD::SREM ||
                               ISD == ISD::UDIV || ISD == ISD::UREM)) {
    InstructionCost ScalarCost = getArithmeticInstrCost(
        Opcode, Ty->getScalarType(), CostKind, Op1Info, Op2Info,
        TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
    return 20 * LT.first * LT.second.getVectorNumElements() * ScalarCost;
  }

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info);
}

InstructionCost X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
                                           VectorType *BaseTp,
                                           ArrayRef<int> Mask, int Index,
                                           VectorType *SubTp) {
  // 64-bit packed float vectors (v2f32) are widened to type v4f32.
  // 64-bit packed integer vectors (v2i32) are widened to type v4i32.
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, BaseTp);

  Kind = improveShuffleKindFromMask(Kind, Mask);
  // Treat Transpose as 2-op shuffles - there's no difference in lowering.
  if (Kind == TTI::SK_Transpose)
    Kind = TTI::SK_PermuteTwoSrc;

  // For Broadcasts we are splatting the first element from the first input
  // register, so only need to reference that input and all the output
  // registers are the same.
  if (Kind == TTI::SK_Broadcast)
    LT.first = 1;

  // Subvector extractions are free if they start at the beginning of a
  // vector and cheap if the subvectors are aligned.
  if (Kind == TTI::SK_ExtractSubvector && LT.second.isVector()) {
    int NumElts = LT.second.getVectorNumElements();
    if ((Index % NumElts) == 0)
      return 0;
    std::pair<InstructionCost, MVT> SubLT =
        TLI->getTypeLegalizationCost(DL, SubTp);
    if (SubLT.second.isVector()) {
      int NumSubElts = SubLT.second.getVectorNumElements();
      if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
        return SubLT.first;
      // Handle some cases for widening legalization. For now we only handle
      // cases where the original subvector was naturally aligned and evenly
      // fit in its legalized subvector type.
      // FIXME: Remove some of the alignment restrictions.
      // FIXME: We can use permq for 64-bit or larger extracts from 256-bit
      // vectors.
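      // Worked example (illustrative): extracting <2 x i32> at index 2 from
      // <8 x i32>. Legalization widens the subvector to <4 x i32>, so below
      // we cost an aligned <4 x i32> extract at index 0 plus a single
      // pshufd to move elements 2-3 into place.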
      int OrigSubElts = cast<FixedVectorType>(SubTp)->getNumElements();
      if (NumSubElts > OrigSubElts && (Index % OrigSubElts) == 0 &&
          (NumSubElts % OrigSubElts) == 0 &&
          LT.second.getVectorElementType() ==
              SubLT.second.getVectorElementType() &&
          LT.second.getVectorElementType().getSizeInBits() ==
              BaseTp->getElementType()->getPrimitiveSizeInBits()) {
        assert(NumElts >= NumSubElts && NumElts > OrigSubElts &&
               "Unexpected number of elements!");
        auto *VecTy = FixedVectorType::get(BaseTp->getElementType(),
                                           LT.second.getVectorNumElements());
        auto *SubTy = FixedVectorType::get(BaseTp->getElementType(),
                                           SubLT.second.getVectorNumElements());
        int ExtractIndex = alignDown((Index % NumElts), NumSubElts);
        InstructionCost ExtractCost = getShuffleCost(
            TTI::SK_ExtractSubvector, VecTy, None, ExtractIndex, SubTy);

        // If the original size is 32-bits or more, we can use pshufd. Otherwise
        // if we have SSSE3 we can use pshufb.
        if (SubTp->getPrimitiveSizeInBits() >= 32 || ST->hasSSSE3())
          return ExtractCost + 1; // pshufd or pshufb

        assert(SubTp->getPrimitiveSizeInBits() == 16 &&
               "Unexpected vector size");

        return ExtractCost + 2; // worst case pshufhw + pshufd
      }
    }
  }

  // Subvector insertions are cheap if the subvectors are aligned.
  // Note that in general, the insertion starting at the beginning of a vector
  // isn't free, because we need to preserve the rest of the wide vector.
  if (Kind == TTI::SK_InsertSubvector && LT.second.isVector()) {
    int NumElts = LT.second.getVectorNumElements();
    std::pair<InstructionCost, MVT> SubLT =
        TLI->getTypeLegalizationCost(DL, SubTp);
    if (SubLT.second.isVector()) {
      int NumSubElts = SubLT.second.getVectorNumElements();
      if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
        return SubLT.first;
    }

    // If the insertion isn't aligned, treat it like a 2-op shuffle.
    Kind = TTI::SK_PermuteTwoSrc;
  }

  // Handle some common (illegal) sub-vector types as they are often very cheap
  // to shuffle even on targets without PSHUFB.
  EVT VT = TLI->getValueType(DL, BaseTp);
  if (VT.isSimple() && VT.isVector() && VT.getSizeInBits() < 128 &&
      !ST->hasSSSE3()) {
    static const CostTblEntry SSE2SubVectorShuffleTbl[] = {
        {TTI::SK_Broadcast, MVT::v4i16, 1}, // pshuflw
        {TTI::SK_Broadcast, MVT::v2i16, 1}, // pshuflw
        {TTI::SK_Broadcast, MVT::v8i8, 2},  // punpck/pshuflw
        {TTI::SK_Broadcast, MVT::v4i8, 2},  // punpck/pshuflw
        {TTI::SK_Broadcast, MVT::v2i8, 1},  // punpck

        {TTI::SK_Reverse, MVT::v4i16, 1}, // pshuflw
        {TTI::SK_Reverse, MVT::v2i16, 1}, // pshuflw
        {TTI::SK_Reverse, MVT::v4i8, 3},  // punpck/pshuflw/packus
        {TTI::SK_Reverse, MVT::v2i8, 1},  // punpck

        {TTI::SK_PermuteTwoSrc, MVT::v4i16, 2}, // punpck/pshuflw
        {TTI::SK_PermuteTwoSrc, MVT::v2i16, 2}, // punpck/pshuflw
        {TTI::SK_PermuteTwoSrc, MVT::v8i8, 7},  // punpck/pshuflw
        {TTI::SK_PermuteTwoSrc, MVT::v4i8, 4},  // punpck/pshuflw
        {TTI::SK_PermuteTwoSrc, MVT::v2i8, 2},  // punpck

        {TTI::SK_PermuteSingleSrc, MVT::v4i16, 1}, // pshuflw
        {TTI::SK_PermuteSingleSrc, MVT::v2i16, 1}, // pshuflw
        {TTI::SK_PermuteSingleSrc, MVT::v8i8, 5},  // punpck/pshuflw
        {TTI::SK_PermuteSingleSrc, MVT::v4i8, 3},  // punpck/pshuflw
        {TTI::SK_PermuteSingleSrc, MVT::v2i8, 1},  // punpck
    };

    if (ST->hasSSE2())
      if (const auto *Entry =
              CostTableLookup(SSE2SubVectorShuffleTbl, Kind, VT.getSimpleVT()))
        return Entry->Cost;
  }

  // We are going to permute multiple sources and the result will be in
  // multiple destinations. We provide an accurate cost only for splits where
  // the element type remains the same.
  if (Kind == TTI::SK_PermuteSingleSrc && LT.first != 1) {
    MVT LegalVT = LT.second;
    if (LegalVT.isVector() &&
        LegalVT.getVectorElementType().getSizeInBits() ==
            BaseTp->getElementType()->getPrimitiveSizeInBits() &&
        LegalVT.getVectorNumElements() <
            cast<FixedVectorType>(BaseTp)->getNumElements()) {

      unsigned VecTySize = DL.getTypeStoreSize(BaseTp);
      unsigned LegalVTSize = LegalVT.getStoreSize();
      // Number of source vectors after legalization:
      unsigned NumOfSrcs = (VecTySize + LegalVTSize - 1) / LegalVTSize;
      // Number of destination vectors after legalization:
      InstructionCost NumOfDests = LT.first;

      auto *SingleOpTy = FixedVectorType::get(BaseTp->getElementType(),
                                              LegalVT.getVectorNumElements());

      InstructionCost NumOfShuffles = (NumOfSrcs - 1) * NumOfDests;
      return NumOfShuffles * getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy,
                                            None, 0, nullptr);
    }

    return BaseT::getShuffleCost(Kind, BaseTp, Mask, Index, SubTp);
  }

  // For 2-input shuffles, we must account for splitting the 2 inputs into
  // many.
  if (Kind == TTI::SK_PermuteTwoSrc && LT.first != 1) {
    // We assume that source and destination have the same vector type.
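    // Worked example (a sketch): a two-source shuffle of <32 x i8> on a
    // 128-bit SSE target legalizes to LT.first == 2, so the formula below
    // charges 2 destinations * (2 * 2 - 1) = 6 single-register two-source
    // shuffles, each costed from the per-type tables that follow.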
    InstructionCost NumOfDests = LT.first;
    InstructionCost NumOfShufflesPerDest = LT.first * 2 - 1;
    LT.first = NumOfDests * NumOfShufflesPerDest;
  }

  static const CostTblEntry AVX512FP16ShuffleTbl[] = {
      {TTI::SK_Broadcast, MVT::v32f16, 1}, // vpbroadcastw
      {TTI::SK_Broadcast, MVT::v16f16, 1}, // vpbroadcastw
      {TTI::SK_Broadcast, MVT::v8f16, 1},  // vpbroadcastw

      {TTI::SK_Reverse, MVT::v32f16, 2}, // vpermw
      {TTI::SK_Reverse, MVT::v16f16, 2}, // vpermw
      {TTI::SK_Reverse, MVT::v8f16, 1},  // vpshufb

      {TTI::SK_PermuteSingleSrc, MVT::v32f16, 2}, // vpermw
      {TTI::SK_PermuteSingleSrc, MVT::v16f16, 2}, // vpermw
      {TTI::SK_PermuteSingleSrc, MVT::v8f16, 1},  // vpshufb

      {TTI::SK_PermuteTwoSrc, MVT::v32f16, 2}, // vpermt2w
      {TTI::SK_PermuteTwoSrc, MVT::v16f16, 2}, // vpermt2w
      {TTI::SK_PermuteTwoSrc, MVT::v8f16, 2}   // vpermt2w
  };

  if (!ST->useSoftFloat() && ST->hasFP16())
    if (const auto *Entry =
            CostTableLookup(AVX512FP16ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512VBMIShuffleTbl[] = {
      {TTI::SK_Reverse, MVT::v64i8, 1}, // vpermb
      {TTI::SK_Reverse, MVT::v32i8, 1}, // vpermb

      {TTI::SK_PermuteSingleSrc, MVT::v64i8, 1}, // vpermb
      {TTI::SK_PermuteSingleSrc, MVT::v32i8, 1}, // vpermb

      {TTI::SK_PermuteTwoSrc, MVT::v64i8, 2}, // vpermt2b
      {TTI::SK_PermuteTwoSrc, MVT::v32i8, 2}, // vpermt2b
      {TTI::SK_PermuteTwoSrc, MVT::v16i8, 2}  // vpermt2b
  };

  if (ST->hasVBMI())
    if (const auto *Entry =
            CostTableLookup(AVX512VBMIShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512BWShuffleTbl[] = {
      {TTI::SK_Broadcast, MVT::v32i16, 1}, // vpbroadcastw
      {TTI::SK_Broadcast, MVT::v64i8, 1},  // vpbroadcastb

      {TTI::SK_Reverse, MVT::v32i16, 2}, // vpermw
      {TTI::SK_Reverse, MVT::v16i16, 2}, // vpermw
      {TTI::SK_Reverse, MVT::v64i8, 2},  // pshufb + vshufi64x2

      {TTI::SK_PermuteSingleSrc, MVT::v32i16, 2}, // vpermw
      {TTI::SK_PermuteSingleSrc, MVT::v16i16, 2}, // vpermw
      {TTI::SK_PermuteSingleSrc, MVT::v64i8, 8},  // extend to v32i16

      {TTI::SK_PermuteTwoSrc, MVT::v32i16, 2}, // vpermt2w
      {TTI::SK_PermuteTwoSrc, MVT::v16i16, 2}, // vpermt2w
      {TTI::SK_PermuteTwoSrc, MVT::v8i16, 2},  // vpermt2w
      {TTI::SK_PermuteTwoSrc, MVT::v64i8, 19}, // 6 * v32i8 + 1

      {TTI::SK_Select, MVT::v32i16, 1}, // vblendmw
      {TTI::SK_Select, MVT::v64i8, 1},  // vblendmb
  };

  if (ST->hasBWI())
    if (const auto *Entry =
            CostTableLookup(AVX512BWShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512ShuffleTbl[] = {
      {TTI::SK_Broadcast, MVT::v8f64, 1},  // vbroadcastpd
      {TTI::SK_Broadcast, MVT::v16f32, 1}, // vbroadcastps
      {TTI::SK_Broadcast, MVT::v8i64, 1},  // vpbroadcastq
      {TTI::SK_Broadcast, MVT::v16i32, 1}, // vpbroadcastd
      {TTI::SK_Broadcast, MVT::v32i16, 1}, // vpbroadcastw
      {TTI::SK_Broadcast, MVT::v64i8, 1},  // vpbroadcastb

      {TTI::SK_Reverse, MVT::v8f64, 1},  // vpermpd
      {TTI::SK_Reverse, MVT::v16f32, 1}, // vpermps
      {TTI::SK_Reverse, MVT::v8i64, 1},  // vpermq
      {TTI::SK_Reverse, MVT::v16i32, 1}, // vpermd
      {TTI::SK_Reverse, MVT::v32i16, 7}, // per mca
      {TTI::SK_Reverse, MVT::v64i8, 7},  // per mca

      {TTI::SK_PermuteSingleSrc, MVT::v8f64, 1}, // vpermpd
      {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1},  // vpermpd
      {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1},  // vpermpd
      {TTI::SK_PermuteSingleSrc, MVT::v16f32, 1}, // vpermps
      {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1},  // vpermps
      {TTI::SK_PermuteSingleSrc, MVT::v4f32, 1},  // vpermps
      {TTI::SK_PermuteSingleSrc, MVT::v8i64, 1},  // vpermq
      {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1},  // vpermq
      {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1},  // vpermq
      {TTI::SK_PermuteSingleSrc, MVT::v16i32, 1}, // vpermd
      {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1},  // vpermd
      {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1},  // vpermd
      {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1},  // pshufb

      {TTI::SK_PermuteTwoSrc, MVT::v8f64, 1},  // vpermt2pd
      {TTI::SK_PermuteTwoSrc, MVT::v16f32, 1}, // vpermt2ps
      {TTI::SK_PermuteTwoSrc, MVT::v8i64, 1},  // vpermt2q
      {TTI::SK_PermuteTwoSrc, MVT::v16i32, 1}, // vpermt2d
      {TTI::SK_PermuteTwoSrc, MVT::v4f64, 1},  // vpermt2pd
      {TTI::SK_PermuteTwoSrc, MVT::v8f32, 1},  // vpermt2ps
      {TTI::SK_PermuteTwoSrc, MVT::v4i64, 1},  // vpermt2q
      {TTI::SK_PermuteTwoSrc, MVT::v8i32, 1},  // vpermt2d
      {TTI::SK_PermuteTwoSrc, MVT::v2f64, 1},  // vpermt2pd
      {TTI::SK_PermuteTwoSrc, MVT::v4f32, 1},  // vpermt2ps
      {TTI::SK_PermuteTwoSrc, MVT::v2i64, 1},  // vpermt2q
      {TTI::SK_PermuteTwoSrc, MVT::v4i32, 1},  // vpermt2d

      // FIXME: This just applies the type legalization cost rules above
      // assuming these completely split.
      {TTI::SK_PermuteSingleSrc, MVT::v32i16, 14},
      {TTI::SK_PermuteSingleSrc, MVT::v64i8, 14},
      {TTI::SK_PermuteTwoSrc, MVT::v32i16, 42},
      {TTI::SK_PermuteTwoSrc, MVT::v64i8, 42},

      {TTI::SK_Select, MVT::v32i16, 1}, // vpternlogq
      {TTI::SK_Select, MVT::v64i8, 1},  // vpternlogq
      {TTI::SK_Select, MVT::v8f64, 1},  // vblendmpd
      {TTI::SK_Select, MVT::v16f32, 1}, // vblendmps
      {TTI::SK_Select, MVT::v8i64, 1},  // vblendmq
      {TTI::SK_Select, MVT::v16i32, 1}, // vblendmd
  };

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX2ShuffleTbl[] = {
      {TTI::SK_Broadcast, MVT::v4f64, 1},  // vbroadcastpd
      {TTI::SK_Broadcast, MVT::v8f32, 1},  // vbroadcastps
      {TTI::SK_Broadcast, MVT::v4i64, 1},  // vpbroadcastq
      {TTI::SK_Broadcast, MVT::v8i32, 1},  // vpbroadcastd
      {TTI::SK_Broadcast, MVT::v16i16, 1}, // vpbroadcastw
      {TTI::SK_Broadcast, MVT::v32i8, 1},  // vpbroadcastb

      {TTI::SK_Reverse, MVT::v4f64, 1},  // vpermpd
      {TTI::SK_Reverse, MVT::v8f32, 1},  // vpermps
      {TTI::SK_Reverse, MVT::v4i64, 1},  // vpermq
      {TTI::SK_Reverse, MVT::v8i32, 1},  // vpermd
      {TTI::SK_Reverse, MVT::v16i16, 2}, // vperm2i128 + pshufb
      {TTI::SK_Reverse, MVT::v32i8, 2},  // vperm2i128 + pshufb

      {TTI::SK_Select, MVT::v16i16, 1}, // vpblendvb
      {TTI::SK_Select, MVT::v32i8, 1},  // vpblendvb

      {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1},  // vpermpd
      {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1},  // vpermps
      {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1},  // vpermq
      {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1},  // vpermd
      {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vperm2i128 + 2*vpshufb
                                                  // + vpblendvb
      {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4},  // vperm2i128 + 2*vpshufb
                                                  // + vpblendvb

      {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3}, // 2*vpermpd + vblendpd
      {TTI::SK_PermuteTwoSrc, MVT::v8f32,
3}, // 2*vpermps + vblendps 1393 {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3}, // 2*vpermq + vpblendd 1394 {TTI::SK_PermuteTwoSrc, MVT::v8i32, 3}, // 2*vpermd + vpblendd 1395 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 7}, // 2*vperm2i128 + 4*vpshufb 1396 // + vpblendvb 1397 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 7}, // 2*vperm2i128 + 4*vpshufb 1398 // + vpblendvb 1399 }; 1400 1401 if (ST->hasAVX2()) 1402 if (const auto *Entry = CostTableLookup(AVX2ShuffleTbl, Kind, LT.second)) 1403 return LT.first * Entry->Cost; 1404 1405 static const CostTblEntry XOPShuffleTbl[] = { 1406 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2}, // vperm2f128 + vpermil2pd 1407 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 2}, // vperm2f128 + vpermil2ps 1408 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2}, // vperm2f128 + vpermil2pd 1409 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 2}, // vperm2f128 + vpermil2ps 1410 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vextractf128 + 2*vpperm 1411 // + vinsertf128 1412 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4}, // vextractf128 + 2*vpperm 1413 // + vinsertf128 1414 1415 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 9}, // 2*vextractf128 + 6*vpperm 1416 // + vinsertf128 1417 {TTI::SK_PermuteTwoSrc, MVT::v8i16, 1}, // vpperm 1418 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 9}, // 2*vextractf128 + 6*vpperm 1419 // + vinsertf128 1420 {TTI::SK_PermuteTwoSrc, MVT::v16i8, 1}, // vpperm 1421 }; 1422 1423 if (ST->hasXOP()) 1424 if (const auto *Entry = CostTableLookup(XOPShuffleTbl, Kind, LT.second)) 1425 return LT.first * Entry->Cost; 1426 1427 static const CostTblEntry AVX1ShuffleTbl[] = { 1428 {TTI::SK_Broadcast, MVT::v4f64, 2}, // vperm2f128 + vpermilpd 1429 {TTI::SK_Broadcast, MVT::v8f32, 2}, // vperm2f128 + vpermilps 1430 {TTI::SK_Broadcast, MVT::v4i64, 2}, // vperm2f128 + vpermilpd 1431 {TTI::SK_Broadcast, MVT::v8i32, 2}, // vperm2f128 + vpermilps 1432 {TTI::SK_Broadcast, MVT::v16i16, 3}, // vpshuflw + vpshufd + vinsertf128 1433 {TTI::SK_Broadcast, MVT::v32i8, 2}, // vpshufb + vinsertf128 1434 1435 {TTI::SK_Reverse, MVT::v4f64, 2}, // vperm2f128 + vpermilpd 1436 {TTI::SK_Reverse, MVT::v8f32, 2}, // vperm2f128 + vpermilps 1437 {TTI::SK_Reverse, MVT::v4i64, 2}, // vperm2f128 + vpermilpd 1438 {TTI::SK_Reverse, MVT::v8i32, 2}, // vperm2f128 + vpermilps 1439 {TTI::SK_Reverse, MVT::v16i16, 4}, // vextractf128 + 2*pshufb 1440 // + vinsertf128 1441 {TTI::SK_Reverse, MVT::v32i8, 4}, // vextractf128 + 2*pshufb 1442 // + vinsertf128 1443 1444 {TTI::SK_Select, MVT::v4i64, 1}, // vblendpd 1445 {TTI::SK_Select, MVT::v4f64, 1}, // vblendpd 1446 {TTI::SK_Select, MVT::v8i32, 1}, // vblendps 1447 {TTI::SK_Select, MVT::v8f32, 1}, // vblendps 1448 {TTI::SK_Select, MVT::v16i16, 3}, // vpand + vpandn + vpor 1449 {TTI::SK_Select, MVT::v32i8, 3}, // vpand + vpandn + vpor 1450 1451 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2}, // vperm2f128 + vshufpd 1452 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2}, // vperm2f128 + vshufpd 1453 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 4}, // 2*vperm2f128 + 2*vshufps 1454 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 4}, // 2*vperm2f128 + 2*vshufps 1455 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 8}, // vextractf128 + 4*pshufb 1456 // + 2*por + vinsertf128 1457 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 8}, // vextractf128 + 4*pshufb 1458 // + 2*por + vinsertf128 1459 1460 {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3}, // 2*vperm2f128 + vshufpd 1461 {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3}, // 2*vperm2f128 + vshufpd 1462 {TTI::SK_PermuteTwoSrc, MVT::v8f32, 4}, // 2*vperm2f128 + 2*vshufps 1463 {TTI::SK_PermuteTwoSrc, MVT::v8i32, 4}, // 2*vperm2f128 + 
2*vshufps 1464 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 15}, // 2*vextractf128 + 8*pshufb 1465 // + 4*por + vinsertf128 1466 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 15}, // 2*vextractf128 + 8*pshufb 1467 // + 4*por + vinsertf128 1468 }; 1469 1470 if (ST->hasAVX()) 1471 if (const auto *Entry = CostTableLookup(AVX1ShuffleTbl, Kind, LT.second)) 1472 return LT.first * Entry->Cost; 1473 1474 static const CostTblEntry SSE41ShuffleTbl[] = { 1475 {TTI::SK_Select, MVT::v2i64, 1}, // pblendw 1476 {TTI::SK_Select, MVT::v2f64, 1}, // movsd 1477 {TTI::SK_Select, MVT::v4i32, 1}, // pblendw 1478 {TTI::SK_Select, MVT::v4f32, 1}, // blendps 1479 {TTI::SK_Select, MVT::v8i16, 1}, // pblendw 1480 {TTI::SK_Select, MVT::v16i8, 1} // pblendvb 1481 }; 1482 1483 if (ST->hasSSE41()) 1484 if (const auto *Entry = CostTableLookup(SSE41ShuffleTbl, Kind, LT.second)) 1485 return LT.first * Entry->Cost; 1486 1487 static const CostTblEntry SSSE3ShuffleTbl[] = { 1488 {TTI::SK_Broadcast, MVT::v8i16, 1}, // pshufb 1489 {TTI::SK_Broadcast, MVT::v16i8, 1}, // pshufb 1490 1491 {TTI::SK_Reverse, MVT::v8i16, 1}, // pshufb 1492 {TTI::SK_Reverse, MVT::v16i8, 1}, // pshufb 1493 1494 {TTI::SK_Select, MVT::v8i16, 3}, // 2*pshufb + por 1495 {TTI::SK_Select, MVT::v16i8, 3}, // 2*pshufb + por 1496 1497 {TTI::SK_PermuteSingleSrc, MVT::v8i16, 1}, // pshufb 1498 {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1}, // pshufb 1499 1500 {TTI::SK_PermuteTwoSrc, MVT::v8i16, 3}, // 2*pshufb + por 1501 {TTI::SK_PermuteTwoSrc, MVT::v16i8, 3}, // 2*pshufb + por 1502 }; 1503 1504 if (ST->hasSSSE3()) 1505 if (const auto *Entry = CostTableLookup(SSSE3ShuffleTbl, Kind, LT.second)) 1506 return LT.first * Entry->Cost; 1507 1508 static const CostTblEntry SSE2ShuffleTbl[] = { 1509 {TTI::SK_Broadcast, MVT::v2f64, 1}, // shufpd 1510 {TTI::SK_Broadcast, MVT::v2i64, 1}, // pshufd 1511 {TTI::SK_Broadcast, MVT::v4i32, 1}, // pshufd 1512 {TTI::SK_Broadcast, MVT::v8i16, 2}, // pshuflw + pshufd 1513 {TTI::SK_Broadcast, MVT::v16i8, 3}, // unpck + pshuflw + pshufd 1514 1515 {TTI::SK_Reverse, MVT::v2f64, 1}, // shufpd 1516 {TTI::SK_Reverse, MVT::v2i64, 1}, // pshufd 1517 {TTI::SK_Reverse, MVT::v4i32, 1}, // pshufd 1518 {TTI::SK_Reverse, MVT::v8i16, 3}, // pshuflw + pshufhw + pshufd 1519 {TTI::SK_Reverse, MVT::v16i8, 9}, // 2*pshuflw + 2*pshufhw 1520 // + 2*pshufd + 2*unpck + packus 1521 1522 {TTI::SK_Select, MVT::v2i64, 1}, // movsd 1523 {TTI::SK_Select, MVT::v2f64, 1}, // movsd 1524 {TTI::SK_Select, MVT::v4i32, 2}, // 2*shufps 1525 {TTI::SK_Select, MVT::v8i16, 3}, // pand + pandn + por 1526 {TTI::SK_Select, MVT::v16i8, 3}, // pand + pandn + por 1527 1528 {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1}, // shufpd 1529 {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1}, // pshufd 1530 {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1}, // pshufd 1531 {TTI::SK_PermuteSingleSrc, MVT::v8i16, 5}, // 2*pshuflw + 2*pshufhw 1532 // + pshufd/unpck 1533 { TTI::SK_PermuteSingleSrc, MVT::v16i8, 10 }, // 2*pshuflw + 2*pshufhw 1534 // + 2*pshufd + 2*unpck + 2*packus 1535 1536 { TTI::SK_PermuteTwoSrc, MVT::v2f64, 1 }, // shufpd 1537 { TTI::SK_PermuteTwoSrc, MVT::v2i64, 1 }, // shufpd 1538 { TTI::SK_PermuteTwoSrc, MVT::v4i32, 2 }, // 2*{unpck,movsd,pshufd} 1539 { TTI::SK_PermuteTwoSrc, MVT::v8i16, 8 }, // blend+permute 1540 { TTI::SK_PermuteTwoSrc, MVT::v16i8, 13 }, // blend+permute 1541 }; 1542 1543 if (ST->hasSSE2()) 1544 if (const auto *Entry = CostTableLookup(SSE2ShuffleTbl, Kind, LT.second)) 1545 return LT.first * Entry->Cost; 1546 1547 static const CostTblEntry SSE1ShuffleTbl[] = { 1548 { TTI::SK_Broadcast, MVT::v4f32, 1 
}, // shufps
    { TTI::SK_Reverse, MVT::v4f32, 1 }, // shufps
    { TTI::SK_Select, MVT::v4f32, 2 }, // 2*shufps
    { TTI::SK_PermuteSingleSrc, MVT::v4f32, 1 }, // shufps
    { TTI::SK_PermuteTwoSrc, MVT::v4f32, 2 }, // 2*shufps
  };

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  return BaseT::getShuffleCost(Kind, BaseTp, Mask, Index, SubTp);
}

InstructionCost X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
                                             Type *Src,
                                             TTI::CastContextHint CCH,
                                             TTI::TargetCostKind CostKind,
                                             const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // TODO: Allow non-throughput costs that aren't binary.
  auto AdjustCost = [&CostKind](InstructionCost Cost) -> InstructionCost {
    if (CostKind != TTI::TCK_RecipThroughput)
      return Cost == 0 ? 0 : 1;
    return Cost;
  };

  // The cost tables include entries for both specific, custom (non-legal)
  // src/dst type conversions and for generic, legalized types. We check the
  // custom entries first, before falling back to legalization.
  // FIXME: Need a better design for the cost table to handle non-simple types
  // and the potentially massive number of combinations
  // (elem_num x src_type x dst_type).
  static const TypeConversionCostTblEntry AVX512BWConversionTbl[] {
    { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 1 },

    // Mask sign extend has an instruction.
    { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v32i8, MVT::v32i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v64i8, MVT::v64i1, 1 },

    // Mask zero extend is a sext + shift.
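    // For illustration: zext <8 x i1> to <8 x i16> is expected to lower as a
    // mask-to-vector sign extend (vpmovm2w) followed by a right shift
    // (vpsrlw $15) to clear all but the low bit, which is why each
    // ZERO_EXTEND entry below costs one more than the matching SIGN_EXTEND
    // entry above.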
1600 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 2 }, 1601 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 2 }, 1602 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 2 }, 1603 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 2 }, 1604 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 2 }, 1605 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 2 }, 1606 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 2 }, 1607 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 }, 1608 { ISD::ZERO_EXTEND, MVT::v32i8, MVT::v32i1, 2 }, 1609 { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i1, 2 }, 1610 { ISD::ZERO_EXTEND, MVT::v64i8, MVT::v64i1, 2 }, 1611 1612 { ISD::TRUNCATE, MVT::v32i8, MVT::v32i16, 2 }, 1613 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 }, // widen to zmm 1614 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 2 }, // widen to zmm 1615 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 }, // widen to zmm 1616 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i16, 2 }, // vpmovwb 1617 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 }, // widen to zmm 1618 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 2 }, // widen to zmm 1619 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 2 }, // vpmovwb 1620 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 2 }, // widen to zmm 1621 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 2 }, // widen to zmm 1622 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 2 }, // vpmovwb 1623 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 2 }, // widen to zmm 1624 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 2 }, // widen to zmm 1625 { ISD::TRUNCATE, MVT::v32i1, MVT::v32i8, 2 }, // widen to zmm 1626 { ISD::TRUNCATE, MVT::v32i1, MVT::v32i16, 2 }, 1627 { ISD::TRUNCATE, MVT::v64i1, MVT::v64i8, 2 }, 1628 }; 1629 1630 static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = { 1631 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 }, 1632 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 }, 1633 1634 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 }, 1635 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 }, 1636 1637 { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f32, 1 }, 1638 { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f64, 1 }, 1639 1640 { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f32, 1 }, 1641 { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f64, 1 }, 1642 }; 1643 1644 // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and 1645 // 256-bit wide vectors. 
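  // Illustrative example: on an AVX512DQ target, sitofp <8 x i64> to
  // <8 x double> maps to ISD::SINT_TO_FP with Dst = v8f64 and Src = v8i64,
  // so the lookup in AVX512DQConversionTbl above should yield a cost of 1
  // (a single vcvtqq2pd).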
1646 1647 static const TypeConversionCostTblEntry AVX512FConversionTbl[] = { 1648 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 1 }, 1649 { ISD::FP_EXTEND, MVT::v8f64, MVT::v16f32, 3 }, 1650 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 1 }, 1651 1652 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // sext+vpslld+vptestmd 1653 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 3 }, // sext+vpslld+vptestmd 1654 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 3 }, // sext+vpslld+vptestmd 1655 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 3 }, // sext+vpslld+vptestmd 1656 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 3 }, // sext+vpsllq+vptestmq 1657 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 3 }, // sext+vpsllq+vptestmq 1658 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 3 }, // sext+vpsllq+vptestmq 1659 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 3 }, // sext+vpslld+vptestmd 1660 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 2 }, // zmm vpslld+vptestmd 1661 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 }, // zmm vpslld+vptestmd 1662 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 }, // zmm vpslld+vptestmd 1663 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i32, 2 }, // vpslld+vptestmd 1664 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 }, // zmm vpsllq+vptestmq 1665 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 }, // zmm vpsllq+vptestmq 1666 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i64, 2 }, // vpsllq+vptestmq 1667 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i32, 2 }, // vpmovdb 1668 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 2 }, // vpmovdb 1669 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 2 }, // vpmovdb 1670 { ISD::TRUNCATE, MVT::v32i8, MVT::v16i32, 2 }, // vpmovdb 1671 { ISD::TRUNCATE, MVT::v64i8, MVT::v16i32, 2 }, // vpmovdb 1672 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 2 }, // vpmovdw 1673 { ISD::TRUNCATE, MVT::v32i16, MVT::v16i32, 2 }, // vpmovdw 1674 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i64, 2 }, // vpmovqb 1675 { ISD::TRUNCATE, MVT::v2i16, MVT::v2i64, 1 }, // vpshufb 1676 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i64, 2 }, // vpmovqb 1677 { ISD::TRUNCATE, MVT::v16i8, MVT::v8i64, 2 }, // vpmovqb 1678 { ISD::TRUNCATE, MVT::v32i8, MVT::v8i64, 2 }, // vpmovqb 1679 { ISD::TRUNCATE, MVT::v64i8, MVT::v8i64, 2 }, // vpmovqb 1680 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i64, 2 }, // vpmovqw 1681 { ISD::TRUNCATE, MVT::v16i16, MVT::v8i64, 2 }, // vpmovqw 1682 { ISD::TRUNCATE, MVT::v32i16, MVT::v8i64, 2 }, // vpmovqw 1683 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 1 }, // vpmovqd 1684 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 }, // zmm vpmovqd 1685 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i64, 5 },// 2*vpmovqd+concat+vpmovdb 1686 1687 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 }, // extend to v16i32 1688 { ISD::TRUNCATE, MVT::v32i8, MVT::v32i16, 8 }, 1689 { ISD::TRUNCATE, MVT::v64i8, MVT::v32i16, 8 }, 1690 1691 // Sign extend is zmm vpternlogd+vptruncdb. 1692 // Zero extend is zmm broadcast load+vptruncdw. 1693 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 3 }, 1694 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 4 }, 1695 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 3 }, 1696 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 4 }, 1697 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 3 }, 1698 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 4 }, 1699 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 3 }, 1700 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 4 }, 1701 1702 // Sign extend is zmm vpternlogd+vptruncdw. 1703 // Zero extend is zmm vpternlogd+vptruncdw+vpsrlw. 
1704 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 3 }, 1705 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 4 }, 1706 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 3 }, 1707 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 4 }, 1708 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 3 }, 1709 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 4 }, 1710 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 3 }, 1711 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 4 }, 1712 1713 { ISD::SIGN_EXTEND, MVT::v2i32, MVT::v2i1, 1 }, // zmm vpternlogd 1714 { ISD::ZERO_EXTEND, MVT::v2i32, MVT::v2i1, 2 }, // zmm vpternlogd+psrld 1715 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 }, // zmm vpternlogd 1716 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 }, // zmm vpternlogd+psrld 1717 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 }, // zmm vpternlogd 1718 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 }, // zmm vpternlogd+psrld 1719 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 }, // zmm vpternlogq 1720 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 }, // zmm vpternlogq+psrlq 1721 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 }, // zmm vpternlogq 1722 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 }, // zmm vpternlogq+psrlq 1723 1724 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1, 1 }, // vpternlogd 1725 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1, 2 }, // vpternlogd+psrld 1726 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i1, 1 }, // vpternlogq 1727 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i1, 2 }, // vpternlogq+psrlq 1728 1729 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 1 }, 1730 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 1 }, 1731 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 }, 1732 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 }, 1733 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 1 }, 1734 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 1 }, 1735 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 1 }, 1736 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 1 }, 1737 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i32, 1 }, 1738 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i32, 1 }, 1739 1740 { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 3 }, // FIXME: May not be right 1741 { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 3 }, // FIXME: May not be right 1742 1743 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 }, 1744 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 }, 1745 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v16i8, 2 }, 1746 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 1 }, 1747 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 }, 1748 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 1 }, 1749 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 }, 1750 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 }, 1751 1752 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 }, 1753 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 }, 1754 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v16i8, 2 }, 1755 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 1 }, 1756 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 }, 1757 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 1 }, 1758 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 }, 1759 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 }, 1760 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 26 }, 1761 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 5 }, 1762 1763 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f32, 2 }, 1764 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f64, 7 }, 1765 { ISD::FP_TO_SINT, MVT::v32i8, MVT::v32f64,15 }, 1766 { ISD::FP_TO_SINT, MVT::v64i8, MVT::v64f32,11 }, 1767 { ISD::FP_TO_SINT, MVT::v64i8, MVT::v64f64,31 }, 1768 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f64, 3 }, 1769 { ISD::FP_TO_SINT, MVT::v16i16, MVT::v16f64, 7 }, 1770 { 
ISD::FP_TO_SINT, MVT::v32i16, MVT::v32f32, 5 }, 1771 { ISD::FP_TO_SINT, MVT::v32i16, MVT::v32f64,15 }, 1772 { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f64, 1 }, 1773 { ISD::FP_TO_SINT, MVT::v16i32, MVT::v16f64, 3 }, 1774 1775 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f64, 1 }, 1776 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f64, 3 }, 1777 { ISD::FP_TO_UINT, MVT::v8i8, MVT::v8f64, 3 }, 1778 { ISD::FP_TO_UINT, MVT::v16i32, MVT::v16f32, 1 }, 1779 { ISD::FP_TO_UINT, MVT::v16i16, MVT::v16f32, 3 }, 1780 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v16f32, 3 }, 1781 }; 1782 1783 static const TypeConversionCostTblEntry AVX512BWVLConversionTbl[] { 1784 // Mask sign extend has an instruction. 1785 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 1 }, 1786 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 1 }, 1787 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 1 }, 1788 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 1 }, 1789 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 1 }, 1790 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 1 }, 1791 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 1 }, 1792 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 }, 1793 { ISD::SIGN_EXTEND, MVT::v32i8, MVT::v32i1, 1 }, 1794 1795 // Mask zero extend is a sext + shift. 1796 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 2 }, 1797 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 2 }, 1798 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 2 }, 1799 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 2 }, 1800 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 2 }, 1801 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 2 }, 1802 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 2 }, 1803 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 }, 1804 { ISD::ZERO_EXTEND, MVT::v32i8, MVT::v32i1, 2 }, 1805 1806 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 }, 1807 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 2 }, // vpsllw+vptestmb 1808 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 }, // vpsllw+vptestmw 1809 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 }, // vpsllw+vptestmb 1810 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 2 }, // vpsllw+vptestmw 1811 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 2 }, // vpsllw+vptestmb 1812 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 2 }, // vpsllw+vptestmw 1813 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 2 }, // vpsllw+vptestmb 1814 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 2 }, // vpsllw+vptestmw 1815 { ISD::TRUNCATE, MVT::v32i1, MVT::v32i8, 2 }, // vpsllw+vptestmb 1816 }; 1817 1818 static const TypeConversionCostTblEntry AVX512DQVLConversionTbl[] = { 1819 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 }, 1820 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 }, 1821 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 }, 1822 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 }, 1823 1824 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 }, 1825 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 }, 1826 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 }, 1827 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 }, 1828 1829 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v4f32, 1 }, 1830 { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f32, 1 }, 1831 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 }, 1832 { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f64, 1 }, 1833 1834 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v4f32, 1 }, 1835 { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f32, 1 }, 1836 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 }, 1837 { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f64, 1 }, 1838 }; 1839 1840 static const TypeConversionCostTblEntry AVX512VLConversionTbl[] = { 1841 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // sext+vpslld+vptestmd 1842 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 3 }, // sext+vpslld+vptestmd 1843 { 
ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 3 }, // sext+vpslld+vptestmd 1844 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 8 }, // split+2*v8i8 1845 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 3 }, // sext+vpsllq+vptestmq 1846 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 3 }, // sext+vpsllq+vptestmq 1847 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 3 }, // sext+vpsllq+vptestmq 1848 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 8 }, // split+2*v8i16 1849 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 2 }, // vpslld+vptestmd 1850 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 }, // vpslld+vptestmd 1851 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 }, // vpslld+vptestmd 1852 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 }, // vpsllq+vptestmq 1853 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 }, // vpsllq+vptestmq 1854 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 }, // vpmovqd 1855 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 2 }, // vpmovqb 1856 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 2 }, // vpmovqw 1857 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 2 }, // vpmovwb 1858 1859 // sign extend is vpcmpeq+maskedmove+vpmovdw+vpacksswb 1860 // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw+vpackuswb 1861 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 5 }, 1862 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 6 }, 1863 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 5 }, 1864 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 6 }, 1865 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 5 }, 1866 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 6 }, 1867 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 10 }, 1868 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 12 }, 1869 1870 // sign extend is vpcmpeq+maskedmove+vpmovdw 1871 // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw 1872 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 4 }, 1873 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 5 }, 1874 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 4 }, 1875 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 5 }, 1876 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 4 }, 1877 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 5 }, 1878 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 10 }, 1879 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 12 }, 1880 1881 { ISD::SIGN_EXTEND, MVT::v2i32, MVT::v2i1, 1 }, // vpternlogd 1882 { ISD::ZERO_EXTEND, MVT::v2i32, MVT::v2i1, 2 }, // vpternlogd+psrld 1883 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 }, // vpternlogd 1884 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 }, // vpternlogd+psrld 1885 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 }, // vpternlogd 1886 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 }, // vpternlogd+psrld 1887 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 }, // vpternlogq 1888 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 }, // vpternlogq+psrlq 1889 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 }, // vpternlogq 1890 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 }, // vpternlogq+psrlq 1891 1892 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v16i8, 1 }, 1893 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v16i8, 1 }, 1894 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v16i8, 1 }, 1895 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v16i8, 1 }, 1896 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 }, 1897 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 }, 1898 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v8i16, 1 }, 1899 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v8i16, 1 }, 1900 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 }, 1901 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 }, 1902 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 1 }, 1903 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1 }, 1904 1905 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 }, 1906 { ISD::SINT_TO_FP, 
MVT::v8f32, MVT::v16i8, 1 }, 1907 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 }, 1908 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 1 }, 1909 1910 { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 1 }, 1911 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 1 }, 1912 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 }, 1913 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v16i8, 1 }, 1914 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 }, 1915 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 1 }, 1916 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 }, 1917 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 }, 1918 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 }, 1919 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 }, 1920 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 5 }, 1921 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 }, 1922 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 5 }, 1923 1924 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v8f32, 2 }, 1925 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f32, 2 }, 1926 { ISD::FP_TO_SINT, MVT::v32i8, MVT::v32f32, 5 }, 1927 1928 { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 1 }, 1929 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 1 }, 1930 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 }, 1931 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 1 }, 1932 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 1 }, 1933 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 1 }, 1934 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f64, 1 }, 1935 }; 1936 1937 static const TypeConversionCostTblEntry AVX2ConversionTbl[] = { 1938 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 3 }, 1939 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 3 }, 1940 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 3 }, 1941 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 3 }, 1942 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 }, 1943 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 1 }, 1944 1945 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v16i8, 2 }, 1946 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v16i8, 2 }, 1947 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v16i8, 2 }, 1948 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v16i8, 2 }, 1949 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 }, 1950 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 }, 1951 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v8i16, 2 }, 1952 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v8i16, 2 }, 1953 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 2 }, 1954 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 2 }, 1955 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 3 }, 1956 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 3 }, 1957 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 2 }, 1958 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 2 }, 1959 1960 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 }, 1961 1962 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 4 }, 1963 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 4 }, 1964 { ISD::TRUNCATE, MVT::v16i8, MVT::v8i16, 1 }, 1965 { ISD::TRUNCATE, MVT::v16i8, MVT::v4i32, 1 }, 1966 { ISD::TRUNCATE, MVT::v16i8, MVT::v2i64, 1 }, 1967 { ISD::TRUNCATE, MVT::v16i8, MVT::v8i32, 4 }, 1968 { ISD::TRUNCATE, MVT::v16i8, MVT::v4i64, 4 }, 1969 { ISD::TRUNCATE, MVT::v8i16, MVT::v4i32, 1 }, 1970 { ISD::TRUNCATE, MVT::v8i16, MVT::v2i64, 1 }, 1971 { ISD::TRUNCATE, MVT::v8i16, MVT::v4i64, 5 }, 1972 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 }, 1973 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 2 }, 1974 1975 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 3 }, 1976 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 3 }, 1977 1978 { ISD::FP_TO_SINT, MVT::v16i16, MVT::v8f32, 1 }, 1979 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f64, 1 }, 1980 { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f32, 1 }, 1981 { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f64, 3 }, 1982 1983 { 
ISD::FP_TO_UINT, MVT::i64, MVT::f32, 3 }, 1984 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 3 }, 1985 { ISD::FP_TO_UINT, MVT::v16i16, MVT::v8f32, 1 }, 1986 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 3 }, 1987 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 4 }, 1988 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 4 }, 1989 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 3 }, 1990 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v4f64, 4 }, 1991 1992 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 2 }, 1993 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v16i8, 2 }, 1994 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 2 }, 1995 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 }, 1996 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 }, 1997 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 }, 1998 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 3 }, 1999 2000 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 2 }, 2001 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v16i8, 2 }, 2002 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 2 }, 2003 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 }, 2004 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 2 }, 2005 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 1 }, 2006 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 2 }, 2007 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 2 }, 2008 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 2 }, 2009 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 4 }, 2010 }; 2011 2012 static const TypeConversionCostTblEntry AVXConversionTbl[] = { 2013 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 6 }, 2014 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 4 }, 2015 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 7 }, 2016 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 4 }, 2017 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 4 }, 2018 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 4 }, 2019 2020 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v16i8, 3 }, 2021 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v16i8, 3 }, 2022 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v16i8, 3 }, 2023 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v16i8, 3 }, 2024 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 3 }, 2025 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 3 }, 2026 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v8i16, 3 }, 2027 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v8i16, 3 }, 2028 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 3 }, 2029 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 3 }, 2030 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 3 }, 2031 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 3 }, 2032 2033 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 4 }, 2034 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 5 }, 2035 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 4 }, 2036 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i64, 9 }, 2037 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i64, 11 }, 2038 2039 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 6 }, 2040 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 6 }, 2041 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 }, // and+extract+packuswb 2042 { ISD::TRUNCATE, MVT::v16i8, MVT::v8i32, 5 }, 2043 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 }, 2044 { ISD::TRUNCATE, MVT::v16i8, MVT::v4i64, 5 }, 2045 { ISD::TRUNCATE, MVT::v8i16, MVT::v4i64, 3 }, // and+extract+2*packusdw 2046 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 2 }, 2047 2048 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 }, 2049 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i1, 3 }, 2050 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i1, 8 }, 2051 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v16i8, 4 }, 2052 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v16i8, 2 }, 2053 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 }, 2054 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v8i16, 2 }, 2055 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 2 }, 2056 { 
ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 2 }, 2057 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 4 }, 2058 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 5 }, 2059 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i64, 8 }, 2060 2061 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 7 }, 2062 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i1, 7 }, 2063 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i1, 6 }, 2064 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v16i8, 4 }, 2065 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v16i8, 2 }, 2066 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 }, 2067 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v8i16, 2 }, 2068 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 4 }, 2069 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 4 }, 2070 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 }, 2071 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 6 }, 2072 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 8 }, 2073 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 10 }, 2074 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 10 }, 2075 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 18 }, 2076 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 }, 2077 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 10 }, 2078 2079 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v8f32, 2 }, 2080 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v4f64, 2 }, 2081 { ISD::FP_TO_SINT, MVT::v32i8, MVT::v8f32, 2 }, 2082 { ISD::FP_TO_SINT, MVT::v32i8, MVT::v4f64, 2 }, 2083 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f32, 2 }, 2084 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v4f64, 2 }, 2085 { ISD::FP_TO_SINT, MVT::v16i16, MVT::v8f32, 2 }, 2086 { ISD::FP_TO_SINT, MVT::v16i16, MVT::v4f64, 2 }, 2087 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f64, 2 }, 2088 { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f32, 2 }, 2089 { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f64, 5 }, 2090 2091 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v8f32, 2 }, 2092 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v4f64, 2 }, 2093 { ISD::FP_TO_UINT, MVT::v32i8, MVT::v8f32, 2 }, 2094 { ISD::FP_TO_UINT, MVT::v32i8, MVT::v4f64, 2 }, 2095 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f32, 2 }, 2096 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v4f64, 2 }, 2097 { ISD::FP_TO_UINT, MVT::v16i16, MVT::v8f32, 2 }, 2098 { ISD::FP_TO_UINT, MVT::v16i16, MVT::v4f64, 2 }, 2099 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 3 }, 2100 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 4 }, 2101 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 6 }, 2102 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 7 }, 2103 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v4f64, 7 }, 2104 2105 { ISD::FP_EXTEND, MVT::v4f64, MVT::v4f32, 1 }, 2106 { ISD::FP_ROUND, MVT::v4f32, MVT::v4f64, 1 }, 2107 }; 2108 2109 static const TypeConversionCostTblEntry SSE41ConversionTbl[] = { 2110 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v16i8, 1 }, 2111 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v16i8, 1 }, 2112 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v16i8, 1 }, 2113 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v16i8, 1 }, 2114 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v16i8, 1 }, 2115 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v16i8, 1 }, 2116 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v8i16, 1 }, 2117 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v8i16, 1 }, 2118 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v8i16, 1 }, 2119 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v8i16, 1 }, 2120 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v4i32, 1 }, 2121 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v4i32, 1 }, 2122 2123 // These truncates end up widening elements. 
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 1 }, // PMOVZXBQ
    { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 1 }, // PMOVZXWQ
    { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 1 }, // PMOVZXBD

    { ISD::TRUNCATE, MVT::v16i8, MVT::v4i32, 2 },
    { ISD::TRUNCATE, MVT::v8i16, MVT::v4i32, 2 },
    { ISD::TRUNCATE, MVT::v16i8, MVT::v2i64, 2 },

    { ISD::SINT_TO_FP, MVT::f32, MVT::i32, 1 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i32, 1 },
    { ISD::SINT_TO_FP, MVT::f32, MVT::i64, 1 },
    { ISD::SINT_TO_FP, MVT::f64, MVT::i64, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 1 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 1 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 2 },

    { ISD::UINT_TO_FP, MVT::f32, MVT::i32, 1 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i32, 1 },
    { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 4 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 4 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 1 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 1 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 3 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 3 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 2 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 12 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 22 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 4 },

    { ISD::FP_TO_SINT, MVT::i32, MVT::f32, 1 },
    { ISD::FP_TO_SINT, MVT::i64, MVT::f32, 1 },
    { ISD::FP_TO_SINT, MVT::i32, MVT::f64, 1 },
    { ISD::FP_TO_SINT, MVT::i64, MVT::f64, 1 },
    { ISD::FP_TO_SINT, MVT::v16i8, MVT::v4f32, 2 },
    { ISD::FP_TO_SINT, MVT::v16i8, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v8i16, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT, MVT::v8i16, MVT::v2f64, 1 },
    { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT, MVT::v4i32, MVT::v2f64, 1 },

    { ISD::FP_TO_UINT, MVT::i32, MVT::f32, 1 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 4 },
    { ISD::FP_TO_UINT, MVT::i32, MVT::f64, 1 },
    { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 4 },
    { ISD::FP_TO_UINT, MVT::v16i8, MVT::v4f32, 2 },
    { ISD::FP_TO_UINT, MVT::v16i8, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v8i16, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT, MVT::v8i16, MVT::v2f64, 1 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 4 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 4 },
  };

  static const TypeConversionCostTblEntry SSE2ConversionTbl[] = {
    // These are somewhat magic numbers justified by comparing the
    // output of llvm-mca for our various supported scheduler models
    // and basing them on the worst-case scenario.
2186 { ISD::SINT_TO_FP, MVT::f32, MVT::i32, 3 }, 2187 { ISD::SINT_TO_FP, MVT::f64, MVT::i32, 3 }, 2188 { ISD::SINT_TO_FP, MVT::f32, MVT::i64, 3 }, 2189 { ISD::SINT_TO_FP, MVT::f64, MVT::i64, 3 }, 2190 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 3 }, 2191 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 4 }, 2192 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 3 }, 2193 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 4 }, 2194 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 3 }, 2195 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4 }, 2196 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 8 }, 2197 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 8 }, 2198 2199 { ISD::UINT_TO_FP, MVT::f32, MVT::i32, 3 }, 2200 { ISD::UINT_TO_FP, MVT::f64, MVT::i32, 3 }, 2201 { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 8 }, 2202 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 9 }, 2203 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 4 }, 2204 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 4 }, 2205 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 4 }, 2206 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 4 }, 2207 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 7 }, 2208 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 7 }, 2209 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 }, 2210 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 15 }, 2211 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 18 }, 2212 2213 { ISD::FP_TO_SINT, MVT::i32, MVT::f32, 4 }, 2214 { ISD::FP_TO_SINT, MVT::i64, MVT::f32, 4 }, 2215 { ISD::FP_TO_SINT, MVT::i32, MVT::f64, 4 }, 2216 { ISD::FP_TO_SINT, MVT::i64, MVT::f64, 4 }, 2217 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v4f32, 6 }, 2218 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v2f64, 6 }, 2219 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v4f32, 5 }, 2220 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v2f64, 5 }, 2221 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 4 }, 2222 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v2f64, 4 }, 2223 2224 { ISD::FP_TO_UINT, MVT::i32, MVT::f32, 4 }, 2225 { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 4 }, 2226 { ISD::FP_TO_UINT, MVT::i32, MVT::f64, 4 }, 2227 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 15 }, 2228 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v4f32, 6 }, 2229 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v2f64, 6 }, 2230 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v4f32, 5 }, 2231 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v2f64, 5 }, 2232 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 8 }, 2233 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 8 }, 2234 2235 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v16i8, 4 }, 2236 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v16i8, 4 }, 2237 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v16i8, 2 }, 2238 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v16i8, 3 }, 2239 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v16i8, 1 }, 2240 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v16i8, 2 }, 2241 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v8i16, 2 }, 2242 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v8i16, 3 }, 2243 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v8i16, 1 }, 2244 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v8i16, 2 }, 2245 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v4i32, 1 }, 2246 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v4i32, 2 }, 2247 2248 // These truncates are really widening elements. 
2249 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 1 }, // PSHUFD 2250 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 }, // PUNPCKLWD+DQ 2251 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // PUNPCKLBW+WD+PSHUFD 2252 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 1 }, // PUNPCKLWD 2253 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 }, // PUNPCKLBW+WD 2254 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 1 }, // PUNPCKLBW 2255 2256 { ISD::TRUNCATE, MVT::v16i8, MVT::v8i16, 2 }, // PAND+PACKUSWB 2257 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 }, 2258 { ISD::TRUNCATE, MVT::v16i8, MVT::v4i32, 3 }, // PAND+2*PACKUSWB 2259 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 7 }, 2260 { ISD::TRUNCATE, MVT::v2i16, MVT::v2i32, 1 }, 2261 { ISD::TRUNCATE, MVT::v8i16, MVT::v4i32, 3 }, 2262 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 }, 2263 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32,10 }, 2264 { ISD::TRUNCATE, MVT::v16i8, MVT::v2i64, 4 }, // PAND+3*PACKUSWB 2265 { ISD::TRUNCATE, MVT::v8i16, MVT::v2i64, 2 }, // PSHUFD+PSHUFLW 2266 { ISD::TRUNCATE, MVT::v4i32, MVT::v2i64, 1 }, // PSHUFD 2267 }; 2268 2269 // Attempt to map directly to (simple) MVT types to let us match custom entries. 2270 EVT SrcTy = TLI->getValueType(DL, Src); 2271 EVT DstTy = TLI->getValueType(DL, Dst); 2272 2273 // The function getSimpleVT only handles simple value types. 2274 if (SrcTy.isSimple() && DstTy.isSimple()) { 2275 MVT SimpleSrcTy = SrcTy.getSimpleVT(); 2276 MVT SimpleDstTy = DstTy.getSimpleVT(); 2277 2278 if (ST->useAVX512Regs()) { 2279 if (ST->hasBWI()) 2280 if (const auto *Entry = ConvertCostTableLookup( 2281 AVX512BWConversionTbl, ISD, SimpleDstTy, SimpleSrcTy)) 2282 return AdjustCost(Entry->Cost); 2283 2284 if (ST->hasDQI()) 2285 if (const auto *Entry = ConvertCostTableLookup( 2286 AVX512DQConversionTbl, ISD, SimpleDstTy, SimpleSrcTy)) 2287 return AdjustCost(Entry->Cost); 2288 2289 if (ST->hasAVX512()) 2290 if (const auto *Entry = ConvertCostTableLookup( 2291 AVX512FConversionTbl, ISD, SimpleDstTy, SimpleSrcTy)) 2292 return AdjustCost(Entry->Cost); 2293 } 2294 2295 if (ST->hasBWI()) 2296 if (const auto *Entry = ConvertCostTableLookup( 2297 AVX512BWVLConversionTbl, ISD, SimpleDstTy, SimpleSrcTy)) 2298 return AdjustCost(Entry->Cost); 2299 2300 if (ST->hasDQI()) 2301 if (const auto *Entry = ConvertCostTableLookup( 2302 AVX512DQVLConversionTbl, ISD, SimpleDstTy, SimpleSrcTy)) 2303 return AdjustCost(Entry->Cost); 2304 2305 if (ST->hasAVX512()) 2306 if (const auto *Entry = ConvertCostTableLookup(AVX512VLConversionTbl, ISD, 2307 SimpleDstTy, SimpleSrcTy)) 2308 return AdjustCost(Entry->Cost); 2309 2310 if (ST->hasAVX2()) { 2311 if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD, 2312 SimpleDstTy, SimpleSrcTy)) 2313 return AdjustCost(Entry->Cost); 2314 } 2315 2316 if (ST->hasAVX()) { 2317 if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD, 2318 SimpleDstTy, SimpleSrcTy)) 2319 return AdjustCost(Entry->Cost); 2320 } 2321 2322 if (ST->hasSSE41()) { 2323 if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD, 2324 SimpleDstTy, SimpleSrcTy)) 2325 return AdjustCost(Entry->Cost); 2326 } 2327 2328 if (ST->hasSSE2()) { 2329 if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD, 2330 SimpleDstTy, SimpleSrcTy)) 2331 return AdjustCost(Entry->Cost); 2332 } 2333 } 2334 2335 // Fall back to legalized types. 
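  // Worked example: with only SSE2, trunc <32 x i16> to <32 x i8> should
  // legalize the source to 4 x v8i16 and the destination to 2 x v16i8; the
  // SSE2 entry for (TRUNCATE, v16i8, v8i16) costs 2 (PAND+PACKUSWB), giving
  // max(4, 2) * 2 = 8 from the lookups below.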
  std::pair<InstructionCost, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
  std::pair<InstructionCost, MVT> LTDest =
      TLI->getTypeLegalizationCost(DL, Dst);

  if (ST->useAVX512Regs()) {
    if (ST->hasBWI())
      if (const auto *Entry = ConvertCostTableLookup(
              AVX512BWConversionTbl, ISD, LTDest.second, LTSrc.second))
        return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

    if (ST->hasDQI())
      if (const auto *Entry = ConvertCostTableLookup(
              AVX512DQConversionTbl, ISD, LTDest.second, LTSrc.second))
        return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

    if (ST->hasAVX512())
      if (const auto *Entry = ConvertCostTableLookup(
              AVX512FConversionTbl, ISD, LTDest.second, LTSrc.second))
        return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
  }

  if (ST->hasBWI())
    if (const auto *Entry = ConvertCostTableLookup(AVX512BWVLConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

  if (ST->hasDQI())
    if (const auto *Entry = ConvertCostTableLookup(AVX512DQVLConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

  if (ST->hasAVX512())
    if (const auto *Entry = ConvertCostTableLookup(AVX512VLConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

  if (ST->hasAVX2())
    if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

  if (ST->hasAVX())
    if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

  if (ST->hasSSE41())
    if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

  if (ST->hasSSE2())
    if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);

  // Fallback: for i8/i16 sitofp/uitofp cases we extend the source to i32
  // first. After the sext/zext the value always fits in i32 (and is
  // non-negative for the uitofp case), so the conversion itself can be
  // costed as sitofp from i32.
  if ((ISD == ISD::SINT_TO_FP || ISD == ISD::UINT_TO_FP) &&
      1 < Src->getScalarSizeInBits() && Src->getScalarSizeInBits() < 32) {
    Type *ExtSrc = Src->getWithNewBitWidth(32);
    unsigned ExtOpc =
        (ISD == ISD::SINT_TO_FP) ? Instruction::SExt : Instruction::ZExt;

    // For scalar loads the extend would be free.
    InstructionCost ExtCost = 0;
    if (!(Src->isIntegerTy() && I && isa<LoadInst>(I->getOperand(0))))
      ExtCost = getCastInstrCost(ExtOpc, ExtSrc, Src, CCH, CostKind);

    return ExtCost + getCastInstrCost(Instruction::SIToFP, Dst, ExtSrc,
                                      TTI::CastContextHint::None, CostKind);
  }

  // Fallback: for i8/i16 fptosi/fptoui cases we convert to i32 first and
  // then truncate the result, so the cost is fptosi-to-i32 plus a trunc.
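  // Illustrative example: fptoui <4 x float> to <4 x i8> should be costed as
  // fptosi <4 x float> to <4 x i32> plus trunc <4 x i32> to <4 x i8>, each
  // priced via the tables above.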
  if ((ISD == ISD::FP_TO_SINT || ISD == ISD::FP_TO_UINT) &&
      1 < Dst->getScalarSizeInBits() && Dst->getScalarSizeInBits() < 32) {
    Type *TruncDst = Dst->getWithNewBitWidth(32);
    return getCastInstrCost(Instruction::FPToSI, TruncDst, Src, CCH, CostKind) +
           getCastInstrCost(Instruction::Trunc, Dst, TruncDst,
                            TTI::CastContextHint::None, CostKind);
  }

  return AdjustCost(
      BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
}

InstructionCost X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                               Type *CondTy,
                                               CmpInst::Predicate VecPred,
                                               TTI::TargetCostKind CostKind,
                                               const Instruction *I) {
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
                                     I);

  // Legalize the type.
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  unsigned ExtraCost = 0;
  if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) {
    // Some vector comparison predicates cost extra instructions.
    // TODO: Should we invert this and assume worst-case cmp costs
    // and reduce for particular predicates?
    if (MTy.isVector() &&
        !((ST->hasXOP() && (!ST->hasAVX2() || MTy.is128BitVector())) ||
          (ST->hasAVX512() && 32 <= MTy.getScalarSizeInBits()) ||
          ST->hasBWI())) {
      // Fall back to I's predicate if a specific one wasn't provided.
      CmpInst::Predicate Pred = VecPred;
      if (I && (Pred == CmpInst::BAD_ICMP_PREDICATE ||
                Pred == CmpInst::BAD_FCMP_PREDICATE))
        Pred = cast<CmpInst>(I)->getPredicate();

      switch (Pred) {
      case CmpInst::Predicate::ICMP_NE:
        // xor(cmpeq(x,y),-1)
        ExtraCost = 1;
        break;
      case CmpInst::Predicate::ICMP_SGE:
      case CmpInst::Predicate::ICMP_SLE:
        // xor(cmpgt(x,y),-1)
        ExtraCost = 1;
        break;
      case CmpInst::Predicate::ICMP_ULT:
      case CmpInst::Predicate::ICMP_UGT:
        // cmpgt(xor(x,signbit),xor(y,signbit))
        // xor(cmpeq(pmaxu(x,y),x),-1)
        ExtraCost = 2;
        break;
      case CmpInst::Predicate::ICMP_ULE:
      case CmpInst::Predicate::ICMP_UGE:
        if ((ST->hasSSE41() && MTy.getScalarSizeInBits() == 32) ||
            (ST->hasSSE2() && MTy.getScalarSizeInBits() < 32)) {
          // cmpeq(psubus(x,y),0)
          // cmpeq(pminu(x,y),x)
          ExtraCost = 1;
        } else {
          // xor(cmpgt(xor(x,signbit),xor(y,signbit)),-1)
          ExtraCost = 3;
        }
        break;
      case CmpInst::Predicate::BAD_ICMP_PREDICATE:
      case CmpInst::Predicate::BAD_FCMP_PREDICATE:
        // Assume the worst-case scenario and add the maximum extra cost.
2487 ExtraCost = 3; 2488 break; 2489 default: 2490 break; 2491 } 2492 } 2493 } 2494 2495 static const CostTblEntry SLMCostTbl[] = { 2496 // slm pcmpeq/pcmpgt throughput is 2 2497 { ISD::SETCC, MVT::v2i64, 2 }, 2498 }; 2499 2500 static const CostTblEntry AVX512BWCostTbl[] = { 2501 { ISD::SETCC, MVT::v32i16, 1 }, 2502 { ISD::SETCC, MVT::v64i8, 1 }, 2503 2504 { ISD::SELECT, MVT::v32i16, 1 }, 2505 { ISD::SELECT, MVT::v64i8, 1 }, 2506 }; 2507 2508 static const CostTblEntry AVX512CostTbl[] = { 2509 { ISD::SETCC, MVT::v8i64, 1 }, 2510 { ISD::SETCC, MVT::v16i32, 1 }, 2511 { ISD::SETCC, MVT::v8f64, 1 }, 2512 { ISD::SETCC, MVT::v16f32, 1 }, 2513 2514 { ISD::SELECT, MVT::v8i64, 1 }, 2515 { ISD::SELECT, MVT::v16i32, 1 }, 2516 { ISD::SELECT, MVT::v8f64, 1 }, 2517 { ISD::SELECT, MVT::v16f32, 1 }, 2518 2519 { ISD::SETCC, MVT::v32i16, 2 }, // FIXME: should probably be 4 2520 { ISD::SETCC, MVT::v64i8, 2 }, // FIXME: should probably be 4 2521 2522 { ISD::SELECT, MVT::v32i16, 2 }, // FIXME: should be 3 2523 { ISD::SELECT, MVT::v64i8, 2 }, // FIXME: should be 3 2524 }; 2525 2526 static const CostTblEntry AVX2CostTbl[] = { 2527 { ISD::SETCC, MVT::v4i64, 1 }, 2528 { ISD::SETCC, MVT::v8i32, 1 }, 2529 { ISD::SETCC, MVT::v16i16, 1 }, 2530 { ISD::SETCC, MVT::v32i8, 1 }, 2531 2532 { ISD::SELECT, MVT::v4i64, 1 }, // pblendvb 2533 { ISD::SELECT, MVT::v8i32, 1 }, // pblendvb 2534 { ISD::SELECT, MVT::v16i16, 1 }, // pblendvb 2535 { ISD::SELECT, MVT::v32i8, 1 }, // pblendvb 2536 }; 2537 2538 static const CostTblEntry AVX1CostTbl[] = { 2539 { ISD::SETCC, MVT::v4f64, 1 }, 2540 { ISD::SETCC, MVT::v8f32, 1 }, 2541 // AVX1 does not support 8-wide integer compare. 2542 { ISD::SETCC, MVT::v4i64, 4 }, 2543 { ISD::SETCC, MVT::v8i32, 4 }, 2544 { ISD::SETCC, MVT::v16i16, 4 }, 2545 { ISD::SETCC, MVT::v32i8, 4 }, 2546 2547 { ISD::SELECT, MVT::v4f64, 1 }, // vblendvpd 2548 { ISD::SELECT, MVT::v8f32, 1 }, // vblendvps 2549 { ISD::SELECT, MVT::v4i64, 1 }, // vblendvpd 2550 { ISD::SELECT, MVT::v8i32, 1 }, // vblendvps 2551 { ISD::SELECT, MVT::v16i16, 3 }, // vandps + vandnps + vorps 2552 { ISD::SELECT, MVT::v32i8, 3 }, // vandps + vandnps + vorps 2553 }; 2554 2555 static const CostTblEntry SSE42CostTbl[] = { 2556 { ISD::SETCC, MVT::v2f64, 1 }, 2557 { ISD::SETCC, MVT::v4f32, 1 }, 2558 { ISD::SETCC, MVT::v2i64, 1 }, 2559 }; 2560 2561 static const CostTblEntry SSE41CostTbl[] = { 2562 { ISD::SELECT, MVT::v2f64, 1 }, // blendvpd 2563 { ISD::SELECT, MVT::v4f32, 1 }, // blendvps 2564 { ISD::SELECT, MVT::v2i64, 1 }, // pblendvb 2565 { ISD::SELECT, MVT::v4i32, 1 }, // pblendvb 2566 { ISD::SELECT, MVT::v8i16, 1 }, // pblendvb 2567 { ISD::SELECT, MVT::v16i8, 1 }, // pblendvb 2568 }; 2569 2570 static const CostTblEntry SSE2CostTbl[] = { 2571 { ISD::SETCC, MVT::v2f64, 2 }, 2572 { ISD::SETCC, MVT::f64, 1 }, 2573 { ISD::SETCC, MVT::v2i64, 8 }, 2574 { ISD::SETCC, MVT::v4i32, 1 }, 2575 { ISD::SETCC, MVT::v8i16, 1 }, 2576 { ISD::SETCC, MVT::v16i8, 1 }, 2577 2578 { ISD::SELECT, MVT::v2f64, 3 }, // andpd + andnpd + orpd 2579 { ISD::SELECT, MVT::v2i64, 3 }, // pand + pandn + por 2580 { ISD::SELECT, MVT::v4i32, 3 }, // pand + pandn + por 2581 { ISD::SELECT, MVT::v8i16, 3 }, // pand + pandn + por 2582 { ISD::SELECT, MVT::v16i8, 3 }, // pand + pandn + por 2583 }; 2584 2585 static const CostTblEntry SSE1CostTbl[] = { 2586 { ISD::SETCC, MVT::v4f32, 2 }, 2587 { ISD::SETCC, MVT::f32, 1 }, 2588 2589 { ISD::SELECT, MVT::v4f32, 3 }, // andps + andnps + orps 2590 }; 2591 2592 if (ST->useSLMArithCosts()) 2593 if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, 
MTy)) 2594 return LT.first * (ExtraCost + Entry->Cost); 2595 2596 if (ST->hasBWI()) 2597 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy)) 2598 return LT.first * (ExtraCost + Entry->Cost); 2599 2600 if (ST->hasAVX512()) 2601 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy)) 2602 return LT.first * (ExtraCost + Entry->Cost); 2603 2604 if (ST->hasAVX2()) 2605 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy)) 2606 return LT.first * (ExtraCost + Entry->Cost); 2607 2608 if (ST->hasAVX()) 2609 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy)) 2610 return LT.first * (ExtraCost + Entry->Cost); 2611 2612 if (ST->hasSSE42()) 2613 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy)) 2614 return LT.first * (ExtraCost + Entry->Cost); 2615 2616 if (ST->hasSSE41()) 2617 if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy)) 2618 return LT.first * (ExtraCost + Entry->Cost); 2619 2620 if (ST->hasSSE2()) 2621 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy)) 2622 return LT.first * (ExtraCost + Entry->Cost); 2623 2624 if (ST->hasSSE1()) 2625 if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy)) 2626 return LT.first * (ExtraCost + Entry->Cost); 2627 2628 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I); 2629 } 2630 2631 unsigned X86TTIImpl::getAtomicMemIntrinsicMaxElementSize() const { return 16; } 2632 2633 InstructionCost 2634 X86TTIImpl::getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, 2635 TTI::TargetCostKind CostKind) { 2636 2637 // Costs should match the codegen from: 2638 // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll 2639 // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll 2640 // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll 2641 // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll 2642 // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll 2643 2644 // TODO: Overflow intrinsics (*ADDO, *SUBO, *MULO) with vector types are not 2645 // specialized in these tables yet. 
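  // As with the conversion tables above, these should be consulted from the
  // most specific feature set down to the most generic; e.g. ctpop of
  // <64 x i8> costs 1 on a BITALG target (vpopcntb) but 6 via the generic
  // AVX512BW sequence without it.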
2646 static const CostTblEntry AVX512BITALGCostTbl[] = { 2647 { ISD::CTPOP, MVT::v32i16, 1 }, 2648 { ISD::CTPOP, MVT::v64i8, 1 }, 2649 { ISD::CTPOP, MVT::v16i16, 1 }, 2650 { ISD::CTPOP, MVT::v32i8, 1 }, 2651 { ISD::CTPOP, MVT::v8i16, 1 }, 2652 { ISD::CTPOP, MVT::v16i8, 1 }, 2653 }; 2654 static const CostTblEntry AVX512VPOPCNTDQCostTbl[] = { 2655 { ISD::CTPOP, MVT::v8i64, 1 }, 2656 { ISD::CTPOP, MVT::v16i32, 1 }, 2657 { ISD::CTPOP, MVT::v4i64, 1 }, 2658 { ISD::CTPOP, MVT::v8i32, 1 }, 2659 { ISD::CTPOP, MVT::v2i64, 1 }, 2660 { ISD::CTPOP, MVT::v4i32, 1 }, 2661 }; 2662 static const CostTblEntry AVX512CDCostTbl[] = { 2663 { ISD::CTLZ, MVT::v8i64, 1 }, 2664 { ISD::CTLZ, MVT::v16i32, 1 }, 2665 { ISD::CTLZ, MVT::v32i16, 8 }, 2666 { ISD::CTLZ, MVT::v64i8, 20 }, 2667 { ISD::CTLZ, MVT::v4i64, 1 }, 2668 { ISD::CTLZ, MVT::v8i32, 1 }, 2669 { ISD::CTLZ, MVT::v16i16, 4 }, 2670 { ISD::CTLZ, MVT::v32i8, 10 }, 2671 { ISD::CTLZ, MVT::v2i64, 1 }, 2672 { ISD::CTLZ, MVT::v4i32, 1 }, 2673 { ISD::CTLZ, MVT::v8i16, 4 }, 2674 { ISD::CTLZ, MVT::v16i8, 4 }, 2675 }; 2676 static const CostTblEntry AVX512BWCostTbl[] = { 2677 { ISD::ABS, MVT::v32i16, 1 }, 2678 { ISD::ABS, MVT::v64i8, 1 }, 2679 { ISD::BITREVERSE, MVT::v8i64, 3 }, 2680 { ISD::BITREVERSE, MVT::v16i32, 3 }, 2681 { ISD::BITREVERSE, MVT::v32i16, 3 }, 2682 { ISD::BITREVERSE, MVT::v64i8, 2 }, 2683 { ISD::BSWAP, MVT::v8i64, 1 }, 2684 { ISD::BSWAP, MVT::v16i32, 1 }, 2685 { ISD::BSWAP, MVT::v32i16, 1 }, 2686 { ISD::CTLZ, MVT::v8i64, 23 }, 2687 { ISD::CTLZ, MVT::v16i32, 22 }, 2688 { ISD::CTLZ, MVT::v32i16, 18 }, 2689 { ISD::CTLZ, MVT::v64i8, 17 }, 2690 { ISD::CTPOP, MVT::v8i64, 7 }, 2691 { ISD::CTPOP, MVT::v16i32, 11 }, 2692 { ISD::CTPOP, MVT::v32i16, 9 }, 2693 { ISD::CTPOP, MVT::v64i8, 6 }, 2694 { ISD::CTTZ, MVT::v8i64, 10 }, 2695 { ISD::CTTZ, MVT::v16i32, 14 }, 2696 { ISD::CTTZ, MVT::v32i16, 12 }, 2697 { ISD::CTTZ, MVT::v64i8, 9 }, 2698 { ISD::SADDSAT, MVT::v32i16, 1 }, 2699 { ISD::SADDSAT, MVT::v64i8, 1 }, 2700 { ISD::SMAX, MVT::v32i16, 1 }, 2701 { ISD::SMAX, MVT::v64i8, 1 }, 2702 { ISD::SMIN, MVT::v32i16, 1 }, 2703 { ISD::SMIN, MVT::v64i8, 1 }, 2704 { ISD::SSUBSAT, MVT::v32i16, 1 }, 2705 { ISD::SSUBSAT, MVT::v64i8, 1 }, 2706 { ISD::UADDSAT, MVT::v32i16, 1 }, 2707 { ISD::UADDSAT, MVT::v64i8, 1 }, 2708 { ISD::UMAX, MVT::v32i16, 1 }, 2709 { ISD::UMAX, MVT::v64i8, 1 }, 2710 { ISD::UMIN, MVT::v32i16, 1 }, 2711 { ISD::UMIN, MVT::v64i8, 1 }, 2712 { ISD::USUBSAT, MVT::v32i16, 1 }, 2713 { ISD::USUBSAT, MVT::v64i8, 1 }, 2714 }; 2715 static const CostTblEntry AVX512CostTbl[] = { 2716 { ISD::ABS, MVT::v8i64, 1 }, 2717 { ISD::ABS, MVT::v16i32, 1 }, 2718 { ISD::ABS, MVT::v32i16, 2 }, 2719 { ISD::ABS, MVT::v64i8, 2 }, 2720 { ISD::ABS, MVT::v4i64, 1 }, 2721 { ISD::ABS, MVT::v2i64, 1 }, 2722 { ISD::BITREVERSE, MVT::v8i64, 36 }, 2723 { ISD::BITREVERSE, MVT::v16i32, 24 }, 2724 { ISD::BITREVERSE, MVT::v32i16, 10 }, 2725 { ISD::BITREVERSE, MVT::v64i8, 10 }, 2726 { ISD::BSWAP, MVT::v8i64, 4 }, 2727 { ISD::BSWAP, MVT::v16i32, 4 }, 2728 { ISD::BSWAP, MVT::v32i16, 4 }, 2729 { ISD::CTLZ, MVT::v8i64, 29 }, 2730 { ISD::CTLZ, MVT::v16i32, 35 }, 2731 { ISD::CTLZ, MVT::v32i16, 28 }, 2732 { ISD::CTLZ, MVT::v64i8, 18 }, 2733 { ISD::CTPOP, MVT::v8i64, 16 }, 2734 { ISD::CTPOP, MVT::v16i32, 24 }, 2735 { ISD::CTPOP, MVT::v32i16, 18 }, 2736 { ISD::CTPOP, MVT::v64i8, 12 }, 2737 { ISD::CTTZ, MVT::v8i64, 20 }, 2738 { ISD::CTTZ, MVT::v16i32, 28 }, 2739 { ISD::CTTZ, MVT::v32i16, 24 }, 2740 { ISD::CTTZ, MVT::v64i8, 18 }, 2741 { ISD::SMAX, MVT::v8i64, 1 }, 2742 { ISD::SMAX, MVT::v16i32, 1 }, 2743 { 
ISD::SMAX, MVT::v32i16, 2 }, 2744 { ISD::SMAX, MVT::v64i8, 2 }, 2745 { ISD::SMAX, MVT::v4i64, 1 }, 2746 { ISD::SMAX, MVT::v2i64, 1 }, 2747 { ISD::SMIN, MVT::v8i64, 1 }, 2748 { ISD::SMIN, MVT::v16i32, 1 }, 2749 { ISD::SMIN, MVT::v32i16, 2 }, 2750 { ISD::SMIN, MVT::v64i8, 2 }, 2751 { ISD::SMIN, MVT::v4i64, 1 }, 2752 { ISD::SMIN, MVT::v2i64, 1 }, 2753 { ISD::UMAX, MVT::v8i64, 1 }, 2754 { ISD::UMAX, MVT::v16i32, 1 }, 2755 { ISD::UMAX, MVT::v32i16, 2 }, 2756 { ISD::UMAX, MVT::v64i8, 2 }, 2757 { ISD::UMAX, MVT::v4i64, 1 }, 2758 { ISD::UMAX, MVT::v2i64, 1 }, 2759 { ISD::UMIN, MVT::v8i64, 1 }, 2760 { ISD::UMIN, MVT::v16i32, 1 }, 2761 { ISD::UMIN, MVT::v32i16, 2 }, 2762 { ISD::UMIN, MVT::v64i8, 2 }, 2763 { ISD::UMIN, MVT::v4i64, 1 }, 2764 { ISD::UMIN, MVT::v2i64, 1 }, 2765 { ISD::USUBSAT, MVT::v16i32, 2 }, // pmaxud + psubd 2766 { ISD::USUBSAT, MVT::v2i64, 2 }, // pmaxuq + psubq 2767 { ISD::USUBSAT, MVT::v4i64, 2 }, // pmaxuq + psubq 2768 { ISD::USUBSAT, MVT::v8i64, 2 }, // pmaxuq + psubq 2769 { ISD::UADDSAT, MVT::v16i32, 3 }, // not + pminud + paddd 2770 { ISD::UADDSAT, MVT::v2i64, 3 }, // not + pminuq + paddq 2771 { ISD::UADDSAT, MVT::v4i64, 3 }, // not + pminuq + paddq 2772 { ISD::UADDSAT, MVT::v8i64, 3 }, // not + pminuq + paddq 2773 { ISD::SADDSAT, MVT::v32i16, 2 }, 2774 { ISD::SADDSAT, MVT::v64i8, 2 }, 2775 { ISD::SSUBSAT, MVT::v32i16, 2 }, 2776 { ISD::SSUBSAT, MVT::v64i8, 2 }, 2777 { ISD::UADDSAT, MVT::v32i16, 2 }, 2778 { ISD::UADDSAT, MVT::v64i8, 2 }, 2779 { ISD::USUBSAT, MVT::v32i16, 2 }, 2780 { ISD::USUBSAT, MVT::v64i8, 2 }, 2781 { ISD::FMAXNUM, MVT::f32, 2 }, 2782 { ISD::FMAXNUM, MVT::v4f32, 2 }, 2783 { ISD::FMAXNUM, MVT::v8f32, 2 }, 2784 { ISD::FMAXNUM, MVT::v16f32, 2 }, 2785 { ISD::FMAXNUM, MVT::f64, 2 }, 2786 { ISD::FMAXNUM, MVT::v2f64, 2 }, 2787 { ISD::FMAXNUM, MVT::v4f64, 2 }, 2788 { ISD::FMAXNUM, MVT::v8f64, 2 }, 2789 }; 2790 static const CostTblEntry XOPCostTbl[] = { 2791 { ISD::BITREVERSE, MVT::v4i64, 4 }, 2792 { ISD::BITREVERSE, MVT::v8i32, 4 }, 2793 { ISD::BITREVERSE, MVT::v16i16, 4 }, 2794 { ISD::BITREVERSE, MVT::v32i8, 4 }, 2795 { ISD::BITREVERSE, MVT::v2i64, 1 }, 2796 { ISD::BITREVERSE, MVT::v4i32, 1 }, 2797 { ISD::BITREVERSE, MVT::v8i16, 1 }, 2798 { ISD::BITREVERSE, MVT::v16i8, 1 }, 2799 { ISD::BITREVERSE, MVT::i64, 3 }, 2800 { ISD::BITREVERSE, MVT::i32, 3 }, 2801 { ISD::BITREVERSE, MVT::i16, 3 }, 2802 { ISD::BITREVERSE, MVT::i8, 3 } 2803 }; 2804 static const CostTblEntry AVX2CostTbl[] = { 2805 { ISD::ABS, MVT::v4i64, 2 }, // VBLENDVPD(X,VPSUBQ(0,X),X) 2806 { ISD::ABS, MVT::v8i32, 1 }, 2807 { ISD::ABS, MVT::v16i16, 1 }, 2808 { ISD::ABS, MVT::v32i8, 1 }, 2809 { ISD::BITREVERSE, MVT::v2i64, 3 }, 2810 { ISD::BITREVERSE, MVT::v4i64, 3 }, 2811 { ISD::BITREVERSE, MVT::v4i32, 3 }, 2812 { ISD::BITREVERSE, MVT::v8i32, 3 }, 2813 { ISD::BITREVERSE, MVT::v8i16, 3 }, 2814 { ISD::BITREVERSE, MVT::v16i16, 3 }, 2815 { ISD::BITREVERSE, MVT::v16i8, 3 }, 2816 { ISD::BITREVERSE, MVT::v32i8, 3 }, 2817 { ISD::BSWAP, MVT::v4i64, 1 }, 2818 { ISD::BSWAP, MVT::v8i32, 1 }, 2819 { ISD::BSWAP, MVT::v16i16, 1 }, 2820 { ISD::CTLZ, MVT::v2i64, 7 }, 2821 { ISD::CTLZ, MVT::v4i64, 7 }, 2822 { ISD::CTLZ, MVT::v4i32, 5 }, 2823 { ISD::CTLZ, MVT::v8i32, 5 }, 2824 { ISD::CTLZ, MVT::v8i16, 4 }, 2825 { ISD::CTLZ, MVT::v16i16, 4 }, 2826 { ISD::CTLZ, MVT::v16i8, 3 }, 2827 { ISD::CTLZ, MVT::v32i8, 3 }, 2828 { ISD::CTPOP, MVT::v2i64, 3 }, 2829 { ISD::CTPOP, MVT::v4i64, 3 }, 2830 { ISD::CTPOP, MVT::v4i32, 7 }, 2831 { ISD::CTPOP, MVT::v8i32, 7 }, 2832 { ISD::CTPOP, MVT::v8i16, 3 }, 2833 { ISD::CTPOP, MVT::v16i16, 3 }, 
2834 { ISD::CTPOP, MVT::v16i8, 2 }, 2835 { ISD::CTPOP, MVT::v32i8, 2 }, 2836 { ISD::CTTZ, MVT::v2i64, 4 }, 2837 { ISD::CTTZ, MVT::v4i64, 4 }, 2838 { ISD::CTTZ, MVT::v4i32, 7 }, 2839 { ISD::CTTZ, MVT::v8i32, 7 }, 2840 { ISD::CTTZ, MVT::v8i16, 4 }, 2841 { ISD::CTTZ, MVT::v16i16, 4 }, 2842 { ISD::CTTZ, MVT::v16i8, 3 }, 2843 { ISD::CTTZ, MVT::v32i8, 3 }, 2844 { ISD::SADDSAT, MVT::v16i16, 1 }, 2845 { ISD::SADDSAT, MVT::v32i8, 1 }, 2846 { ISD::SMAX, MVT::v8i32, 1 }, 2847 { ISD::SMAX, MVT::v16i16, 1 }, 2848 { ISD::SMAX, MVT::v32i8, 1 }, 2849 { ISD::SMIN, MVT::v8i32, 1 }, 2850 { ISD::SMIN, MVT::v16i16, 1 }, 2851 { ISD::SMIN, MVT::v32i8, 1 }, 2852 { ISD::SSUBSAT, MVT::v16i16, 1 }, 2853 { ISD::SSUBSAT, MVT::v32i8, 1 }, 2854 { ISD::UADDSAT, MVT::v16i16, 1 }, 2855 { ISD::UADDSAT, MVT::v32i8, 1 }, 2856 { ISD::UADDSAT, MVT::v8i32, 3 }, // not + pminud + paddd 2857 { ISD::UMAX, MVT::v8i32, 1 }, 2858 { ISD::UMAX, MVT::v16i16, 1 }, 2859 { ISD::UMAX, MVT::v32i8, 1 }, 2860 { ISD::UMIN, MVT::v8i32, 1 }, 2861 { ISD::UMIN, MVT::v16i16, 1 }, 2862 { ISD::UMIN, MVT::v32i8, 1 }, 2863 { ISD::USUBSAT, MVT::v16i16, 1 }, 2864 { ISD::USUBSAT, MVT::v32i8, 1 }, 2865 { ISD::USUBSAT, MVT::v8i32, 2 }, // pmaxud + psubd 2866 { ISD::FMAXNUM, MVT::v8f32, 3 }, // MAXPS + CMPUNORDPS + BLENDVPS 2867 { ISD::FMAXNUM, MVT::v4f64, 3 }, // MAXPD + CMPUNORDPD + BLENDVPD 2868 { ISD::FSQRT, MVT::f32, 7 }, // Haswell from http://www.agner.org/ 2869 { ISD::FSQRT, MVT::v4f32, 7 }, // Haswell from http://www.agner.org/ 2870 { ISD::FSQRT, MVT::v8f32, 14 }, // Haswell from http://www.agner.org/ 2871 { ISD::FSQRT, MVT::f64, 14 }, // Haswell from http://www.agner.org/ 2872 { ISD::FSQRT, MVT::v2f64, 14 }, // Haswell from http://www.agner.org/ 2873 { ISD::FSQRT, MVT::v4f64, 28 }, // Haswell from http://www.agner.org/ 2874 }; 2875 static const CostTblEntry AVX1CostTbl[] = { 2876 { ISD::ABS, MVT::v4i64, 5 }, // VBLENDVPD(X,VPSUBQ(0,X),X) 2877 { ISD::ABS, MVT::v8i32, 3 }, 2878 { ISD::ABS, MVT::v16i16, 3 }, 2879 { ISD::ABS, MVT::v32i8, 3 }, 2880 { ISD::BITREVERSE, MVT::v4i64, 12 }, // 2 x 128-bit Op + extract/insert 2881 { ISD::BITREVERSE, MVT::v8i32, 12 }, // 2 x 128-bit Op + extract/insert 2882 { ISD::BITREVERSE, MVT::v16i16, 12 }, // 2 x 128-bit Op + extract/insert 2883 { ISD::BITREVERSE, MVT::v32i8, 12 }, // 2 x 128-bit Op + extract/insert 2884 { ISD::BSWAP, MVT::v4i64, 4 }, 2885 { ISD::BSWAP, MVT::v8i32, 4 }, 2886 { ISD::BSWAP, MVT::v16i16, 4 }, 2887 { ISD::CTLZ, MVT::v4i64, 48 }, // 2 x 128-bit Op + extract/insert 2888 { ISD::CTLZ, MVT::v8i32, 38 }, // 2 x 128-bit Op + extract/insert 2889 { ISD::CTLZ, MVT::v16i16, 30 }, // 2 x 128-bit Op + extract/insert 2890 { ISD::CTLZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert 2891 { ISD::CTPOP, MVT::v4i64, 16 }, // 2 x 128-bit Op + extract/insert 2892 { ISD::CTPOP, MVT::v8i32, 24 }, // 2 x 128-bit Op + extract/insert 2893 { ISD::CTPOP, MVT::v16i16, 20 }, // 2 x 128-bit Op + extract/insert 2894 { ISD::CTPOP, MVT::v32i8, 14 }, // 2 x 128-bit Op + extract/insert 2895 { ISD::CTTZ, MVT::v4i64, 22 }, // 2 x 128-bit Op + extract/insert 2896 { ISD::CTTZ, MVT::v8i32, 30 }, // 2 x 128-bit Op + extract/insert 2897 { ISD::CTTZ, MVT::v16i16, 26 }, // 2 x 128-bit Op + extract/insert 2898 { ISD::CTTZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert 2899 { ISD::SADDSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert 2900 { ISD::SADDSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 2901 { ISD::SMAX, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert 2902 { ISD::SMAX, MVT::v16i16, 4 }, // 2 x 
128-bit Op + extract/insert 2903 { ISD::SMAX, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 2904 { ISD::SMIN, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert 2905 { ISD::SMIN, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert 2906 { ISD::SMIN, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 2907 { ISD::SSUBSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert 2908 { ISD::SSUBSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 2909 { ISD::UADDSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert 2910 { ISD::UADDSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 2911 { ISD::UADDSAT, MVT::v8i32, 8 }, // 2 x 128-bit Op + extract/insert 2912 { ISD::UMAX, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert 2913 { ISD::UMAX, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert 2914 { ISD::UMAX, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 2915 { ISD::UMIN, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert 2916 { ISD::UMIN, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert 2917 { ISD::UMIN, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 2918 { ISD::USUBSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert 2919 { ISD::USUBSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 2920 { ISD::USUBSAT, MVT::v8i32, 6 }, // 2 x 128-bit Op + extract/insert 2921 { ISD::FMAXNUM, MVT::f32, 3 }, // MAXSS + CMPUNORDSS + BLENDVPS 2922 { ISD::FMAXNUM, MVT::v4f32, 3 }, // MAXPS + CMPUNORDPS + BLENDVPS 2923 { ISD::FMAXNUM, MVT::v8f32, 5 }, // MAXPS + CMPUNORDPS + BLENDVPS + ? 2924 { ISD::FMAXNUM, MVT::f64, 3 }, // MAXSD + CMPUNORDSD + BLENDVPD 2925 { ISD::FMAXNUM, MVT::v2f64, 3 }, // MAXPD + CMPUNORDPD + BLENDVPD 2926 { ISD::FMAXNUM, MVT::v4f64, 5 }, // MAXPD + CMPUNORDPD + BLENDVPD + ? 2927 { ISD::FSQRT, MVT::f32, 14 }, // SNB from http://www.agner.org/ 2928 { ISD::FSQRT, MVT::v4f32, 14 }, // SNB from http://www.agner.org/ 2929 { ISD::FSQRT, MVT::v8f32, 28 }, // SNB from http://www.agner.org/ 2930 { ISD::FSQRT, MVT::f64, 21 }, // SNB from http://www.agner.org/ 2931 { ISD::FSQRT, MVT::v2f64, 21 }, // SNB from http://www.agner.org/ 2932 { ISD::FSQRT, MVT::v4f64, 43 }, // SNB from http://www.agner.org/ 2933 }; 2934 static const CostTblEntry GLMCostTbl[] = { 2935 { ISD::FSQRT, MVT::f32, 19 }, // sqrtss 2936 { ISD::FSQRT, MVT::v4f32, 37 }, // sqrtps 2937 { ISD::FSQRT, MVT::f64, 34 }, // sqrtsd 2938 { ISD::FSQRT, MVT::v2f64, 67 }, // sqrtpd 2939 }; 2940 static const CostTblEntry SLMCostTbl[] = { 2941 { ISD::FSQRT, MVT::f32, 20 }, // sqrtss 2942 { ISD::FSQRT, MVT::v4f32, 40 }, // sqrtps 2943 { ISD::FSQRT, MVT::f64, 35 }, // sqrtsd 2944 { ISD::FSQRT, MVT::v2f64, 70 }, // sqrtpd 2945 }; 2946 static const CostTblEntry SSE42CostTbl[] = { 2947 { ISD::USUBSAT, MVT::v4i32, 2 }, // pmaxud + psubd 2948 { ISD::UADDSAT, MVT::v4i32, 3 }, // not + pminud + paddd 2949 { ISD::FSQRT, MVT::f32, 18 }, // Nehalem from http://www.agner.org/ 2950 { ISD::FSQRT, MVT::v4f32, 18 }, // Nehalem from http://www.agner.org/ 2951 }; 2952 static const CostTblEntry SSE41CostTbl[] = { 2953 { ISD::ABS, MVT::v2i64, 2 }, // BLENDVPD(X,PSUBQ(0,X),X) 2954 { ISD::SMAX, MVT::v4i32, 1 }, 2955 { ISD::SMAX, MVT::v16i8, 1 }, 2956 { ISD::SMIN, MVT::v4i32, 1 }, 2957 { ISD::SMIN, MVT::v16i8, 1 }, 2958 { ISD::UMAX, MVT::v4i32, 1 }, 2959 { ISD::UMAX, MVT::v8i16, 1 }, 2960 { ISD::UMIN, MVT::v4i32, 1 }, 2961 { ISD::UMIN, MVT::v8i16, 1 }, 2962 }; 2963 static const CostTblEntry SSSE3CostTbl[] = { 2964 { ISD::ABS, MVT::v4i32, 1 }, 2965 { ISD::ABS, MVT::v8i16, 1 }, 2966 { ISD::ABS, MVT::v16i8, 1 }, 2967 { 
ISD::BITREVERSE, MVT::v2i64, 5 }, 2968 { ISD::BITREVERSE, MVT::v4i32, 5 }, 2969 { ISD::BITREVERSE, MVT::v8i16, 5 }, 2970 { ISD::BITREVERSE, MVT::v16i8, 5 }, 2971 { ISD::BSWAP, MVT::v2i64, 1 }, 2972 { ISD::BSWAP, MVT::v4i32, 1 }, 2973 { ISD::BSWAP, MVT::v8i16, 1 }, 2974 { ISD::CTLZ, MVT::v2i64, 23 }, 2975 { ISD::CTLZ, MVT::v4i32, 18 }, 2976 { ISD::CTLZ, MVT::v8i16, 14 }, 2977 { ISD::CTLZ, MVT::v16i8, 9 }, 2978 { ISD::CTPOP, MVT::v2i64, 7 }, 2979 { ISD::CTPOP, MVT::v4i32, 11 }, 2980 { ISD::CTPOP, MVT::v8i16, 9 }, 2981 { ISD::CTPOP, MVT::v16i8, 6 }, 2982 { ISD::CTTZ, MVT::v2i64, 10 }, 2983 { ISD::CTTZ, MVT::v4i32, 14 }, 2984 { ISD::CTTZ, MVT::v8i16, 12 }, 2985 { ISD::CTTZ, MVT::v16i8, 9 } 2986 }; 2987 static const CostTblEntry SSE2CostTbl[] = { 2988 { ISD::ABS, MVT::v2i64, 4 }, 2989 { ISD::ABS, MVT::v4i32, 3 }, 2990 { ISD::ABS, MVT::v8i16, 2 }, 2991 { ISD::ABS, MVT::v16i8, 2 }, 2992 { ISD::BITREVERSE, MVT::v2i64, 29 }, 2993 { ISD::BITREVERSE, MVT::v4i32, 27 }, 2994 { ISD::BITREVERSE, MVT::v8i16, 27 }, 2995 { ISD::BITREVERSE, MVT::v16i8, 20 }, 2996 { ISD::BSWAP, MVT::v2i64, 7 }, 2997 { ISD::BSWAP, MVT::v4i32, 7 }, 2998 { ISD::BSWAP, MVT::v8i16, 7 }, 2999 { ISD::CTLZ, MVT::v2i64, 25 }, 3000 { ISD::CTLZ, MVT::v4i32, 26 }, 3001 { ISD::CTLZ, MVT::v8i16, 20 }, 3002 { ISD::CTLZ, MVT::v16i8, 17 }, 3003 { ISD::CTPOP, MVT::v2i64, 12 }, 3004 { ISD::CTPOP, MVT::v4i32, 15 }, 3005 { ISD::CTPOP, MVT::v8i16, 13 }, 3006 { ISD::CTPOP, MVT::v16i8, 10 }, 3007 { ISD::CTTZ, MVT::v2i64, 14 }, 3008 { ISD::CTTZ, MVT::v4i32, 18 }, 3009 { ISD::CTTZ, MVT::v8i16, 16 }, 3010 { ISD::CTTZ, MVT::v16i8, 13 }, 3011 { ISD::SADDSAT, MVT::v8i16, 1 }, 3012 { ISD::SADDSAT, MVT::v16i8, 1 }, 3013 { ISD::SMAX, MVT::v8i16, 1 }, 3014 { ISD::SMIN, MVT::v8i16, 1 }, 3015 { ISD::SSUBSAT, MVT::v8i16, 1 }, 3016 { ISD::SSUBSAT, MVT::v16i8, 1 }, 3017 { ISD::UADDSAT, MVT::v8i16, 1 }, 3018 { ISD::UADDSAT, MVT::v16i8, 1 }, 3019 { ISD::UMAX, MVT::v8i16, 2 }, 3020 { ISD::UMAX, MVT::v16i8, 1 }, 3021 { ISD::UMIN, MVT::v8i16, 2 }, 3022 { ISD::UMIN, MVT::v16i8, 1 }, 3023 { ISD::USUBSAT, MVT::v8i16, 1 }, 3024 { ISD::USUBSAT, MVT::v16i8, 1 }, 3025 { ISD::FMAXNUM, MVT::f64, 4 }, 3026 { ISD::FMAXNUM, MVT::v2f64, 4 }, 3027 { ISD::FSQRT, MVT::f64, 32 }, // Nehalem from http://www.agner.org/ 3028 { ISD::FSQRT, MVT::v2f64, 32 }, // Nehalem from http://www.agner.org/ 3029 }; 3030 static const CostTblEntry SSE1CostTbl[] = { 3031 { ISD::FMAXNUM, MVT::f32, 4 }, 3032 { ISD::FMAXNUM, MVT::v4f32, 4 }, 3033 { ISD::FSQRT, MVT::f32, 28 }, // Pentium III from http://www.agner.org/ 3034 { ISD::FSQRT, MVT::v4f32, 56 }, // Pentium III from http://www.agner.org/ 3035 }; 3036 static const CostTblEntry BMI64CostTbl[] = { // 64-bit targets 3037 { ISD::CTTZ, MVT::i64, 1 }, 3038 }; 3039 static const CostTblEntry BMI32CostTbl[] = { // 32 or 64-bit targets 3040 { ISD::CTTZ, MVT::i32, 1 }, 3041 { ISD::CTTZ, MVT::i16, 1 }, 3042 { ISD::CTTZ, MVT::i8, 1 }, 3043 }; 3044 static const CostTblEntry LZCNT64CostTbl[] = { // 64-bit targets 3045 { ISD::CTLZ, MVT::i64, 1 }, 3046 }; 3047 static const CostTblEntry LZCNT32CostTbl[] = { // 32 or 64-bit targets 3048 { ISD::CTLZ, MVT::i32, 1 }, 3049 { ISD::CTLZ, MVT::i16, 1 }, 3050 { ISD::CTLZ, MVT::i8, 1 }, 3051 }; 3052 static const CostTblEntry POPCNT64CostTbl[] = { // 64-bit targets 3053 { ISD::CTPOP, MVT::i64, 1 }, 3054 }; 3055 static const CostTblEntry POPCNT32CostTbl[] = { // 32 or 64-bit targets 3056 { ISD::CTPOP, MVT::i32, 1 }, 3057 { ISD::CTPOP, MVT::i16, 1 }, 3058 { ISD::CTPOP, MVT::i8, 1 }, 3059 }; 3060 static const CostTblEntry 
X64CostTbl[] = { // 64-bit targets 3061 { ISD::ABS, MVT::i64, 2 }, // SUB+CMOV 3062 { ISD::BITREVERSE, MVT::i64, 14 }, 3063 { ISD::BSWAP, MVT::i64, 1 }, 3064 { ISD::CTLZ, MVT::i64, 4 }, // BSR+XOR or BSR+XOR+CMOV 3065 { ISD::CTTZ, MVT::i64, 3 }, // TEST+BSF+CMOV/BRANCH 3066 { ISD::CTPOP, MVT::i64, 10 }, 3067 { ISD::SADDO, MVT::i64, 1 }, 3068 { ISD::UADDO, MVT::i64, 1 }, 3069 { ISD::UMULO, MVT::i64, 2 }, // mulq + seto 3070 }; 3071 static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets 3072 { ISD::ABS, MVT::i32, 2 }, // SUB+CMOV 3073 { ISD::ABS, MVT::i16, 2 }, // SUB+CMOV 3074 { ISD::BITREVERSE, MVT::i32, 14 }, 3075 { ISD::BITREVERSE, MVT::i16, 14 }, 3076 { ISD::BITREVERSE, MVT::i8, 11 }, 3077 { ISD::BSWAP, MVT::i32, 1 }, 3078 { ISD::BSWAP, MVT::i16, 1 }, // ROL 3079 { ISD::CTLZ, MVT::i32, 4 }, // BSR+XOR or BSR+XOR+CMOV 3080 { ISD::CTLZ, MVT::i16, 4 }, // BSR+XOR or BSR+XOR+CMOV 3081 { ISD::CTLZ, MVT::i8, 4 }, // BSR+XOR or BSR+XOR+CMOV 3082 { ISD::CTTZ, MVT::i32, 3 }, // TEST+BSF+CMOV/BRANCH 3083 { ISD::CTTZ, MVT::i16, 3 }, // TEST+BSF+CMOV/BRANCH 3084 { ISD::CTTZ, MVT::i8, 3 }, // TEST+BSF+CMOV/BRANCH 3085 { ISD::CTPOP, MVT::i32, 8 }, 3086 { ISD::CTPOP, MVT::i16, 9 }, 3087 { ISD::CTPOP, MVT::i8, 7 }, 3088 { ISD::SADDO, MVT::i32, 1 }, 3089 { ISD::SADDO, MVT::i16, 1 }, 3090 { ISD::SADDO, MVT::i8, 1 }, 3091 { ISD::UADDO, MVT::i32, 1 }, 3092 { ISD::UADDO, MVT::i16, 1 }, 3093 { ISD::UADDO, MVT::i8, 1 }, 3094 { ISD::UMULO, MVT::i32, 2 }, // mul + seto 3095 { ISD::UMULO, MVT::i16, 2 }, 3096 { ISD::UMULO, MVT::i8, 2 }, 3097 }; 3098 3099 Type *RetTy = ICA.getReturnType(); 3100 Type *OpTy = RetTy; 3101 Intrinsic::ID IID = ICA.getID(); 3102 unsigned ISD = ISD::DELETED_NODE; 3103 switch (IID) { 3104 default: 3105 break; 3106 case Intrinsic::abs: 3107 ISD = ISD::ABS; 3108 break; 3109 case Intrinsic::bitreverse: 3110 ISD = ISD::BITREVERSE; 3111 break; 3112 case Intrinsic::bswap: 3113 ISD = ISD::BSWAP; 3114 break; 3115 case Intrinsic::ctlz: 3116 ISD = ISD::CTLZ; 3117 break; 3118 case Intrinsic::ctpop: 3119 ISD = ISD::CTPOP; 3120 break; 3121 case Intrinsic::cttz: 3122 ISD = ISD::CTTZ; 3123 break; 3124 case Intrinsic::maxnum: 3125 case Intrinsic::minnum: 3126 // FMINNUM has same costs so don't duplicate. 3127 ISD = ISD::FMAXNUM; 3128 break; 3129 case Intrinsic::sadd_sat: 3130 ISD = ISD::SADDSAT; 3131 break; 3132 case Intrinsic::smax: 3133 ISD = ISD::SMAX; 3134 break; 3135 case Intrinsic::smin: 3136 ISD = ISD::SMIN; 3137 break; 3138 case Intrinsic::ssub_sat: 3139 ISD = ISD::SSUBSAT; 3140 break; 3141 case Intrinsic::uadd_sat: 3142 ISD = ISD::UADDSAT; 3143 break; 3144 case Intrinsic::umax: 3145 ISD = ISD::UMAX; 3146 break; 3147 case Intrinsic::umin: 3148 ISD = ISD::UMIN; 3149 break; 3150 case Intrinsic::usub_sat: 3151 ISD = ISD::USUBSAT; 3152 break; 3153 case Intrinsic::sqrt: 3154 ISD = ISD::FSQRT; 3155 break; 3156 case Intrinsic::sadd_with_overflow: 3157 case Intrinsic::ssub_with_overflow: 3158 // SSUBO has same costs so don't duplicate. 3159 ISD = ISD::SADDO; 3160 OpTy = RetTy->getContainedType(0); 3161 break; 3162 case Intrinsic::uadd_with_overflow: 3163 case Intrinsic::usub_with_overflow: 3164 // USUBO has same costs so don't duplicate. 3165 ISD = ISD::UADDO; 3166 OpTy = RetTy->getContainedType(0); 3167 break; 3168 case Intrinsic::umul_with_overflow: 3169 case Intrinsic::smul_with_overflow: 3170 // SMULO has same costs so don't duplicate. 3171 ISD = ISD::UMULO; 3172 OpTy = RetTy->getContainedType(0); 3173 break; 3174 } 3175 3176 if (ISD != ISD::DELETED_NODE) { 3177 // Legalize the type. 
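    // E.g. on an AVX2-only target an <8 x i64> operand legalizes to
    // MVT::v4i64 with a split factor LT.first == 2; adjustTableCost() below
    // scales every matched table cost by that factor.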
3178 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, OpTy); 3179 MVT MTy = LT.second; 3180 3181 // Attempt to lookup cost. 3182 if (ISD == ISD::BITREVERSE && ST->hasGFNI() && ST->hasSSSE3() && 3183 MTy.isVector()) { 3184 // With PSHUFB the code is very similar for all types. If we have integer 3185 // byte operations, we just need a GF2P8AFFINEQB for vXi8. For other types 3186 // we also need a PSHUFB. 3187 unsigned Cost = MTy.getVectorElementType() == MVT::i8 ? 1 : 2; 3188 3189 // Without byte operations, we need twice as many GF2P8AFFINEQB and PSHUFB 3190 // instructions. We also need an extract and an insert. 3191 if (!(MTy.is128BitVector() || (ST->hasAVX2() && MTy.is256BitVector()) || 3192 (ST->hasBWI() && MTy.is512BitVector()))) 3193 Cost = Cost * 2 + 2; 3194 3195 return LT.first * Cost; 3196 } 3197 3198 auto adjustTableCost = [](const CostTblEntry &Entry, 3199 InstructionCost LegalizationCost, 3200 FastMathFlags FMF) { 3201 // If there are no NANs to deal with, then these are reduced to a 3202 // single MIN** or MAX** instruction instead of the MIN/CMP/SELECT that we 3203 // assume is used in the non-fast case. 3204 if (Entry.ISD == ISD::FMAXNUM || Entry.ISD == ISD::FMINNUM) { 3205 if (FMF.noNaNs()) 3206 return LegalizationCost * 1; 3207 } 3208 return LegalizationCost * (int)Entry.Cost; 3209 }; 3210 3211 if (ST->useGLMDivSqrtCosts()) 3212 if (const auto *Entry = CostTableLookup(GLMCostTbl, ISD, MTy)) 3213 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3214 3215 if (ST->useSLMArithCosts()) 3216 if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy)) 3217 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3218 3219 if (ST->hasBITALG()) 3220 if (const auto *Entry = CostTableLookup(AVX512BITALGCostTbl, ISD, MTy)) 3221 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3222 3223 if (ST->hasVPOPCNTDQ()) 3224 if (const auto *Entry = CostTableLookup(AVX512VPOPCNTDQCostTbl, ISD, MTy)) 3225 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3226 3227 if (ST->hasCDI()) 3228 if (const auto *Entry = CostTableLookup(AVX512CDCostTbl, ISD, MTy)) 3229 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3230 3231 if (ST->hasBWI()) 3232 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy)) 3233 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3234 3235 if (ST->hasAVX512()) 3236 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy)) 3237 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3238 3239 if (ST->hasXOP()) 3240 if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy)) 3241 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3242 3243 if (ST->hasAVX2()) 3244 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy)) 3245 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3246 3247 if (ST->hasAVX()) 3248 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy)) 3249 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3250 3251 if (ST->hasSSE42()) 3252 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy)) 3253 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3254 3255 if (ST->hasSSE41()) 3256 if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy)) 3257 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3258 3259 if (ST->hasSSSE3()) 3260 if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy)) 3261 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3262 3263 if (ST->hasSSE2()) 3264 if (const auto *Entry = 
CostTableLookup(SSE2CostTbl, ISD, MTy)) 3265 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3266 3267 if (ST->hasSSE1()) 3268 if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy)) 3269 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3270 3271 if (ST->hasBMI()) { 3272 if (ST->is64Bit()) 3273 if (const auto *Entry = CostTableLookup(BMI64CostTbl, ISD, MTy)) 3274 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3275 3276 if (const auto *Entry = CostTableLookup(BMI32CostTbl, ISD, MTy)) 3277 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3278 } 3279 3280 if (ST->hasLZCNT()) { 3281 if (ST->is64Bit()) 3282 if (const auto *Entry = CostTableLookup(LZCNT64CostTbl, ISD, MTy)) 3283 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3284 3285 if (const auto *Entry = CostTableLookup(LZCNT32CostTbl, ISD, MTy)) 3286 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3287 } 3288 3289 if (ST->hasPOPCNT()) { 3290 if (ST->is64Bit()) 3291 if (const auto *Entry = CostTableLookup(POPCNT64CostTbl, ISD, MTy)) 3292 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3293 3294 if (const auto *Entry = CostTableLookup(POPCNT32CostTbl, ISD, MTy)) 3295 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3296 } 3297 3298 if (ISD == ISD::BSWAP && ST->hasMOVBE() && ST->hasFastMOVBE()) { 3299 if (const Instruction *II = ICA.getInst()) { 3300 if (II->hasOneUse() && isa<StoreInst>(II->user_back())) 3301 return TTI::TCC_Free; 3302 if (auto *LI = dyn_cast<LoadInst>(II->getOperand(0))) { 3303 if (LI->hasOneUse()) 3304 return TTI::TCC_Free; 3305 } 3306 } 3307 } 3308 3309 if (ST->is64Bit()) 3310 if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy)) 3311 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3312 3313 if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy)) 3314 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 3315 } 3316 3317 return BaseT::getIntrinsicInstrCost(ICA, CostKind); 3318 } 3319 3320 InstructionCost 3321 X86TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, 3322 TTI::TargetCostKind CostKind) { 3323 if (ICA.isTypeBasedOnly()) 3324 return getTypeBasedIntrinsicInstrCost(ICA, CostKind); 3325 3326 static const CostTblEntry AVX512CostTbl[] = { 3327 { ISD::ROTL, MVT::v8i64, 1 }, 3328 { ISD::ROTL, MVT::v4i64, 1 }, 3329 { ISD::ROTL, MVT::v2i64, 1 }, 3330 { ISD::ROTL, MVT::v16i32, 1 }, 3331 { ISD::ROTL, MVT::v8i32, 1 }, 3332 { ISD::ROTL, MVT::v4i32, 1 }, 3333 { ISD::ROTR, MVT::v8i64, 1 }, 3334 { ISD::ROTR, MVT::v4i64, 1 }, 3335 { ISD::ROTR, MVT::v2i64, 1 }, 3336 { ISD::ROTR, MVT::v16i32, 1 }, 3337 { ISD::ROTR, MVT::v8i32, 1 }, 3338 { ISD::ROTR, MVT::v4i32, 1 } 3339 }; 3340 // XOP: ROTL = VPROT(X,Y), ROTR = VPROT(X,SUB(0,Y)) 3341 static const CostTblEntry XOPCostTbl[] = { 3342 { ISD::ROTL, MVT::v4i64, 4 }, 3343 { ISD::ROTL, MVT::v8i32, 4 }, 3344 { ISD::ROTL, MVT::v16i16, 4 }, 3345 { ISD::ROTL, MVT::v32i8, 4 }, 3346 { ISD::ROTL, MVT::v2i64, 1 }, 3347 { ISD::ROTL, MVT::v4i32, 1 }, 3348 { ISD::ROTL, MVT::v8i16, 1 }, 3349 { ISD::ROTL, MVT::v16i8, 1 }, 3350 { ISD::ROTR, MVT::v4i64, 6 }, 3351 { ISD::ROTR, MVT::v8i32, 6 }, 3352 { ISD::ROTR, MVT::v16i16, 6 }, 3353 { ISD::ROTR, MVT::v32i8, 6 }, 3354 { ISD::ROTR, MVT::v2i64, 2 }, 3355 { ISD::ROTR, MVT::v4i32, 2 }, 3356 { ISD::ROTR, MVT::v8i16, 2 }, 3357 { ISD::ROTR, MVT::v16i8, 2 } 3358 }; 3359 static const CostTblEntry X64CostTbl[] = { // 64-bit targets 3360 { ISD::ROTL, MVT::i64, 1 }, 3361 { ISD::ROTR, MVT::i64, 1 }, 3362 { ISD::FSHL, MVT::i64, 4 } 3363 }; 
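  // For example, on a 64-bit target a scalar llvm.fshl.i64(x, y, z) maps to
  // the FSHL entry above (cost 4, roughly a shift+shift+or style expansion),
  // while llvm.fshl.i64(x, x, z) is recognised as a rotate by the switch
  // below and costs 1.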
3364 static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets 3365 { ISD::ROTL, MVT::i32, 1 }, 3366 { ISD::ROTL, MVT::i16, 1 }, 3367 { ISD::ROTL, MVT::i8, 1 }, 3368 { ISD::ROTR, MVT::i32, 1 }, 3369 { ISD::ROTR, MVT::i16, 1 }, 3370 { ISD::ROTR, MVT::i8, 1 }, 3371 { ISD::FSHL, MVT::i32, 4 }, 3372 { ISD::FSHL, MVT::i16, 4 }, 3373 { ISD::FSHL, MVT::i8, 4 } 3374 }; 3375 3376 Intrinsic::ID IID = ICA.getID(); 3377 Type *RetTy = ICA.getReturnType(); 3378 const SmallVectorImpl<const Value *> &Args = ICA.getArgs(); 3379 unsigned ISD = ISD::DELETED_NODE; 3380 switch (IID) { 3381 default: 3382 break; 3383 case Intrinsic::fshl: 3384 ISD = ISD::FSHL; 3385 if (Args[0] == Args[1]) 3386 ISD = ISD::ROTL; 3387 break; 3388 case Intrinsic::fshr: 3389 // FSHR has same costs so don't duplicate. 3390 ISD = ISD::FSHL; 3391 if (Args[0] == Args[1]) 3392 ISD = ISD::ROTR; 3393 break; 3394 } 3395 3396 if (ISD != ISD::DELETED_NODE) { 3397 // Legalize the type. 3398 std::pair<InstructionCost, MVT> LT = 3399 TLI->getTypeLegalizationCost(DL, RetTy); 3400 MVT MTy = LT.second; 3401 3402 // Attempt to lookup cost. 3403 if (ST->hasAVX512()) 3404 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy)) 3405 return LT.first * Entry->Cost; 3406 3407 if (ST->hasXOP()) 3408 if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy)) 3409 return LT.first * Entry->Cost; 3410 3411 if (ST->is64Bit()) 3412 if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy)) 3413 return LT.first * Entry->Cost; 3414 3415 if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy)) 3416 return LT.first * Entry->Cost; 3417 } 3418 3419 return BaseT::getIntrinsicInstrCost(ICA, CostKind); 3420 } 3421 3422 InstructionCost X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, 3423 unsigned Index) { 3424 static const CostTblEntry SLMCostTbl[] = { 3425 { ISD::EXTRACT_VECTOR_ELT, MVT::i8, 4 }, 3426 { ISD::EXTRACT_VECTOR_ELT, MVT::i16, 4 }, 3427 { ISD::EXTRACT_VECTOR_ELT, MVT::i32, 4 }, 3428 { ISD::EXTRACT_VECTOR_ELT, MVT::i64, 7 } 3429 }; 3430 3431 assert(Val->isVectorTy() && "This must be a vector type"); 3432 Type *ScalarType = Val->getScalarType(); 3433 int RegisterFileMoveCost = 0; 3434 3435 // Non-immediate extraction/insertion can be handled as a sequence of 3436 // aliased loads+stores via the stack. 3437 if (Index == -1U && (Opcode == Instruction::ExtractElement || 3438 Opcode == Instruction::InsertElement)) { 3439 // TODO: On some SSE41+ targets, we expand to cmp+splat+select patterns: 3440 // inselt N0, N1, N2 --> select (SplatN2 == {0,1,2...}) ? SplatN1 : N0. 3441 3442 // TODO: Move this to BasicTTIImpl.h? We'd need better gep + index handling. 3443 assert(isa<FixedVectorType>(Val) && "Fixed vector type expected"); 3444 Align VecAlign = DL.getPrefTypeAlign(Val); 3445 Align SclAlign = DL.getPrefTypeAlign(ScalarType); 3446 3447 // Extract - store vector to stack, load scalar. 3448 if (Opcode == Instruction::ExtractElement) { 3449 return getMemoryOpCost(Instruction::Store, Val, VecAlign, 0, 3450 TTI::TargetCostKind::TCK_RecipThroughput) + 3451 getMemoryOpCost(Instruction::Load, ScalarType, SclAlign, 0, 3452 TTI::TargetCostKind::TCK_RecipThroughput); 3453 } 3454 // Insert - store vector to stack, store scalar, load vector. 
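    // (The stack slot aliases the whole vector, so the variable index simply
    // becomes addressing arithmetic on the scalar slot; no shuffle sequence
    // is needed.)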
    if (Opcode == Instruction::InsertElement) {
      return getMemoryOpCost(Instruction::Store, Val, VecAlign, 0,
                             TTI::TargetCostKind::TCK_RecipThroughput) +
             getMemoryOpCost(Instruction::Store, ScalarType, SclAlign, 0,
                             TTI::TargetCostKind::TCK_RecipThroughput) +
             getMemoryOpCost(Instruction::Load, Val, VecAlign, 0,
                             TTI::TargetCostKind::TCK_RecipThroughput);
    }
  }

  if (Index != -1U && (Opcode == Instruction::ExtractElement ||
                       Opcode == Instruction::InsertElement)) {
    // Legalize the type.
    std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned NumElts = LT.second.getVectorNumElements();
    unsigned SubNumElts = NumElts;
    Index = Index % NumElts;

    // For >128-bit vectors, we need to extract higher 128-bit subvectors.
    // For inserts, we also need to insert the subvector back.
    if (LT.second.getSizeInBits() > 128) {
      assert((LT.second.getSizeInBits() % 128) == 0 && "Illegal vector");
      unsigned NumSubVecs = LT.second.getSizeInBits() / 128;
      SubNumElts = NumElts / NumSubVecs;
      if (SubNumElts <= Index) {
        RegisterFileMoveCost += (Opcode == Instruction::InsertElement ? 2 : 1);
        Index %= SubNumElts;
      }
    }

    if (Index == 0) {
      // Floating point scalars are already located in index #0.
      // Many insertions to #0 can fold away for scalar fp-ops, so let's assume
      // true for all.
      if (ScalarType->isFloatingPointTy())
        return RegisterFileMoveCost;

      // Assume movd/movq XMM -> GPR is relatively cheap on all targets.
      if (ScalarType->isIntegerTy() && Opcode == Instruction::ExtractElement)
        return 1 + RegisterFileMoveCost;
    }

    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Unexpected vector opcode");
    MVT MScalarTy = LT.second.getScalarType();
    if (ST->useSLMArithCosts())
      if (auto *Entry = CostTableLookup(SLMCostTbl, ISD, MScalarTy))
        return Entry->Cost + RegisterFileMoveCost;

    // Assume pinsr/pextr XMM <-> GPR is relatively cheap on all targets.
    if ((MScalarTy == MVT::i16 && ST->hasSSE2()) ||
        (MScalarTy.isInteger() && ST->hasSSE41()))
      return 1 + RegisterFileMoveCost;

    // Assume insertps is relatively cheap on all targets.
    if (MScalarTy == MVT::f32 && ST->hasSSE41() &&
        Opcode == Instruction::InsertElement)
      return 1 + RegisterFileMoveCost;

    // For extractions we just need to shuffle the element to index 0, which
    // should be very cheap (assume cost = 1). For insertions we need to
    // shuffle the elements to their destination. In both cases we must handle
    // the subvector move(s).
    // If the vector type is already less than 128-bits then don't reduce it.
    // TODO: Under what circumstances should we shuffle using the full width?
    InstructionCost ShuffleCost = 1;
    if (Opcode == Instruction::InsertElement) {
      auto *SubTy = cast<VectorType>(Val);
      EVT VT = TLI->getValueType(DL, Val);
      if (VT.getScalarType() != MScalarTy || VT.getSizeInBits() >= 128)
        SubTy = FixedVectorType::get(ScalarType, SubNumElts);
      ShuffleCost =
          getShuffleCost(TTI::SK_PermuteTwoSrc, SubTy, None, 0, SubTy);
    }
    int IntOrFpCost = ScalarType->isFloatingPointTy() ? 0 : 1;
    return ShuffleCost + IntOrFpCost + RegisterFileMoveCost;
  }

  // Add to the base cost if we know that the extracted element of a vector is
  // destined to be moved to and used in the integer register file.
  if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy())
    RegisterFileMoveCost += 1;

  return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost;
}

InstructionCost X86TTIImpl::getScalarizationOverhead(VectorType *Ty,
                                                     const APInt &DemandedElts,
                                                     bool Insert,
                                                     bool Extract) {
  InstructionCost Cost = 0;

  // For insertions, an ISD::BUILD_VECTOR style vector initialization can be
  // much cheaper than an accumulation of ISD::INSERT_VECTOR_ELT.
  if (Insert) {
    std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
    MVT MScalarTy = LT.second.getScalarType();

    if ((MScalarTy == MVT::i16 && ST->hasSSE2()) ||
        (MScalarTy.isInteger() && ST->hasSSE41()) ||
        (MScalarTy == MVT::f32 && ST->hasSSE41())) {
      // For types we can insert directly, insertion into 128-bit subvectors is
      // cheap, followed by a cheap chain of concatenations.
      if (LT.second.getSizeInBits() <= 128) {
        Cost +=
            BaseT::getScalarizationOverhead(Ty, DemandedElts, Insert, false);
      } else {
        // In each 128-lane, if at least one index is demanded but not all
        // indices are demanded and this 128-lane is not the first 128-lane of
        // the legalized vector, then this 128-lane needs an extracti128; if in
        // a 128-lane there is at least one demanded index, this 128-lane needs
        // an inserti128.

        // The following cases will help you build a better understanding:
        // Assume we insert several elements into a v8i32 vector in avx2,
        // Case#1: inserting into 1st index needs vpinsrd + inserti128.
        // Case#2: inserting into 5th index needs extracti128 + vpinsrd +
        //         inserti128.
        // Case#3: inserting into 4,5,6,7 index needs 4*vpinsrd + inserti128.
        const int CostValue = *LT.first.getValue();
        assert(CostValue >= 0 && "Negative cost!");
        unsigned Num128Lanes = LT.second.getSizeInBits() / 128 * CostValue;
        unsigned NumElts = LT.second.getVectorNumElements() * CostValue;
        APInt WidenedDemandedElts = DemandedElts.zextOrSelf(NumElts);
        unsigned Scale = NumElts / Num128Lanes;
        // We iterate each 128-lane, and check if we need an
        // extracti128/inserti128 for this 128-lane.
        for (unsigned I = 0; I < NumElts; I += Scale) {
          APInt Mask = WidenedDemandedElts.getBitsSet(NumElts, I, I + Scale);
          APInt MaskedDE = Mask & WidenedDemandedElts;
          unsigned Population = MaskedDE.countPopulation();
          Cost += (Population > 0 && Population != Scale &&
                   I % LT.second.getVectorNumElements() != 0);
          Cost += Population > 0;
        }
        Cost += DemandedElts.countPopulation();

        // For vXf32 cases, insertion into the 0'th index in each v4f32
        // 128-bit vector is free.
        // NOTE: This assumes legalization widens vXf32 vectors.
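        // E.g. for a fully-demanded v8f32 build on AVX this removes two of
        // the eight insertion costs, since lanes 0 and 4 are each lane #0 of
        // a v4f32 half.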
3601 if (MScalarTy == MVT::f32) 3602 for (unsigned i = 0, e = cast<FixedVectorType>(Ty)->getNumElements(); 3603 i < e; i += 4) 3604 if (DemandedElts[i]) 3605 Cost--; 3606 } 3607 } else if (LT.second.isVector()) { 3608 // Without fast insertion, we need to use MOVD/MOVQ to pass each demanded 3609 // integer element as a SCALAR_TO_VECTOR, then we build the vector as a 3610 // series of UNPCK followed by CONCAT_VECTORS - all of these can be 3611 // considered cheap. 3612 if (Ty->isIntOrIntVectorTy()) 3613 Cost += DemandedElts.countPopulation(); 3614 3615 // Get the smaller of the legalized or original pow2-extended number of 3616 // vector elements, which represents the number of unpacks we'll end up 3617 // performing. 3618 unsigned NumElts = LT.second.getVectorNumElements(); 3619 unsigned Pow2Elts = 3620 PowerOf2Ceil(cast<FixedVectorType>(Ty)->getNumElements()); 3621 Cost += (std::min<unsigned>(NumElts, Pow2Elts) - 1) * LT.first; 3622 } 3623 } 3624 3625 // TODO: Use default extraction for now, but we should investigate extending this 3626 // to handle repeated subvector extraction. 3627 if (Extract) 3628 Cost += BaseT::getScalarizationOverhead(Ty, DemandedElts, false, Extract); 3629 3630 return Cost; 3631 } 3632 3633 InstructionCost 3634 X86TTIImpl::getReplicationShuffleCost(Type *EltTy, int ReplicationFactor, 3635 int VF, const APInt &DemandedDstElts, 3636 TTI::TargetCostKind CostKind) { 3637 const unsigned EltTyBits = DL.getTypeSizeInBits(EltTy); 3638 // We don't differentiate element types here, only element bit width. 3639 EltTy = IntegerType::getIntNTy(EltTy->getContext(), EltTyBits); 3640 3641 auto bailout = [&]() { 3642 return BaseT::getReplicationShuffleCost(EltTy, ReplicationFactor, VF, 3643 DemandedDstElts, CostKind); 3644 }; 3645 3646 // For now, only deal with AVX512 cases. 3647 if (!ST->hasAVX512()) 3648 return bailout(); 3649 3650 // Do we have a native shuffle for this element type, or should we promote? 3651 unsigned PromEltTyBits = EltTyBits; 3652 switch (EltTyBits) { 3653 case 32: 3654 case 64: 3655 break; // AVX512F. 3656 case 16: 3657 if (!ST->hasBWI()) 3658 PromEltTyBits = 32; // promote to i32, AVX512F. 3659 break; // AVX512BW 3660 case 8: 3661 if (!ST->hasVBMI()) 3662 PromEltTyBits = 32; // promote to i32, AVX512F. 3663 break; // AVX512VBMI 3664 default: 3665 return bailout(); 3666 } 3667 auto *PromEltTy = IntegerType::getIntNTy(EltTy->getContext(), PromEltTyBits); 3668 3669 auto *SrcVecTy = FixedVectorType::get(EltTy, VF); 3670 auto *PromSrcVecTy = FixedVectorType::get(PromEltTy, VF); 3671 3672 int NumDstElements = VF * ReplicationFactor; 3673 auto *PromDstVecTy = FixedVectorType::get(PromEltTy, NumDstElements); 3674 auto *DstVecTy = FixedVectorType::get(EltTy, NumDstElements); 3675 3676 // Legalize the types. 3677 MVT LegalSrcVecTy = TLI->getTypeLegalizationCost(DL, SrcVecTy).second; 3678 MVT LegalPromSrcVecTy = TLI->getTypeLegalizationCost(DL, PromSrcVecTy).second; 3679 MVT LegalPromDstVecTy = TLI->getTypeLegalizationCost(DL, PromDstVecTy).second; 3680 MVT LegalDstVecTy = TLI->getTypeLegalizationCost(DL, DstVecTy).second; 3681 // They should have legalized into vector types. 
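  // For instance, vXi8 replication without AVX512VBMI is modelled below as
  // vXi32 replication plus an anyext of the source and a trunc of the result.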
  if (!LegalSrcVecTy.isVector() || !LegalPromSrcVecTy.isVector() ||
      !LegalPromDstVecTy.isVector() || !LegalDstVecTy.isVector())
    return bailout();

  if (PromEltTyBits != EltTyBits) {
    // If we have to perform the shuffle with a wider elt type than our data
    // type, then we will first need to anyext (we don't care about the new
    // bits) the source elements, and then truncate the Dst elements.
    InstructionCost PromotionCost;
    PromotionCost += getCastInstrCost(
        Instruction::SExt, /*Dst=*/PromSrcVecTy, /*Src=*/SrcVecTy,
        TargetTransformInfo::CastContextHint::None, CostKind);
    PromotionCost +=
        getCastInstrCost(Instruction::Trunc, /*Dst=*/DstVecTy,
                         /*Src=*/PromDstVecTy,
                         TargetTransformInfo::CastContextHint::None, CostKind);
    return PromotionCost + getReplicationShuffleCost(PromEltTy,
                                                     ReplicationFactor, VF,
                                                     DemandedDstElts, CostKind);
  }

  assert(LegalSrcVecTy.getScalarSizeInBits() == EltTyBits &&
         LegalSrcVecTy.getScalarType() == LegalDstVecTy.getScalarType() &&
         "We expect that the legalization doesn't affect the element width, "
         "doesn't coalesce/split elements.");

  unsigned NumEltsPerDstVec = LegalDstVecTy.getVectorNumElements();
  unsigned NumDstVectors =
      divideCeil(DstVecTy->getNumElements(), NumEltsPerDstVec);

  auto *SingleDstVecTy = FixedVectorType::get(EltTy, NumEltsPerDstVec);

  // Not all the produced Dst elements may be demanded. In our case,
  // given that a single Dst vector is formed by a single shuffle,
  // if all elements that will form a single Dst vector aren't demanded,
  // then we won't need to do that shuffle, so adjust the cost accordingly.
  APInt DemandedDstVectors = APIntOps::ScaleBitMask(
      DemandedDstElts.zextOrSelf(NumDstVectors * NumEltsPerDstVec),
      NumDstVectors);
  unsigned NumDstVectorsDemanded = DemandedDstVectors.countPopulation();

  InstructionCost SingleShuffleCost =
      getShuffleCost(TTI::SK_PermuteSingleSrc, SingleDstVecTy,
                     /*Mask=*/None, /*Index=*/0, /*SubTp=*/nullptr);
  return NumDstVectorsDemanded * SingleShuffleCost;
}

InstructionCost X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                            MaybeAlign Alignment,
                                            unsigned AddressSpace,
                                            TTI::TargetCostKind CostKind,
                                            const Instruction *I) {
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput) {
    if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
      // A store instruction with index and scale costs 2 uops.
      // Check the preceding GEP to identify non-const indices.
      if (auto *GEP = dyn_cast<GetElementPtrInst>(SI->getPointerOperand())) {
        if (!all_of(GEP->indices(), [](Value *V) { return isa<Constant>(V); }))
          return TTI::TCC_Basic * 2;
      }
    }
    return TTI::TCC_Basic;
  }

  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");
  // Type legalization can't handle structs.
  if (TLI->getValueType(DL, Src, true) == MVT::Other)
    return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                  CostKind);

  // Legalize the type.
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);

  auto *VTy = dyn_cast<FixedVectorType>(Src);

  // Handle the simple case of non-vectors.
  // NOTE: this assumes that legalization never creates a vector from scalars!
  if (!VTy || !LT.second.isVector())
    // Each load/store unit costs 1.
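    // (So a scalar that legalizes to multiple registers, e.g. i64 on a 32-bit
    // target, costs LT.first == 2.)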
    return LT.first * 1;

  bool IsLoad = Opcode == Instruction::Load;

  Type *EltTy = VTy->getElementType();

  const int EltTyBits = DL.getTypeSizeInBits(EltTy);

  InstructionCost Cost = 0;

  // Source of truth: how many elements were there in the original IR vector?
  const unsigned SrcNumElt = VTy->getNumElements();

  // How far have we gotten?
  int NumEltRemaining = SrcNumElt;
  // Note that we intentionally capture by reference, since NumEltRemaining
  // changes.
  auto NumEltDone = [&]() { return SrcNumElt - NumEltRemaining; };

  const int MaxLegalOpSizeBytes = divideCeil(LT.second.getSizeInBits(), 8);

  // Note that even if we can store 64 bits of an XMM, we still operate on XMM.
  const unsigned XMMBits = 128;
  if (XMMBits % EltTyBits != 0)
    // Vector size must be a multiple of the element size. I.e. no padding.
    return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                  CostKind);
  const int NumEltPerXMM = XMMBits / EltTyBits;

  auto *XMMVecTy = FixedVectorType::get(EltTy, NumEltPerXMM);

  for (int CurrOpSizeBytes = MaxLegalOpSizeBytes, SubVecEltsLeft = 0;
       NumEltRemaining > 0; CurrOpSizeBytes /= 2) {
    // How many elements would a single op deal with at once?
    if ((8 * CurrOpSizeBytes) % EltTyBits != 0)
      // Vector size must be a multiple of the element size. I.e. no padding.
      return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                    CostKind);
    int CurrNumEltPerOp = (8 * CurrOpSizeBytes) / EltTyBits;

    assert(CurrOpSizeBytes > 0 && CurrNumEltPerOp > 0 && "How'd we get here?");
    assert((((NumEltRemaining * EltTyBits) < (2 * 8 * CurrOpSizeBytes)) ||
            (CurrOpSizeBytes == MaxLegalOpSizeBytes)) &&
           "Unless we haven't halved the op size yet, "
           "we have less than two op's sized units of work left.");

    auto *CurrVecTy = CurrNumEltPerOp > NumEltPerXMM
                          ? FixedVectorType::get(EltTy, CurrNumEltPerOp)
                          : XMMVecTy;

    assert(CurrVecTy->getNumElements() % CurrNumEltPerOp == 0 &&
           "After halving sizes, the vector elt count is no longer a multiple "
           "of the number of elements per operation?");
    auto *CoalescedVecTy =
        CurrNumEltPerOp == 1
            ? CurrVecTy
            : FixedVectorType::get(
                  IntegerType::get(Src->getContext(),
                                   EltTyBits * CurrNumEltPerOp),
                  CurrVecTy->getNumElements() / CurrNumEltPerOp);
    assert(DL.getTypeSizeInBits(CoalescedVecTy) ==
               DL.getTypeSizeInBits(CurrVecTy) &&
           "Coalescing elements doesn't change vector width.");

    while (NumEltRemaining > 0) {
      assert(SubVecEltsLeft >= 0 && "Subreg element count overconsumption?");

      // Can we use this vector size, as per the remaining element count?
      // Iff the vector is naturally aligned, we can do a wide load regardless.
      if (NumEltRemaining < CurrNumEltPerOp &&
          (!IsLoad || Alignment.valueOrOne() < CurrOpSizeBytes) &&
          CurrOpSizeBytes != 1)
        break; // Try a smaller vector size.

      bool Is0thSubVec = (NumEltDone() % LT.second.getVectorNumElements()) == 0;

      // If we have fully processed the previous reg, we need to replenish it.
      if (SubVecEltsLeft == 0) {
        SubVecEltsLeft += CurrVecTy->getNumElements();
        // And that's free only for the 0'th subvector of a legalized vector.
        if (!Is0thSubVec)
          Cost +=
              getShuffleCost(IsLoad ? TTI::ShuffleKind::SK_InsertSubvector
                                    : TTI::ShuffleKind::SK_ExtractSubvector,
                             VTy, None, NumEltDone(), CurrVecTy);
      }

      // While we can directly load/store ZMM, YMM, and 64-bit halves of XMM,
      // for smaller widths (32/16/8) we have to insert/extract them
      // separately. Again, it's free for the 0'th subreg (if the op is
      // 32/64 bit wide, but let's pretend that it is also true for 16/8 bit
      // wide ops...)
      if (CurrOpSizeBytes <= 32 / 8 && !Is0thSubVec) {
        int NumEltDoneInCurrXMM = NumEltDone() % NumEltPerXMM;
        assert(NumEltDoneInCurrXMM % CurrNumEltPerOp == 0 &&
               "Must be at an op-sized boundary within the current XMM.");
        int CoalescedVecEltIdx = NumEltDoneInCurrXMM / CurrNumEltPerOp;
        APInt DemandedElts =
            APInt::getBitsSet(CoalescedVecTy->getNumElements(),
                              CoalescedVecEltIdx, CoalescedVecEltIdx + 1);
        assert(DemandedElts.countPopulation() == 1 && "Inserting single value");
        Cost += getScalarizationOverhead(CoalescedVecTy, DemandedElts, IsLoad,
                                         !IsLoad);
      }

      // This isn't exactly right. We're using slow unaligned 32-byte accesses
      // as a proxy for a double-pumped AVX memory interface such as on
      // Sandybridge.
      if (CurrOpSizeBytes == 32 && ST->isUnalignedMem32Slow())
        Cost += 2;
      else
        Cost += 1;

      SubVecEltsLeft -= CurrNumEltPerOp;
      NumEltRemaining -= CurrNumEltPerOp;
      Alignment = commonAlignment(Alignment.valueOrOne(), CurrOpSizeBytes);
    }
  }

  assert(NumEltRemaining <= 0 && "Should have processed all the elements.");

  return Cost;
}

InstructionCost
X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy, Align Alignment,
                                  unsigned AddressSpace,
                                  TTI::TargetCostKind CostKind) {
  bool IsLoad = (Instruction::Load == Opcode);
  bool IsStore = (Instruction::Store == Opcode);

  auto *SrcVTy = dyn_cast<FixedVectorType>(SrcTy);
  if (!SrcVTy)
    // For a scalar type, take the regular memory-op cost, without a mask.
    return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace, CostKind);

  unsigned NumElem = SrcVTy->getNumElements();
  auto *MaskTy =
      FixedVectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem);
  if ((IsLoad && !isLegalMaskedLoad(SrcVTy, Alignment)) ||
      (IsStore && !isLegalMaskedStore(SrcVTy, Alignment))) {
    // Scalarization
    APInt DemandedElts = APInt::getAllOnes(NumElem);
    InstructionCost MaskSplitCost =
        getScalarizationOverhead(MaskTy, DemandedElts, false, true);
    InstructionCost ScalarCompareCost = getCmpSelInstrCost(
        Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr,
        CmpInst::BAD_ICMP_PREDICATE, CostKind);
    InstructionCost BranchCost = getCFInstrCost(Instruction::Br, CostKind);
    InstructionCost MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);
    InstructionCost ValueSplitCost =
        getScalarizationOverhead(SrcVTy, DemandedElts, IsLoad, IsStore);
    InstructionCost MemopCost =
        NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                         Alignment, AddressSpace, CostKind);
    return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
  }

  // Legalize the type.
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
  auto VT = TLI->getValueType(DL, SrcVTy);
  InstructionCost Cost = 0;
  if (VT.isSimple() && LT.second != VT.getSimpleVT() &&
      LT.second.getVectorNumElements() == NumElem)
    // Promotion requires an extend/truncate for the data and a shuffle for the
    // mask.
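    // (I.e. the legal type keeps the element count, but the elements
    // themselves were promoted to a wider type.)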
    Cost += getShuffleCost(TTI::SK_PermuteTwoSrc, SrcVTy, None, 0, nullptr) +
            getShuffleCost(TTI::SK_PermuteTwoSrc, MaskTy, None, 0, nullptr);

  else if (LT.first * LT.second.getVectorNumElements() > NumElem) {
    auto *NewMaskTy = FixedVectorType::get(MaskTy->getElementType(),
                                           LT.second.getVectorNumElements());
    // Expanding requires filling the mask with zeroes.
    Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, None, 0, MaskTy);
  }

  // Pre-AVX512 - each maskmov load costs 2, each maskmov store costs ~8.
  if (!ST->hasAVX512())
    return Cost + LT.first * (IsLoad ? 2 : 8);

  // AVX-512 masked load/store is cheaper.
  return Cost + LT.first;
}

InstructionCost X86TTIImpl::getAddressComputationCost(Type *Ty,
                                                      ScalarEvolution *SE,
                                                      const SCEV *Ptr) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  const unsigned NumVectorInstToHideOverhead = 10;

  // Cost modeling of strided access computation is hidden by the indexing
  // modes of X86 regardless of the stride value. We don't believe that there
  // is a difference between constant strided access in general and a constant
  // strided value which is less than or equal to 64.
  // Even in the case of a (loop invariant) stride whose value is not known at
  // compile time, the address computation will not incur more than one extra
  // ADD instruction.
  if (Ty->isVectorTy() && SE) {
    if (!BaseT::isStridedAccess(Ptr))
      return NumVectorInstToHideOverhead;
    if (!BaseT::getConstantStrideStep(SE, Ptr))
      return 1;
  }

  return BaseT::getAddressComputationCost(Ty, SE, Ptr);
}

InstructionCost
X86TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
                                       Optional<FastMathFlags> FMF,
                                       TTI::TargetCostKind CostKind) {
  if (TTI::requiresOrderedReduction(FMF))
    return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use it as the cost.

  static const CostTblEntry SLMCostTblNoPairWise[] = {
    { ISD::FADD, MVT::v2f64, 3 },
    { ISD::ADD,  MVT::v2i64, 5 },
  };

  static const CostTblEntry SSE2CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v2f32, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD, MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD, MVT::v2i32, 2 }, // FIXME: chosen to be less than v4i32
    { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3".
    { ISD::ADD, MVT::v2i16, 2 }, // The data reported by the IACA tool is "4.3".
    { ISD::ADD, MVT::v4i16, 3 }, // The data reported by the IACA tool is "4.3".
    { ISD::ADD, MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3".
    { ISD::ADD, MVT::v2i8,  2 },
    { ISD::ADD, MVT::v4i8,  2 },
    { ISD::ADD, MVT::v8i8,  2 },
    { ISD::ADD, MVT::v16i8, 3 },
  };

  static const CostTblEntry AVX1CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v4f64, 3 },
    { ISD::FADD, MVT::v4f32, 3 },
    { ISD::FADD, MVT::v8f32, 4 },
    { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
4004 { ISD::ADD, MVT::v4i64, 3 }, 4005 { ISD::ADD, MVT::v8i32, 5 }, 4006 { ISD::ADD, MVT::v16i16, 5 }, 4007 { ISD::ADD, MVT::v32i8, 4 }, 4008 }; 4009 4010 int ISD = TLI->InstructionOpcodeToISD(Opcode); 4011 assert(ISD && "Invalid opcode"); 4012 4013 // Before legalizing the type, give a chance to look up illegal narrow types 4014 // in the table. 4015 // FIXME: Is there a better way to do this? 4016 EVT VT = TLI->getValueType(DL, ValTy); 4017 if (VT.isSimple()) { 4018 MVT MTy = VT.getSimpleVT(); 4019 if (ST->useSLMArithCosts()) 4020 if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy)) 4021 return Entry->Cost; 4022 4023 if (ST->hasAVX()) 4024 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy)) 4025 return Entry->Cost; 4026 4027 if (ST->hasSSE2()) 4028 if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy)) 4029 return Entry->Cost; 4030 } 4031 4032 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy); 4033 4034 MVT MTy = LT.second; 4035 4036 auto *ValVTy = cast<FixedVectorType>(ValTy); 4037 4038 // Special case: vXi8 mul reductions are performed as vXi16. 4039 if (ISD == ISD::MUL && MTy.getScalarType() == MVT::i8) { 4040 auto *WideSclTy = IntegerType::get(ValVTy->getContext(), 16); 4041 auto *WideVecTy = FixedVectorType::get(WideSclTy, ValVTy->getNumElements()); 4042 return getCastInstrCost(Instruction::ZExt, WideVecTy, ValTy, 4043 TargetTransformInfo::CastContextHint::None, 4044 CostKind) + 4045 getArithmeticReductionCost(Opcode, WideVecTy, FMF, CostKind); 4046 } 4047 4048 InstructionCost ArithmeticCost = 0; 4049 if (LT.first != 1 && MTy.isVector() && 4050 MTy.getVectorNumElements() < ValVTy->getNumElements()) { 4051 // Type needs to be split. We need LT.first - 1 arithmetic ops. 4052 auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(), 4053 MTy.getVectorNumElements()); 4054 ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind); 4055 ArithmeticCost *= LT.first - 1; 4056 } 4057 4058 if (ST->useSLMArithCosts()) 4059 if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy)) 4060 return ArithmeticCost + Entry->Cost; 4061 4062 if (ST->hasAVX()) 4063 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy)) 4064 return ArithmeticCost + Entry->Cost; 4065 4066 if (ST->hasSSE2()) 4067 if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy)) 4068 return ArithmeticCost + Entry->Cost; 4069 4070 // FIXME: These assume a naive kshift+binop lowering, which is probably 4071 // conservative in most cases. 
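  // Reading the AVX512 table below: each cost follows 2 * log2(N) + 1, i.e.
  // log2(N) kshift+binop stages plus a final move/compare of the mask, so a
  // v16i1 AND reduction is costed at 2 * 4 + 1 == 9.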
4072 static const CostTblEntry AVX512BoolReduction[] = { 4073 { ISD::AND, MVT::v2i1, 3 }, 4074 { ISD::AND, MVT::v4i1, 5 }, 4075 { ISD::AND, MVT::v8i1, 7 }, 4076 { ISD::AND, MVT::v16i1, 9 }, 4077 { ISD::AND, MVT::v32i1, 11 }, 4078 { ISD::AND, MVT::v64i1, 13 }, 4079 { ISD::OR, MVT::v2i1, 3 }, 4080 { ISD::OR, MVT::v4i1, 5 }, 4081 { ISD::OR, MVT::v8i1, 7 }, 4082 { ISD::OR, MVT::v16i1, 9 }, 4083 { ISD::OR, MVT::v32i1, 11 }, 4084 { ISD::OR, MVT::v64i1, 13 }, 4085 }; 4086 4087 static const CostTblEntry AVX2BoolReduction[] = { 4088 { ISD::AND, MVT::v16i16, 2 }, // vpmovmskb + cmp 4089 { ISD::AND, MVT::v32i8, 2 }, // vpmovmskb + cmp 4090 { ISD::OR, MVT::v16i16, 2 }, // vpmovmskb + cmp 4091 { ISD::OR, MVT::v32i8, 2 }, // vpmovmskb + cmp 4092 }; 4093 4094 static const CostTblEntry AVX1BoolReduction[] = { 4095 { ISD::AND, MVT::v4i64, 2 }, // vmovmskpd + cmp 4096 { ISD::AND, MVT::v8i32, 2 }, // vmovmskps + cmp 4097 { ISD::AND, MVT::v16i16, 4 }, // vextractf128 + vpand + vpmovmskb + cmp 4098 { ISD::AND, MVT::v32i8, 4 }, // vextractf128 + vpand + vpmovmskb + cmp 4099 { ISD::OR, MVT::v4i64, 2 }, // vmovmskpd + cmp 4100 { ISD::OR, MVT::v8i32, 2 }, // vmovmskps + cmp 4101 { ISD::OR, MVT::v16i16, 4 }, // vextractf128 + vpor + vpmovmskb + cmp 4102 { ISD::OR, MVT::v32i8, 4 }, // vextractf128 + vpor + vpmovmskb + cmp 4103 }; 4104 4105 static const CostTblEntry SSE2BoolReduction[] = { 4106 { ISD::AND, MVT::v2i64, 2 }, // movmskpd + cmp 4107 { ISD::AND, MVT::v4i32, 2 }, // movmskps + cmp 4108 { ISD::AND, MVT::v8i16, 2 }, // pmovmskb + cmp 4109 { ISD::AND, MVT::v16i8, 2 }, // pmovmskb + cmp 4110 { ISD::OR, MVT::v2i64, 2 }, // movmskpd + cmp 4111 { ISD::OR, MVT::v4i32, 2 }, // movmskps + cmp 4112 { ISD::OR, MVT::v8i16, 2 }, // pmovmskb + cmp 4113 { ISD::OR, MVT::v16i8, 2 }, // pmovmskb + cmp 4114 }; 4115 4116 // Handle bool allof/anyof patterns. 4117 if (ValVTy->getElementType()->isIntegerTy(1)) { 4118 InstructionCost ArithmeticCost = 0; 4119 if (LT.first != 1 && MTy.isVector() && 4120 MTy.getVectorNumElements() < ValVTy->getNumElements()) { 4121 // Type needs to be split. We need LT.first - 1 arithmetic ops. 4122 auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(), 4123 MTy.getVectorNumElements()); 4124 ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind); 4125 ArithmeticCost *= LT.first - 1; 4126 } 4127 4128 if (ST->hasAVX512()) 4129 if (const auto *Entry = CostTableLookup(AVX512BoolReduction, ISD, MTy)) 4130 return ArithmeticCost + Entry->Cost; 4131 if (ST->hasAVX2()) 4132 if (const auto *Entry = CostTableLookup(AVX2BoolReduction, ISD, MTy)) 4133 return ArithmeticCost + Entry->Cost; 4134 if (ST->hasAVX()) 4135 if (const auto *Entry = CostTableLookup(AVX1BoolReduction, ISD, MTy)) 4136 return ArithmeticCost + Entry->Cost; 4137 if (ST->hasSSE2()) 4138 if (const auto *Entry = CostTableLookup(SSE2BoolReduction, ISD, MTy)) 4139 return ArithmeticCost + Entry->Cost; 4140 4141 return BaseT::getArithmeticReductionCost(Opcode, ValVTy, FMF, CostKind); 4142 } 4143 4144 unsigned NumVecElts = ValVTy->getNumElements(); 4145 unsigned ScalarSize = ValVTy->getScalarSizeInBits(); 4146 4147 // Special case power of 2 reductions where the scalar type isn't changed 4148 // by type legalization. 
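  // (For example, a v8i32 add reduction keeps i32 elements after legalization
  // and is handled by the shuffle ladder below, while an odd element count or
  // a scalar type that legalization changes falls back to the generic
  // implementation.)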
4149 if (!isPowerOf2_32(NumVecElts) || ScalarSize != MTy.getScalarSizeInBits()) 4150 return BaseT::getArithmeticReductionCost(Opcode, ValVTy, FMF, CostKind); 4151 4152 InstructionCost ReductionCost = 0; 4153 4154 auto *Ty = ValVTy; 4155 if (LT.first != 1 && MTy.isVector() && 4156 MTy.getVectorNumElements() < ValVTy->getNumElements()) { 4157 // Type needs to be split. We need LT.first - 1 arithmetic ops. 4158 Ty = FixedVectorType::get(ValVTy->getElementType(), 4159 MTy.getVectorNumElements()); 4160 ReductionCost = getArithmeticInstrCost(Opcode, Ty, CostKind); 4161 ReductionCost *= LT.first - 1; 4162 NumVecElts = MTy.getVectorNumElements(); 4163 } 4164 4165 // Now handle reduction with the legal type, taking into account size changes 4166 // at each level. 4167 while (NumVecElts > 1) { 4168 // Determine the size of the remaining vector we need to reduce. 4169 unsigned Size = NumVecElts * ScalarSize; 4170 NumVecElts /= 2; 4171 // If we're reducing from 256/512 bits, use an extract_subvector. 4172 if (Size > 128) { 4173 auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts); 4174 ReductionCost += 4175 getShuffleCost(TTI::SK_ExtractSubvector, Ty, None, NumVecElts, SubTy); 4176 Ty = SubTy; 4177 } else if (Size == 128) { 4178 // Reducing from 128 bits is a permute of v2f64/v2i64. 4179 FixedVectorType *ShufTy; 4180 if (ValVTy->isFloatingPointTy()) 4181 ShufTy = 4182 FixedVectorType::get(Type::getDoubleTy(ValVTy->getContext()), 2); 4183 else 4184 ShufTy = 4185 FixedVectorType::get(Type::getInt64Ty(ValVTy->getContext()), 2); 4186 ReductionCost += 4187 getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr); 4188 } else if (Size == 64) { 4189 // Reducing from 64 bits is a shuffle of v4f32/v4i32. 4190 FixedVectorType *ShufTy; 4191 if (ValVTy->isFloatingPointTy()) 4192 ShufTy = 4193 FixedVectorType::get(Type::getFloatTy(ValVTy->getContext()), 4); 4194 else 4195 ShufTy = 4196 FixedVectorType::get(Type::getInt32Ty(ValVTy->getContext()), 4); 4197 ReductionCost += 4198 getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr); 4199 } else { 4200 // Reducing from smaller size is a shift by immediate. 4201 auto *ShiftTy = FixedVectorType::get( 4202 Type::getIntNTy(ValVTy->getContext(), Size), 128 / Size); 4203 ReductionCost += getArithmeticInstrCost( 4204 Instruction::LShr, ShiftTy, CostKind, 4205 TargetTransformInfo::OK_AnyValue, 4206 TargetTransformInfo::OK_UniformConstantValue, 4207 TargetTransformInfo::OP_None, TargetTransformInfo::OP_None); 4208 } 4209 4210 // Add the arithmetic op for this level. 4211 ReductionCost += getArithmeticInstrCost(Opcode, Ty, CostKind); 4212 } 4213 4214 // Add the final extract element to the cost. 4215 return ReductionCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0); 4216 } 4217 4218 InstructionCost X86TTIImpl::getMinMaxCost(Type *Ty, Type *CondTy, 4219 bool IsUnsigned) { 4220 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty); 4221 4222 MVT MTy = LT.second; 4223 4224 int ISD; 4225 if (Ty->isIntOrIntVectorTy()) { 4226 ISD = IsUnsigned ? 
ISD::UMIN : ISD::SMIN;
4227   } else {
4228     assert(Ty->isFPOrFPVectorTy() &&
4229            "Expected floating point or integer vector type.");
4230     ISD = ISD::FMINNUM;
4231   }
4232
4233   static const CostTblEntry SSE1CostTbl[] = {
4234     {ISD::FMINNUM, MVT::v4f32, 1},
4235   };
4236
4237   static const CostTblEntry SSE2CostTbl[] = {
4238     {ISD::FMINNUM, MVT::v2f64, 1},
4239     {ISD::SMIN, MVT::v8i16, 1},
4240     {ISD::UMIN, MVT::v16i8, 1},
4241   };
4242
4243   static const CostTblEntry SSE41CostTbl[] = {
4244     {ISD::SMIN, MVT::v4i32, 1},
4245     {ISD::UMIN, MVT::v4i32, 1},
4246     {ISD::UMIN, MVT::v8i16, 1},
4247     {ISD::SMIN, MVT::v16i8, 1},
4248   };
4249
4250   static const CostTblEntry SSE42CostTbl[] = {
4251     {ISD::UMIN, MVT::v2i64, 3}, // xor+pcmpgtq+blendvpd
4252   };
4253
4254   static const CostTblEntry AVX1CostTbl[] = {
4255     {ISD::FMINNUM, MVT::v8f32, 1},
4256     {ISD::FMINNUM, MVT::v4f64, 1},
4257     {ISD::SMIN, MVT::v8i32, 3},
4258     {ISD::UMIN, MVT::v8i32, 3},
4259     {ISD::SMIN, MVT::v16i16, 3},
4260     {ISD::UMIN, MVT::v16i16, 3},
4261     {ISD::SMIN, MVT::v32i8, 3},
4262     {ISD::UMIN, MVT::v32i8, 3},
4263   };
4264
4265   static const CostTblEntry AVX2CostTbl[] = {
4266     {ISD::SMIN, MVT::v8i32, 1},
4267     {ISD::UMIN, MVT::v8i32, 1},
4268     {ISD::SMIN, MVT::v16i16, 1},
4269     {ISD::UMIN, MVT::v16i16, 1},
4270     {ISD::SMIN, MVT::v32i8, 1},
4271     {ISD::UMIN, MVT::v32i8, 1},
4272   };
4273
4274   static const CostTblEntry AVX512CostTbl[] = {
4275     {ISD::FMINNUM, MVT::v16f32, 1},
4276     {ISD::FMINNUM, MVT::v8f64, 1},
4277     {ISD::SMIN, MVT::v2i64, 1},
4278     {ISD::UMIN, MVT::v2i64, 1},
4279     {ISD::SMIN, MVT::v4i64, 1},
4280     {ISD::UMIN, MVT::v4i64, 1},
4281     {ISD::SMIN, MVT::v8i64, 1},
4282     {ISD::UMIN, MVT::v8i64, 1},
4283     {ISD::SMIN, MVT::v16i32, 1},
4284     {ISD::UMIN, MVT::v16i32, 1},
4285   };
4286
4287   static const CostTblEntry AVX512BWCostTbl[] = {
4288     {ISD::SMIN, MVT::v32i16, 1},
4289     {ISD::UMIN, MVT::v32i16, 1},
4290     {ISD::SMIN, MVT::v64i8, 1},
4291     {ISD::UMIN, MVT::v64i8, 1},
4292   };
4293
4294   // If we have a native MIN/MAX instruction for this type, use it.
4295   if (ST->hasBWI())
4296     if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
4297       return LT.first * Entry->Cost;
4298
4299   if (ST->hasAVX512())
4300     if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
4301       return LT.first * Entry->Cost;
4302
4303   if (ST->hasAVX2())
4304     if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
4305       return LT.first * Entry->Cost;
4306
4307   if (ST->hasAVX())
4308     if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
4309       return LT.first * Entry->Cost;
4310
4311   if (ST->hasSSE42())
4312     if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
4313       return LT.first * Entry->Cost;
4314
4315   if (ST->hasSSE41())
4316     if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
4317       return LT.first * Entry->Cost;
4318
4319   if (ST->hasSSE2())
4320     if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
4321       return LT.first * Entry->Cost;
4322
4323   if (ST->hasSSE1())
4324     if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
4325       return LT.first * Entry->Cost;
4326
4327   unsigned CmpOpcode;
4328   if (Ty->isFPOrFPVectorTy()) {
4329     CmpOpcode = Instruction::FCmp;
4330   } else {
4331     assert(Ty->isIntOrIntVectorTy() &&
4332            "expecting floating point or integer type for min/max reduction");
4333     CmpOpcode = Instruction::ICmp;
4334   }
4335
4336   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
4337   // Otherwise fall back to cmp+select.
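  // (That is, the min/max is priced as one vector compare plus one vector
  // select/blend of Ty, both obtained from getCmpSelInstrCost below.)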
4338   InstructionCost Result =
4339       getCmpSelInstrCost(CmpOpcode, Ty, CondTy, CmpInst::BAD_ICMP_PREDICATE,
4340                          CostKind) +
4341       getCmpSelInstrCost(Instruction::Select, Ty, CondTy,
4342                          CmpInst::BAD_ICMP_PREDICATE, CostKind);
4343   return Result;
4344 }
4345
4346 InstructionCost
4347 X86TTIImpl::getMinMaxReductionCost(VectorType *ValTy, VectorType *CondTy,
4348                                    bool IsUnsigned,
4349                                    TTI::TargetCostKind CostKind) {
4350   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
4351
4352   MVT MTy = LT.second;
4353
4354   int ISD;
4355   if (ValTy->isIntOrIntVectorTy()) {
4356     ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
4357   } else {
4358     assert(ValTy->isFPOrFPVectorTy() &&
4359            "Expected floating point or integer vector type.");
4360     ISD = ISD::FMINNUM;
4361   }
4362
4363   // We use the Intel Architecture Code Analyzer (IACA) to measure the throughput
4364   // and use it as the cost.
4365
4366   static const CostTblEntry SSE2CostTblNoPairWise[] = {
4367       {ISD::UMIN, MVT::v2i16, 5}, // need pxors to use pminsw/pmaxsw
4368       {ISD::UMIN, MVT::v4i16, 7}, // need pxors to use pminsw/pmaxsw
4369       {ISD::UMIN, MVT::v8i16, 9}, // need pxors to use pminsw/pmaxsw
4370   };
4371
4372   static const CostTblEntry SSE41CostTblNoPairWise[] = {
4373       {ISD::SMIN, MVT::v2i16, 3}, // same as sse2
4374       {ISD::SMIN, MVT::v4i16, 5}, // same as sse2
4375       {ISD::UMIN, MVT::v2i16, 5}, // same as sse2
4376       {ISD::UMIN, MVT::v4i16, 7}, // same as sse2
4377       {ISD::SMIN, MVT::v8i16, 4}, // phminposuw+xor
4378       {ISD::UMIN, MVT::v8i16, 4}, // FIXME: umin is cheaper than umax
4379       {ISD::SMIN, MVT::v2i8, 3},  // pminsb
4380       {ISD::SMIN, MVT::v4i8, 5},  // pminsb
4381       {ISD::SMIN, MVT::v8i8, 7},  // pminsb
4382       {ISD::SMIN, MVT::v16i8, 6},
4383       {ISD::UMIN, MVT::v2i8, 3},  // same as sse2
4384       {ISD::UMIN, MVT::v4i8, 5},  // same as sse2
4385       {ISD::UMIN, MVT::v8i8, 7},  // same as sse2
4386       {ISD::UMIN, MVT::v16i8, 6}, // FIXME: umin is cheaper than umax
4387   };
4388
4389   static const CostTblEntry AVX1CostTblNoPairWise[] = {
4390       {ISD::SMIN, MVT::v16i16, 6},
4391       {ISD::UMIN, MVT::v16i16, 6}, // FIXME: umin is cheaper than umax
4392       {ISD::SMIN, MVT::v32i8, 8},
4393       {ISD::UMIN, MVT::v32i8, 8},
4394   };
4395
4396   static const CostTblEntry AVX512BWCostTblNoPairWise[] = {
4397       {ISD::SMIN, MVT::v32i16, 8},
4398       {ISD::UMIN, MVT::v32i16, 8}, // FIXME: umin is cheaper than umax
4399       {ISD::SMIN, MVT::v64i8, 10},
4400       {ISD::UMIN, MVT::v64i8, 10},
4401   };
4402
4403   // Before legalizing the type, give a chance to look up illegal narrow types
4404   // in the table.
4405   // FIXME: Is there a better way to do this?
4406   EVT VT = TLI->getValueType(DL, ValTy);
4407   if (VT.isSimple()) {
4408     MVT MTy = VT.getSimpleVT();
4409     if (ST->hasBWI())
4410       if (const auto *Entry = CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy))
4411         return Entry->Cost;
4412
4413     if (ST->hasAVX())
4414       if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
4415         return Entry->Cost;
4416
4417     if (ST->hasSSE41())
4418       if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy))
4419         return Entry->Cost;
4420
4421     if (ST->hasSSE2())
4422       if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
4423         return Entry->Cost;
4424   }
4425
4426   auto *ValVTy = cast<FixedVectorType>(ValTy);
4427   unsigned NumVecElts = ValVTy->getNumElements();
4428
4429   auto *Ty = ValVTy;
4430   InstructionCost MinMaxCost = 0;
4431   if (LT.first != 1 && MTy.isVector() &&
4432       MTy.getVectorNumElements() < ValVTy->getNumElements()) {
4433     // Type needs to be split.
We need LT.first - 1 operations.
4434     Ty = FixedVectorType::get(ValVTy->getElementType(),
4435                               MTy.getVectorNumElements());
4436     auto *SubCondTy = FixedVectorType::get(CondTy->getElementType(),
4437                                            MTy.getVectorNumElements());
4438     MinMaxCost = getMinMaxCost(Ty, SubCondTy, IsUnsigned);
4439     MinMaxCost *= LT.first - 1;
4440     NumVecElts = MTy.getVectorNumElements();
4441   }
4442
4443   if (ST->hasBWI())
4444     if (const auto *Entry = CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy))
4445       return MinMaxCost + Entry->Cost;
4446
4447   if (ST->hasAVX())
4448     if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
4449       return MinMaxCost + Entry->Cost;
4450
4451   if (ST->hasSSE41())
4452     if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy))
4453       return MinMaxCost + Entry->Cost;
4454
4455   if (ST->hasSSE2())
4456     if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
4457       return MinMaxCost + Entry->Cost;
4458
4459   unsigned ScalarSize = ValTy->getScalarSizeInBits();
4460
4461   // Special case power of 2 reductions where the scalar type isn't changed
4462   // by type legalization.
4463   if (!isPowerOf2_32(ValVTy->getNumElements()) ||
4464       ScalarSize != MTy.getScalarSizeInBits())
4465     return BaseT::getMinMaxReductionCost(ValTy, CondTy, IsUnsigned, CostKind);
4466
4467   // Now handle reduction with the legal type, taking into account size changes
4468   // at each level.
4469   while (NumVecElts > 1) {
4470     // Determine the size of the remaining vector we need to reduce.
4471     unsigned Size = NumVecElts * ScalarSize;
4472     NumVecElts /= 2;
4473     // If we're reducing from 256/512 bits, use an extract_subvector.
4474     if (Size > 128) {
4475       auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts);
4476       MinMaxCost +=
4477           getShuffleCost(TTI::SK_ExtractSubvector, Ty, None, NumVecElts, SubTy);
4478       Ty = SubTy;
4479     } else if (Size == 128) {
4480       // Reducing from 128 bits is a permute of v2f64/v2i64.
4481       VectorType *ShufTy;
4482       if (ValTy->isFloatingPointTy())
4483         ShufTy =
4484             FixedVectorType::get(Type::getDoubleTy(ValTy->getContext()), 2);
4485       else
4486         ShufTy = FixedVectorType::get(Type::getInt64Ty(ValTy->getContext()), 2);
4487       MinMaxCost +=
4488           getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
4489     } else if (Size == 64) {
4490       // Reducing from 64 bits is a shuffle of v4f32/v4i32.
4491       FixedVectorType *ShufTy;
4492       if (ValTy->isFloatingPointTy())
4493         ShufTy = FixedVectorType::get(Type::getFloatTy(ValTy->getContext()), 4);
4494       else
4495         ShufTy = FixedVectorType::get(Type::getInt32Ty(ValTy->getContext()), 4);
4496       MinMaxCost +=
4497           getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
4498     } else {
4499       // Reducing from a smaller size is a shift by immediate.
4500       auto *ShiftTy = FixedVectorType::get(
4501           Type::getIntNTy(ValTy->getContext(), Size), 128 / Size);
4502       MinMaxCost += getArithmeticInstrCost(
4503           Instruction::LShr, ShiftTy, TTI::TCK_RecipThroughput,
4504           TargetTransformInfo::OK_AnyValue,
4505           TargetTransformInfo::OK_UniformConstantValue,
4506           TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
4507     }
4508
4509     // Add the min/max op for this level.
4510     auto *SubCondTy =
4511         FixedVectorType::get(CondTy->getElementType(), Ty->getNumElements());
4512     MinMaxCost += getMinMaxCost(Ty, SubCondTy, IsUnsigned);
4513   }
4514
4515   // Add the final extract element to the cost.
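  // (Illustration: for a v8i32 smin on AVX2 the ladder above charges
  // extract_subvector + smin, then permute + smin, then shuffle + smin,
  // before the final element extract here.)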
4516   return MinMaxCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
4517 }
4518
4519 /// Calculate the cost of materializing a 64-bit value. This helper
4520 /// method might only calculate a fraction of a larger immediate. Therefore it
4521 /// is valid to return a cost of ZERO.
4522 InstructionCost X86TTIImpl::getIntImmCost(int64_t Val) {
4523   if (Val == 0)
4524     return TTI::TCC_Free;
4525
4526   if (isInt<32>(Val))
4527     return TTI::TCC_Basic;
4528
4529   return 2 * TTI::TCC_Basic;
4530 }
4531
4532 InstructionCost X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
4533                                           TTI::TargetCostKind CostKind) {
4534   assert(Ty->isIntegerTy());
4535
4536   unsigned BitSize = Ty->getPrimitiveSizeInBits();
4537   if (BitSize == 0)
4538     return ~0U;
4539
4540   // Never hoist constants larger than 128 bits, because this might lead to
4541   // incorrect code generation or assertions in codegen.
4542   // FIXME: Create a cost model for types larger than i128 once the codegen
4543   // issues have been fixed.
4544   if (BitSize > 128)
4545     return TTI::TCC_Free;
4546
4547   if (Imm == 0)
4548     return TTI::TCC_Free;
4549
4550   // Sign-extend all constants to a multiple of 64 bits.
4551   APInt ImmVal = Imm;
4552   if (BitSize % 64 != 0)
4553     ImmVal = Imm.sext(alignTo(BitSize, 64));
4554
4555   // Split the constant into 64-bit chunks and calculate the cost for each
4556   // chunk.
4557   InstructionCost Cost = 0;
4558   for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
4559     APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
4560     int64_t Val = Tmp.getSExtValue();
4561     Cost += getIntImmCost(Val);
4562   }
4563   // We need at least one instruction to materialize the constant.
4564   return std::max<InstructionCost>(1, Cost);
4565 }
4566
4567 InstructionCost X86TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
4568                                               const APInt &Imm, Type *Ty,
4569                                               TTI::TargetCostKind CostKind,
4570                                               Instruction *Inst) {
4571   assert(Ty->isIntegerTy());
4572
4573   unsigned BitSize = Ty->getPrimitiveSizeInBits();
4574   // There is no cost model for constants with a bit size of 0. Return TCC_Free
4575   // here, so that constant hoisting will ignore this constant.
4576   if (BitSize == 0)
4577     return TTI::TCC_Free;
4578
4579   unsigned ImmIdx = ~0U;
4580   switch (Opcode) {
4581   default:
4582     return TTI::TCC_Free;
4583   case Instruction::GetElementPtr:
4584     // Always hoist the base address of a GetElementPtr. This prevents the
4585     // creation of new constants for every base constant that gets constant
4586     // folded with the offset.
4587     if (Idx == 0)
4588       return 2 * TTI::TCC_Basic;
4589     return TTI::TCC_Free;
4590   case Instruction::Store:
4591     ImmIdx = 0;
4592     break;
4593   case Instruction::ICmp:
4594     // This is an imperfect hack to prevent constant hoisting of
4595     // compares that might be trying to check if a 64-bit value fits in
4596     // 32 bits. The backend can optimize these cases using a right shift by 32.
4597     // Ideally we would check the compare predicate here. There are also other
4598     // similar immediates the backend can use shifts for.
4599     if (Idx == 1 && Imm.getBitWidth() == 64) {
4600       uint64_t ImmVal = Imm.getZExtValue();
4601       if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff)
4602         return TTI::TCC_Free;
4603     }
4604     ImmIdx = 1;
4605     break;
4606   case Instruction::And:
4607     // We support 64-bit ANDs with immediates with 32-bits of leading zeroes
4608     // by using a 32-bit operation with implicit zero extension. Detect such
4609     // immediates here as the normal path expects bit 31 to be sign extended.
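    // (For example, a 64-bit 'and' with 0xfffffff0 cannot use a sign-extended
    // imm32 encoding since bit 31 is set, but the backend can emit a 32-bit
    // 'and' whose result implicitly zero-extends to 64 bits, so the immediate
    // needs no separate materialization.)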
4610 if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue())) 4611 return TTI::TCC_Free; 4612 ImmIdx = 1; 4613 break; 4614 case Instruction::Add: 4615 case Instruction::Sub: 4616 // For add/sub, we can use the opposite instruction for INT32_MIN. 4617 if (Idx == 1 && Imm.getBitWidth() == 64 && Imm.getZExtValue() == 0x80000000) 4618 return TTI::TCC_Free; 4619 ImmIdx = 1; 4620 break; 4621 case Instruction::UDiv: 4622 case Instruction::SDiv: 4623 case Instruction::URem: 4624 case Instruction::SRem: 4625 // Division by constant is typically expanded later into a different 4626 // instruction sequence. This completely changes the constants. 4627 // Report them as "free" to stop ConstantHoist from marking them as opaque. 4628 return TTI::TCC_Free; 4629 case Instruction::Mul: 4630 case Instruction::Or: 4631 case Instruction::Xor: 4632 ImmIdx = 1; 4633 break; 4634 // Always return TCC_Free for the shift value of a shift instruction. 4635 case Instruction::Shl: 4636 case Instruction::LShr: 4637 case Instruction::AShr: 4638 if (Idx == 1) 4639 return TTI::TCC_Free; 4640 break; 4641 case Instruction::Trunc: 4642 case Instruction::ZExt: 4643 case Instruction::SExt: 4644 case Instruction::IntToPtr: 4645 case Instruction::PtrToInt: 4646 case Instruction::BitCast: 4647 case Instruction::PHI: 4648 case Instruction::Call: 4649 case Instruction::Select: 4650 case Instruction::Ret: 4651 case Instruction::Load: 4652 break; 4653 } 4654 4655 if (Idx == ImmIdx) { 4656 int NumConstants = divideCeil(BitSize, 64); 4657 InstructionCost Cost = X86TTIImpl::getIntImmCost(Imm, Ty, CostKind); 4658 return (Cost <= NumConstants * TTI::TCC_Basic) 4659 ? static_cast<int>(TTI::TCC_Free) 4660 : Cost; 4661 } 4662 4663 return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind); 4664 } 4665 4666 InstructionCost X86TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, 4667 const APInt &Imm, Type *Ty, 4668 TTI::TargetCostKind CostKind) { 4669 assert(Ty->isIntegerTy()); 4670 4671 unsigned BitSize = Ty->getPrimitiveSizeInBits(); 4672 // There is no cost model for constants with a bit size of 0. Return TCC_Free 4673 // here, so that constant hoisting will ignore this constant. 4674 if (BitSize == 0) 4675 return TTI::TCC_Free; 4676 4677 switch (IID) { 4678 default: 4679 return TTI::TCC_Free; 4680 case Intrinsic::sadd_with_overflow: 4681 case Intrinsic::uadd_with_overflow: 4682 case Intrinsic::ssub_with_overflow: 4683 case Intrinsic::usub_with_overflow: 4684 case Intrinsic::smul_with_overflow: 4685 case Intrinsic::umul_with_overflow: 4686 if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue())) 4687 return TTI::TCC_Free; 4688 break; 4689 case Intrinsic::experimental_stackmap: 4690 if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue()))) 4691 return TTI::TCC_Free; 4692 break; 4693 case Intrinsic::experimental_patchpoint_void: 4694 case Intrinsic::experimental_patchpoint_i64: 4695 if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue()))) 4696 return TTI::TCC_Free; 4697 break; 4698 } 4699 return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind); 4700 } 4701 4702 InstructionCost X86TTIImpl::getCFInstrCost(unsigned Opcode, 4703 TTI::TargetCostKind CostKind, 4704 const Instruction *I) { 4705 if (CostKind != TTI::TCK_RecipThroughput) 4706 return Opcode == Instruction::PHI ? 0 : 1; 4707 // Branches are assumed to be predicted. 4708 return 0; 4709 } 4710 4711 int X86TTIImpl::getGatherOverhead() const { 4712 // Some CPUs have more overhead for gather. 
The specified overhead is relative
4713   // to the Load operation. "2" is the number provided by Intel architects. This
4714   // parameter is used for cost estimation of Gather Op and comparison with
4715   // other alternatives.
4716   // TODO: Remove the explicit hasAVX512()? That would mean we would only
4717   // enable gather with a -march.
4718   if (ST->hasAVX512() || (ST->hasAVX2() && ST->hasFastGather()))
4719     return 2;
4720
4721   return 1024;
4722 }
4723
4724 int X86TTIImpl::getScatterOverhead() const {
4725   if (ST->hasAVX512())
4726     return 2;
4727
4728   return 1024;
4729 }
4730
4731 // Return an average cost of a Gather / Scatter instruction, maybe improved later.
4732 // FIXME: Add TargetCostKind support.
4733 InstructionCost X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy,
4734                                             const Value *Ptr, Align Alignment,
4735                                             unsigned AddressSpace) {
4736
4737   assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost");
4738   unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();
4739
4740   // Try to reduce index size from 64 bit (default for GEP)
4741   // to 32. It is essential for VF 16. If the index can't be reduced to 32, the
4742   // operation will use 16 x 64 indices which do not fit in a zmm and need
4743   // to be split. Also check that the base pointer is the same for all lanes,
4744   // and that there's at most one variable index.
4745   auto getIndexSizeInBits = [](const Value *Ptr, const DataLayout &DL) {
4746     unsigned IndexSize = DL.getPointerSizeInBits();
4747     const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
4748     if (IndexSize < 64 || !GEP)
4749       return IndexSize;
4750
4751     unsigned NumOfVarIndices = 0;
4752     const Value *Ptrs = GEP->getPointerOperand();
4753     if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs))
4754       return IndexSize;
4755     for (unsigned i = 1; i < GEP->getNumOperands(); ++i) {
4756       if (isa<Constant>(GEP->getOperand(i)))
4757         continue;
4758       Type *IndxTy = GEP->getOperand(i)->getType();
4759       if (auto *IndexVTy = dyn_cast<VectorType>(IndxTy))
4760         IndxTy = IndexVTy->getElementType();
4761       if ((IndxTy->getPrimitiveSizeInBits() == 64 &&
4762            !isa<SExtInst>(GEP->getOperand(i))) ||
4763           ++NumOfVarIndices > 1)
4764         return IndexSize; // 64
4765     }
4766     return (unsigned)32;
4767   };
4768
4769   // Trying to reduce IndexSize to 32 bits for vector 16.
4770   // By default the IndexSize is equal to the pointer size.
4771   unsigned IndexSize = (ST->hasAVX512() && VF >= 16)
4772                            ? getIndexSizeInBits(Ptr, DL)
4773                            : DL.getPointerSizeInBits();
4774
4775   auto *IndexVTy = FixedVectorType::get(
4776       IntegerType::get(SrcVTy->getContext(), IndexSize), VF);
4777   std::pair<InstructionCost, MVT> IdxsLT =
4778       TLI->getTypeLegalizationCost(DL, IndexVTy);
4779   std::pair<InstructionCost, MVT> SrcLT =
4780       TLI->getTypeLegalizationCost(DL, SrcVTy);
4781   InstructionCost::CostType SplitFactor =
4782       *std::max(IdxsLT.first, SrcLT.first).getValue();
4783   if (SplitFactor > 1) {
4784     // Handle splitting of vector of pointers.
4785     auto *SplitSrcTy =
4786         FixedVectorType::get(SrcVTy->getScalarType(), VF / SplitFactor);
4787     return SplitFactor * getGSVectorCost(Opcode, SplitSrcTy, Ptr, Alignment,
4788                                          AddressSpace);
4789   }
4790
4791   // The gather / scatter cost is given by Intel architects. It is a rough
4792   // number since we are looking at one instruction at a time.
4793   const int GSOverhead = (Opcode == Instruction::Load)
4794                              ?
getGatherOverhead() 4795 : getScatterOverhead(); 4796 return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(), 4797 MaybeAlign(Alignment), AddressSpace, 4798 TTI::TCK_RecipThroughput); 4799 } 4800 4801 /// Return the cost of full scalarization of gather / scatter operation. 4802 /// 4803 /// Opcode - Load or Store instruction. 4804 /// SrcVTy - The type of the data vector that should be gathered or scattered. 4805 /// VariableMask - The mask is non-constant at compile time. 4806 /// Alignment - Alignment for one element. 4807 /// AddressSpace - pointer[s] address space. 4808 /// 4809 /// FIXME: Add TargetCostKind support. 4810 InstructionCost X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy, 4811 bool VariableMask, Align Alignment, 4812 unsigned AddressSpace) { 4813 Type *ScalarTy = SrcVTy->getScalarType(); 4814 unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements(); 4815 APInt DemandedElts = APInt::getAllOnes(VF); 4816 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 4817 4818 InstructionCost MaskUnpackCost = 0; 4819 if (VariableMask) { 4820 auto *MaskTy = 4821 FixedVectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF); 4822 MaskUnpackCost = getScalarizationOverhead( 4823 MaskTy, DemandedElts, /*Insert=*/false, /*Extract=*/true); 4824 InstructionCost ScalarCompareCost = getCmpSelInstrCost( 4825 Instruction::ICmp, Type::getInt1Ty(SrcVTy->getContext()), nullptr, 4826 CmpInst::BAD_ICMP_PREDICATE, CostKind); 4827 InstructionCost BranchCost = getCFInstrCost(Instruction::Br, CostKind); 4828 MaskUnpackCost += VF * (BranchCost + ScalarCompareCost); 4829 } 4830 4831 InstructionCost AddressUnpackCost = getScalarizationOverhead( 4832 FixedVectorType::get(ScalarTy->getPointerTo(), VF), DemandedElts, 4833 /*Insert=*/false, /*Extract=*/true); 4834 4835 // The cost of the scalar loads/stores. 4836 InstructionCost MemoryOpCost = 4837 VF * getMemoryOpCost(Opcode, ScalarTy, MaybeAlign(Alignment), 4838 AddressSpace, CostKind); 4839 4840 // The cost of forming the vector from loaded scalars/ 4841 // scalarizing the vector to perform scalar stores. 
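  // (Rough sketch of the total: unpack VF mask bits + VF * (compare + branch)
  // when the mask is variable, plus unpack VF addresses, plus VF scalar
  // loads/stores, plus the insert/extract overhead added below.)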
4842 InstructionCost InsertExtractCost = 4843 getScalarizationOverhead(cast<FixedVectorType>(SrcVTy), DemandedElts, 4844 /*Insert=*/Opcode == Instruction::Load, 4845 /*Extract=*/Opcode == Instruction::Store); 4846 4847 return AddressUnpackCost + MemoryOpCost + MaskUnpackCost + InsertExtractCost; 4848 } 4849 4850 /// Calculate the cost of Gather / Scatter operation 4851 InstructionCost X86TTIImpl::getGatherScatterOpCost( 4852 unsigned Opcode, Type *SrcVTy, const Value *Ptr, bool VariableMask, 4853 Align Alignment, TTI::TargetCostKind CostKind, 4854 const Instruction *I = nullptr) { 4855 if (CostKind != TTI::TCK_RecipThroughput) { 4856 if ((Opcode == Instruction::Load && 4857 isLegalMaskedGather(SrcVTy, Align(Alignment))) || 4858 (Opcode == Instruction::Store && 4859 isLegalMaskedScatter(SrcVTy, Align(Alignment)))) 4860 return 1; 4861 return BaseT::getGatherScatterOpCost(Opcode, SrcVTy, Ptr, VariableMask, 4862 Alignment, CostKind, I); 4863 } 4864 4865 assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter"); 4866 PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType()); 4867 if (!PtrTy && Ptr->getType()->isVectorTy()) 4868 PtrTy = dyn_cast<PointerType>( 4869 cast<VectorType>(Ptr->getType())->getElementType()); 4870 assert(PtrTy && "Unexpected type for Ptr argument"); 4871 unsigned AddressSpace = PtrTy->getAddressSpace(); 4872 4873 if ((Opcode == Instruction::Load && 4874 !isLegalMaskedGather(SrcVTy, Align(Alignment))) || 4875 (Opcode == Instruction::Store && 4876 !isLegalMaskedScatter(SrcVTy, Align(Alignment)))) 4877 return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment, 4878 AddressSpace); 4879 4880 return getGSVectorCost(Opcode, SrcVTy, Ptr, Alignment, AddressSpace); 4881 } 4882 4883 bool X86TTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1, 4884 TargetTransformInfo::LSRCost &C2) { 4885 // X86 specific here are "instruction number 1st priority". 4886 return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost, 4887 C1.NumIVMuls, C1.NumBaseAdds, 4888 C1.ScaleCost, C1.ImmCost, C1.SetupCost) < 4889 std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost, 4890 C2.NumIVMuls, C2.NumBaseAdds, 4891 C2.ScaleCost, C2.ImmCost, C2.SetupCost); 4892 } 4893 4894 bool X86TTIImpl::canMacroFuseCmp() { 4895 return ST->hasMacroFusion() || ST->hasBranchFusion(); 4896 } 4897 4898 bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) { 4899 if (!ST->hasAVX()) 4900 return false; 4901 4902 // The backend can't handle a single element vector. 4903 if (isa<VectorType>(DataTy) && 4904 cast<FixedVectorType>(DataTy)->getNumElements() == 1) 4905 return false; 4906 Type *ScalarTy = DataTy->getScalarType(); 4907 4908 if (ScalarTy->isPointerTy()) 4909 return true; 4910 4911 if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy()) 4912 return true; 4913 4914 if (ScalarTy->isHalfTy() && ST->hasBWI() && ST->hasFP16()) 4915 return true; 4916 4917 if (!ScalarTy->isIntegerTy()) 4918 return false; 4919 4920 unsigned IntWidth = ScalarTy->getIntegerBitWidth(); 4921 return IntWidth == 32 || IntWidth == 64 || 4922 ((IntWidth == 8 || IntWidth == 16) && ST->hasBWI()); 4923 } 4924 4925 bool X86TTIImpl::isLegalMaskedStore(Type *DataType, Align Alignment) { 4926 return isLegalMaskedLoad(DataType, Alignment); 4927 } 4928 4929 bool X86TTIImpl::isLegalNTLoad(Type *DataType, Align Alignment) { 4930 unsigned DataSize = DL.getTypeStoreSize(DataType); 4931 // The only supported nontemporal loads are for aligned vectors of 16 or 32 4932 // bytes. 
Note that 32-byte nontemporal vector loads are supported by AVX2
4933   // (the equivalent stores only require AVX).
4934   if (Alignment >= DataSize && (DataSize == 16 || DataSize == 32))
4935     return DataSize == 16 ? ST->hasSSE1() : ST->hasAVX2();
4936
4937   return false;
4938 }
4939
4940 bool X86TTIImpl::isLegalNTStore(Type *DataType, Align Alignment) {
4941   unsigned DataSize = DL.getTypeStoreSize(DataType);
4942
4943   // SSE4A supports nontemporal stores of float and double at arbitrary
4944   // alignment.
4945   if (ST->hasSSE4A() && (DataType->isFloatTy() || DataType->isDoubleTy()))
4946     return true;
4947
4948   // Besides the SSE4A subtarget exception above, only aligned stores are
4949   // available nontemporally on any other subtarget. And only stores with a size
4950   // of 4..32 bytes (powers of 2 only) are permitted.
4951   if (Alignment < DataSize || DataSize < 4 || DataSize > 32 ||
4952       !isPowerOf2_32(DataSize))
4953     return false;
4954
4955   // 32-byte vector nontemporal stores are supported by AVX (the equivalent
4956   // loads require AVX2).
4957   if (DataSize == 32)
4958     return ST->hasAVX();
4959   if (DataSize == 16)
4960     return ST->hasSSE1();
4961   return true;
4962 }
4963
4964 bool X86TTIImpl::isLegalMaskedExpandLoad(Type *DataTy) {
4965   if (!isa<VectorType>(DataTy))
4966     return false;
4967
4968   if (!ST->hasAVX512())
4969     return false;
4970
4971   // The backend can't handle a single element vector.
4972   if (cast<FixedVectorType>(DataTy)->getNumElements() == 1)
4973     return false;
4974
4975   Type *ScalarTy = cast<VectorType>(DataTy)->getElementType();
4976
4977   if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
4978     return true;
4979
4980   if (!ScalarTy->isIntegerTy())
4981     return false;
4982
4983   unsigned IntWidth = ScalarTy->getIntegerBitWidth();
4984   return IntWidth == 32 || IntWidth == 64 ||
4985          ((IntWidth == 8 || IntWidth == 16) && ST->hasVBMI2());
4986 }
4987
4988 bool X86TTIImpl::isLegalMaskedCompressStore(Type *DataTy) {
4989   return isLegalMaskedExpandLoad(DataTy);
4990 }
4991
4992 bool X86TTIImpl::supportsGather() const {
4993   // Some CPUs have better gather performance than others.
4994   // TODO: Remove the explicit ST->hasAVX512()? That would mean we would only
4995   // enable gather with a -march.
4996   return ST->hasAVX512() || (ST->hasFastGather() && ST->hasAVX2());
4997 }
4998
4999 bool X86TTIImpl::isLegalMaskedGather(Type *DataTy, Align Alignment) {
5000   if (!supportsGather())
5001     return false;
5002
5003   // This function is called now in two cases: from the Loop Vectorizer
5004   // and from the Scalarizer.
5005   // When the Loop Vectorizer asks about legality of the feature,
5006   // the vectorization factor is not calculated yet. The Loop Vectorizer
5007   // sends a scalar type and the decision is based on the width of the
5008   // scalar element.
5009   // Later on, the cost model will estimate the usage of this intrinsic based on
5010   // the vector type.
5011   // The Scalarizer asks again about legality. It sends a vector type.
5012   // In this case we can reject non-power-of-2 vectors.
5013   // We also reject single element vectors as the type legalizer can't
5014   // scalarize them.
5015   if (auto *DataVTy = dyn_cast<FixedVectorType>(DataTy)) {
5016     unsigned NumElts = DataVTy->getNumElements();
5017     if (NumElts == 1)
5018       return false;
5019     // Gather / Scatter for vector 2 is not profitable on KNL / SKX.
5020     // Vector-4 of gather/scatter instruction does not exist on KNL.
5021     // We can extend it to 8 elements, but zeroing upper bits of
5022     // the mask vector will add more instructions.
Right now we give the scalar 5023 // cost of vector-4 for KNL. TODO: Check, maybe the gather/scatter 5024 // instruction is better in the VariableMask case. 5025 if (ST->hasAVX512() && (NumElts == 2 || (NumElts == 4 && !ST->hasVLX()))) 5026 return false; 5027 } 5028 Type *ScalarTy = DataTy->getScalarType(); 5029 if (ScalarTy->isPointerTy()) 5030 return true; 5031 5032 if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy()) 5033 return true; 5034 5035 if (!ScalarTy->isIntegerTy()) 5036 return false; 5037 5038 unsigned IntWidth = ScalarTy->getIntegerBitWidth(); 5039 return IntWidth == 32 || IntWidth == 64; 5040 } 5041 5042 bool X86TTIImpl::isLegalMaskedScatter(Type *DataType, Align Alignment) { 5043 // AVX2 doesn't support scatter 5044 if (!ST->hasAVX512()) 5045 return false; 5046 return isLegalMaskedGather(DataType, Alignment); 5047 } 5048 5049 bool X86TTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) { 5050 EVT VT = TLI->getValueType(DL, DataType); 5051 return TLI->isOperationLegal(IsSigned ? ISD::SDIVREM : ISD::UDIVREM, VT); 5052 } 5053 5054 bool X86TTIImpl::isFCmpOrdCheaperThanFCmpZero(Type *Ty) { 5055 return false; 5056 } 5057 5058 bool X86TTIImpl::areInlineCompatible(const Function *Caller, 5059 const Function *Callee) const { 5060 const TargetMachine &TM = getTLI()->getTargetMachine(); 5061 5062 // Work this as a subsetting of subtarget features. 5063 const FeatureBitset &CallerBits = 5064 TM.getSubtargetImpl(*Caller)->getFeatureBits(); 5065 const FeatureBitset &CalleeBits = 5066 TM.getSubtargetImpl(*Callee)->getFeatureBits(); 5067 5068 FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList; 5069 FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList; 5070 return (RealCallerBits & RealCalleeBits) == RealCalleeBits; 5071 } 5072 5073 bool X86TTIImpl::areFunctionArgsABICompatible( 5074 const Function *Caller, const Function *Callee, 5075 SmallPtrSetImpl<Argument *> &Args) const { 5076 if (!BaseT::areFunctionArgsABICompatible(Caller, Callee, Args)) 5077 return false; 5078 5079 // If we get here, we know the target features match. If one function 5080 // considers 512-bit vectors legal and the other does not, consider them 5081 // incompatible. 5082 const TargetMachine &TM = getTLI()->getTargetMachine(); 5083 5084 if (TM.getSubtarget<X86Subtarget>(*Caller).useAVX512Regs() == 5085 TM.getSubtarget<X86Subtarget>(*Callee).useAVX512Regs()) 5086 return true; 5087 5088 // Consider the arguments compatible if they aren't vectors or aggregates. 5089 // FIXME: Look at the size of vectors. 5090 // FIXME: Look at the element types of aggregates to see if there are vectors. 5091 // FIXME: The API of this function seems intended to allow arguments 5092 // to be removed from the set, but the caller doesn't check if the set 5093 // becomes empty so that may not work in practice. 5094 return llvm::none_of(Args, [](Argument *A) { 5095 auto *EltTy = cast<PointerType>(A->getType())->getElementType(); 5096 return EltTy->isVectorTy() || EltTy->isAggregateType(); 5097 }); 5098 } 5099 5100 X86TTIImpl::TTI::MemCmpExpansionOptions 5101 X86TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const { 5102 TTI::MemCmpExpansionOptions Options; 5103 Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize); 5104 Options.NumLoadsPerBlock = 2; 5105 // All GPR and vector loads can be unaligned. 5106 Options.AllowOverlappingLoads = true; 5107 if (IsZeroCmp) { 5108 // Only enable vector loads for equality comparison. 
Right now the vector 5109 // version is not as fast for three way compare (see #33329). 5110 const unsigned PreferredWidth = ST->getPreferVectorWidth(); 5111 if (PreferredWidth >= 512 && ST->hasAVX512()) Options.LoadSizes.push_back(64); 5112 if (PreferredWidth >= 256 && ST->hasAVX()) Options.LoadSizes.push_back(32); 5113 if (PreferredWidth >= 128 && ST->hasSSE2()) Options.LoadSizes.push_back(16); 5114 } 5115 if (ST->is64Bit()) { 5116 Options.LoadSizes.push_back(8); 5117 } 5118 Options.LoadSizes.push_back(4); 5119 Options.LoadSizes.push_back(2); 5120 Options.LoadSizes.push_back(1); 5121 return Options; 5122 } 5123 5124 bool X86TTIImpl::prefersVectorizedAddressing() const { 5125 return supportsGather(); 5126 } 5127 5128 bool X86TTIImpl::supportsEfficientVectorElementLoadStore() const { 5129 return false; 5130 } 5131 5132 bool X86TTIImpl::enableInterleavedAccessVectorization() { 5133 // TODO: We expect this to be beneficial regardless of arch, 5134 // but there are currently some unexplained performance artifacts on Atom. 5135 // As a temporary solution, disable on Atom. 5136 return !(ST->isAtom()); 5137 } 5138 5139 // Get estimation for interleaved load/store operations and strided load. 5140 // \p Indices contains indices for strided load. 5141 // \p Factor - the factor of interleaving. 5142 // AVX-512 provides 3-src shuffles that significantly reduces the cost. 5143 InstructionCost X86TTIImpl::getInterleavedMemoryOpCostAVX512( 5144 unsigned Opcode, FixedVectorType *VecTy, unsigned Factor, 5145 ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace, 5146 TTI::TargetCostKind CostKind, bool UseMaskForCond, bool UseMaskForGaps) { 5147 // VecTy for interleave memop is <VF*Factor x Elt>. 5148 // So, for VF=4, Interleave Factor = 3, Element type = i32 we have 5149 // VecTy = <12 x i32>. 5150 5151 // Calculate the number of memory operations (NumOfMemOps), required 5152 // for load/store the VecTy. 5153 MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second; 5154 unsigned VecTySize = DL.getTypeStoreSize(VecTy); 5155 unsigned LegalVTSize = LegalVT.getStoreSize(); 5156 unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize; 5157 5158 // Get the cost of one memory operation. 5159 auto *SingleMemOpTy = FixedVectorType::get(VecTy->getElementType(), 5160 LegalVT.getVectorNumElements()); 5161 InstructionCost MemOpCost; 5162 if (UseMaskForCond || UseMaskForGaps) 5163 MemOpCost = getMaskedMemoryOpCost(Opcode, SingleMemOpTy, Alignment, 5164 AddressSpace, CostKind); 5165 else 5166 MemOpCost = getMemoryOpCost(Opcode, SingleMemOpTy, MaybeAlign(Alignment), 5167 AddressSpace, CostKind); 5168 5169 unsigned VF = VecTy->getNumElements() / Factor; 5170 MVT VT = MVT::getVectorVT(MVT::getVT(VecTy->getScalarType()), VF); 5171 5172 // FIXME: this is the most conservative estimate for the mask cost. 5173 InstructionCost MaskCost; 5174 if (UseMaskForCond || UseMaskForGaps) { 5175 APInt DemandedLoadStoreElts = APInt::getZero(VecTy->getNumElements()); 5176 for (unsigned Index : Indices) { 5177 assert(Index < Factor && "Invalid index for interleaved memory op"); 5178 for (unsigned Elm = 0; Elm < VF; Elm++) 5179 DemandedLoadStoreElts.setBit(Index + Elm * Factor); 5180 } 5181 5182 Type *I8Type = Type::getInt8Ty(VecTy->getContext()); 5183 5184 MaskCost = getReplicationShuffleCost( 5185 I8Type, Factor, VF, 5186 UseMaskForGaps ? 
DemandedLoadStoreElts
5187                          : APInt::getAllOnes(VecTy->getNumElements()),
5188         CostKind);
5189
5190     // The Gaps mask is invariant and created outside the loop, therefore the
5191     // cost of creating it is not accounted for here. However, if we have both
5192     // a MaskForGaps and some other mask that guards the execution of the
5193     // memory access, we need to account for the cost of And-ing the two masks
5194     // inside the loop.
5195     if (UseMaskForGaps) {
5196       auto *MaskVT = FixedVectorType::get(I8Type, VecTy->getNumElements());
5197       MaskCost += getArithmeticInstrCost(BinaryOperator::And, MaskVT, CostKind);
5198     }
5199   }
5200
5201   if (Opcode == Instruction::Load) {
5202     // The tables (AVX512InterleavedLoadTbl and AVX512InterleavedStoreTbl)
5203     // contain the cost of the optimized shuffle sequence that the
5204     // X86InterleavedAccess pass will generate.
5205     // The cost of loads and stores is computed separately from the table.
5206
5207     // X86InterleavedAccess supports only the following interleaved-access group.
5208     static const CostTblEntry AVX512InterleavedLoadTbl[] = {
5209         {3, MVT::v16i8, 12}, //(load 48i8 and) deinterleave into 3 x 16i8
5210         {3, MVT::v32i8, 14}, //(load 96i8 and) deinterleave into 3 x 32i8
5211         {3, MVT::v64i8, 22}, //(load 192i8 and) deinterleave into 3 x 64i8
5212     };
5213
5214     if (const auto *Entry =
5215             CostTableLookup(AVX512InterleavedLoadTbl, Factor, VT))
5216       return MaskCost + NumOfMemOps * MemOpCost + Entry->Cost;
5217     // If an entry does not exist, fall back to the default implementation.
5218
5219     // The kind of shuffle depends on the number of loaded values.
5220     // If we load the entire data in one register, we can use a 1-src shuffle.
5221     // Otherwise, we'll merge 2 sources in each operation.
5222     TTI::ShuffleKind ShuffleKind =
5223         (NumOfMemOps > 1) ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc;
5224
5225     InstructionCost ShuffleCost =
5226         getShuffleCost(ShuffleKind, SingleMemOpTy, None, 0, nullptr);
5227
5228     unsigned NumOfLoadsInInterleaveGrp =
5229         Indices.size() ? Indices.size() : Factor;
5230     auto *ResultTy = FixedVectorType::get(VecTy->getElementType(),
5231                                           VecTy->getNumElements() / Factor);
5232     InstructionCost NumOfResults =
5233         getTLI()->getTypeLegalizationCost(DL, ResultTy).first *
5234         NumOfLoadsInInterleaveGrp;
5235
5236     // About half of the loads may be folded in shuffles when we have only
5237     // one result. If we have more than one result, we do not fold loads at all.
5238     unsigned NumOfUnfoldedLoads =
5239         NumOfResults > 1 ? NumOfMemOps : NumOfMemOps / 2;
5240
5241     // Get the number of shuffle operations per result.
5242     unsigned NumOfShufflesPerResult =
5243         std::max((unsigned)1, (unsigned)(NumOfMemOps - 1));
5244
5245     // The SK_PermuteTwoSrc shuffle clobbers one of the source operands.
5246     // When we have more than one destination, we need additional instructions
5247     // to keep sources.
5248     InstructionCost NumOfMoves = 0;
5249     if (NumOfResults > 1 && ShuffleKind == TTI::SK_PermuteTwoSrc)
5250       NumOfMoves = NumOfResults * NumOfShufflesPerResult / 2;
5251
5252     InstructionCost Cost = NumOfResults * NumOfShufflesPerResult * ShuffleCost +
5253                            MaskCost + NumOfUnfoldedLoads * MemOpCost +
5254                            NumOfMoves;
5255
5256     return Cost;
5257   }
5258
5259   // Store.
5260   assert(Opcode == Instruction::Store &&
5261          "Expected Store Instruction at this point");
5262   // X86InterleavedAccess supports only the following interleaved-access group.
5263   static const CostTblEntry AVX512InterleavedStoreTbl[] = {
5264       {3, MVT::v16i8, 12}, // interleave 3 x 16i8 into 48i8 (and store)
5265       {3, MVT::v32i8, 14}, // interleave 3 x 32i8 into 96i8 (and store)
5266       {3, MVT::v64i8, 26}, // interleave 3 x 64i8 into 192i8 (and store)
5267
5268       {4, MVT::v8i8, 10},  // interleave 4 x 8i8 into 32i8 (and store)
5269       {4, MVT::v16i8, 11}, // interleave 4 x 16i8 into 64i8 (and store)
5270       {4, MVT::v32i8, 14}, // interleave 4 x 32i8 into 128i8 (and store)
5271       {4, MVT::v64i8, 24}  // interleave 4 x 64i8 into 256i8 (and store)
5272   };
5273
5274   if (const auto *Entry =
5275           CostTableLookup(AVX512InterleavedStoreTbl, Factor, VT))
5276     return MaskCost + NumOfMemOps * MemOpCost + Entry->Cost;
5277   // If an entry does not exist, fall back to the default implementation.
5278
5279   // There are no strided stores at the moment, and a store can't be folded
5280   // into a shuffle.
5281   unsigned NumOfSources = Factor; // The number of values to be merged.
5282   InstructionCost ShuffleCost =
5283       getShuffleCost(TTI::SK_PermuteTwoSrc, SingleMemOpTy, None, 0, nullptr);
5284   unsigned NumOfShufflesPerStore = NumOfSources - 1;
5285
5286   // The SK_PermuteTwoSrc shuffle clobbers one of the source operands.
5287   // We need additional instructions to keep sources.
5288   unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2;
5289   InstructionCost Cost =
5290       MaskCost +
5291       NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) +
5292       NumOfMoves;
5293   return Cost;
5294 }
5295
5296 InstructionCost X86TTIImpl::getInterleavedMemoryOpCost(
5297     unsigned Opcode, Type *BaseTy, unsigned Factor, ArrayRef<unsigned> Indices,
5298     Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
5299     bool UseMaskForCond, bool UseMaskForGaps) {
5300   auto *VecTy = cast<FixedVectorType>(BaseTy);
5301
5302   auto isSupportedOnAVX512 = [&](Type *VecTy, bool HasBW) {
5303     Type *EltTy = cast<VectorType>(VecTy)->getElementType();
5304     if (EltTy->isFloatTy() || EltTy->isDoubleTy() || EltTy->isIntegerTy(64) ||
5305         EltTy->isIntegerTy(32) || EltTy->isPointerTy())
5306       return true;
5307     if (EltTy->isIntegerTy(16) || EltTy->isIntegerTy(8) ||
5308         (!ST->useSoftFloat() && ST->hasFP16() && EltTy->isHalfTy()))
5309       return HasBW;
5310     return false;
5311   };
5312   if (ST->hasAVX512() && isSupportedOnAVX512(VecTy, ST->hasBWI()))
5313     return getInterleavedMemoryOpCostAVX512(
5314         Opcode, VecTy, Factor, Indices, Alignment,
5315         AddressSpace, CostKind, UseMaskForCond, UseMaskForGaps);
5316
5317   if (UseMaskForCond || UseMaskForGaps)
5318     return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
5319                                              Alignment, AddressSpace, CostKind,
5320                                              UseMaskForCond, UseMaskForGaps);
5321
5322   // Get estimation for interleaved load/store operations for SSE-AVX2.
5323   // As opposed to AVX-512, SSE/AVX2 targets do not have generic shuffles that allow
5324   // computing the cost using a generic formula as a function of generic
5325   // shuffles. We therefore use a lookup table instead, filled according to
5326   // the instruction sequences that codegen currently generates.
5327
5328   // VecTy for interleave memop is <VF*Factor x Elt>.
5329   // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
5330   // VecTy = <12 x i32>.
5331   MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
5332
5333   // This function can be called with VecTy=<6xi128>, Factor=3, in which case
5334   // the VF=2, while v2i128 is an unsupported MVT vector type
5335   // (see MachineValueType.h::getVectorVT()).
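  // (If so, we simply defer to the generic implementation below.)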
5336 if (!LegalVT.isVector()) 5337 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices, 5338 Alignment, AddressSpace, CostKind); 5339 5340 unsigned VF = VecTy->getNumElements() / Factor; 5341 Type *ScalarTy = VecTy->getElementType(); 5342 // Deduplicate entries, model floats/pointers as appropriately-sized integers. 5343 if (!ScalarTy->isIntegerTy()) 5344 ScalarTy = 5345 Type::getIntNTy(ScalarTy->getContext(), DL.getTypeSizeInBits(ScalarTy)); 5346 5347 // Get the cost of all the memory operations. 5348 // FIXME: discount dead loads. 5349 InstructionCost MemOpCosts = getMemoryOpCost( 5350 Opcode, VecTy, MaybeAlign(Alignment), AddressSpace, CostKind); 5351 5352 auto *VT = FixedVectorType::get(ScalarTy, VF); 5353 EVT ETy = TLI->getValueType(DL, VT); 5354 if (!ETy.isSimple()) 5355 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices, 5356 Alignment, AddressSpace, CostKind); 5357 5358 // TODO: Complete for other data-types and strides. 5359 // Each combination of Stride, element bit width and VF results in a different 5360 // sequence; The cost tables are therefore accessed with: 5361 // Factor (stride) and VectorType=VFxiN. 5362 // The Cost accounts only for the shuffle sequence; 5363 // The cost of the loads/stores is accounted for separately. 5364 // 5365 static const CostTblEntry AVX2InterleavedLoadTbl[] = { 5366 {2, MVT::v2i8, 2}, // (load 4i8 and) deinterleave into 2 x 2i8 5367 {2, MVT::v4i8, 2}, // (load 8i8 and) deinterleave into 2 x 4i8 5368 {2, MVT::v8i8, 2}, // (load 16i8 and) deinterleave into 2 x 8i8 5369 {2, MVT::v16i8, 4}, // (load 32i8 and) deinterleave into 2 x 16i8 5370 {2, MVT::v32i8, 6}, // (load 64i8 and) deinterleave into 2 x 32i8 5371 5372 {2, MVT::v8i16, 6}, // (load 16i16 and) deinterleave into 2 x 8i16 5373 {2, MVT::v16i16, 9}, // (load 32i16 and) deinterleave into 2 x 16i16 5374 {2, MVT::v32i16, 18}, // (load 64i16 and) deinterleave into 2 x 32i16 5375 5376 {2, MVT::v8i32, 4}, // (load 16i32 and) deinterleave into 2 x 8i32 5377 {2, MVT::v16i32, 8}, // (load 32i32 and) deinterleave into 2 x 16i32 5378 {2, MVT::v32i32, 16}, // (load 64i32 and) deinterleave into 2 x 32i32 5379 5380 {2, MVT::v4i64, 4}, // (load 8i64 and) deinterleave into 2 x 4i64 5381 {2, MVT::v8i64, 8}, // (load 16i64 and) deinterleave into 2 x 8i64 5382 {2, MVT::v16i64, 16}, // (load 32i64 and) deinterleave into 2 x 16i64 5383 {2, MVT::v32i64, 32}, // (load 64i64 and) deinterleave into 2 x 32i64 5384 5385 {3, MVT::v2i8, 3}, // (load 6i8 and) deinterleave into 3 x 2i8 5386 {3, MVT::v4i8, 3}, // (load 12i8 and) deinterleave into 3 x 4i8 5387 {3, MVT::v8i8, 6}, // (load 24i8 and) deinterleave into 3 x 8i8 5388 {3, MVT::v16i8, 11}, // (load 48i8 and) deinterleave into 3 x 16i8 5389 {3, MVT::v32i8, 14}, // (load 96i8 and) deinterleave into 3 x 32i8 5390 5391 {3, MVT::v2i16, 5}, // (load 6i16 and) deinterleave into 3 x 2i16 5392 {3, MVT::v4i16, 7}, // (load 12i16 and) deinterleave into 3 x 4i16 5393 {3, MVT::v8i16, 9}, // (load 24i16 and) deinterleave into 3 x 8i16 5394 {3, MVT::v16i16, 28}, // (load 48i16 and) deinterleave into 3 x 16i16 5395 {3, MVT::v32i16, 56}, // (load 96i16 and) deinterleave into 3 x 32i16 5396 5397 {3, MVT::v2i32, 3}, // (load 6i32 and) deinterleave into 3 x 2i32 5398 {3, MVT::v4i32, 3}, // (load 12i32 and) deinterleave into 3 x 4i32 5399 {3, MVT::v8i32, 7}, // (load 24i32 and) deinterleave into 3 x 8i32 5400 {3, MVT::v16i32, 14}, // (load 48i32 and) deinterleave into 3 x 16i32 5401 {3, MVT::v32i32, 32}, // (load 96i32 and) deinterleave into 3 x 
32i32 5402 5403 {3, MVT::v2i64, 1}, // (load 6i64 and) deinterleave into 3 x 2i64 5404 {3, MVT::v4i64, 5}, // (load 12i64 and) deinterleave into 3 x 4i64 5405 {3, MVT::v8i64, 10}, // (load 24i64 and) deinterleave into 3 x 8i64 5406 {3, MVT::v16i64, 20}, // (load 48i64 and) deinterleave into 3 x 16i64 5407 5408 {4, MVT::v2i8, 4}, // (load 8i8 and) deinterleave into 4 x 2i8 5409 {4, MVT::v4i8, 4}, // (load 16i8 and) deinterleave into 4 x 4i8 5410 {4, MVT::v8i8, 12}, // (load 32i8 and) deinterleave into 4 x 8i8 5411 {4, MVT::v16i8, 24}, // (load 64i8 and) deinterleave into 4 x 16i8 5412 {4, MVT::v32i8, 56}, // (load 128i8 and) deinterleave into 4 x 32i8 5413 5414 {4, MVT::v2i16, 6}, // (load 8i16 and) deinterleave into 4 x 2i16 5415 {4, MVT::v4i16, 17}, // (load 16i16 and) deinterleave into 4 x 4i16 5416 {4, MVT::v8i16, 33}, // (load 32i16 and) deinterleave into 4 x 8i16 5417 {4, MVT::v16i16, 75}, // (load 64i16 and) deinterleave into 4 x 16i16 5418 {4, MVT::v32i16, 150}, // (load 128i16 and) deinterleave into 4 x 32i16 5419 5420 {4, MVT::v2i32, 4}, // (load 8i32 and) deinterleave into 4 x 2i32 5421 {4, MVT::v4i32, 8}, // (load 16i32 and) deinterleave into 4 x 4i32 5422 {4, MVT::v8i32, 16}, // (load 32i32 and) deinterleave into 4 x 8i32 5423 {4, MVT::v16i32, 32}, // (load 64i32 and) deinterleave into 4 x 16i32 5424 {4, MVT::v32i32, 68}, // (load 128i32 and) deinterleave into 4 x 32i32 5425 5426 {4, MVT::v2i64, 6}, // (load 8i64 and) deinterleave into 4 x 2i64 5427 {4, MVT::v4i64, 8}, // (load 16i64 and) deinterleave into 4 x 4i64 5428 {4, MVT::v8i64, 20}, // (load 32i64 and) deinterleave into 4 x 8i64 5429 {4, MVT::v16i64, 40}, // (load 64i64 and) deinterleave into 4 x 16i64 5430 5431 {6, MVT::v2i8, 6}, // (load 12i8 and) deinterleave into 6 x 2i8 5432 {6, MVT::v4i8, 14}, // (load 24i8 and) deinterleave into 6 x 4i8 5433 {6, MVT::v8i8, 18}, // (load 48i8 and) deinterleave into 6 x 8i8 5434 {6, MVT::v16i8, 43}, // (load 96i8 and) deinterleave into 6 x 16i8 5435 {6, MVT::v32i8, 82}, // (load 192i8 and) deinterleave into 6 x 32i8 5436 5437 {6, MVT::v2i16, 13}, // (load 12i16 and) deinterleave into 6 x 2i16 5438 {6, MVT::v4i16, 9}, // (load 24i16 and) deinterleave into 6 x 4i16 5439 {6, MVT::v8i16, 39}, // (load 48i16 and) deinterleave into 6 x 8i16 5440 {6, MVT::v16i16, 106}, // (load 96i16 and) deinterleave into 6 x 16i16 5441 {6, MVT::v32i16, 212}, // (load 192i16 and) deinterleave into 6 x 32i16 5442 5443 {6, MVT::v2i32, 6}, // (load 12i32 and) deinterleave into 6 x 2i32 5444 {6, MVT::v4i32, 15}, // (load 24i32 and) deinterleave into 6 x 4i32 5445 {6, MVT::v8i32, 31}, // (load 48i32 and) deinterleave into 6 x 8i32 5446 {6, MVT::v16i32, 64}, // (load 96i32 and) deinterleave into 6 x 16i32 5447 5448 {6, MVT::v2i64, 6}, // (load 12i64 and) deinterleave into 6 x 2i64 5449 {6, MVT::v4i64, 18}, // (load 24i64 and) deinterleave into 6 x 4i64 5450 {6, MVT::v8i64, 36}, // (load 48i64 and) deinterleave into 6 x 8i64 5451 5452 {8, MVT::v8i32, 40} // (load 64i32 and) deinterleave into 8 x 8i32 5453 }; 5454 5455 static const CostTblEntry SSSE3InterleavedLoadTbl[] = { 5456 {2, MVT::v4i16, 2}, // (load 8i16 and) deinterleave into 2 x 4i16 5457 }; 5458 5459 static const CostTblEntry SSE2InterleavedLoadTbl[] = { 5460 {2, MVT::v2i16, 2}, // (load 4i16 and) deinterleave into 2 x 2i16 5461 {2, MVT::v4i16, 7}, // (load 8i16 and) deinterleave into 2 x 4i16 5462 5463 {2, MVT::v2i32, 2}, // (load 4i32 and) deinterleave into 2 x 2i32 5464 {2, MVT::v4i32, 2}, // (load 8i32 and) deinterleave into 2 x 4i32 5465 5466 {2, 
  static const CostTblEntry SSE2InterleavedLoadTbl[] = {
      {2, MVT::v2i16, 2}, // (load 4i16 and) deinterleave into 2 x 2i16
      {2, MVT::v4i16, 7}, // (load 8i16 and) deinterleave into 2 x 4i16

      {2, MVT::v2i32, 2}, // (load 4i32 and) deinterleave into 2 x 2i32
      {2, MVT::v4i32, 2}, // (load 8i32 and) deinterleave into 2 x 4i32

      {2, MVT::v2i64, 2}, // (load 4i64 and) deinterleave into 2 x 2i64
  };

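  // The store tables cost the reverse direction: interleaving Factor narrow
  // vectors into one wide block before a contiguous store. These entries tend
  // to be cheaper than the matching load entries above, roughly because
  // interleaving two vectors maps directly onto unpack-low/high patterns.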
  static const CostTblEntry AVX2InterleavedStoreTbl[] = {
      {2, MVT::v16i8, 3}, // interleave 2 x 16i8 into 32i8 (and store)
      {2, MVT::v32i8, 4}, // interleave 2 x 32i8 into 64i8 (and store)

      {2, MVT::v8i16, 3},  // interleave 2 x 8i16 into 16i16 (and store)
      {2, MVT::v16i16, 4}, // interleave 2 x 16i16 into 32i16 (and store)
      {2, MVT::v32i16, 8}, // interleave 2 x 32i16 into 64i16 (and store)

      {2, MVT::v4i32, 2},   // interleave 2 x 4i32 into 8i32 (and store)
      {2, MVT::v8i32, 4},   // interleave 2 x 8i32 into 16i32 (and store)
      {2, MVT::v16i32, 8},  // interleave 2 x 16i32 into 32i32 (and store)
      {2, MVT::v32i32, 16}, // interleave 2 x 32i32 into 64i32 (and store)

      {2, MVT::v2i64, 2},   // interleave 2 x 2i64 into 4i64 (and store)
      {2, MVT::v4i64, 4},   // interleave 2 x 4i64 into 8i64 (and store)
      {2, MVT::v8i64, 8},   // interleave 2 x 8i64 into 16i64 (and store)
      {2, MVT::v16i64, 16}, // interleave 2 x 16i64 into 32i64 (and store)
      {2, MVT::v32i64, 32}, // interleave 2 x 32i64 into 64i64 (and store)

      {3, MVT::v2i8, 4},   // interleave 3 x 2i8 into 6i8 (and store)
      {3, MVT::v4i8, 4},   // interleave 3 x 4i8 into 12i8 (and store)
      {3, MVT::v8i8, 6},   // interleave 3 x 8i8 into 24i8 (and store)
      {3, MVT::v16i8, 11}, // interleave 3 x 16i8 into 48i8 (and store)
      {3, MVT::v32i8, 13}, // interleave 3 x 32i8 into 96i8 (and store)

      {3, MVT::v2i16, 4},   // interleave 3 x 2i16 into 6i16 (and store)
      {3, MVT::v4i16, 6},   // interleave 3 x 4i16 into 12i16 (and store)
      {3, MVT::v8i16, 12},  // interleave 3 x 8i16 into 24i16 (and store)
      {3, MVT::v16i16, 27}, // interleave 3 x 16i16 into 48i16 (and store)
      {3, MVT::v32i16, 54}, // interleave 3 x 32i16 into 96i16 (and store)

      {3, MVT::v2i32, 4},   // interleave 3 x 2i32 into 6i32 (and store)
      {3, MVT::v4i32, 5},   // interleave 3 x 4i32 into 12i32 (and store)
      {3, MVT::v8i32, 11},  // interleave 3 x 8i32 into 24i32 (and store)
      {3, MVT::v16i32, 22}, // interleave 3 x 16i32 into 48i32 (and store)
      {3, MVT::v32i32, 48}, // interleave 3 x 32i32 into 96i32 (and store)

      {3, MVT::v2i64, 4},   // interleave 3 x 2i64 into 6i64 (and store)
      {3, MVT::v4i64, 6},   // interleave 3 x 4i64 into 12i64 (and store)
      {3, MVT::v8i64, 12},  // interleave 3 x 8i64 into 24i64 (and store)
      {3, MVT::v16i64, 24}, // interleave 3 x 16i64 into 48i64 (and store)

      {4, MVT::v2i8, 4},   // interleave 4 x 2i8 into 8i8 (and store)
      {4, MVT::v4i8, 4},   // interleave 4 x 4i8 into 16i8 (and store)
      {4, MVT::v8i8, 4},   // interleave 4 x 8i8 into 32i8 (and store)
      {4, MVT::v16i8, 8},  // interleave 4 x 16i8 into 64i8 (and store)
      {4, MVT::v32i8, 12}, // interleave 4 x 32i8 into 128i8 (and store)

      {4, MVT::v2i16, 2},   // interleave 4 x 2i16 into 8i16 (and store)
      {4, MVT::v4i16, 6},   // interleave 4 x 4i16 into 16i16 (and store)
      {4, MVT::v8i16, 10},  // interleave 4 x 8i16 into 32i16 (and store)
      {4, MVT::v16i16, 32}, // interleave 4 x 16i16 into 64i16 (and store)
      {4, MVT::v32i16, 64}, // interleave 4 x 32i16 into 128i16 (and store)

      {4, MVT::v2i32, 5},   // interleave 4 x 2i32 into 8i32 (and store)
      {4, MVT::v4i32, 6},   // interleave 4 x 4i32 into 16i32 (and store)
      {4, MVT::v8i32, 16},  // interleave 4 x 8i32 into 32i32 (and store)
      {4, MVT::v16i32, 32}, // interleave 4 x 16i32 into 64i32 (and store)
      {4, MVT::v32i32, 64}, // interleave 4 x 32i32 into 128i32 (and store)

      {4, MVT::v2i64, 6},   // interleave 4 x 2i64 into 8i64 (and store)
      {4, MVT::v4i64, 8},   // interleave 4 x 4i64 into 16i64 (and store)
      {4, MVT::v8i64, 20},  // interleave 4 x 8i64 into 32i64 (and store)
      {4, MVT::v16i64, 40}, // interleave 4 x 16i64 into 64i64 (and store)

      {6, MVT::v2i8, 7},   // interleave 6 x 2i8 into 12i8 (and store)
      {6, MVT::v4i8, 9},   // interleave 6 x 4i8 into 24i8 (and store)
      {6, MVT::v8i8, 16},  // interleave 6 x 8i8 into 48i8 (and store)
      {6, MVT::v16i8, 27}, // interleave 6 x 16i8 into 96i8 (and store)
      {6, MVT::v32i8, 90}, // interleave 6 x 32i8 into 192i8 (and store)

      {6, MVT::v2i16, 10},  // interleave 6 x 2i16 into 12i16 (and store)
      {6, MVT::v4i16, 15},  // interleave 6 x 4i16 into 24i16 (and store)
      {6, MVT::v8i16, 21},  // interleave 6 x 8i16 into 48i16 (and store)
      {6, MVT::v16i16, 58}, // interleave 6 x 16i16 into 96i16 (and store)
      {6, MVT::v32i16, 90}, // interleave 6 x 32i16 into 192i16 (and store)

      {6, MVT::v2i32, 9},   // interleave 6 x 2i32 into 12i32 (and store)
      {6, MVT::v4i32, 12},  // interleave 6 x 4i32 into 24i32 (and store)
      {6, MVT::v8i32, 33},  // interleave 6 x 8i32 into 48i32 (and store)
      {6, MVT::v16i32, 66}, // interleave 6 x 16i32 into 96i32 (and store)

      {6, MVT::v2i64, 8},  // interleave 6 x 2i64 into 12i64 (and store)
      {6, MVT::v4i64, 15}, // interleave 6 x 4i64 into 24i64 (and store)
      {6, MVT::v8i64, 30}, // interleave 6 x 8i64 into 48i64 (and store)
  };

  static const CostTblEntry SSE2InterleavedStoreTbl[] = {
      {2, MVT::v2i8, 1}, // interleave 2 x 2i8 into 4i8 (and store)
      {2, MVT::v4i8, 1}, // interleave 2 x 4i8 into 8i8 (and store)
      {2, MVT::v8i8, 1}, // interleave 2 x 8i8 into 16i8 (and store)

      {2, MVT::v2i16, 1}, // interleave 2 x 2i16 into 4i16 (and store)
      {2, MVT::v4i16, 1}, // interleave 2 x 4i16 into 8i16 (and store)

      {2, MVT::v2i32, 1}, // interleave 2 x 2i32 into 4i32 (and store)
  };

  if (Opcode == Instruction::Load) {
    auto GetDiscountedCost = [Factor, NumMembers = Indices.size(),
                              MemOpCosts](const CostTblEntry *Entry) {
      // NOTE: This is just an approximation!
      //       It can over- or under-estimate the cost!
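      // The table cost assumes all Factor members of the group are demanded;
      // scale it by the number of indices actually requested. For example,
      // with the AVX2 load entry {4, MVT::v8i32, 16}, asking for 2 of the 4
      // members yields MemOpCosts + divideCeil(2 * 16, 4) = MemOpCosts + 8.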
      return MemOpCosts + divideCeil(NumMembers * Entry->Cost, Factor);
    };

    if (ST->hasAVX2())
      if (const auto *Entry = CostTableLookup(AVX2InterleavedLoadTbl, Factor,
                                              ETy.getSimpleVT()))
        return GetDiscountedCost(Entry);

    if (ST->hasSSSE3())
      if (const auto *Entry = CostTableLookup(SSSE3InterleavedLoadTbl, Factor,
                                              ETy.getSimpleVT()))
        return GetDiscountedCost(Entry);

    if (ST->hasSSE2())
      if (const auto *Entry = CostTableLookup(SSE2InterleavedLoadTbl, Factor,
                                              ETy.getSimpleVT()))
        return GetDiscountedCost(Entry);
  } else {
    assert(Opcode == Instruction::Store &&
           "Expected Store Instruction at this point");
    assert((!Indices.size() || Indices.size() == Factor) &&
           "Interleaved store only supports fully-interleaved groups.");
    if (ST->hasAVX2())
      if (const auto *Entry = CostTableLookup(AVX2InterleavedStoreTbl, Factor,
                                              ETy.getSimpleVT()))
        return MemOpCosts + Entry->Cost;

    if (ST->hasSSE2())
      if (const auto *Entry = CostTableLookup(SSE2InterleavedStoreTbl, Factor,
                                              ETy.getSimpleVT()))
        return MemOpCosts + Entry->Cost;
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace, CostKind,
                                           UseMaskForCond, UseMaskForGaps);
}