1 //===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 /// \file 9 /// This file implements a TargetTransformInfo analysis pass specific to the 10 /// X86 target machine. It uses the target's detailed information to provide 11 /// more precise answers to certain TTI queries, while letting the target 12 /// independent and default TTI implementations handle the rest. 13 /// 14 //===----------------------------------------------------------------------===// 15 /// About Cost Model numbers used below it's necessary to say the following: 16 /// the numbers correspond to some "generic" X86 CPU instead of usage of 17 /// concrete CPU model. Usually the numbers correspond to CPU where the feature 18 /// apeared at the first time. For example, if we do Subtarget.hasSSE42() in 19 /// the lookups below the cost is based on Nehalem as that was the first CPU 20 /// to support that feature level and thus has most likely the worst case cost. 21 /// Some examples of other technologies/CPUs: 22 /// SSE 3 - Pentium4 / Athlon64 23 /// SSE 4.1 - Penryn 24 /// SSE 4.2 - Nehalem 25 /// AVX - Sandy Bridge 26 /// AVX2 - Haswell 27 /// AVX-512 - Xeon Phi / Skylake 28 /// And some examples of instruction target dependent costs (latency) 29 /// divss sqrtss rsqrtss 30 /// AMD K7 11-16 19 3 31 /// Piledriver 9-24 13-15 5 32 /// Jaguar 14 16 2 33 /// Pentium II,III 18 30 2 34 /// Nehalem 7-14 7-18 3 35 /// Haswell 10-13 11 5 36 /// TODO: Develop and implement the target dependent cost model and 37 /// specialize cost numbers for different Cost Model Targets such as throughput, 38 /// code size, latency and uop count. 39 //===----------------------------------------------------------------------===// 40 41 #include "X86TargetTransformInfo.h" 42 #include "llvm/Analysis/TargetTransformInfo.h" 43 #include "llvm/CodeGen/BasicTTIImpl.h" 44 #include "llvm/CodeGen/CostTable.h" 45 #include "llvm/CodeGen/TargetLowering.h" 46 #include "llvm/IR/IntrinsicInst.h" 47 #include "llvm/Support/Debug.h" 48 49 using namespace llvm; 50 51 #define DEBUG_TYPE "x86tti" 52 53 //===----------------------------------------------------------------------===// 54 // 55 // X86 cost model. 56 // 57 //===----------------------------------------------------------------------===// 58 59 TargetTransformInfo::PopcntSupportKind 60 X86TTIImpl::getPopcntSupport(unsigned TyWidth) { 61 assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2"); 62 // TODO: Currently the __builtin_popcount() implementation using SSE3 63 // instructions is inefficient. Once the problem is fixed, we should 64 // call ST->hasSSE3() instead of ST->hasPOPCNT(). 65 return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software; 66 } 67 68 llvm::Optional<unsigned> X86TTIImpl::getCacheSize( 69 TargetTransformInfo::CacheLevel Level) const { 70 switch (Level) { 71 case TargetTransformInfo::CacheLevel::L1D: 72 // - Penryn 73 // - Nehalem 74 // - Westmere 75 // - Sandy Bridge 76 // - Ivy Bridge 77 // - Haswell 78 // - Broadwell 79 // - Skylake 80 // - Kabylake 81 return 32 * 1024; // 32 KByte 82 case TargetTransformInfo::CacheLevel::L2D: 83 // - Penryn 84 // - Nehalem 85 // - Westmere 86 // - Sandy Bridge 87 // - Ivy Bridge 88 // - Haswell 89 // - Broadwell 90 // - Skylake 91 // - Kabylake 92 return 256 * 1024; // 256 KByte 93 } 94 95 llvm_unreachable("Unknown TargetTransformInfo::CacheLevel"); 96 } 97 98 llvm::Optional<unsigned> X86TTIImpl::getCacheAssociativity( 99 TargetTransformInfo::CacheLevel Level) const { 100 // - Penryn 101 // - Nehalem 102 // - Westmere 103 // - Sandy Bridge 104 // - Ivy Bridge 105 // - Haswell 106 // - Broadwell 107 // - Skylake 108 // - Kabylake 109 switch (Level) { 110 case TargetTransformInfo::CacheLevel::L1D: 111 LLVM_FALLTHROUGH; 112 case TargetTransformInfo::CacheLevel::L2D: 113 return 8; 114 } 115 116 llvm_unreachable("Unknown TargetTransformInfo::CacheLevel"); 117 } 118 119 unsigned X86TTIImpl::getNumberOfRegisters(unsigned ClassID) const { 120 bool Vector = (ClassID == 1); 121 if (Vector && !ST->hasSSE1()) 122 return 0; 123 124 if (ST->is64Bit()) { 125 if (Vector && ST->hasAVX512()) 126 return 32; 127 return 16; 128 } 129 return 8; 130 } 131 132 TypeSize 133 X86TTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const { 134 unsigned PreferVectorWidth = ST->getPreferVectorWidth(); 135 switch (K) { 136 case TargetTransformInfo::RGK_Scalar: 137 return TypeSize::getFixed(ST->is64Bit() ? 64 : 32); 138 case TargetTransformInfo::RGK_FixedWidthVector: 139 if (ST->hasAVX512() && PreferVectorWidth >= 512) 140 return TypeSize::getFixed(512); 141 if (ST->hasAVX() && PreferVectorWidth >= 256) 142 return TypeSize::getFixed(256); 143 if (ST->hasSSE1() && PreferVectorWidth >= 128) 144 return TypeSize::getFixed(128); 145 return TypeSize::getFixed(0); 146 case TargetTransformInfo::RGK_ScalableVector: 147 return TypeSize::getScalable(0); 148 } 149 150 llvm_unreachable("Unsupported register kind"); 151 } 152 153 unsigned X86TTIImpl::getLoadStoreVecRegBitWidth(unsigned) const { 154 return getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector) 155 .getFixedSize(); 156 } 157 158 unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) { 159 // If the loop will not be vectorized, don't interleave the loop. 160 // Let regular unroll to unroll the loop, which saves the overflow 161 // check and memory check cost. 162 if (VF == 1) 163 return 1; 164 165 if (ST->isAtom()) 166 return 1; 167 168 // Sandybridge and Haswell have multiple execution ports and pipelined 169 // vector units. 170 if (ST->hasAVX()) 171 return 4; 172 173 return 2; 174 } 175 176 InstructionCost X86TTIImpl::getArithmeticInstrCost( 177 unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind, 178 TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info, 179 TTI::OperandValueProperties Opd1PropInfo, 180 TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args, 181 const Instruction *CxtI) { 182 // TODO: Handle more cost kinds. 183 if (CostKind != TTI::TCK_RecipThroughput) 184 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, 185 Op2Info, Opd1PropInfo, 186 Opd2PropInfo, Args, CxtI); 187 // Legalize the type. 188 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty); 189 190 int ISD = TLI->InstructionOpcodeToISD(Opcode); 191 assert(ISD && "Invalid opcode"); 192 193 static const CostTblEntry GLMCostTable[] = { 194 { ISD::FDIV, MVT::f32, 18 }, // divss 195 { ISD::FDIV, MVT::v4f32, 35 }, // divps 196 { ISD::FDIV, MVT::f64, 33 }, // divsd 197 { ISD::FDIV, MVT::v2f64, 65 }, // divpd 198 }; 199 200 if (ST->useGLMDivSqrtCosts()) 201 if (const auto *Entry = CostTableLookup(GLMCostTable, ISD, 202 LT.second)) 203 return LT.first * Entry->Cost; 204 205 static const CostTblEntry SLMCostTable[] = { 206 { ISD::MUL, MVT::v4i32, 11 }, // pmulld 207 { ISD::MUL, MVT::v8i16, 2 }, // pmullw 208 { ISD::MUL, MVT::v16i8, 14 }, // extend/pmullw/trunc sequence. 209 { ISD::FMUL, MVT::f64, 2 }, // mulsd 210 { ISD::FMUL, MVT::v2f64, 4 }, // mulpd 211 { ISD::FMUL, MVT::v4f32, 2 }, // mulps 212 { ISD::FDIV, MVT::f32, 17 }, // divss 213 { ISD::FDIV, MVT::v4f32, 39 }, // divps 214 { ISD::FDIV, MVT::f64, 32 }, // divsd 215 { ISD::FDIV, MVT::v2f64, 69 }, // divpd 216 { ISD::FADD, MVT::v2f64, 2 }, // addpd 217 { ISD::FSUB, MVT::v2f64, 2 }, // subpd 218 // v2i64/v4i64 mul is custom lowered as a series of long: 219 // multiplies(3), shifts(3) and adds(2) 220 // slm muldq version throughput is 2 and addq throughput 4 221 // thus: 3X2 (muldq throughput) + 3X1 (shift throughput) + 222 // 3X4 (addq throughput) = 17 223 { ISD::MUL, MVT::v2i64, 17 }, 224 // slm addq\subq throughput is 4 225 { ISD::ADD, MVT::v2i64, 4 }, 226 { ISD::SUB, MVT::v2i64, 4 }, 227 }; 228 229 if (ST->isSLM()) { 230 if (Args.size() == 2 && ISD == ISD::MUL && LT.second == MVT::v4i32) { 231 // Check if the operands can be shrinked into a smaller datatype. 232 bool Op1Signed = false; 233 unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed); 234 bool Op2Signed = false; 235 unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed); 236 237 bool SignedMode = Op1Signed || Op2Signed; 238 unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize); 239 240 if (OpMinSize <= 7) 241 return LT.first * 3; // pmullw/sext 242 if (!SignedMode && OpMinSize <= 8) 243 return LT.first * 3; // pmullw/zext 244 if (OpMinSize <= 15) 245 return LT.first * 5; // pmullw/pmulhw/pshuf 246 if (!SignedMode && OpMinSize <= 16) 247 return LT.first * 5; // pmullw/pmulhw/pshuf 248 } 249 250 if (const auto *Entry = CostTableLookup(SLMCostTable, ISD, 251 LT.second)) { 252 return LT.first * Entry->Cost; 253 } 254 } 255 256 if ((ISD == ISD::SDIV || ISD == ISD::SREM || ISD == ISD::UDIV || 257 ISD == ISD::UREM) && 258 (Op2Info == TargetTransformInfo::OK_UniformConstantValue || 259 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) && 260 Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) { 261 if (ISD == ISD::SDIV || ISD == ISD::SREM) { 262 // On X86, vector signed division by constants power-of-two are 263 // normally expanded to the sequence SRA + SRL + ADD + SRA. 264 // The OperandValue properties may not be the same as that of the previous 265 // operation; conservatively assume OP_None. 266 InstructionCost Cost = 267 2 * getArithmeticInstrCost(Instruction::AShr, Ty, CostKind, Op1Info, 268 Op2Info, TargetTransformInfo::OP_None, 269 TargetTransformInfo::OP_None); 270 Cost += getArithmeticInstrCost(Instruction::LShr, Ty, CostKind, Op1Info, 271 Op2Info, 272 TargetTransformInfo::OP_None, 273 TargetTransformInfo::OP_None); 274 Cost += getArithmeticInstrCost(Instruction::Add, Ty, CostKind, Op1Info, 275 Op2Info, 276 TargetTransformInfo::OP_None, 277 TargetTransformInfo::OP_None); 278 279 if (ISD == ISD::SREM) { 280 // For SREM: (X % C) is the equivalent of (X - (X/C)*C) 281 Cost += getArithmeticInstrCost(Instruction::Mul, Ty, CostKind, Op1Info, 282 Op2Info); 283 Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind, Op1Info, 284 Op2Info); 285 } 286 287 return Cost; 288 } 289 290 // Vector unsigned division/remainder will be simplified to shifts/masks. 291 if (ISD == ISD::UDIV) 292 return getArithmeticInstrCost(Instruction::LShr, Ty, CostKind, 293 Op1Info, Op2Info, 294 TargetTransformInfo::OP_None, 295 TargetTransformInfo::OP_None); 296 297 else // UREM 298 return getArithmeticInstrCost(Instruction::And, Ty, CostKind, 299 Op1Info, Op2Info, 300 TargetTransformInfo::OP_None, 301 TargetTransformInfo::OP_None); 302 } 303 304 static const CostTblEntry AVX512BWUniformConstCostTable[] = { 305 { ISD::SHL, MVT::v64i8, 2 }, // psllw + pand. 306 { ISD::SRL, MVT::v64i8, 2 }, // psrlw + pand. 307 { ISD::SRA, MVT::v64i8, 4 }, // psrlw, pand, pxor, psubb. 308 }; 309 310 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue && 311 ST->hasBWI()) { 312 if (const auto *Entry = CostTableLookup(AVX512BWUniformConstCostTable, ISD, 313 LT.second)) 314 return LT.first * Entry->Cost; 315 } 316 317 static const CostTblEntry AVX512UniformConstCostTable[] = { 318 { ISD::SRA, MVT::v2i64, 1 }, 319 { ISD::SRA, MVT::v4i64, 1 }, 320 { ISD::SRA, MVT::v8i64, 1 }, 321 322 { ISD::SHL, MVT::v64i8, 4 }, // psllw + pand. 323 { ISD::SRL, MVT::v64i8, 4 }, // psrlw + pand. 324 { ISD::SRA, MVT::v64i8, 8 }, // psrlw, pand, pxor, psubb. 325 326 { ISD::SDIV, MVT::v16i32, 6 }, // pmuludq sequence 327 { ISD::SREM, MVT::v16i32, 8 }, // pmuludq+mul+sub sequence 328 { ISD::UDIV, MVT::v16i32, 5 }, // pmuludq sequence 329 { ISD::UREM, MVT::v16i32, 7 }, // pmuludq+mul+sub sequence 330 }; 331 332 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue && 333 ST->hasAVX512()) { 334 if (const auto *Entry = CostTableLookup(AVX512UniformConstCostTable, ISD, 335 LT.second)) 336 return LT.first * Entry->Cost; 337 } 338 339 static const CostTblEntry AVX2UniformConstCostTable[] = { 340 { ISD::SHL, MVT::v32i8, 2 }, // psllw + pand. 341 { ISD::SRL, MVT::v32i8, 2 }, // psrlw + pand. 342 { ISD::SRA, MVT::v32i8, 4 }, // psrlw, pand, pxor, psubb. 343 344 { ISD::SRA, MVT::v4i64, 4 }, // 2 x psrad + shuffle. 345 346 { ISD::SDIV, MVT::v8i32, 6 }, // pmuludq sequence 347 { ISD::SREM, MVT::v8i32, 8 }, // pmuludq+mul+sub sequence 348 { ISD::UDIV, MVT::v8i32, 5 }, // pmuludq sequence 349 { ISD::UREM, MVT::v8i32, 7 }, // pmuludq+mul+sub sequence 350 }; 351 352 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue && 353 ST->hasAVX2()) { 354 if (const auto *Entry = CostTableLookup(AVX2UniformConstCostTable, ISD, 355 LT.second)) 356 return LT.first * Entry->Cost; 357 } 358 359 static const CostTblEntry SSE2UniformConstCostTable[] = { 360 { ISD::SHL, MVT::v16i8, 2 }, // psllw + pand. 361 { ISD::SRL, MVT::v16i8, 2 }, // psrlw + pand. 362 { ISD::SRA, MVT::v16i8, 4 }, // psrlw, pand, pxor, psubb. 363 364 { ISD::SHL, MVT::v32i8, 4+2 }, // 2*(psllw + pand) + split. 365 { ISD::SRL, MVT::v32i8, 4+2 }, // 2*(psrlw + pand) + split. 366 { ISD::SRA, MVT::v32i8, 8+2 }, // 2*(psrlw, pand, pxor, psubb) + split. 367 368 { ISD::SDIV, MVT::v8i32, 12+2 }, // 2*pmuludq sequence + split. 369 { ISD::SREM, MVT::v8i32, 16+2 }, // 2*pmuludq+mul+sub sequence + split. 370 { ISD::SDIV, MVT::v4i32, 6 }, // pmuludq sequence 371 { ISD::SREM, MVT::v4i32, 8 }, // pmuludq+mul+sub sequence 372 { ISD::UDIV, MVT::v8i32, 10+2 }, // 2*pmuludq sequence + split. 373 { ISD::UREM, MVT::v8i32, 14+2 }, // 2*pmuludq+mul+sub sequence + split. 374 { ISD::UDIV, MVT::v4i32, 5 }, // pmuludq sequence 375 { ISD::UREM, MVT::v4i32, 7 }, // pmuludq+mul+sub sequence 376 }; 377 378 // XOP has faster vXi8 shifts. 379 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue && 380 ST->hasSSE2() && !ST->hasXOP()) { 381 if (const auto *Entry = 382 CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second)) 383 return LT.first * Entry->Cost; 384 } 385 386 static const CostTblEntry AVX512BWConstCostTable[] = { 387 { ISD::SDIV, MVT::v64i8, 14 }, // 2*ext+2*pmulhw sequence 388 { ISD::SREM, MVT::v64i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence 389 { ISD::UDIV, MVT::v64i8, 14 }, // 2*ext+2*pmulhw sequence 390 { ISD::UREM, MVT::v64i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence 391 { ISD::SDIV, MVT::v32i16, 6 }, // vpmulhw sequence 392 { ISD::SREM, MVT::v32i16, 8 }, // vpmulhw+mul+sub sequence 393 { ISD::UDIV, MVT::v32i16, 6 }, // vpmulhuw sequence 394 { ISD::UREM, MVT::v32i16, 8 }, // vpmulhuw+mul+sub sequence 395 }; 396 397 if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue || 398 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) && 399 ST->hasBWI()) { 400 if (const auto *Entry = 401 CostTableLookup(AVX512BWConstCostTable, ISD, LT.second)) 402 return LT.first * Entry->Cost; 403 } 404 405 static const CostTblEntry AVX512ConstCostTable[] = { 406 { ISD::SDIV, MVT::v16i32, 15 }, // vpmuldq sequence 407 { ISD::SREM, MVT::v16i32, 17 }, // vpmuldq+mul+sub sequence 408 { ISD::UDIV, MVT::v16i32, 15 }, // vpmuludq sequence 409 { ISD::UREM, MVT::v16i32, 17 }, // vpmuludq+mul+sub sequence 410 { ISD::SDIV, MVT::v64i8, 28 }, // 4*ext+4*pmulhw sequence 411 { ISD::SREM, MVT::v64i8, 32 }, // 4*ext+4*pmulhw+mul+sub sequence 412 { ISD::UDIV, MVT::v64i8, 28 }, // 4*ext+4*pmulhw sequence 413 { ISD::UREM, MVT::v64i8, 32 }, // 4*ext+4*pmulhw+mul+sub sequence 414 { ISD::SDIV, MVT::v32i16, 12 }, // 2*vpmulhw sequence 415 { ISD::SREM, MVT::v32i16, 16 }, // 2*vpmulhw+mul+sub sequence 416 { ISD::UDIV, MVT::v32i16, 12 }, // 2*vpmulhuw sequence 417 { ISD::UREM, MVT::v32i16, 16 }, // 2*vpmulhuw+mul+sub sequence 418 }; 419 420 if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue || 421 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) && 422 ST->hasAVX512()) { 423 if (const auto *Entry = 424 CostTableLookup(AVX512ConstCostTable, ISD, LT.second)) 425 return LT.first * Entry->Cost; 426 } 427 428 static const CostTblEntry AVX2ConstCostTable[] = { 429 { ISD::SDIV, MVT::v32i8, 14 }, // 2*ext+2*pmulhw sequence 430 { ISD::SREM, MVT::v32i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence 431 { ISD::UDIV, MVT::v32i8, 14 }, // 2*ext+2*pmulhw sequence 432 { ISD::UREM, MVT::v32i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence 433 { ISD::SDIV, MVT::v16i16, 6 }, // vpmulhw sequence 434 { ISD::SREM, MVT::v16i16, 8 }, // vpmulhw+mul+sub sequence 435 { ISD::UDIV, MVT::v16i16, 6 }, // vpmulhuw sequence 436 { ISD::UREM, MVT::v16i16, 8 }, // vpmulhuw+mul+sub sequence 437 { ISD::SDIV, MVT::v8i32, 15 }, // vpmuldq sequence 438 { ISD::SREM, MVT::v8i32, 19 }, // vpmuldq+mul+sub sequence 439 { ISD::UDIV, MVT::v8i32, 15 }, // vpmuludq sequence 440 { ISD::UREM, MVT::v8i32, 19 }, // vpmuludq+mul+sub sequence 441 }; 442 443 if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue || 444 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) && 445 ST->hasAVX2()) { 446 if (const auto *Entry = CostTableLookup(AVX2ConstCostTable, ISD, LT.second)) 447 return LT.first * Entry->Cost; 448 } 449 450 static const CostTblEntry SSE2ConstCostTable[] = { 451 { ISD::SDIV, MVT::v32i8, 28+2 }, // 4*ext+4*pmulhw sequence + split. 452 { ISD::SREM, MVT::v32i8, 32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split. 453 { ISD::SDIV, MVT::v16i8, 14 }, // 2*ext+2*pmulhw sequence 454 { ISD::SREM, MVT::v16i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence 455 { ISD::UDIV, MVT::v32i8, 28+2 }, // 4*ext+4*pmulhw sequence + split. 456 { ISD::UREM, MVT::v32i8, 32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split. 457 { ISD::UDIV, MVT::v16i8, 14 }, // 2*ext+2*pmulhw sequence 458 { ISD::UREM, MVT::v16i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence 459 { ISD::SDIV, MVT::v16i16, 12+2 }, // 2*pmulhw sequence + split. 460 { ISD::SREM, MVT::v16i16, 16+2 }, // 2*pmulhw+mul+sub sequence + split. 461 { ISD::SDIV, MVT::v8i16, 6 }, // pmulhw sequence 462 { ISD::SREM, MVT::v8i16, 8 }, // pmulhw+mul+sub sequence 463 { ISD::UDIV, MVT::v16i16, 12+2 }, // 2*pmulhuw sequence + split. 464 { ISD::UREM, MVT::v16i16, 16+2 }, // 2*pmulhuw+mul+sub sequence + split. 465 { ISD::UDIV, MVT::v8i16, 6 }, // pmulhuw sequence 466 { ISD::UREM, MVT::v8i16, 8 }, // pmulhuw+mul+sub sequence 467 { ISD::SDIV, MVT::v8i32, 38+2 }, // 2*pmuludq sequence + split. 468 { ISD::SREM, MVT::v8i32, 48+2 }, // 2*pmuludq+mul+sub sequence + split. 469 { ISD::SDIV, MVT::v4i32, 19 }, // pmuludq sequence 470 { ISD::SREM, MVT::v4i32, 24 }, // pmuludq+mul+sub sequence 471 { ISD::UDIV, MVT::v8i32, 30+2 }, // 2*pmuludq sequence + split. 472 { ISD::UREM, MVT::v8i32, 40+2 }, // 2*pmuludq+mul+sub sequence + split. 473 { ISD::UDIV, MVT::v4i32, 15 }, // pmuludq sequence 474 { ISD::UREM, MVT::v4i32, 20 }, // pmuludq+mul+sub sequence 475 }; 476 477 if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue || 478 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) && 479 ST->hasSSE2()) { 480 // pmuldq sequence. 481 if (ISD == ISD::SDIV && LT.second == MVT::v8i32 && ST->hasAVX()) 482 return LT.first * 32; 483 if (ISD == ISD::SREM && LT.second == MVT::v8i32 && ST->hasAVX()) 484 return LT.first * 38; 485 if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41()) 486 return LT.first * 15; 487 if (ISD == ISD::SREM && LT.second == MVT::v4i32 && ST->hasSSE41()) 488 return LT.first * 20; 489 490 if (const auto *Entry = CostTableLookup(SSE2ConstCostTable, ISD, LT.second)) 491 return LT.first * Entry->Cost; 492 } 493 494 static const CostTblEntry AVX512BWShiftCostTable[] = { 495 { ISD::SHL, MVT::v8i16, 1 }, // vpsllvw 496 { ISD::SRL, MVT::v8i16, 1 }, // vpsrlvw 497 { ISD::SRA, MVT::v8i16, 1 }, // vpsravw 498 499 { ISD::SHL, MVT::v16i16, 1 }, // vpsllvw 500 { ISD::SRL, MVT::v16i16, 1 }, // vpsrlvw 501 { ISD::SRA, MVT::v16i16, 1 }, // vpsravw 502 503 { ISD::SHL, MVT::v32i16, 1 }, // vpsllvw 504 { ISD::SRL, MVT::v32i16, 1 }, // vpsrlvw 505 { ISD::SRA, MVT::v32i16, 1 }, // vpsravw 506 }; 507 508 if (ST->hasBWI()) 509 if (const auto *Entry = CostTableLookup(AVX512BWShiftCostTable, ISD, LT.second)) 510 return LT.first * Entry->Cost; 511 512 static const CostTblEntry AVX2UniformCostTable[] = { 513 // Uniform splats are cheaper for the following instructions. 514 { ISD::SHL, MVT::v16i16, 1 }, // psllw. 515 { ISD::SRL, MVT::v16i16, 1 }, // psrlw. 516 { ISD::SRA, MVT::v16i16, 1 }, // psraw. 517 { ISD::SHL, MVT::v32i16, 2 }, // 2*psllw. 518 { ISD::SRL, MVT::v32i16, 2 }, // 2*psrlw. 519 { ISD::SRA, MVT::v32i16, 2 }, // 2*psraw. 520 }; 521 522 if (ST->hasAVX2() && 523 ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) || 524 (Op2Info == TargetTransformInfo::OK_UniformValue))) { 525 if (const auto *Entry = 526 CostTableLookup(AVX2UniformCostTable, ISD, LT.second)) 527 return LT.first * Entry->Cost; 528 } 529 530 static const CostTblEntry SSE2UniformCostTable[] = { 531 // Uniform splats are cheaper for the following instructions. 532 { ISD::SHL, MVT::v8i16, 1 }, // psllw. 533 { ISD::SHL, MVT::v4i32, 1 }, // pslld 534 { ISD::SHL, MVT::v2i64, 1 }, // psllq. 535 536 { ISD::SRL, MVT::v8i16, 1 }, // psrlw. 537 { ISD::SRL, MVT::v4i32, 1 }, // psrld. 538 { ISD::SRL, MVT::v2i64, 1 }, // psrlq. 539 540 { ISD::SRA, MVT::v8i16, 1 }, // psraw. 541 { ISD::SRA, MVT::v4i32, 1 }, // psrad. 542 }; 543 544 if (ST->hasSSE2() && 545 ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) || 546 (Op2Info == TargetTransformInfo::OK_UniformValue))) { 547 if (const auto *Entry = 548 CostTableLookup(SSE2UniformCostTable, ISD, LT.second)) 549 return LT.first * Entry->Cost; 550 } 551 552 static const CostTblEntry AVX512DQCostTable[] = { 553 { ISD::MUL, MVT::v2i64, 1 }, 554 { ISD::MUL, MVT::v4i64, 1 }, 555 { ISD::MUL, MVT::v8i64, 1 } 556 }; 557 558 // Look for AVX512DQ lowering tricks for custom cases. 559 if (ST->hasDQI()) 560 if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD, LT.second)) 561 return LT.first * Entry->Cost; 562 563 static const CostTblEntry AVX512BWCostTable[] = { 564 { ISD::SHL, MVT::v64i8, 11 }, // vpblendvb sequence. 565 { ISD::SRL, MVT::v64i8, 11 }, // vpblendvb sequence. 566 { ISD::SRA, MVT::v64i8, 24 }, // vpblendvb sequence. 567 568 { ISD::MUL, MVT::v64i8, 11 }, // extend/pmullw/trunc sequence. 569 { ISD::MUL, MVT::v32i8, 4 }, // extend/pmullw/trunc sequence. 570 { ISD::MUL, MVT::v16i8, 4 }, // extend/pmullw/trunc sequence. 571 }; 572 573 // Look for AVX512BW lowering tricks for custom cases. 574 if (ST->hasBWI()) 575 if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD, LT.second)) 576 return LT.first * Entry->Cost; 577 578 static const CostTblEntry AVX512CostTable[] = { 579 { ISD::SHL, MVT::v16i32, 1 }, 580 { ISD::SRL, MVT::v16i32, 1 }, 581 { ISD::SRA, MVT::v16i32, 1 }, 582 583 { ISD::SHL, MVT::v8i64, 1 }, 584 { ISD::SRL, MVT::v8i64, 1 }, 585 586 { ISD::SRA, MVT::v2i64, 1 }, 587 { ISD::SRA, MVT::v4i64, 1 }, 588 { ISD::SRA, MVT::v8i64, 1 }, 589 590 { ISD::MUL, MVT::v64i8, 26 }, // extend/pmullw/trunc sequence. 591 { ISD::MUL, MVT::v32i8, 13 }, // extend/pmullw/trunc sequence. 592 { ISD::MUL, MVT::v16i8, 5 }, // extend/pmullw/trunc sequence. 593 { ISD::MUL, MVT::v16i32, 1 }, // pmulld (Skylake from agner.org) 594 { ISD::MUL, MVT::v8i32, 1 }, // pmulld (Skylake from agner.org) 595 { ISD::MUL, MVT::v4i32, 1 }, // pmulld (Skylake from agner.org) 596 { ISD::MUL, MVT::v8i64, 8 }, // 3*pmuludq/3*shift/2*add 597 598 { ISD::FADD, MVT::v8f64, 1 }, // Skylake from http://www.agner.org/ 599 { ISD::FSUB, MVT::v8f64, 1 }, // Skylake from http://www.agner.org/ 600 { ISD::FMUL, MVT::v8f64, 1 }, // Skylake from http://www.agner.org/ 601 602 { ISD::FADD, MVT::v16f32, 1 }, // Skylake from http://www.agner.org/ 603 { ISD::FSUB, MVT::v16f32, 1 }, // Skylake from http://www.agner.org/ 604 { ISD::FMUL, MVT::v16f32, 1 }, // Skylake from http://www.agner.org/ 605 }; 606 607 if (ST->hasAVX512()) 608 if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second)) 609 return LT.first * Entry->Cost; 610 611 static const CostTblEntry AVX2ShiftCostTable[] = { 612 // Shifts on v4i64/v8i32 on AVX2 is legal even though we declare to 613 // customize them to detect the cases where shift amount is a scalar one. 614 { ISD::SHL, MVT::v4i32, 1 }, 615 { ISD::SRL, MVT::v4i32, 1 }, 616 { ISD::SRA, MVT::v4i32, 1 }, 617 { ISD::SHL, MVT::v8i32, 1 }, 618 { ISD::SRL, MVT::v8i32, 1 }, 619 { ISD::SRA, MVT::v8i32, 1 }, 620 { ISD::SHL, MVT::v2i64, 1 }, 621 { ISD::SRL, MVT::v2i64, 1 }, 622 { ISD::SHL, MVT::v4i64, 1 }, 623 { ISD::SRL, MVT::v4i64, 1 }, 624 }; 625 626 if (ST->hasAVX512()) { 627 if (ISD == ISD::SHL && LT.second == MVT::v32i16 && 628 (Op2Info == TargetTransformInfo::OK_UniformConstantValue || 629 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue)) 630 // On AVX512, a packed v32i16 shift left by a constant build_vector 631 // is lowered into a vector multiply (vpmullw). 632 return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind, 633 Op1Info, Op2Info, 634 TargetTransformInfo::OP_None, 635 TargetTransformInfo::OP_None); 636 } 637 638 // Look for AVX2 lowering tricks. 639 if (ST->hasAVX2()) { 640 if (ISD == ISD::SHL && LT.second == MVT::v16i16 && 641 (Op2Info == TargetTransformInfo::OK_UniformConstantValue || 642 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue)) 643 // On AVX2, a packed v16i16 shift left by a constant build_vector 644 // is lowered into a vector multiply (vpmullw). 645 return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind, 646 Op1Info, Op2Info, 647 TargetTransformInfo::OP_None, 648 TargetTransformInfo::OP_None); 649 650 if (const auto *Entry = CostTableLookup(AVX2ShiftCostTable, ISD, LT.second)) 651 return LT.first * Entry->Cost; 652 } 653 654 static const CostTblEntry XOPShiftCostTable[] = { 655 // 128bit shifts take 1cy, but right shifts require negation beforehand. 656 { ISD::SHL, MVT::v16i8, 1 }, 657 { ISD::SRL, MVT::v16i8, 2 }, 658 { ISD::SRA, MVT::v16i8, 2 }, 659 { ISD::SHL, MVT::v8i16, 1 }, 660 { ISD::SRL, MVT::v8i16, 2 }, 661 { ISD::SRA, MVT::v8i16, 2 }, 662 { ISD::SHL, MVT::v4i32, 1 }, 663 { ISD::SRL, MVT::v4i32, 2 }, 664 { ISD::SRA, MVT::v4i32, 2 }, 665 { ISD::SHL, MVT::v2i64, 1 }, 666 { ISD::SRL, MVT::v2i64, 2 }, 667 { ISD::SRA, MVT::v2i64, 2 }, 668 // 256bit shifts require splitting if AVX2 didn't catch them above. 669 { ISD::SHL, MVT::v32i8, 2+2 }, 670 { ISD::SRL, MVT::v32i8, 4+2 }, 671 { ISD::SRA, MVT::v32i8, 4+2 }, 672 { ISD::SHL, MVT::v16i16, 2+2 }, 673 { ISD::SRL, MVT::v16i16, 4+2 }, 674 { ISD::SRA, MVT::v16i16, 4+2 }, 675 { ISD::SHL, MVT::v8i32, 2+2 }, 676 { ISD::SRL, MVT::v8i32, 4+2 }, 677 { ISD::SRA, MVT::v8i32, 4+2 }, 678 { ISD::SHL, MVT::v4i64, 2+2 }, 679 { ISD::SRL, MVT::v4i64, 4+2 }, 680 { ISD::SRA, MVT::v4i64, 4+2 }, 681 }; 682 683 // Look for XOP lowering tricks. 684 if (ST->hasXOP()) { 685 // If the right shift is constant then we'll fold the negation so 686 // it's as cheap as a left shift. 687 int ShiftISD = ISD; 688 if ((ShiftISD == ISD::SRL || ShiftISD == ISD::SRA) && 689 (Op2Info == TargetTransformInfo::OK_UniformConstantValue || 690 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue)) 691 ShiftISD = ISD::SHL; 692 if (const auto *Entry = 693 CostTableLookup(XOPShiftCostTable, ShiftISD, LT.second)) 694 return LT.first * Entry->Cost; 695 } 696 697 static const CostTblEntry SSE2UniformShiftCostTable[] = { 698 // Uniform splats are cheaper for the following instructions. 699 { ISD::SHL, MVT::v16i16, 2+2 }, // 2*psllw + split. 700 { ISD::SHL, MVT::v8i32, 2+2 }, // 2*pslld + split. 701 { ISD::SHL, MVT::v4i64, 2+2 }, // 2*psllq + split. 702 703 { ISD::SRL, MVT::v16i16, 2+2 }, // 2*psrlw + split. 704 { ISD::SRL, MVT::v8i32, 2+2 }, // 2*psrld + split. 705 { ISD::SRL, MVT::v4i64, 2+2 }, // 2*psrlq + split. 706 707 { ISD::SRA, MVT::v16i16, 2+2 }, // 2*psraw + split. 708 { ISD::SRA, MVT::v8i32, 2+2 }, // 2*psrad + split. 709 { ISD::SRA, MVT::v2i64, 4 }, // 2*psrad + shuffle. 710 { ISD::SRA, MVT::v4i64, 8+2 }, // 2*(2*psrad + shuffle) + split. 711 }; 712 713 if (ST->hasSSE2() && 714 ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) || 715 (Op2Info == TargetTransformInfo::OK_UniformValue))) { 716 717 // Handle AVX2 uniform v4i64 ISD::SRA, it's not worth a table. 718 if (ISD == ISD::SRA && LT.second == MVT::v4i64 && ST->hasAVX2()) 719 return LT.first * 4; // 2*psrad + shuffle. 720 721 if (const auto *Entry = 722 CostTableLookup(SSE2UniformShiftCostTable, ISD, LT.second)) 723 return LT.first * Entry->Cost; 724 } 725 726 if (ISD == ISD::SHL && 727 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) { 728 MVT VT = LT.second; 729 // Vector shift left by non uniform constant can be lowered 730 // into vector multiply. 731 if (((VT == MVT::v8i16 || VT == MVT::v4i32) && ST->hasSSE2()) || 732 ((VT == MVT::v16i16 || VT == MVT::v8i32) && ST->hasAVX())) 733 ISD = ISD::MUL; 734 } 735 736 static const CostTblEntry AVX2CostTable[] = { 737 { ISD::SHL, MVT::v32i8, 11 }, // vpblendvb sequence. 738 { ISD::SHL, MVT::v64i8, 22 }, // 2*vpblendvb sequence. 739 { ISD::SHL, MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence. 740 { ISD::SHL, MVT::v32i16, 20 }, // 2*extend/vpsrlvd/pack sequence. 741 742 { ISD::SRL, MVT::v32i8, 11 }, // vpblendvb sequence. 743 { ISD::SRL, MVT::v64i8, 22 }, // 2*vpblendvb sequence. 744 { ISD::SRL, MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence. 745 { ISD::SRL, MVT::v32i16, 20 }, // 2*extend/vpsrlvd/pack sequence. 746 747 { ISD::SRA, MVT::v32i8, 24 }, // vpblendvb sequence. 748 { ISD::SRA, MVT::v64i8, 48 }, // 2*vpblendvb sequence. 749 { ISD::SRA, MVT::v16i16, 10 }, // extend/vpsravd/pack sequence. 750 { ISD::SRA, MVT::v32i16, 20 }, // 2*extend/vpsravd/pack sequence. 751 { ISD::SRA, MVT::v2i64, 4 }, // srl/xor/sub sequence. 752 { ISD::SRA, MVT::v4i64, 4 }, // srl/xor/sub sequence. 753 754 { ISD::SUB, MVT::v32i8, 1 }, // psubb 755 { ISD::ADD, MVT::v32i8, 1 }, // paddb 756 { ISD::SUB, MVT::v16i16, 1 }, // psubw 757 { ISD::ADD, MVT::v16i16, 1 }, // paddw 758 { ISD::SUB, MVT::v8i32, 1 }, // psubd 759 { ISD::ADD, MVT::v8i32, 1 }, // paddd 760 { ISD::SUB, MVT::v4i64, 1 }, // psubq 761 { ISD::ADD, MVT::v4i64, 1 }, // paddq 762 763 { ISD::MUL, MVT::v32i8, 17 }, // extend/pmullw/trunc sequence. 764 { ISD::MUL, MVT::v16i8, 7 }, // extend/pmullw/trunc sequence. 765 { ISD::MUL, MVT::v16i16, 1 }, // pmullw 766 { ISD::MUL, MVT::v8i32, 2 }, // pmulld (Haswell from agner.org) 767 { ISD::MUL, MVT::v4i64, 8 }, // 3*pmuludq/3*shift/2*add 768 769 { ISD::FADD, MVT::v4f64, 1 }, // Haswell from http://www.agner.org/ 770 { ISD::FADD, MVT::v8f32, 1 }, // Haswell from http://www.agner.org/ 771 { ISD::FSUB, MVT::v4f64, 1 }, // Haswell from http://www.agner.org/ 772 { ISD::FSUB, MVT::v8f32, 1 }, // Haswell from http://www.agner.org/ 773 { ISD::FMUL, MVT::v4f64, 1 }, // Haswell from http://www.agner.org/ 774 { ISD::FMUL, MVT::v8f32, 1 }, // Haswell from http://www.agner.org/ 775 776 { ISD::FDIV, MVT::f32, 7 }, // Haswell from http://www.agner.org/ 777 { ISD::FDIV, MVT::v4f32, 7 }, // Haswell from http://www.agner.org/ 778 { ISD::FDIV, MVT::v8f32, 14 }, // Haswell from http://www.agner.org/ 779 { ISD::FDIV, MVT::f64, 14 }, // Haswell from http://www.agner.org/ 780 { ISD::FDIV, MVT::v2f64, 14 }, // Haswell from http://www.agner.org/ 781 { ISD::FDIV, MVT::v4f64, 28 }, // Haswell from http://www.agner.org/ 782 }; 783 784 // Look for AVX2 lowering tricks for custom cases. 785 if (ST->hasAVX2()) 786 if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second)) 787 return LT.first * Entry->Cost; 788 789 static const CostTblEntry AVX1CostTable[] = { 790 // We don't have to scalarize unsupported ops. We can issue two half-sized 791 // operations and we only need to extract the upper YMM half. 792 // Two ops + 1 extract + 1 insert = 4. 793 { ISD::MUL, MVT::v16i16, 4 }, 794 { ISD::MUL, MVT::v8i32, 4 }, 795 { ISD::SUB, MVT::v32i8, 4 }, 796 { ISD::ADD, MVT::v32i8, 4 }, 797 { ISD::SUB, MVT::v16i16, 4 }, 798 { ISD::ADD, MVT::v16i16, 4 }, 799 { ISD::SUB, MVT::v8i32, 4 }, 800 { ISD::ADD, MVT::v8i32, 4 }, 801 { ISD::SUB, MVT::v4i64, 4 }, 802 { ISD::ADD, MVT::v4i64, 4 }, 803 804 // A v4i64 multiply is custom lowered as two split v2i64 vectors that then 805 // are lowered as a series of long multiplies(3), shifts(3) and adds(2) 806 // Because we believe v4i64 to be a legal type, we must also include the 807 // extract+insert in the cost table. Therefore, the cost here is 18 808 // instead of 8. 809 { ISD::MUL, MVT::v4i64, 18 }, 810 811 { ISD::MUL, MVT::v32i8, 26 }, // extend/pmullw/trunc sequence. 812 813 { ISD::FDIV, MVT::f32, 14 }, // SNB from http://www.agner.org/ 814 { ISD::FDIV, MVT::v4f32, 14 }, // SNB from http://www.agner.org/ 815 { ISD::FDIV, MVT::v8f32, 28 }, // SNB from http://www.agner.org/ 816 { ISD::FDIV, MVT::f64, 22 }, // SNB from http://www.agner.org/ 817 { ISD::FDIV, MVT::v2f64, 22 }, // SNB from http://www.agner.org/ 818 { ISD::FDIV, MVT::v4f64, 44 }, // SNB from http://www.agner.org/ 819 }; 820 821 if (ST->hasAVX()) 822 if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, LT.second)) 823 return LT.first * Entry->Cost; 824 825 static const CostTblEntry SSE42CostTable[] = { 826 { ISD::FADD, MVT::f64, 1 }, // Nehalem from http://www.agner.org/ 827 { ISD::FADD, MVT::f32, 1 }, // Nehalem from http://www.agner.org/ 828 { ISD::FADD, MVT::v2f64, 1 }, // Nehalem from http://www.agner.org/ 829 { ISD::FADD, MVT::v4f32, 1 }, // Nehalem from http://www.agner.org/ 830 831 { ISD::FSUB, MVT::f64, 1 }, // Nehalem from http://www.agner.org/ 832 { ISD::FSUB, MVT::f32 , 1 }, // Nehalem from http://www.agner.org/ 833 { ISD::FSUB, MVT::v2f64, 1 }, // Nehalem from http://www.agner.org/ 834 { ISD::FSUB, MVT::v4f32, 1 }, // Nehalem from http://www.agner.org/ 835 836 { ISD::FMUL, MVT::f64, 1 }, // Nehalem from http://www.agner.org/ 837 { ISD::FMUL, MVT::f32, 1 }, // Nehalem from http://www.agner.org/ 838 { ISD::FMUL, MVT::v2f64, 1 }, // Nehalem from http://www.agner.org/ 839 { ISD::FMUL, MVT::v4f32, 1 }, // Nehalem from http://www.agner.org/ 840 841 { ISD::FDIV, MVT::f32, 14 }, // Nehalem from http://www.agner.org/ 842 { ISD::FDIV, MVT::v4f32, 14 }, // Nehalem from http://www.agner.org/ 843 { ISD::FDIV, MVT::f64, 22 }, // Nehalem from http://www.agner.org/ 844 { ISD::FDIV, MVT::v2f64, 22 }, // Nehalem from http://www.agner.org/ 845 }; 846 847 if (ST->hasSSE42()) 848 if (const auto *Entry = CostTableLookup(SSE42CostTable, ISD, LT.second)) 849 return LT.first * Entry->Cost; 850 851 static const CostTblEntry SSE41CostTable[] = { 852 { ISD::SHL, MVT::v16i8, 11 }, // pblendvb sequence. 853 { ISD::SHL, MVT::v32i8, 2*11+2 }, // pblendvb sequence + split. 854 { ISD::SHL, MVT::v8i16, 14 }, // pblendvb sequence. 855 { ISD::SHL, MVT::v16i16, 2*14+2 }, // pblendvb sequence + split. 856 { ISD::SHL, MVT::v4i32, 4 }, // pslld/paddd/cvttps2dq/pmulld 857 { ISD::SHL, MVT::v8i32, 2*4+2 }, // pslld/paddd/cvttps2dq/pmulld + split 858 859 { ISD::SRL, MVT::v16i8, 12 }, // pblendvb sequence. 860 { ISD::SRL, MVT::v32i8, 2*12+2 }, // pblendvb sequence + split. 861 { ISD::SRL, MVT::v8i16, 14 }, // pblendvb sequence. 862 { ISD::SRL, MVT::v16i16, 2*14+2 }, // pblendvb sequence + split. 863 { ISD::SRL, MVT::v4i32, 11 }, // Shift each lane + blend. 864 { ISD::SRL, MVT::v8i32, 2*11+2 }, // Shift each lane + blend + split. 865 866 { ISD::SRA, MVT::v16i8, 24 }, // pblendvb sequence. 867 { ISD::SRA, MVT::v32i8, 2*24+2 }, // pblendvb sequence + split. 868 { ISD::SRA, MVT::v8i16, 14 }, // pblendvb sequence. 869 { ISD::SRA, MVT::v16i16, 2*14+2 }, // pblendvb sequence + split. 870 { ISD::SRA, MVT::v4i32, 12 }, // Shift each lane + blend. 871 { ISD::SRA, MVT::v8i32, 2*12+2 }, // Shift each lane + blend + split. 872 873 { ISD::MUL, MVT::v4i32, 2 } // pmulld (Nehalem from agner.org) 874 }; 875 876 if (ST->hasSSE41()) 877 if (const auto *Entry = CostTableLookup(SSE41CostTable, ISD, LT.second)) 878 return LT.first * Entry->Cost; 879 880 static const CostTblEntry SSE2CostTable[] = { 881 // We don't correctly identify costs of casts because they are marked as 882 // custom. 883 { ISD::SHL, MVT::v16i8, 26 }, // cmpgtb sequence. 884 { ISD::SHL, MVT::v8i16, 32 }, // cmpgtb sequence. 885 { ISD::SHL, MVT::v4i32, 2*5 }, // We optimized this using mul. 886 { ISD::SHL, MVT::v2i64, 4 }, // splat+shuffle sequence. 887 { ISD::SHL, MVT::v4i64, 2*4+2 }, // splat+shuffle sequence + split. 888 889 { ISD::SRL, MVT::v16i8, 26 }, // cmpgtb sequence. 890 { ISD::SRL, MVT::v8i16, 32 }, // cmpgtb sequence. 891 { ISD::SRL, MVT::v4i32, 16 }, // Shift each lane + blend. 892 { ISD::SRL, MVT::v2i64, 4 }, // splat+shuffle sequence. 893 { ISD::SRL, MVT::v4i64, 2*4+2 }, // splat+shuffle sequence + split. 894 895 { ISD::SRA, MVT::v16i8, 54 }, // unpacked cmpgtb sequence. 896 { ISD::SRA, MVT::v8i16, 32 }, // cmpgtb sequence. 897 { ISD::SRA, MVT::v4i32, 16 }, // Shift each lane + blend. 898 { ISD::SRA, MVT::v2i64, 12 }, // srl/xor/sub sequence. 899 { ISD::SRA, MVT::v4i64, 2*12+2 }, // srl/xor/sub sequence+split. 900 901 { ISD::MUL, MVT::v16i8, 12 }, // extend/pmullw/trunc sequence. 902 { ISD::MUL, MVT::v8i16, 1 }, // pmullw 903 { ISD::MUL, MVT::v4i32, 6 }, // 3*pmuludq/4*shuffle 904 { ISD::MUL, MVT::v2i64, 8 }, // 3*pmuludq/3*shift/2*add 905 906 { ISD::FDIV, MVT::f32, 23 }, // Pentium IV from http://www.agner.org/ 907 { ISD::FDIV, MVT::v4f32, 39 }, // Pentium IV from http://www.agner.org/ 908 { ISD::FDIV, MVT::f64, 38 }, // Pentium IV from http://www.agner.org/ 909 { ISD::FDIV, MVT::v2f64, 69 }, // Pentium IV from http://www.agner.org/ 910 911 { ISD::FADD, MVT::f32, 2 }, // Pentium IV from http://www.agner.org/ 912 { ISD::FADD, MVT::f64, 2 }, // Pentium IV from http://www.agner.org/ 913 914 { ISD::FSUB, MVT::f32, 2 }, // Pentium IV from http://www.agner.org/ 915 { ISD::FSUB, MVT::f64, 2 }, // Pentium IV from http://www.agner.org/ 916 }; 917 918 if (ST->hasSSE2()) 919 if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second)) 920 return LT.first * Entry->Cost; 921 922 static const CostTblEntry SSE1CostTable[] = { 923 { ISD::FDIV, MVT::f32, 17 }, // Pentium III from http://www.agner.org/ 924 { ISD::FDIV, MVT::v4f32, 34 }, // Pentium III from http://www.agner.org/ 925 926 { ISD::FADD, MVT::f32, 1 }, // Pentium III from http://www.agner.org/ 927 { ISD::FADD, MVT::v4f32, 2 }, // Pentium III from http://www.agner.org/ 928 929 { ISD::FSUB, MVT::f32, 1 }, // Pentium III from http://www.agner.org/ 930 { ISD::FSUB, MVT::v4f32, 2 }, // Pentium III from http://www.agner.org/ 931 932 { ISD::ADD, MVT::i8, 1 }, // Pentium III from http://www.agner.org/ 933 { ISD::ADD, MVT::i16, 1 }, // Pentium III from http://www.agner.org/ 934 { ISD::ADD, MVT::i32, 1 }, // Pentium III from http://www.agner.org/ 935 936 { ISD::SUB, MVT::i8, 1 }, // Pentium III from http://www.agner.org/ 937 { ISD::SUB, MVT::i16, 1 }, // Pentium III from http://www.agner.org/ 938 { ISD::SUB, MVT::i32, 1 }, // Pentium III from http://www.agner.org/ 939 }; 940 941 if (ST->hasSSE1()) 942 if (const auto *Entry = CostTableLookup(SSE1CostTable, ISD, LT.second)) 943 return LT.first * Entry->Cost; 944 945 // It is not a good idea to vectorize division. We have to scalarize it and 946 // in the process we will often end up having to spilling regular 947 // registers. The overhead of division is going to dominate most kernels 948 // anyways so try hard to prevent vectorization of division - it is 949 // generally a bad idea. Assume somewhat arbitrarily that we have to be able 950 // to hide "20 cycles" for each lane. 951 if (LT.second.isVector() && (ISD == ISD::SDIV || ISD == ISD::SREM || 952 ISD == ISD::UDIV || ISD == ISD::UREM)) { 953 InstructionCost ScalarCost = getArithmeticInstrCost( 954 Opcode, Ty->getScalarType(), CostKind, Op1Info, Op2Info, 955 TargetTransformInfo::OP_None, TargetTransformInfo::OP_None); 956 return 20 * LT.first * LT.second.getVectorNumElements() * ScalarCost; 957 } 958 959 // Fallback to the default implementation. 960 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info); 961 } 962 963 InstructionCost X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, 964 VectorType *BaseTp, 965 ArrayRef<int> Mask, int Index, 966 VectorType *SubTp) { 967 // 64-bit packed float vectors (v2f32) are widened to type v4f32. 968 // 64-bit packed integer vectors (v2i32) are widened to type v4i32. 969 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, BaseTp); 970 971 // Treat Transpose as 2-op shuffles - there's no difference in lowering. 972 if (Kind == TTI::SK_Transpose) 973 Kind = TTI::SK_PermuteTwoSrc; 974 975 // For Broadcasts we are splatting the first element from the first input 976 // register, so only need to reference that input and all the output 977 // registers are the same. 978 if (Kind == TTI::SK_Broadcast) 979 LT.first = 1; 980 981 // Subvector extractions are free if they start at the beginning of a 982 // vector and cheap if the subvectors are aligned. 983 if (Kind == TTI::SK_ExtractSubvector && LT.second.isVector()) { 984 int NumElts = LT.second.getVectorNumElements(); 985 if ((Index % NumElts) == 0) 986 return 0; 987 std::pair<int, MVT> SubLT = TLI->getTypeLegalizationCost(DL, SubTp); 988 if (SubLT.second.isVector()) { 989 int NumSubElts = SubLT.second.getVectorNumElements(); 990 if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0) 991 return SubLT.first; 992 // Handle some cases for widening legalization. For now we only handle 993 // cases where the original subvector was naturally aligned and evenly 994 // fit in its legalized subvector type. 995 // FIXME: Remove some of the alignment restrictions. 996 // FIXME: We can use permq for 64-bit or larger extracts from 256-bit 997 // vectors. 998 int OrigSubElts = cast<FixedVectorType>(SubTp)->getNumElements(); 999 if (NumSubElts > OrigSubElts && (Index % OrigSubElts) == 0 && 1000 (NumSubElts % OrigSubElts) == 0 && 1001 LT.second.getVectorElementType() == 1002 SubLT.second.getVectorElementType() && 1003 LT.second.getVectorElementType().getSizeInBits() == 1004 BaseTp->getElementType()->getPrimitiveSizeInBits()) { 1005 assert(NumElts >= NumSubElts && NumElts > OrigSubElts && 1006 "Unexpected number of elements!"); 1007 auto *VecTy = FixedVectorType::get(BaseTp->getElementType(), 1008 LT.second.getVectorNumElements()); 1009 auto *SubTy = FixedVectorType::get(BaseTp->getElementType(), 1010 SubLT.second.getVectorNumElements()); 1011 int ExtractIndex = alignDown((Index % NumElts), NumSubElts); 1012 InstructionCost ExtractCost = getShuffleCost( 1013 TTI::SK_ExtractSubvector, VecTy, None, ExtractIndex, SubTy); 1014 1015 // If the original size is 32-bits or more, we can use pshufd. Otherwise 1016 // if we have SSSE3 we can use pshufb. 1017 if (SubTp->getPrimitiveSizeInBits() >= 32 || ST->hasSSSE3()) 1018 return ExtractCost + 1; // pshufd or pshufb 1019 1020 assert(SubTp->getPrimitiveSizeInBits() == 16 && 1021 "Unexpected vector size"); 1022 1023 return ExtractCost + 2; // worst case pshufhw + pshufd 1024 } 1025 } 1026 } 1027 1028 // Subvector insertions are cheap if the subvectors are aligned. 1029 // Note that in general, the insertion starting at the beginning of a vector 1030 // isn't free, because we need to preserve the rest of the wide vector. 1031 if (Kind == TTI::SK_InsertSubvector && LT.second.isVector()) { 1032 int NumElts = LT.second.getVectorNumElements(); 1033 std::pair<int, MVT> SubLT = TLI->getTypeLegalizationCost(DL, SubTp); 1034 if (SubLT.second.isVector()) { 1035 int NumSubElts = SubLT.second.getVectorNumElements(); 1036 if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0) 1037 return SubLT.first; 1038 } 1039 } 1040 1041 // Handle some common (illegal) sub-vector types as they are often very cheap 1042 // to shuffle even on targets without PSHUFB. 1043 EVT VT = TLI->getValueType(DL, BaseTp); 1044 if (VT.isSimple() && VT.isVector() && VT.getSizeInBits() < 128 && 1045 !ST->hasSSSE3()) { 1046 static const CostTblEntry SSE2SubVectorShuffleTbl[] = { 1047 {TTI::SK_Broadcast, MVT::v4i16, 1}, // pshuflw 1048 {TTI::SK_Broadcast, MVT::v2i16, 1}, // pshuflw 1049 {TTI::SK_Broadcast, MVT::v8i8, 2}, // punpck/pshuflw 1050 {TTI::SK_Broadcast, MVT::v4i8, 2}, // punpck/pshuflw 1051 {TTI::SK_Broadcast, MVT::v2i8, 1}, // punpck 1052 1053 {TTI::SK_Reverse, MVT::v4i16, 1}, // pshuflw 1054 {TTI::SK_Reverse, MVT::v2i16, 1}, // pshuflw 1055 {TTI::SK_Reverse, MVT::v4i8, 3}, // punpck/pshuflw/packus 1056 {TTI::SK_Reverse, MVT::v2i8, 1}, // punpck 1057 1058 {TTI::SK_PermuteTwoSrc, MVT::v4i16, 2}, // punpck/pshuflw 1059 {TTI::SK_PermuteTwoSrc, MVT::v2i16, 2}, // punpck/pshuflw 1060 {TTI::SK_PermuteTwoSrc, MVT::v8i8, 7}, // punpck/pshuflw 1061 {TTI::SK_PermuteTwoSrc, MVT::v4i8, 4}, // punpck/pshuflw 1062 {TTI::SK_PermuteTwoSrc, MVT::v2i8, 2}, // punpck 1063 1064 {TTI::SK_PermuteSingleSrc, MVT::v4i16, 1}, // pshuflw 1065 {TTI::SK_PermuteSingleSrc, MVT::v2i16, 1}, // pshuflw 1066 {TTI::SK_PermuteSingleSrc, MVT::v8i8, 5}, // punpck/pshuflw 1067 {TTI::SK_PermuteSingleSrc, MVT::v4i8, 3}, // punpck/pshuflw 1068 {TTI::SK_PermuteSingleSrc, MVT::v2i8, 1}, // punpck 1069 }; 1070 1071 if (ST->hasSSE2()) 1072 if (const auto *Entry = 1073 CostTableLookup(SSE2SubVectorShuffleTbl, Kind, VT.getSimpleVT())) 1074 return Entry->Cost; 1075 } 1076 1077 // We are going to permute multiple sources and the result will be in multiple 1078 // destinations. Providing an accurate cost only for splits where the element 1079 // type remains the same. 1080 if (Kind == TTI::SK_PermuteSingleSrc && LT.first != 1) { 1081 MVT LegalVT = LT.second; 1082 if (LegalVT.isVector() && 1083 LegalVT.getVectorElementType().getSizeInBits() == 1084 BaseTp->getElementType()->getPrimitiveSizeInBits() && 1085 LegalVT.getVectorNumElements() < 1086 cast<FixedVectorType>(BaseTp)->getNumElements()) { 1087 1088 unsigned VecTySize = DL.getTypeStoreSize(BaseTp); 1089 unsigned LegalVTSize = LegalVT.getStoreSize(); 1090 // Number of source vectors after legalization: 1091 unsigned NumOfSrcs = (VecTySize + LegalVTSize - 1) / LegalVTSize; 1092 // Number of destination vectors after legalization: 1093 unsigned NumOfDests = LT.first; 1094 1095 auto *SingleOpTy = FixedVectorType::get(BaseTp->getElementType(), 1096 LegalVT.getVectorNumElements()); 1097 1098 unsigned NumOfShuffles = (NumOfSrcs - 1) * NumOfDests; 1099 return NumOfShuffles * getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy, 1100 None, 0, nullptr); 1101 } 1102 1103 return BaseT::getShuffleCost(Kind, BaseTp, Mask, Index, SubTp); 1104 } 1105 1106 // For 2-input shuffles, we must account for splitting the 2 inputs into many. 1107 if (Kind == TTI::SK_PermuteTwoSrc && LT.first != 1) { 1108 // We assume that source and destination have the same vector type. 1109 int NumOfDests = LT.first; 1110 int NumOfShufflesPerDest = LT.first * 2 - 1; 1111 LT.first = NumOfDests * NumOfShufflesPerDest; 1112 } 1113 1114 static const CostTblEntry AVX512VBMIShuffleTbl[] = { 1115 {TTI::SK_Reverse, MVT::v64i8, 1}, // vpermb 1116 {TTI::SK_Reverse, MVT::v32i8, 1}, // vpermb 1117 1118 {TTI::SK_PermuteSingleSrc, MVT::v64i8, 1}, // vpermb 1119 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 1}, // vpermb 1120 1121 {TTI::SK_PermuteTwoSrc, MVT::v64i8, 2}, // vpermt2b 1122 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 2}, // vpermt2b 1123 {TTI::SK_PermuteTwoSrc, MVT::v16i8, 2} // vpermt2b 1124 }; 1125 1126 if (ST->hasVBMI()) 1127 if (const auto *Entry = 1128 CostTableLookup(AVX512VBMIShuffleTbl, Kind, LT.second)) 1129 return LT.first * Entry->Cost; 1130 1131 static const CostTblEntry AVX512BWShuffleTbl[] = { 1132 {TTI::SK_Broadcast, MVT::v32i16, 1}, // vpbroadcastw 1133 {TTI::SK_Broadcast, MVT::v64i8, 1}, // vpbroadcastb 1134 1135 {TTI::SK_Reverse, MVT::v32i16, 2}, // vpermw 1136 {TTI::SK_Reverse, MVT::v16i16, 2}, // vpermw 1137 {TTI::SK_Reverse, MVT::v64i8, 2}, // pshufb + vshufi64x2 1138 1139 {TTI::SK_PermuteSingleSrc, MVT::v32i16, 2}, // vpermw 1140 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 2}, // vpermw 1141 {TTI::SK_PermuteSingleSrc, MVT::v64i8, 8}, // extend to v32i16 1142 1143 {TTI::SK_PermuteTwoSrc, MVT::v32i16, 2}, // vpermt2w 1144 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 2}, // vpermt2w 1145 {TTI::SK_PermuteTwoSrc, MVT::v8i16, 2}, // vpermt2w 1146 {TTI::SK_PermuteTwoSrc, MVT::v64i8, 19}, // 6 * v32i8 + 1 1147 1148 {TTI::SK_Select, MVT::v32i16, 1}, // vblendmw 1149 {TTI::SK_Select, MVT::v64i8, 1}, // vblendmb 1150 }; 1151 1152 if (ST->hasBWI()) 1153 if (const auto *Entry = 1154 CostTableLookup(AVX512BWShuffleTbl, Kind, LT.second)) 1155 return LT.first * Entry->Cost; 1156 1157 static const CostTblEntry AVX512ShuffleTbl[] = { 1158 {TTI::SK_Broadcast, MVT::v8f64, 1}, // vbroadcastpd 1159 {TTI::SK_Broadcast, MVT::v16f32, 1}, // vbroadcastps 1160 {TTI::SK_Broadcast, MVT::v8i64, 1}, // vpbroadcastq 1161 {TTI::SK_Broadcast, MVT::v16i32, 1}, // vpbroadcastd 1162 {TTI::SK_Broadcast, MVT::v32i16, 1}, // vpbroadcastw 1163 {TTI::SK_Broadcast, MVT::v64i8, 1}, // vpbroadcastb 1164 1165 {TTI::SK_Reverse, MVT::v8f64, 1}, // vpermpd 1166 {TTI::SK_Reverse, MVT::v16f32, 1}, // vpermps 1167 {TTI::SK_Reverse, MVT::v8i64, 1}, // vpermq 1168 {TTI::SK_Reverse, MVT::v16i32, 1}, // vpermd 1169 {TTI::SK_Reverse, MVT::v32i16, 7}, // per mca 1170 {TTI::SK_Reverse, MVT::v64i8, 7}, // per mca 1171 1172 {TTI::SK_PermuteSingleSrc, MVT::v8f64, 1}, // vpermpd 1173 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1}, // vpermpd 1174 {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1}, // vpermpd 1175 {TTI::SK_PermuteSingleSrc, MVT::v16f32, 1}, // vpermps 1176 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1}, // vpermps 1177 {TTI::SK_PermuteSingleSrc, MVT::v4f32, 1}, // vpermps 1178 {TTI::SK_PermuteSingleSrc, MVT::v8i64, 1}, // vpermq 1179 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1}, // vpermq 1180 {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1}, // vpermq 1181 {TTI::SK_PermuteSingleSrc, MVT::v16i32, 1}, // vpermd 1182 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1}, // vpermd 1183 {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1}, // vpermd 1184 {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1}, // pshufb 1185 1186 {TTI::SK_PermuteTwoSrc, MVT::v8f64, 1}, // vpermt2pd 1187 {TTI::SK_PermuteTwoSrc, MVT::v16f32, 1}, // vpermt2ps 1188 {TTI::SK_PermuteTwoSrc, MVT::v8i64, 1}, // vpermt2q 1189 {TTI::SK_PermuteTwoSrc, MVT::v16i32, 1}, // vpermt2d 1190 {TTI::SK_PermuteTwoSrc, MVT::v4f64, 1}, // vpermt2pd 1191 {TTI::SK_PermuteTwoSrc, MVT::v8f32, 1}, // vpermt2ps 1192 {TTI::SK_PermuteTwoSrc, MVT::v4i64, 1}, // vpermt2q 1193 {TTI::SK_PermuteTwoSrc, MVT::v8i32, 1}, // vpermt2d 1194 {TTI::SK_PermuteTwoSrc, MVT::v2f64, 1}, // vpermt2pd 1195 {TTI::SK_PermuteTwoSrc, MVT::v4f32, 1}, // vpermt2ps 1196 {TTI::SK_PermuteTwoSrc, MVT::v2i64, 1}, // vpermt2q 1197 {TTI::SK_PermuteTwoSrc, MVT::v4i32, 1}, // vpermt2d 1198 1199 // FIXME: This just applies the type legalization cost rules above 1200 // assuming these completely split. 1201 {TTI::SK_PermuteSingleSrc, MVT::v32i16, 14}, 1202 {TTI::SK_PermuteSingleSrc, MVT::v64i8, 14}, 1203 {TTI::SK_PermuteTwoSrc, MVT::v32i16, 42}, 1204 {TTI::SK_PermuteTwoSrc, MVT::v64i8, 42}, 1205 1206 {TTI::SK_Select, MVT::v32i16, 1}, // vpternlogq 1207 {TTI::SK_Select, MVT::v64i8, 1}, // vpternlogq 1208 {TTI::SK_Select, MVT::v8f64, 1}, // vblendmpd 1209 {TTI::SK_Select, MVT::v16f32, 1}, // vblendmps 1210 {TTI::SK_Select, MVT::v8i64, 1}, // vblendmq 1211 {TTI::SK_Select, MVT::v16i32, 1}, // vblendmd 1212 }; 1213 1214 if (ST->hasAVX512()) 1215 if (const auto *Entry = CostTableLookup(AVX512ShuffleTbl, Kind, LT.second)) 1216 return LT.first * Entry->Cost; 1217 1218 static const CostTblEntry AVX2ShuffleTbl[] = { 1219 {TTI::SK_Broadcast, MVT::v4f64, 1}, // vbroadcastpd 1220 {TTI::SK_Broadcast, MVT::v8f32, 1}, // vbroadcastps 1221 {TTI::SK_Broadcast, MVT::v4i64, 1}, // vpbroadcastq 1222 {TTI::SK_Broadcast, MVT::v8i32, 1}, // vpbroadcastd 1223 {TTI::SK_Broadcast, MVT::v16i16, 1}, // vpbroadcastw 1224 {TTI::SK_Broadcast, MVT::v32i8, 1}, // vpbroadcastb 1225 1226 {TTI::SK_Reverse, MVT::v4f64, 1}, // vpermpd 1227 {TTI::SK_Reverse, MVT::v8f32, 1}, // vpermps 1228 {TTI::SK_Reverse, MVT::v4i64, 1}, // vpermq 1229 {TTI::SK_Reverse, MVT::v8i32, 1}, // vpermd 1230 {TTI::SK_Reverse, MVT::v16i16, 2}, // vperm2i128 + pshufb 1231 {TTI::SK_Reverse, MVT::v32i8, 2}, // vperm2i128 + pshufb 1232 1233 {TTI::SK_Select, MVT::v16i16, 1}, // vpblendvb 1234 {TTI::SK_Select, MVT::v32i8, 1}, // vpblendvb 1235 1236 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1}, // vpermpd 1237 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1}, // vpermps 1238 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1}, // vpermq 1239 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1}, // vpermd 1240 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vperm2i128 + 2*vpshufb 1241 // + vpblendvb 1242 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4}, // vperm2i128 + 2*vpshufb 1243 // + vpblendvb 1244 1245 {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3}, // 2*vpermpd + vblendpd 1246 {TTI::SK_PermuteTwoSrc, MVT::v8f32, 3}, // 2*vpermps + vblendps 1247 {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3}, // 2*vpermq + vpblendd 1248 {TTI::SK_PermuteTwoSrc, MVT::v8i32, 3}, // 2*vpermd + vpblendd 1249 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 7}, // 2*vperm2i128 + 4*vpshufb 1250 // + vpblendvb 1251 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 7}, // 2*vperm2i128 + 4*vpshufb 1252 // + vpblendvb 1253 }; 1254 1255 if (ST->hasAVX2()) 1256 if (const auto *Entry = CostTableLookup(AVX2ShuffleTbl, Kind, LT.second)) 1257 return LT.first * Entry->Cost; 1258 1259 static const CostTblEntry XOPShuffleTbl[] = { 1260 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2}, // vperm2f128 + vpermil2pd 1261 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 2}, // vperm2f128 + vpermil2ps 1262 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2}, // vperm2f128 + vpermil2pd 1263 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 2}, // vperm2f128 + vpermil2ps 1264 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vextractf128 + 2*vpperm 1265 // + vinsertf128 1266 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4}, // vextractf128 + 2*vpperm 1267 // + vinsertf128 1268 1269 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 9}, // 2*vextractf128 + 6*vpperm 1270 // + vinsertf128 1271 {TTI::SK_PermuteTwoSrc, MVT::v8i16, 1}, // vpperm 1272 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 9}, // 2*vextractf128 + 6*vpperm 1273 // + vinsertf128 1274 {TTI::SK_PermuteTwoSrc, MVT::v16i8, 1}, // vpperm 1275 }; 1276 1277 if (ST->hasXOP()) 1278 if (const auto *Entry = CostTableLookup(XOPShuffleTbl, Kind, LT.second)) 1279 return LT.first * Entry->Cost; 1280 1281 static const CostTblEntry AVX1ShuffleTbl[] = { 1282 {TTI::SK_Broadcast, MVT::v4f64, 2}, // vperm2f128 + vpermilpd 1283 {TTI::SK_Broadcast, MVT::v8f32, 2}, // vperm2f128 + vpermilps 1284 {TTI::SK_Broadcast, MVT::v4i64, 2}, // vperm2f128 + vpermilpd 1285 {TTI::SK_Broadcast, MVT::v8i32, 2}, // vperm2f128 + vpermilps 1286 {TTI::SK_Broadcast, MVT::v16i16, 3}, // vpshuflw + vpshufd + vinsertf128 1287 {TTI::SK_Broadcast, MVT::v32i8, 2}, // vpshufb + vinsertf128 1288 1289 {TTI::SK_Reverse, MVT::v4f64, 2}, // vperm2f128 + vpermilpd 1290 {TTI::SK_Reverse, MVT::v8f32, 2}, // vperm2f128 + vpermilps 1291 {TTI::SK_Reverse, MVT::v4i64, 2}, // vperm2f128 + vpermilpd 1292 {TTI::SK_Reverse, MVT::v8i32, 2}, // vperm2f128 + vpermilps 1293 {TTI::SK_Reverse, MVT::v16i16, 4}, // vextractf128 + 2*pshufb 1294 // + vinsertf128 1295 {TTI::SK_Reverse, MVT::v32i8, 4}, // vextractf128 + 2*pshufb 1296 // + vinsertf128 1297 1298 {TTI::SK_Select, MVT::v4i64, 1}, // vblendpd 1299 {TTI::SK_Select, MVT::v4f64, 1}, // vblendpd 1300 {TTI::SK_Select, MVT::v8i32, 1}, // vblendps 1301 {TTI::SK_Select, MVT::v8f32, 1}, // vblendps 1302 {TTI::SK_Select, MVT::v16i16, 3}, // vpand + vpandn + vpor 1303 {TTI::SK_Select, MVT::v32i8, 3}, // vpand + vpandn + vpor 1304 1305 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2}, // vperm2f128 + vshufpd 1306 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2}, // vperm2f128 + vshufpd 1307 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 4}, // 2*vperm2f128 + 2*vshufps 1308 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 4}, // 2*vperm2f128 + 2*vshufps 1309 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 8}, // vextractf128 + 4*pshufb 1310 // + 2*por + vinsertf128 1311 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 8}, // vextractf128 + 4*pshufb 1312 // + 2*por + vinsertf128 1313 1314 {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3}, // 2*vperm2f128 + vshufpd 1315 {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3}, // 2*vperm2f128 + vshufpd 1316 {TTI::SK_PermuteTwoSrc, MVT::v8f32, 4}, // 2*vperm2f128 + 2*vshufps 1317 {TTI::SK_PermuteTwoSrc, MVT::v8i32, 4}, // 2*vperm2f128 + 2*vshufps 1318 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 15}, // 2*vextractf128 + 8*pshufb 1319 // + 4*por + vinsertf128 1320 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 15}, // 2*vextractf128 + 8*pshufb 1321 // + 4*por + vinsertf128 1322 }; 1323 1324 if (ST->hasAVX()) 1325 if (const auto *Entry = CostTableLookup(AVX1ShuffleTbl, Kind, LT.second)) 1326 return LT.first * Entry->Cost; 1327 1328 static const CostTblEntry SSE41ShuffleTbl[] = { 1329 {TTI::SK_Select, MVT::v2i64, 1}, // pblendw 1330 {TTI::SK_Select, MVT::v2f64, 1}, // movsd 1331 {TTI::SK_Select, MVT::v4i32, 1}, // pblendw 1332 {TTI::SK_Select, MVT::v4f32, 1}, // blendps 1333 {TTI::SK_Select, MVT::v8i16, 1}, // pblendw 1334 {TTI::SK_Select, MVT::v16i8, 1} // pblendvb 1335 }; 1336 1337 if (ST->hasSSE41()) 1338 if (const auto *Entry = CostTableLookup(SSE41ShuffleTbl, Kind, LT.second)) 1339 return LT.first * Entry->Cost; 1340 1341 static const CostTblEntry SSSE3ShuffleTbl[] = { 1342 {TTI::SK_Broadcast, MVT::v8i16, 1}, // pshufb 1343 {TTI::SK_Broadcast, MVT::v16i8, 1}, // pshufb 1344 1345 {TTI::SK_Reverse, MVT::v8i16, 1}, // pshufb 1346 {TTI::SK_Reverse, MVT::v16i8, 1}, // pshufb 1347 1348 {TTI::SK_Select, MVT::v8i16, 3}, // 2*pshufb + por 1349 {TTI::SK_Select, MVT::v16i8, 3}, // 2*pshufb + por 1350 1351 {TTI::SK_PermuteSingleSrc, MVT::v8i16, 1}, // pshufb 1352 {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1}, // pshufb 1353 1354 {TTI::SK_PermuteTwoSrc, MVT::v8i16, 3}, // 2*pshufb + por 1355 {TTI::SK_PermuteTwoSrc, MVT::v16i8, 3}, // 2*pshufb + por 1356 }; 1357 1358 if (ST->hasSSSE3()) 1359 if (const auto *Entry = CostTableLookup(SSSE3ShuffleTbl, Kind, LT.second)) 1360 return LT.first * Entry->Cost; 1361 1362 static const CostTblEntry SSE2ShuffleTbl[] = { 1363 {TTI::SK_Broadcast, MVT::v2f64, 1}, // shufpd 1364 {TTI::SK_Broadcast, MVT::v2i64, 1}, // pshufd 1365 {TTI::SK_Broadcast, MVT::v4i32, 1}, // pshufd 1366 {TTI::SK_Broadcast, MVT::v8i16, 2}, // pshuflw + pshufd 1367 {TTI::SK_Broadcast, MVT::v16i8, 3}, // unpck + pshuflw + pshufd 1368 1369 {TTI::SK_Reverse, MVT::v2f64, 1}, // shufpd 1370 {TTI::SK_Reverse, MVT::v2i64, 1}, // pshufd 1371 {TTI::SK_Reverse, MVT::v4i32, 1}, // pshufd 1372 {TTI::SK_Reverse, MVT::v8i16, 3}, // pshuflw + pshufhw + pshufd 1373 {TTI::SK_Reverse, MVT::v16i8, 9}, // 2*pshuflw + 2*pshufhw 1374 // + 2*pshufd + 2*unpck + packus 1375 1376 {TTI::SK_Select, MVT::v2i64, 1}, // movsd 1377 {TTI::SK_Select, MVT::v2f64, 1}, // movsd 1378 {TTI::SK_Select, MVT::v4i32, 2}, // 2*shufps 1379 {TTI::SK_Select, MVT::v8i16, 3}, // pand + pandn + por 1380 {TTI::SK_Select, MVT::v16i8, 3}, // pand + pandn + por 1381 1382 {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1}, // shufpd 1383 {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1}, // pshufd 1384 {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1}, // pshufd 1385 {TTI::SK_PermuteSingleSrc, MVT::v8i16, 5}, // 2*pshuflw + 2*pshufhw 1386 // + pshufd/unpck 1387 { TTI::SK_PermuteSingleSrc, MVT::v16i8, 10 }, // 2*pshuflw + 2*pshufhw 1388 // + 2*pshufd + 2*unpck + 2*packus 1389 1390 { TTI::SK_PermuteTwoSrc, MVT::v2f64, 1 }, // shufpd 1391 { TTI::SK_PermuteTwoSrc, MVT::v2i64, 1 }, // shufpd 1392 { TTI::SK_PermuteTwoSrc, MVT::v4i32, 2 }, // 2*{unpck,movsd,pshufd} 1393 { TTI::SK_PermuteTwoSrc, MVT::v8i16, 8 }, // blend+permute 1394 { TTI::SK_PermuteTwoSrc, MVT::v16i8, 13 }, // blend+permute 1395 }; 1396 1397 if (ST->hasSSE2()) 1398 if (const auto *Entry = CostTableLookup(SSE2ShuffleTbl, Kind, LT.second)) 1399 return LT.first * Entry->Cost; 1400 1401 static const CostTblEntry SSE1ShuffleTbl[] = { 1402 { TTI::SK_Broadcast, MVT::v4f32, 1 }, // shufps 1403 { TTI::SK_Reverse, MVT::v4f32, 1 }, // shufps 1404 { TTI::SK_Select, MVT::v4f32, 2 }, // 2*shufps 1405 { TTI::SK_PermuteSingleSrc, MVT::v4f32, 1 }, // shufps 1406 { TTI::SK_PermuteTwoSrc, MVT::v4f32, 2 }, // 2*shufps 1407 }; 1408 1409 if (ST->hasSSE1()) 1410 if (const auto *Entry = CostTableLookup(SSE1ShuffleTbl, Kind, LT.second)) 1411 return LT.first * Entry->Cost; 1412 1413 return BaseT::getShuffleCost(Kind, BaseTp, Mask, Index, SubTp); 1414 } 1415 1416 InstructionCost X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, 1417 Type *Src, 1418 TTI::CastContextHint CCH, 1419 TTI::TargetCostKind CostKind, 1420 const Instruction *I) { 1421 int ISD = TLI->InstructionOpcodeToISD(Opcode); 1422 assert(ISD && "Invalid opcode"); 1423 1424 // TODO: Allow non-throughput costs that aren't binary. 1425 auto AdjustCost = [&CostKind](InstructionCost Cost) -> InstructionCost { 1426 if (CostKind != TTI::TCK_RecipThroughput) 1427 return Cost == 0 ? 0 : 1; 1428 return Cost; 1429 }; 1430 1431 // FIXME: Need a better design of the cost table to handle non-simple types of 1432 // potential massive combinations (elem_num x src_type x dst_type). 1433 1434 static const TypeConversionCostTblEntry AVX512BWConversionTbl[] { 1435 { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 1 }, 1436 { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 1 }, 1437 1438 // Mask sign extend has an instruction. 1439 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 1 }, 1440 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 1 }, 1441 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 1 }, 1442 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 1 }, 1443 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 1 }, 1444 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 1 }, 1445 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 1 }, 1446 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 }, 1447 { ISD::SIGN_EXTEND, MVT::v32i8, MVT::v32i1, 1 }, 1448 { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i1, 1 }, 1449 { ISD::SIGN_EXTEND, MVT::v64i8, MVT::v64i1, 1 }, 1450 1451 // Mask zero extend is a sext + shift. 1452 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 2 }, 1453 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 2 }, 1454 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 2 }, 1455 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 2 }, 1456 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 2 }, 1457 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 2 }, 1458 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 2 }, 1459 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 }, 1460 { ISD::ZERO_EXTEND, MVT::v32i8, MVT::v32i1, 2 }, 1461 { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i1, 2 }, 1462 { ISD::ZERO_EXTEND, MVT::v64i8, MVT::v64i1, 2 }, 1463 1464 { ISD::TRUNCATE, MVT::v32i8, MVT::v32i16, 2 }, 1465 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 }, // widen to zmm 1466 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 2 }, // widen to zmm 1467 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 }, // widen to zmm 1468 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 }, // widen to zmm 1469 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 2 }, // widen to zmm 1470 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 2 }, // widen to zmm 1471 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 2 }, // widen to zmm 1472 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 2 }, // widen to zmm 1473 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 2 }, // widen to zmm 1474 { ISD::TRUNCATE, MVT::v32i1, MVT::v32i8, 2 }, // widen to zmm 1475 { ISD::TRUNCATE, MVT::v32i1, MVT::v32i16, 2 }, 1476 { ISD::TRUNCATE, MVT::v64i1, MVT::v64i8, 2 }, 1477 }; 1478 1479 static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = { 1480 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 }, 1481 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 }, 1482 1483 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 }, 1484 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 }, 1485 1486 { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f32, 1 }, 1487 { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f64, 1 }, 1488 1489 { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f32, 1 }, 1490 { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f64, 1 }, 1491 }; 1492 1493 // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and 1494 // 256-bit wide vectors. 1495 1496 static const TypeConversionCostTblEntry AVX512FConversionTbl[] = { 1497 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 1 }, 1498 { ISD::FP_EXTEND, MVT::v8f64, MVT::v16f32, 3 }, 1499 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 1 }, 1500 1501 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // sext+vpslld+vptestmd 1502 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 3 }, // sext+vpslld+vptestmd 1503 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 3 }, // sext+vpslld+vptestmd 1504 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 3 }, // sext+vpslld+vptestmd 1505 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 3 }, // sext+vpsllq+vptestmq 1506 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 3 }, // sext+vpsllq+vptestmq 1507 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 3 }, // sext+vpsllq+vptestmq 1508 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 3 }, // sext+vpslld+vptestmd 1509 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 2 }, // zmm vpslld+vptestmd 1510 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 }, // zmm vpslld+vptestmd 1511 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 }, // zmm vpslld+vptestmd 1512 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i32, 2 }, // vpslld+vptestmd 1513 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 }, // zmm vpsllq+vptestmq 1514 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 }, // zmm vpsllq+vptestmq 1515 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i64, 2 }, // vpsllq+vptestmq 1516 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 2 }, 1517 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 2 }, 1518 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i64, 2 }, 1519 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i64, 2 }, 1520 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 1 }, 1521 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 }, // zmm vpmovqd 1522 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i64, 5 },// 2*vpmovqd+concat+vpmovdb 1523 1524 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 }, // extend to v16i32 1525 { ISD::TRUNCATE, MVT::v32i8, MVT::v32i16, 8 }, 1526 1527 // Sign extend is zmm vpternlogd+vptruncdb. 1528 // Zero extend is zmm broadcast load+vptruncdw. 1529 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 3 }, 1530 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 4 }, 1531 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 3 }, 1532 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 4 }, 1533 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 3 }, 1534 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 4 }, 1535 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 3 }, 1536 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 4 }, 1537 1538 // Sign extend is zmm vpternlogd+vptruncdw. 1539 // Zero extend is zmm vpternlogd+vptruncdw+vpsrlw. 1540 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 3 }, 1541 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 4 }, 1542 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 3 }, 1543 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 4 }, 1544 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 3 }, 1545 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 4 }, 1546 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 3 }, 1547 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 4 }, 1548 1549 { ISD::SIGN_EXTEND, MVT::v2i32, MVT::v2i1, 1 }, // zmm vpternlogd 1550 { ISD::ZERO_EXTEND, MVT::v2i32, MVT::v2i1, 2 }, // zmm vpternlogd+psrld 1551 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 }, // zmm vpternlogd 1552 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 }, // zmm vpternlogd+psrld 1553 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 }, // zmm vpternlogd 1554 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 }, // zmm vpternlogd+psrld 1555 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 }, // zmm vpternlogq 1556 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 }, // zmm vpternlogq+psrlq 1557 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 }, // zmm vpternlogq 1558 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 }, // zmm vpternlogq+psrlq 1559 1560 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1, 1 }, // vpternlogd 1561 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1, 2 }, // vpternlogd+psrld 1562 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i1, 1 }, // vpternlogq 1563 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i1, 2 }, // vpternlogq+psrlq 1564 1565 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 1 }, 1566 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 1 }, 1567 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 }, 1568 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 }, 1569 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 1 }, 1570 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 1 }, 1571 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 1 }, 1572 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 1 }, 1573 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i32, 1 }, 1574 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i32, 1 }, 1575 1576 { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 3 }, // FIXME: May not be right 1577 { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 3 }, // FIXME: May not be right 1578 1579 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 }, 1580 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 }, 1581 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i8, 2 }, 1582 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 2 }, 1583 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 }, 1584 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 }, 1585 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 }, 1586 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 }, 1587 1588 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 }, 1589 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 }, 1590 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i8, 2 }, 1591 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 2 }, 1592 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 }, 1593 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 }, 1594 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 }, 1595 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 }, 1596 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 26 }, 1597 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 5 }, 1598 1599 { ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f64, 3 }, 1600 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f64, 3 }, 1601 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f32, 3 }, 1602 { ISD::FP_TO_SINT, MVT::v16i16, MVT::v16f32, 3 }, 1603 1604 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f64, 1 }, 1605 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f64, 3 }, 1606 { ISD::FP_TO_UINT, MVT::v8i8, MVT::v8f64, 3 }, 1607 { ISD::FP_TO_UINT, MVT::v16i32, MVT::v16f32, 1 }, 1608 { ISD::FP_TO_UINT, MVT::v16i16, MVT::v16f32, 3 }, 1609 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v16f32, 3 }, 1610 }; 1611 1612 static const TypeConversionCostTblEntry AVX512BWVLConversionTbl[] { 1613 // Mask sign extend has an instruction. 1614 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 1 }, 1615 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 1 }, 1616 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 1 }, 1617 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 1 }, 1618 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 1 }, 1619 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 1 }, 1620 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 1 }, 1621 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 }, 1622 { ISD::SIGN_EXTEND, MVT::v32i8, MVT::v32i1, 1 }, 1623 1624 // Mask zero extend is a sext + shift. 1625 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 2 }, 1626 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 2 }, 1627 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 2 }, 1628 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 2 }, 1629 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 2 }, 1630 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 2 }, 1631 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 2 }, 1632 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 }, 1633 { ISD::ZERO_EXTEND, MVT::v32i8, MVT::v32i1, 2 }, 1634 1635 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 }, 1636 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 2 }, // vpsllw+vptestmb 1637 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 }, // vpsllw+vptestmw 1638 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 }, // vpsllw+vptestmb 1639 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 2 }, // vpsllw+vptestmw 1640 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 2 }, // vpsllw+vptestmb 1641 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 2 }, // vpsllw+vptestmw 1642 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 2 }, // vpsllw+vptestmb 1643 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 2 }, // vpsllw+vptestmw 1644 { ISD::TRUNCATE, MVT::v32i1, MVT::v32i8, 2 }, // vpsllw+vptestmb 1645 }; 1646 1647 static const TypeConversionCostTblEntry AVX512DQVLConversionTbl[] = { 1648 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 }, 1649 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 }, 1650 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 }, 1651 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 }, 1652 1653 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 }, 1654 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 }, 1655 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 }, 1656 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 }, 1657 1658 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 1 }, 1659 { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f32, 1 }, 1660 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 }, 1661 { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f64, 1 }, 1662 1663 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 1 }, 1664 { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f32, 1 }, 1665 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 }, 1666 { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f64, 1 }, 1667 }; 1668 1669 static const TypeConversionCostTblEntry AVX512VLConversionTbl[] = { 1670 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // sext+vpslld+vptestmd 1671 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 3 }, // sext+vpslld+vptestmd 1672 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 3 }, // sext+vpslld+vptestmd 1673 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 8 }, // split+2*v8i8 1674 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 3 }, // sext+vpsllq+vptestmq 1675 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 3 }, // sext+vpsllq+vptestmq 1676 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 3 }, // sext+vpsllq+vptestmq 1677 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 8 }, // split+2*v8i16 1678 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 2 }, // vpslld+vptestmd 1679 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 }, // vpslld+vptestmd 1680 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 }, // vpslld+vptestmd 1681 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 }, // vpsllq+vptestmq 1682 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 }, // vpsllq+vptestmq 1683 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 }, // vpmovqd 1684 1685 // sign extend is vpcmpeq+maskedmove+vpmovdw+vpacksswb 1686 // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw+vpackuswb 1687 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 5 }, 1688 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 6 }, 1689 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 5 }, 1690 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 6 }, 1691 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 5 }, 1692 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 6 }, 1693 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 10 }, 1694 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 12 }, 1695 1696 // sign extend is vpcmpeq+maskedmove+vpmovdw 1697 // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw 1698 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 4 }, 1699 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 5 }, 1700 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 4 }, 1701 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 5 }, 1702 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 4 }, 1703 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 5 }, 1704 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 10 }, 1705 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 12 }, 1706 1707 { ISD::SIGN_EXTEND, MVT::v2i32, MVT::v2i1, 1 }, // vpternlogd 1708 { ISD::ZERO_EXTEND, MVT::v2i32, MVT::v2i1, 2 }, // vpternlogd+psrld 1709 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 }, // vpternlogd 1710 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 }, // vpternlogd+psrld 1711 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 }, // vpternlogd 1712 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 }, // vpternlogd+psrld 1713 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 }, // vpternlogq 1714 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 }, // vpternlogq+psrlq 1715 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 }, // vpternlogq 1716 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 }, // vpternlogq+psrlq 1717 1718 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 2 }, 1719 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 }, 1720 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 2 }, 1721 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 5 }, 1722 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 }, 1723 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 }, 1724 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 2 }, 1725 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 1 }, 1726 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 }, 1727 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 }, 1728 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 }, 1729 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 5 }, 1730 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 }, 1731 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 5 }, 1732 1733 { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 1 }, 1734 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 1 }, 1735 1736 { ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f32, 3 }, 1737 { ISD::FP_TO_UINT, MVT::v8i8, MVT::v8f32, 3 }, 1738 1739 { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 1 }, 1740 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 1 }, 1741 1742 { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 }, 1743 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 }, 1744 { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 1 }, 1745 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 1 }, 1746 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 1 }, 1747 }; 1748 1749 static const TypeConversionCostTblEntry AVX2ConversionTbl[] = { 1750 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 3 }, 1751 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 3 }, 1752 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 3 }, 1753 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 3 }, 1754 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 1 }, 1755 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 1 }, 1756 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 1 }, 1757 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 1 }, 1758 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 }, 1759 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 1 }, 1760 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 }, 1761 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 }, 1762 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 1 }, 1763 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 1 }, 1764 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 }, 1765 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 }, 1766 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 1 }, 1767 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1 }, 1768 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 3 }, 1769 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 3 }, 1770 1771 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 2 }, 1772 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 }, 1773 1774 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 2 }, 1775 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 2 }, 1776 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 2 }, 1777 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 2 }, 1778 1779 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 3 }, 1780 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 3 }, 1781 1782 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 8 }, 1783 }; 1784 1785 static const TypeConversionCostTblEntry AVXConversionTbl[] = { 1786 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 6 }, 1787 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 4 }, 1788 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 7 }, 1789 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 4 }, 1790 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 4 }, 1791 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 4 }, 1792 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 4 }, 1793 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 4 }, 1794 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 4 }, 1795 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 4 }, 1796 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 }, 1797 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 4 }, 1798 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 4 }, 1799 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, 1800 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 }, 1801 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 4 }, 1802 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 4 }, 1803 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 4 }, 1804 1805 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 4 }, 1806 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 5 }, 1807 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 4 }, 1808 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i64, 9 }, 1809 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i64, 11 }, 1810 1811 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 4 }, 1812 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 }, 1813 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 }, 1814 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 4 }, 1815 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 4 }, 1816 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 2 }, 1817 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i64, 11 }, 1818 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i64, 9 }, 1819 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 3 }, 1820 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i64, 11 }, 1821 1822 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 }, 1823 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i1, 3 }, 1824 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i1, 8 }, 1825 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 }, 1826 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i8, 3 }, 1827 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 8 }, 1828 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 3 }, 1829 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i16, 3 }, 1830 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 }, 1831 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 }, 1832 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 }, 1833 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 }, 1834 1835 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 7 }, 1836 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i1, 7 }, 1837 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i1, 6 }, 1838 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 2 }, 1839 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 }, 1840 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 5 }, 1841 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 }, 1842 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 }, 1843 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 }, 1844 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 6 }, 1845 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 6 }, 1846 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 6 }, 1847 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 9 }, 1848 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 }, 1849 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 6 }, 1850 // The generic code to compute the scalar overhead is currently broken. 1851 // Workaround this limitation by estimating the scalarization overhead 1852 // here. We have roughly 10 instructions per scalar element. 1853 // Multiply that by the vector width. 1854 // FIXME: remove that when PR19268 is fixed. 1855 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 13 }, 1856 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 13 }, 1857 1858 { ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f32, 4 }, 1859 { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f64, 3 }, 1860 { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f64, 2 }, 1861 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f32, 3 }, 1862 1863 { ISD::FP_TO_UINT, MVT::v4i8, MVT::v4f64, 3 }, 1864 { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f64, 2 }, 1865 { ISD::FP_TO_UINT, MVT::v8i8, MVT::v8f32, 4 }, 1866 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f32, 3 }, 1867 // This node is expanded into scalarized operations but BasicTTI is overly 1868 // optimistic estimating its cost. It computes 3 per element (one 1869 // vector-extract, one scalar conversion and one vector-insert). The 1870 // problem is that the inserts form a read-modify-write chain so latency 1871 // should be factored in too. Inflating the cost per element by 1. 1872 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 8*4 }, 1873 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 4*4 }, 1874 1875 { ISD::FP_EXTEND, MVT::v4f64, MVT::v4f32, 1 }, 1876 { ISD::FP_ROUND, MVT::v4f32, MVT::v4f64, 1 }, 1877 }; 1878 1879 static const TypeConversionCostTblEntry SSE41ConversionTbl[] = { 1880 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 2 }, 1881 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 2 }, 1882 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 2 }, 1883 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 2 }, 1884 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 2 }, 1885 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 2 }, 1886 1887 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i8, 1 }, 1888 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i8, 2 }, 1889 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 1 }, 1890 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 1 }, 1891 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 }, 1892 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 }, 1893 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 2 }, 1894 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 2 }, 1895 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 }, 1896 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 }, 1897 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 4 }, 1898 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 4 }, 1899 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 }, 1900 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 }, 1901 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 2 }, 1902 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 2 }, 1903 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 4 }, 1904 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 4 }, 1905 1906 // These truncates end up widening elements. 1907 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 1 }, // PMOVXZBQ 1908 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 1 }, // PMOVXZWQ 1909 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 1 }, // PMOVXZBD 1910 1911 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i16, 1 }, 1912 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 1 }, 1913 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 1 }, 1914 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 1 }, 1915 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 1 }, 1916 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 3 }, 1917 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 3 }, 1918 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 6 }, 1919 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i64, 1 }, // PSHUFB 1920 1921 { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 4 }, 1922 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 4 }, 1923 1924 { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f32, 3 }, 1925 { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f64, 3 }, 1926 1927 { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f32, 3 }, 1928 { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f64, 3 }, 1929 { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 }, 1930 }; 1931 1932 static const TypeConversionCostTblEntry SSE2ConversionTbl[] = { 1933 // These are somewhat magic numbers justified by looking at the output of 1934 // Intel's IACA, running some kernels and making sure when we take 1935 // legalization into account the throughput will be overestimated. 1936 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 }, 1937 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 }, 1938 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 }, 1939 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 }, 1940 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 }, 1941 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 2*10 }, 1942 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2*10 }, 1943 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 }, 1944 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 }, 1945 1946 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 }, 1947 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 }, 1948 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 }, 1949 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 }, 1950 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 }, 1951 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 8 }, 1952 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 6 }, 1953 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 }, 1954 1955 { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f32, 4 }, 1956 { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 2 }, 1957 { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 3 }, 1958 { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 }, 1959 { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f64, 2 }, 1960 { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f64, 4 }, 1961 1962 { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 1 }, 1963 1964 { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 6 }, 1965 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 6 }, 1966 1967 { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 4 }, 1968 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 4 }, 1969 { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f32, 4 }, 1970 { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f64, 4 }, 1971 { ISD::FP_TO_UINT, MVT::v4i8, MVT::v4f32, 3 }, 1972 { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 2 }, 1973 { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f64, 2 }, 1974 { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 4 }, 1975 1976 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i8, 1 }, 1977 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i8, 6 }, 1978 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 }, 1979 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 3 }, 1980 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 4 }, 1981 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 8 }, 1982 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 }, 1983 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 2 }, 1984 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 6 }, 1985 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 6 }, 1986 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 3 }, 1987 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 }, 1988 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 9 }, 1989 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 12 }, 1990 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 }, 1991 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 2 }, 1992 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, 1993 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 10 }, 1994 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 3 }, 1995 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 }, 1996 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 6 }, 1997 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 8 }, 1998 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 3 }, 1999 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 5 }, 2000 2001 // These truncates are really widening elements. 2002 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 1 }, // PSHUFD 2003 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 }, // PUNPCKLWD+DQ 2004 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // PUNPCKLBW+WD+PSHUFD 2005 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 1 }, // PUNPCKLWD 2006 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 }, // PUNPCKLBW+WD 2007 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 1 }, // PUNPCKLBW 2008 2009 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i16, 2 }, // PAND+PACKUSWB 2010 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 2 }, // PAND+PACKUSWB 2011 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 2 }, // PAND+PACKUSWB 2012 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 }, 2013 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i32, 3 }, // PAND+2*PACKUSWB 2014 { ISD::TRUNCATE, MVT::v2i16, MVT::v2i32, 1 }, 2015 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 3 }, 2016 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 3 }, 2017 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 }, 2018 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 7 }, 2019 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 }, 2020 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 10 }, 2021 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i64, 4 }, // PAND+3*PACKUSWB 2022 { ISD::TRUNCATE, MVT::v2i16, MVT::v2i64, 2 }, // PSHUFD+PSHUFLW 2023 { ISD::TRUNCATE, MVT::v2i32, MVT::v2i64, 1 }, // PSHUFD 2024 }; 2025 2026 std::pair<int, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src); 2027 std::pair<int, MVT> LTDest = TLI->getTypeLegalizationCost(DL, Dst); 2028 2029 if (ST->hasSSE2() && !ST->hasAVX()) { 2030 if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD, 2031 LTDest.second, LTSrc.second)) 2032 return AdjustCost(LTSrc.first * Entry->Cost); 2033 } 2034 2035 EVT SrcTy = TLI->getValueType(DL, Src); 2036 EVT DstTy = TLI->getValueType(DL, Dst); 2037 2038 // The function getSimpleVT only handles simple value types. 2039 if (!SrcTy.isSimple() || !DstTy.isSimple()) 2040 return AdjustCost(BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind)); 2041 2042 MVT SimpleSrcTy = SrcTy.getSimpleVT(); 2043 MVT SimpleDstTy = DstTy.getSimpleVT(); 2044 2045 if (ST->useAVX512Regs()) { 2046 if (ST->hasBWI()) 2047 if (const auto *Entry = ConvertCostTableLookup(AVX512BWConversionTbl, ISD, 2048 SimpleDstTy, SimpleSrcTy)) 2049 return AdjustCost(Entry->Cost); 2050 2051 if (ST->hasDQI()) 2052 if (const auto *Entry = ConvertCostTableLookup(AVX512DQConversionTbl, ISD, 2053 SimpleDstTy, SimpleSrcTy)) 2054 return AdjustCost(Entry->Cost); 2055 2056 if (ST->hasAVX512()) 2057 if (const auto *Entry = ConvertCostTableLookup(AVX512FConversionTbl, ISD, 2058 SimpleDstTy, SimpleSrcTy)) 2059 return AdjustCost(Entry->Cost); 2060 } 2061 2062 if (ST->hasBWI()) 2063 if (const auto *Entry = ConvertCostTableLookup(AVX512BWVLConversionTbl, ISD, 2064 SimpleDstTy, SimpleSrcTy)) 2065 return AdjustCost(Entry->Cost); 2066 2067 if (ST->hasDQI()) 2068 if (const auto *Entry = ConvertCostTableLookup(AVX512DQVLConversionTbl, ISD, 2069 SimpleDstTy, SimpleSrcTy)) 2070 return AdjustCost(Entry->Cost); 2071 2072 if (ST->hasAVX512()) 2073 if (const auto *Entry = ConvertCostTableLookup(AVX512VLConversionTbl, ISD, 2074 SimpleDstTy, SimpleSrcTy)) 2075 return AdjustCost(Entry->Cost); 2076 2077 if (ST->hasAVX2()) { 2078 if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD, 2079 SimpleDstTy, SimpleSrcTy)) 2080 return AdjustCost(Entry->Cost); 2081 } 2082 2083 if (ST->hasAVX()) { 2084 if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD, 2085 SimpleDstTy, SimpleSrcTy)) 2086 return AdjustCost(Entry->Cost); 2087 } 2088 2089 if (ST->hasSSE41()) { 2090 if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD, 2091 SimpleDstTy, SimpleSrcTy)) 2092 return AdjustCost(Entry->Cost); 2093 } 2094 2095 if (ST->hasSSE2()) { 2096 if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD, 2097 SimpleDstTy, SimpleSrcTy)) 2098 return AdjustCost(Entry->Cost); 2099 } 2100 2101 return AdjustCost( 2102 BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I)); 2103 } 2104 2105 InstructionCost X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, 2106 Type *CondTy, 2107 CmpInst::Predicate VecPred, 2108 TTI::TargetCostKind CostKind, 2109 const Instruction *I) { 2110 // TODO: Handle other cost kinds. 2111 if (CostKind != TTI::TCK_RecipThroughput) 2112 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, 2113 I); 2114 2115 // Legalize the type. 2116 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy); 2117 2118 MVT MTy = LT.second; 2119 2120 int ISD = TLI->InstructionOpcodeToISD(Opcode); 2121 assert(ISD && "Invalid opcode"); 2122 2123 unsigned ExtraCost = 0; 2124 if (I && (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)) { 2125 // Some vector comparison predicates cost extra instructions. 2126 if (MTy.isVector() && 2127 !((ST->hasXOP() && (!ST->hasAVX2() || MTy.is128BitVector())) || 2128 (ST->hasAVX512() && 32 <= MTy.getScalarSizeInBits()) || 2129 ST->hasBWI())) { 2130 switch (cast<CmpInst>(I)->getPredicate()) { 2131 case CmpInst::Predicate::ICMP_NE: 2132 // xor(cmpeq(x,y),-1) 2133 ExtraCost = 1; 2134 break; 2135 case CmpInst::Predicate::ICMP_SGE: 2136 case CmpInst::Predicate::ICMP_SLE: 2137 // xor(cmpgt(x,y),-1) 2138 ExtraCost = 1; 2139 break; 2140 case CmpInst::Predicate::ICMP_ULT: 2141 case CmpInst::Predicate::ICMP_UGT: 2142 // cmpgt(xor(x,signbit),xor(y,signbit)) 2143 // xor(cmpeq(pmaxu(x,y),x),-1) 2144 ExtraCost = 2; 2145 break; 2146 case CmpInst::Predicate::ICMP_ULE: 2147 case CmpInst::Predicate::ICMP_UGE: 2148 if ((ST->hasSSE41() && MTy.getScalarSizeInBits() == 32) || 2149 (ST->hasSSE2() && MTy.getScalarSizeInBits() < 32)) { 2150 // cmpeq(psubus(x,y),0) 2151 // cmpeq(pminu(x,y),x) 2152 ExtraCost = 1; 2153 } else { 2154 // xor(cmpgt(xor(x,signbit),xor(y,signbit)),-1) 2155 ExtraCost = 3; 2156 } 2157 break; 2158 default: 2159 break; 2160 } 2161 } 2162 } 2163 2164 static const CostTblEntry SLMCostTbl[] = { 2165 // slm pcmpeq/pcmpgt throughput is 2 2166 { ISD::SETCC, MVT::v2i64, 2 }, 2167 }; 2168 2169 static const CostTblEntry AVX512BWCostTbl[] = { 2170 { ISD::SETCC, MVT::v32i16, 1 }, 2171 { ISD::SETCC, MVT::v64i8, 1 }, 2172 2173 { ISD::SELECT, MVT::v32i16, 1 }, 2174 { ISD::SELECT, MVT::v64i8, 1 }, 2175 }; 2176 2177 static const CostTblEntry AVX512CostTbl[] = { 2178 { ISD::SETCC, MVT::v8i64, 1 }, 2179 { ISD::SETCC, MVT::v16i32, 1 }, 2180 { ISD::SETCC, MVT::v8f64, 1 }, 2181 { ISD::SETCC, MVT::v16f32, 1 }, 2182 2183 { ISD::SELECT, MVT::v8i64, 1 }, 2184 { ISD::SELECT, MVT::v16i32, 1 }, 2185 { ISD::SELECT, MVT::v8f64, 1 }, 2186 { ISD::SELECT, MVT::v16f32, 1 }, 2187 2188 { ISD::SETCC, MVT::v32i16, 2 }, // FIXME: should probably be 4 2189 { ISD::SETCC, MVT::v64i8, 2 }, // FIXME: should probably be 4 2190 2191 { ISD::SELECT, MVT::v32i16, 2 }, // FIXME: should be 3 2192 { ISD::SELECT, MVT::v64i8, 2 }, // FIXME: should be 3 2193 }; 2194 2195 static const CostTblEntry AVX2CostTbl[] = { 2196 { ISD::SETCC, MVT::v4i64, 1 }, 2197 { ISD::SETCC, MVT::v8i32, 1 }, 2198 { ISD::SETCC, MVT::v16i16, 1 }, 2199 { ISD::SETCC, MVT::v32i8, 1 }, 2200 2201 { ISD::SELECT, MVT::v4i64, 1 }, // pblendvb 2202 { ISD::SELECT, MVT::v8i32, 1 }, // pblendvb 2203 { ISD::SELECT, MVT::v16i16, 1 }, // pblendvb 2204 { ISD::SELECT, MVT::v32i8, 1 }, // pblendvb 2205 }; 2206 2207 static const CostTblEntry AVX1CostTbl[] = { 2208 { ISD::SETCC, MVT::v4f64, 1 }, 2209 { ISD::SETCC, MVT::v8f32, 1 }, 2210 // AVX1 does not support 8-wide integer compare. 2211 { ISD::SETCC, MVT::v4i64, 4 }, 2212 { ISD::SETCC, MVT::v8i32, 4 }, 2213 { ISD::SETCC, MVT::v16i16, 4 }, 2214 { ISD::SETCC, MVT::v32i8, 4 }, 2215 2216 { ISD::SELECT, MVT::v4f64, 1 }, // vblendvpd 2217 { ISD::SELECT, MVT::v8f32, 1 }, // vblendvps 2218 { ISD::SELECT, MVT::v4i64, 1 }, // vblendvpd 2219 { ISD::SELECT, MVT::v8i32, 1 }, // vblendvps 2220 { ISD::SELECT, MVT::v16i16, 3 }, // vandps + vandnps + vorps 2221 { ISD::SELECT, MVT::v32i8, 3 }, // vandps + vandnps + vorps 2222 }; 2223 2224 static const CostTblEntry SSE42CostTbl[] = { 2225 { ISD::SETCC, MVT::v2f64, 1 }, 2226 { ISD::SETCC, MVT::v4f32, 1 }, 2227 { ISD::SETCC, MVT::v2i64, 1 }, 2228 }; 2229 2230 static const CostTblEntry SSE41CostTbl[] = { 2231 { ISD::SELECT, MVT::v2f64, 1 }, // blendvpd 2232 { ISD::SELECT, MVT::v4f32, 1 }, // blendvps 2233 { ISD::SELECT, MVT::v2i64, 1 }, // pblendvb 2234 { ISD::SELECT, MVT::v4i32, 1 }, // pblendvb 2235 { ISD::SELECT, MVT::v8i16, 1 }, // pblendvb 2236 { ISD::SELECT, MVT::v16i8, 1 }, // pblendvb 2237 }; 2238 2239 static const CostTblEntry SSE2CostTbl[] = { 2240 { ISD::SETCC, MVT::v2f64, 2 }, 2241 { ISD::SETCC, MVT::f64, 1 }, 2242 { ISD::SETCC, MVT::v2i64, 8 }, 2243 { ISD::SETCC, MVT::v4i32, 1 }, 2244 { ISD::SETCC, MVT::v8i16, 1 }, 2245 { ISD::SETCC, MVT::v16i8, 1 }, 2246 2247 { ISD::SELECT, MVT::v2f64, 3 }, // andpd + andnpd + orpd 2248 { ISD::SELECT, MVT::v2i64, 3 }, // pand + pandn + por 2249 { ISD::SELECT, MVT::v4i32, 3 }, // pand + pandn + por 2250 { ISD::SELECT, MVT::v8i16, 3 }, // pand + pandn + por 2251 { ISD::SELECT, MVT::v16i8, 3 }, // pand + pandn + por 2252 }; 2253 2254 static const CostTblEntry SSE1CostTbl[] = { 2255 { ISD::SETCC, MVT::v4f32, 2 }, 2256 { ISD::SETCC, MVT::f32, 1 }, 2257 2258 { ISD::SELECT, MVT::v4f32, 3 }, // andps + andnps + orps 2259 }; 2260 2261 if (ST->isSLM()) 2262 if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy)) 2263 return LT.first * (ExtraCost + Entry->Cost); 2264 2265 if (ST->hasBWI()) 2266 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy)) 2267 return LT.first * (ExtraCost + Entry->Cost); 2268 2269 if (ST->hasAVX512()) 2270 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy)) 2271 return LT.first * (ExtraCost + Entry->Cost); 2272 2273 if (ST->hasAVX2()) 2274 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy)) 2275 return LT.first * (ExtraCost + Entry->Cost); 2276 2277 if (ST->hasAVX()) 2278 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy)) 2279 return LT.first * (ExtraCost + Entry->Cost); 2280 2281 if (ST->hasSSE42()) 2282 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy)) 2283 return LT.first * (ExtraCost + Entry->Cost); 2284 2285 if (ST->hasSSE41()) 2286 if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy)) 2287 return LT.first * (ExtraCost + Entry->Cost); 2288 2289 if (ST->hasSSE2()) 2290 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy)) 2291 return LT.first * (ExtraCost + Entry->Cost); 2292 2293 if (ST->hasSSE1()) 2294 if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy)) 2295 return LT.first * (ExtraCost + Entry->Cost); 2296 2297 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I); 2298 } 2299 2300 unsigned X86TTIImpl::getAtomicMemIntrinsicMaxElementSize() const { return 16; } 2301 2302 InstructionCost 2303 X86TTIImpl::getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, 2304 TTI::TargetCostKind CostKind) { 2305 2306 // Costs should match the codegen from: 2307 // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll 2308 // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll 2309 // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll 2310 // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll 2311 // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll 2312 2313 // TODO: Overflow intrinsics (*ADDO, *SUBO, *MULO) with vector types are not 2314 // specialized in these tables yet. 2315 static const CostTblEntry AVX512CDCostTbl[] = { 2316 { ISD::CTLZ, MVT::v8i64, 1 }, 2317 { ISD::CTLZ, MVT::v16i32, 1 }, 2318 { ISD::CTLZ, MVT::v32i16, 8 }, 2319 { ISD::CTLZ, MVT::v64i8, 20 }, 2320 { ISD::CTLZ, MVT::v4i64, 1 }, 2321 { ISD::CTLZ, MVT::v8i32, 1 }, 2322 { ISD::CTLZ, MVT::v16i16, 4 }, 2323 { ISD::CTLZ, MVT::v32i8, 10 }, 2324 { ISD::CTLZ, MVT::v2i64, 1 }, 2325 { ISD::CTLZ, MVT::v4i32, 1 }, 2326 { ISD::CTLZ, MVT::v8i16, 4 }, 2327 { ISD::CTLZ, MVT::v16i8, 4 }, 2328 }; 2329 static const CostTblEntry AVX512BWCostTbl[] = { 2330 { ISD::ABS, MVT::v32i16, 1 }, 2331 { ISD::ABS, MVT::v64i8, 1 }, 2332 { ISD::BITREVERSE, MVT::v8i64, 5 }, 2333 { ISD::BITREVERSE, MVT::v16i32, 5 }, 2334 { ISD::BITREVERSE, MVT::v32i16, 5 }, 2335 { ISD::BITREVERSE, MVT::v64i8, 5 }, 2336 { ISD::CTLZ, MVT::v8i64, 23 }, 2337 { ISD::CTLZ, MVT::v16i32, 22 }, 2338 { ISD::CTLZ, MVT::v32i16, 18 }, 2339 { ISD::CTLZ, MVT::v64i8, 17 }, 2340 { ISD::CTPOP, MVT::v8i64, 7 }, 2341 { ISD::CTPOP, MVT::v16i32, 11 }, 2342 { ISD::CTPOP, MVT::v32i16, 9 }, 2343 { ISD::CTPOP, MVT::v64i8, 6 }, 2344 { ISD::CTTZ, MVT::v8i64, 10 }, 2345 { ISD::CTTZ, MVT::v16i32, 14 }, 2346 { ISD::CTTZ, MVT::v32i16, 12 }, 2347 { ISD::CTTZ, MVT::v64i8, 9 }, 2348 { ISD::SADDSAT, MVT::v32i16, 1 }, 2349 { ISD::SADDSAT, MVT::v64i8, 1 }, 2350 { ISD::SMAX, MVT::v32i16, 1 }, 2351 { ISD::SMAX, MVT::v64i8, 1 }, 2352 { ISD::SMIN, MVT::v32i16, 1 }, 2353 { ISD::SMIN, MVT::v64i8, 1 }, 2354 { ISD::SSUBSAT, MVT::v32i16, 1 }, 2355 { ISD::SSUBSAT, MVT::v64i8, 1 }, 2356 { ISD::UADDSAT, MVT::v32i16, 1 }, 2357 { ISD::UADDSAT, MVT::v64i8, 1 }, 2358 { ISD::UMAX, MVT::v32i16, 1 }, 2359 { ISD::UMAX, MVT::v64i8, 1 }, 2360 { ISD::UMIN, MVT::v32i16, 1 }, 2361 { ISD::UMIN, MVT::v64i8, 1 }, 2362 { ISD::USUBSAT, MVT::v32i16, 1 }, 2363 { ISD::USUBSAT, MVT::v64i8, 1 }, 2364 }; 2365 static const CostTblEntry AVX512CostTbl[] = { 2366 { ISD::ABS, MVT::v8i64, 1 }, 2367 { ISD::ABS, MVT::v16i32, 1 }, 2368 { ISD::ABS, MVT::v32i16, 2 }, // FIXME: include split 2369 { ISD::ABS, MVT::v64i8, 2 }, // FIXME: include split 2370 { ISD::ABS, MVT::v4i64, 1 }, 2371 { ISD::ABS, MVT::v2i64, 1 }, 2372 { ISD::BITREVERSE, MVT::v8i64, 36 }, 2373 { ISD::BITREVERSE, MVT::v16i32, 24 }, 2374 { ISD::BITREVERSE, MVT::v32i16, 10 }, 2375 { ISD::BITREVERSE, MVT::v64i8, 10 }, 2376 { ISD::CTLZ, MVT::v8i64, 29 }, 2377 { ISD::CTLZ, MVT::v16i32, 35 }, 2378 { ISD::CTLZ, MVT::v32i16, 28 }, 2379 { ISD::CTLZ, MVT::v64i8, 18 }, 2380 { ISD::CTPOP, MVT::v8i64, 16 }, 2381 { ISD::CTPOP, MVT::v16i32, 24 }, 2382 { ISD::CTPOP, MVT::v32i16, 18 }, 2383 { ISD::CTPOP, MVT::v64i8, 12 }, 2384 { ISD::CTTZ, MVT::v8i64, 20 }, 2385 { ISD::CTTZ, MVT::v16i32, 28 }, 2386 { ISD::CTTZ, MVT::v32i16, 24 }, 2387 { ISD::CTTZ, MVT::v64i8, 18 }, 2388 { ISD::SMAX, MVT::v8i64, 1 }, 2389 { ISD::SMAX, MVT::v16i32, 1 }, 2390 { ISD::SMAX, MVT::v32i16, 2 }, // FIXME: include split 2391 { ISD::SMAX, MVT::v64i8, 2 }, // FIXME: include split 2392 { ISD::SMAX, MVT::v4i64, 1 }, 2393 { ISD::SMAX, MVT::v2i64, 1 }, 2394 { ISD::SMIN, MVT::v8i64, 1 }, 2395 { ISD::SMIN, MVT::v16i32, 1 }, 2396 { ISD::SMIN, MVT::v32i16, 2 }, // FIXME: include split 2397 { ISD::SMIN, MVT::v64i8, 2 }, // FIXME: include split 2398 { ISD::SMIN, MVT::v4i64, 1 }, 2399 { ISD::SMIN, MVT::v2i64, 1 }, 2400 { ISD::UMAX, MVT::v8i64, 1 }, 2401 { ISD::UMAX, MVT::v16i32, 1 }, 2402 { ISD::UMAX, MVT::v32i16, 2 }, // FIXME: include split 2403 { ISD::UMAX, MVT::v64i8, 2 }, // FIXME: include split 2404 { ISD::UMAX, MVT::v4i64, 1 }, 2405 { ISD::UMAX, MVT::v2i64, 1 }, 2406 { ISD::UMIN, MVT::v8i64, 1 }, 2407 { ISD::UMIN, MVT::v16i32, 1 }, 2408 { ISD::UMIN, MVT::v32i16, 2 }, // FIXME: include split 2409 { ISD::UMIN, MVT::v64i8, 2 }, // FIXME: include split 2410 { ISD::UMIN, MVT::v4i64, 1 }, 2411 { ISD::UMIN, MVT::v2i64, 1 }, 2412 { ISD::USUBSAT, MVT::v16i32, 2 }, // pmaxud + psubd 2413 { ISD::USUBSAT, MVT::v2i64, 2 }, // pmaxuq + psubq 2414 { ISD::USUBSAT, MVT::v4i64, 2 }, // pmaxuq + psubq 2415 { ISD::USUBSAT, MVT::v8i64, 2 }, // pmaxuq + psubq 2416 { ISD::UADDSAT, MVT::v16i32, 3 }, // not + pminud + paddd 2417 { ISD::UADDSAT, MVT::v2i64, 3 }, // not + pminuq + paddq 2418 { ISD::UADDSAT, MVT::v4i64, 3 }, // not + pminuq + paddq 2419 { ISD::UADDSAT, MVT::v8i64, 3 }, // not + pminuq + paddq 2420 { ISD::SADDSAT, MVT::v32i16, 2 }, // FIXME: include split 2421 { ISD::SADDSAT, MVT::v64i8, 2 }, // FIXME: include split 2422 { ISD::SSUBSAT, MVT::v32i16, 2 }, // FIXME: include split 2423 { ISD::SSUBSAT, MVT::v64i8, 2 }, // FIXME: include split 2424 { ISD::UADDSAT, MVT::v32i16, 2 }, // FIXME: include split 2425 { ISD::UADDSAT, MVT::v64i8, 2 }, // FIXME: include split 2426 { ISD::USUBSAT, MVT::v32i16, 2 }, // FIXME: include split 2427 { ISD::USUBSAT, MVT::v64i8, 2 }, // FIXME: include split 2428 { ISD::FMAXNUM, MVT::f32, 2 }, 2429 { ISD::FMAXNUM, MVT::v4f32, 2 }, 2430 { ISD::FMAXNUM, MVT::v8f32, 2 }, 2431 { ISD::FMAXNUM, MVT::v16f32, 2 }, 2432 { ISD::FMAXNUM, MVT::f64, 2 }, 2433 { ISD::FMAXNUM, MVT::v2f64, 2 }, 2434 { ISD::FMAXNUM, MVT::v4f64, 2 }, 2435 { ISD::FMAXNUM, MVT::v8f64, 2 }, 2436 }; 2437 static const CostTblEntry XOPCostTbl[] = { 2438 { ISD::BITREVERSE, MVT::v4i64, 4 }, 2439 { ISD::BITREVERSE, MVT::v8i32, 4 }, 2440 { ISD::BITREVERSE, MVT::v16i16, 4 }, 2441 { ISD::BITREVERSE, MVT::v32i8, 4 }, 2442 { ISD::BITREVERSE, MVT::v2i64, 1 }, 2443 { ISD::BITREVERSE, MVT::v4i32, 1 }, 2444 { ISD::BITREVERSE, MVT::v8i16, 1 }, 2445 { ISD::BITREVERSE, MVT::v16i8, 1 }, 2446 { ISD::BITREVERSE, MVT::i64, 3 }, 2447 { ISD::BITREVERSE, MVT::i32, 3 }, 2448 { ISD::BITREVERSE, MVT::i16, 3 }, 2449 { ISD::BITREVERSE, MVT::i8, 3 } 2450 }; 2451 static const CostTblEntry AVX2CostTbl[] = { 2452 { ISD::ABS, MVT::v4i64, 2 }, // VBLENDVPD(X,VPSUBQ(0,X),X) 2453 { ISD::ABS, MVT::v8i32, 1 }, 2454 { ISD::ABS, MVT::v16i16, 1 }, 2455 { ISD::ABS, MVT::v32i8, 1 }, 2456 { ISD::BITREVERSE, MVT::v4i64, 5 }, 2457 { ISD::BITREVERSE, MVT::v8i32, 5 }, 2458 { ISD::BITREVERSE, MVT::v16i16, 5 }, 2459 { ISD::BITREVERSE, MVT::v32i8, 5 }, 2460 { ISD::BSWAP, MVT::v4i64, 1 }, 2461 { ISD::BSWAP, MVT::v8i32, 1 }, 2462 { ISD::BSWAP, MVT::v16i16, 1 }, 2463 { ISD::CTLZ, MVT::v4i64, 23 }, 2464 { ISD::CTLZ, MVT::v8i32, 18 }, 2465 { ISD::CTLZ, MVT::v16i16, 14 }, 2466 { ISD::CTLZ, MVT::v32i8, 9 }, 2467 { ISD::CTPOP, MVT::v4i64, 7 }, 2468 { ISD::CTPOP, MVT::v8i32, 11 }, 2469 { ISD::CTPOP, MVT::v16i16, 9 }, 2470 { ISD::CTPOP, MVT::v32i8, 6 }, 2471 { ISD::CTTZ, MVT::v4i64, 10 }, 2472 { ISD::CTTZ, MVT::v8i32, 14 }, 2473 { ISD::CTTZ, MVT::v16i16, 12 }, 2474 { ISD::CTTZ, MVT::v32i8, 9 }, 2475 { ISD::SADDSAT, MVT::v16i16, 1 }, 2476 { ISD::SADDSAT, MVT::v32i8, 1 }, 2477 { ISD::SMAX, MVT::v8i32, 1 }, 2478 { ISD::SMAX, MVT::v16i16, 1 }, 2479 { ISD::SMAX, MVT::v32i8, 1 }, 2480 { ISD::SMIN, MVT::v8i32, 1 }, 2481 { ISD::SMIN, MVT::v16i16, 1 }, 2482 { ISD::SMIN, MVT::v32i8, 1 }, 2483 { ISD::SSUBSAT, MVT::v16i16, 1 }, 2484 { ISD::SSUBSAT, MVT::v32i8, 1 }, 2485 { ISD::UADDSAT, MVT::v16i16, 1 }, 2486 { ISD::UADDSAT, MVT::v32i8, 1 }, 2487 { ISD::UADDSAT, MVT::v8i32, 3 }, // not + pminud + paddd 2488 { ISD::UMAX, MVT::v8i32, 1 }, 2489 { ISD::UMAX, MVT::v16i16, 1 }, 2490 { ISD::UMAX, MVT::v32i8, 1 }, 2491 { ISD::UMIN, MVT::v8i32, 1 }, 2492 { ISD::UMIN, MVT::v16i16, 1 }, 2493 { ISD::UMIN, MVT::v32i8, 1 }, 2494 { ISD::USUBSAT, MVT::v16i16, 1 }, 2495 { ISD::USUBSAT, MVT::v32i8, 1 }, 2496 { ISD::USUBSAT, MVT::v8i32, 2 }, // pmaxud + psubd 2497 { ISD::FMAXNUM, MVT::v8f32, 3 }, // MAXPS + CMPUNORDPS + BLENDVPS 2498 { ISD::FMAXNUM, MVT::v4f64, 3 }, // MAXPD + CMPUNORDPD + BLENDVPD 2499 { ISD::FSQRT, MVT::f32, 7 }, // Haswell from http://www.agner.org/ 2500 { ISD::FSQRT, MVT::v4f32, 7 }, // Haswell from http://www.agner.org/ 2501 { ISD::FSQRT, MVT::v8f32, 14 }, // Haswell from http://www.agner.org/ 2502 { ISD::FSQRT, MVT::f64, 14 }, // Haswell from http://www.agner.org/ 2503 { ISD::FSQRT, MVT::v2f64, 14 }, // Haswell from http://www.agner.org/ 2504 { ISD::FSQRT, MVT::v4f64, 28 }, // Haswell from http://www.agner.org/ 2505 }; 2506 static const CostTblEntry AVX1CostTbl[] = { 2507 { ISD::ABS, MVT::v4i64, 5 }, // VBLENDVPD(X,VPSUBQ(0,X),X) 2508 { ISD::ABS, MVT::v8i32, 3 }, 2509 { ISD::ABS, MVT::v16i16, 3 }, 2510 { ISD::ABS, MVT::v32i8, 3 }, 2511 { ISD::BITREVERSE, MVT::v4i64, 12 }, // 2 x 128-bit Op + extract/insert 2512 { ISD::BITREVERSE, MVT::v8i32, 12 }, // 2 x 128-bit Op + extract/insert 2513 { ISD::BITREVERSE, MVT::v16i16, 12 }, // 2 x 128-bit Op + extract/insert 2514 { ISD::BITREVERSE, MVT::v32i8, 12 }, // 2 x 128-bit Op + extract/insert 2515 { ISD::BSWAP, MVT::v4i64, 4 }, 2516 { ISD::BSWAP, MVT::v8i32, 4 }, 2517 { ISD::BSWAP, MVT::v16i16, 4 }, 2518 { ISD::CTLZ, MVT::v4i64, 48 }, // 2 x 128-bit Op + extract/insert 2519 { ISD::CTLZ, MVT::v8i32, 38 }, // 2 x 128-bit Op + extract/insert 2520 { ISD::CTLZ, MVT::v16i16, 30 }, // 2 x 128-bit Op + extract/insert 2521 { ISD::CTLZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert 2522 { ISD::CTPOP, MVT::v4i64, 16 }, // 2 x 128-bit Op + extract/insert 2523 { ISD::CTPOP, MVT::v8i32, 24 }, // 2 x 128-bit Op + extract/insert 2524 { ISD::CTPOP, MVT::v16i16, 20 }, // 2 x 128-bit Op + extract/insert 2525 { ISD::CTPOP, MVT::v32i8, 14 }, // 2 x 128-bit Op + extract/insert 2526 { ISD::CTTZ, MVT::v4i64, 22 }, // 2 x 128-bit Op + extract/insert 2527 { ISD::CTTZ, MVT::v8i32, 30 }, // 2 x 128-bit Op + extract/insert 2528 { ISD::CTTZ, MVT::v16i16, 26 }, // 2 x 128-bit Op + extract/insert 2529 { ISD::CTTZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert 2530 { ISD::SADDSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert 2531 { ISD::SADDSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 2532 { ISD::SMAX, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert 2533 { ISD::SMAX, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert 2534 { ISD::SMAX, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 2535 { ISD::SMIN, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert 2536 { ISD::SMIN, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert 2537 { ISD::SMIN, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 2538 { ISD::SSUBSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert 2539 { ISD::SSUBSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 2540 { ISD::UADDSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert 2541 { ISD::UADDSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 2542 { ISD::UADDSAT, MVT::v8i32, 8 }, // 2 x 128-bit Op + extract/insert 2543 { ISD::UMAX, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert 2544 { ISD::UMAX, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert 2545 { ISD::UMAX, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 2546 { ISD::UMIN, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert 2547 { ISD::UMIN, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert 2548 { ISD::UMIN, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 2549 { ISD::USUBSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert 2550 { ISD::USUBSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert 2551 { ISD::USUBSAT, MVT::v8i32, 6 }, // 2 x 128-bit Op + extract/insert 2552 { ISD::FMAXNUM, MVT::f32, 3 }, // MAXSS + CMPUNORDSS + BLENDVPS 2553 { ISD::FMAXNUM, MVT::v4f32, 3 }, // MAXPS + CMPUNORDPS + BLENDVPS 2554 { ISD::FMAXNUM, MVT::v8f32, 5 }, // MAXPS + CMPUNORDPS + BLENDVPS + ? 2555 { ISD::FMAXNUM, MVT::f64, 3 }, // MAXSD + CMPUNORDSD + BLENDVPD 2556 { ISD::FMAXNUM, MVT::v2f64, 3 }, // MAXPD + CMPUNORDPD + BLENDVPD 2557 { ISD::FMAXNUM, MVT::v4f64, 5 }, // MAXPD + CMPUNORDPD + BLENDVPD + ? 2558 { ISD::FSQRT, MVT::f32, 14 }, // SNB from http://www.agner.org/ 2559 { ISD::FSQRT, MVT::v4f32, 14 }, // SNB from http://www.agner.org/ 2560 { ISD::FSQRT, MVT::v8f32, 28 }, // SNB from http://www.agner.org/ 2561 { ISD::FSQRT, MVT::f64, 21 }, // SNB from http://www.agner.org/ 2562 { ISD::FSQRT, MVT::v2f64, 21 }, // SNB from http://www.agner.org/ 2563 { ISD::FSQRT, MVT::v4f64, 43 }, // SNB from http://www.agner.org/ 2564 }; 2565 static const CostTblEntry GLMCostTbl[] = { 2566 { ISD::FSQRT, MVT::f32, 19 }, // sqrtss 2567 { ISD::FSQRT, MVT::v4f32, 37 }, // sqrtps 2568 { ISD::FSQRT, MVT::f64, 34 }, // sqrtsd 2569 { ISD::FSQRT, MVT::v2f64, 67 }, // sqrtpd 2570 }; 2571 static const CostTblEntry SLMCostTbl[] = { 2572 { ISD::FSQRT, MVT::f32, 20 }, // sqrtss 2573 { ISD::FSQRT, MVT::v4f32, 40 }, // sqrtps 2574 { ISD::FSQRT, MVT::f64, 35 }, // sqrtsd 2575 { ISD::FSQRT, MVT::v2f64, 70 }, // sqrtpd 2576 }; 2577 static const CostTblEntry SSE42CostTbl[] = { 2578 { ISD::USUBSAT, MVT::v4i32, 2 }, // pmaxud + psubd 2579 { ISD::UADDSAT, MVT::v4i32, 3 }, // not + pminud + paddd 2580 { ISD::FSQRT, MVT::f32, 18 }, // Nehalem from http://www.agner.org/ 2581 { ISD::FSQRT, MVT::v4f32, 18 }, // Nehalem from http://www.agner.org/ 2582 }; 2583 static const CostTblEntry SSE41CostTbl[] = { 2584 { ISD::ABS, MVT::v2i64, 2 }, // BLENDVPD(X,PSUBQ(0,X),X) 2585 { ISD::SMAX, MVT::v4i32, 1 }, 2586 { ISD::SMAX, MVT::v16i8, 1 }, 2587 { ISD::SMIN, MVT::v4i32, 1 }, 2588 { ISD::SMIN, MVT::v16i8, 1 }, 2589 { ISD::UMAX, MVT::v4i32, 1 }, 2590 { ISD::UMAX, MVT::v8i16, 1 }, 2591 { ISD::UMIN, MVT::v4i32, 1 }, 2592 { ISD::UMIN, MVT::v8i16, 1 }, 2593 }; 2594 static const CostTblEntry SSSE3CostTbl[] = { 2595 { ISD::ABS, MVT::v4i32, 1 }, 2596 { ISD::ABS, MVT::v8i16, 1 }, 2597 { ISD::ABS, MVT::v16i8, 1 }, 2598 { ISD::BITREVERSE, MVT::v2i64, 5 }, 2599 { ISD::BITREVERSE, MVT::v4i32, 5 }, 2600 { ISD::BITREVERSE, MVT::v8i16, 5 }, 2601 { ISD::BITREVERSE, MVT::v16i8, 5 }, 2602 { ISD::BSWAP, MVT::v2i64, 1 }, 2603 { ISD::BSWAP, MVT::v4i32, 1 }, 2604 { ISD::BSWAP, MVT::v8i16, 1 }, 2605 { ISD::CTLZ, MVT::v2i64, 23 }, 2606 { ISD::CTLZ, MVT::v4i32, 18 }, 2607 { ISD::CTLZ, MVT::v8i16, 14 }, 2608 { ISD::CTLZ, MVT::v16i8, 9 }, 2609 { ISD::CTPOP, MVT::v2i64, 7 }, 2610 { ISD::CTPOP, MVT::v4i32, 11 }, 2611 { ISD::CTPOP, MVT::v8i16, 9 }, 2612 { ISD::CTPOP, MVT::v16i8, 6 }, 2613 { ISD::CTTZ, MVT::v2i64, 10 }, 2614 { ISD::CTTZ, MVT::v4i32, 14 }, 2615 { ISD::CTTZ, MVT::v8i16, 12 }, 2616 { ISD::CTTZ, MVT::v16i8, 9 } 2617 }; 2618 static const CostTblEntry SSE2CostTbl[] = { 2619 { ISD::ABS, MVT::v2i64, 4 }, 2620 { ISD::ABS, MVT::v4i32, 3 }, 2621 { ISD::ABS, MVT::v8i16, 2 }, 2622 { ISD::ABS, MVT::v16i8, 2 }, 2623 { ISD::BITREVERSE, MVT::v2i64, 29 }, 2624 { ISD::BITREVERSE, MVT::v4i32, 27 }, 2625 { ISD::BITREVERSE, MVT::v8i16, 27 }, 2626 { ISD::BITREVERSE, MVT::v16i8, 20 }, 2627 { ISD::BSWAP, MVT::v2i64, 7 }, 2628 { ISD::BSWAP, MVT::v4i32, 7 }, 2629 { ISD::BSWAP, MVT::v8i16, 7 }, 2630 { ISD::CTLZ, MVT::v2i64, 25 }, 2631 { ISD::CTLZ, MVT::v4i32, 26 }, 2632 { ISD::CTLZ, MVT::v8i16, 20 }, 2633 { ISD::CTLZ, MVT::v16i8, 17 }, 2634 { ISD::CTPOP, MVT::v2i64, 12 }, 2635 { ISD::CTPOP, MVT::v4i32, 15 }, 2636 { ISD::CTPOP, MVT::v8i16, 13 }, 2637 { ISD::CTPOP, MVT::v16i8, 10 }, 2638 { ISD::CTTZ, MVT::v2i64, 14 }, 2639 { ISD::CTTZ, MVT::v4i32, 18 }, 2640 { ISD::CTTZ, MVT::v8i16, 16 }, 2641 { ISD::CTTZ, MVT::v16i8, 13 }, 2642 { ISD::SADDSAT, MVT::v8i16, 1 }, 2643 { ISD::SADDSAT, MVT::v16i8, 1 }, 2644 { ISD::SMAX, MVT::v8i16, 1 }, 2645 { ISD::SMIN, MVT::v8i16, 1 }, 2646 { ISD::SSUBSAT, MVT::v8i16, 1 }, 2647 { ISD::SSUBSAT, MVT::v16i8, 1 }, 2648 { ISD::UADDSAT, MVT::v8i16, 1 }, 2649 { ISD::UADDSAT, MVT::v16i8, 1 }, 2650 { ISD::UMAX, MVT::v8i16, 2 }, 2651 { ISD::UMAX, MVT::v16i8, 1 }, 2652 { ISD::UMIN, MVT::v8i16, 2 }, 2653 { ISD::UMIN, MVT::v16i8, 1 }, 2654 { ISD::USUBSAT, MVT::v8i16, 1 }, 2655 { ISD::USUBSAT, MVT::v16i8, 1 }, 2656 { ISD::FMAXNUM, MVT::f64, 4 }, 2657 { ISD::FMAXNUM, MVT::v2f64, 4 }, 2658 { ISD::FSQRT, MVT::f64, 32 }, // Nehalem from http://www.agner.org/ 2659 { ISD::FSQRT, MVT::v2f64, 32 }, // Nehalem from http://www.agner.org/ 2660 }; 2661 static const CostTblEntry SSE1CostTbl[] = { 2662 { ISD::FMAXNUM, MVT::f32, 4 }, 2663 { ISD::FMAXNUM, MVT::v4f32, 4 }, 2664 { ISD::FSQRT, MVT::f32, 28 }, // Pentium III from http://www.agner.org/ 2665 { ISD::FSQRT, MVT::v4f32, 56 }, // Pentium III from http://www.agner.org/ 2666 }; 2667 static const CostTblEntry BMI64CostTbl[] = { // 64-bit targets 2668 { ISD::CTTZ, MVT::i64, 1 }, 2669 }; 2670 static const CostTblEntry BMI32CostTbl[] = { // 32 or 64-bit targets 2671 { ISD::CTTZ, MVT::i32, 1 }, 2672 { ISD::CTTZ, MVT::i16, 1 }, 2673 { ISD::CTTZ, MVT::i8, 1 }, 2674 }; 2675 static const CostTblEntry LZCNT64CostTbl[] = { // 64-bit targets 2676 { ISD::CTLZ, MVT::i64, 1 }, 2677 }; 2678 static const CostTblEntry LZCNT32CostTbl[] = { // 32 or 64-bit targets 2679 { ISD::CTLZ, MVT::i32, 1 }, 2680 { ISD::CTLZ, MVT::i16, 1 }, 2681 { ISD::CTLZ, MVT::i8, 1 }, 2682 }; 2683 static const CostTblEntry POPCNT64CostTbl[] = { // 64-bit targets 2684 { ISD::CTPOP, MVT::i64, 1 }, 2685 }; 2686 static const CostTblEntry POPCNT32CostTbl[] = { // 32 or 64-bit targets 2687 { ISD::CTPOP, MVT::i32, 1 }, 2688 { ISD::CTPOP, MVT::i16, 1 }, 2689 { ISD::CTPOP, MVT::i8, 1 }, 2690 }; 2691 static const CostTblEntry X64CostTbl[] = { // 64-bit targets 2692 { ISD::ABS, MVT::i64, 2 }, // SUB+CMOV 2693 { ISD::BITREVERSE, MVT::i64, 14 }, 2694 { ISD::CTLZ, MVT::i64, 4 }, // BSR+XOR or BSR+XOR+CMOV 2695 { ISD::CTTZ, MVT::i64, 3 }, // TEST+BSF+CMOV/BRANCH 2696 { ISD::CTPOP, MVT::i64, 10 }, 2697 { ISD::SADDO, MVT::i64, 1 }, 2698 { ISD::UADDO, MVT::i64, 1 }, 2699 { ISD::UMULO, MVT::i64, 2 }, // mulq + seto 2700 }; 2701 static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets 2702 { ISD::ABS, MVT::i32, 2 }, // SUB+CMOV 2703 { ISD::ABS, MVT::i16, 2 }, // SUB+CMOV 2704 { ISD::BITREVERSE, MVT::i32, 14 }, 2705 { ISD::BITREVERSE, MVT::i16, 14 }, 2706 { ISD::BITREVERSE, MVT::i8, 11 }, 2707 { ISD::CTLZ, MVT::i32, 4 }, // BSR+XOR or BSR+XOR+CMOV 2708 { ISD::CTLZ, MVT::i16, 4 }, // BSR+XOR or BSR+XOR+CMOV 2709 { ISD::CTLZ, MVT::i8, 4 }, // BSR+XOR or BSR+XOR+CMOV 2710 { ISD::CTTZ, MVT::i32, 3 }, // TEST+BSF+CMOV/BRANCH 2711 { ISD::CTTZ, MVT::i16, 3 }, // TEST+BSF+CMOV/BRANCH 2712 { ISD::CTTZ, MVT::i8, 3 }, // TEST+BSF+CMOV/BRANCH 2713 { ISD::CTPOP, MVT::i32, 8 }, 2714 { ISD::CTPOP, MVT::i16, 9 }, 2715 { ISD::CTPOP, MVT::i8, 7 }, 2716 { ISD::SADDO, MVT::i32, 1 }, 2717 { ISD::SADDO, MVT::i16, 1 }, 2718 { ISD::SADDO, MVT::i8, 1 }, 2719 { ISD::UADDO, MVT::i32, 1 }, 2720 { ISD::UADDO, MVT::i16, 1 }, 2721 { ISD::UADDO, MVT::i8, 1 }, 2722 { ISD::UMULO, MVT::i32, 2 }, // mul + seto 2723 { ISD::UMULO, MVT::i16, 2 }, 2724 { ISD::UMULO, MVT::i8, 2 }, 2725 }; 2726 2727 Type *RetTy = ICA.getReturnType(); 2728 Type *OpTy = RetTy; 2729 Intrinsic::ID IID = ICA.getID(); 2730 unsigned ISD = ISD::DELETED_NODE; 2731 switch (IID) { 2732 default: 2733 break; 2734 case Intrinsic::abs: 2735 ISD = ISD::ABS; 2736 break; 2737 case Intrinsic::bitreverse: 2738 ISD = ISD::BITREVERSE; 2739 break; 2740 case Intrinsic::bswap: 2741 ISD = ISD::BSWAP; 2742 break; 2743 case Intrinsic::ctlz: 2744 ISD = ISD::CTLZ; 2745 break; 2746 case Intrinsic::ctpop: 2747 ISD = ISD::CTPOP; 2748 break; 2749 case Intrinsic::cttz: 2750 ISD = ISD::CTTZ; 2751 break; 2752 case Intrinsic::maxnum: 2753 case Intrinsic::minnum: 2754 // FMINNUM has same costs so don't duplicate. 2755 ISD = ISD::FMAXNUM; 2756 break; 2757 case Intrinsic::sadd_sat: 2758 ISD = ISD::SADDSAT; 2759 break; 2760 case Intrinsic::smax: 2761 ISD = ISD::SMAX; 2762 break; 2763 case Intrinsic::smin: 2764 ISD = ISD::SMIN; 2765 break; 2766 case Intrinsic::ssub_sat: 2767 ISD = ISD::SSUBSAT; 2768 break; 2769 case Intrinsic::uadd_sat: 2770 ISD = ISD::UADDSAT; 2771 break; 2772 case Intrinsic::umax: 2773 ISD = ISD::UMAX; 2774 break; 2775 case Intrinsic::umin: 2776 ISD = ISD::UMIN; 2777 break; 2778 case Intrinsic::usub_sat: 2779 ISD = ISD::USUBSAT; 2780 break; 2781 case Intrinsic::sqrt: 2782 ISD = ISD::FSQRT; 2783 break; 2784 case Intrinsic::sadd_with_overflow: 2785 case Intrinsic::ssub_with_overflow: 2786 // SSUBO has same costs so don't duplicate. 2787 ISD = ISD::SADDO; 2788 OpTy = RetTy->getContainedType(0); 2789 break; 2790 case Intrinsic::uadd_with_overflow: 2791 case Intrinsic::usub_with_overflow: 2792 // USUBO has same costs so don't duplicate. 2793 ISD = ISD::UADDO; 2794 OpTy = RetTy->getContainedType(0); 2795 break; 2796 case Intrinsic::umul_with_overflow: 2797 case Intrinsic::smul_with_overflow: 2798 // SMULO has same costs so don't duplicate. 2799 ISD = ISD::UMULO; 2800 OpTy = RetTy->getContainedType(0); 2801 break; 2802 } 2803 2804 if (ISD != ISD::DELETED_NODE) { 2805 // Legalize the type. 2806 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, OpTy); 2807 MVT MTy = LT.second; 2808 2809 // Attempt to lookup cost. 2810 if (ISD == ISD::BITREVERSE && ST->hasGFNI() && ST->hasSSSE3() && 2811 MTy.isVector()) { 2812 // With PSHUFB the code is very similar for all types. If we have integer 2813 // byte operations, we just need a GF2P8AFFINEQB for vXi8. For other types 2814 // we also need a PSHUFB. 2815 unsigned Cost = MTy.getVectorElementType() == MVT::i8 ? 1 : 2; 2816 2817 // Without byte operations, we need twice as many GF2P8AFFINEQB and PSHUFB 2818 // instructions. We also need an extract and an insert. 2819 if (!(MTy.is128BitVector() || (ST->hasAVX2() && MTy.is256BitVector()) || 2820 (ST->hasBWI() && MTy.is512BitVector()))) 2821 Cost = Cost * 2 + 2; 2822 2823 return LT.first * Cost; 2824 } 2825 2826 auto adjustTableCost = [](const CostTblEntry &Entry, int LegalizationCost, 2827 FastMathFlags FMF) { 2828 // If there are no NANs to deal with, then these are reduced to a 2829 // single MIN** or MAX** instruction instead of the MIN/CMP/SELECT that we 2830 // assume is used in the non-fast case. 2831 if (Entry.ISD == ISD::FMAXNUM || Entry.ISD == ISD::FMINNUM) { 2832 if (FMF.noNaNs()) 2833 return LegalizationCost * 1; 2834 } 2835 return LegalizationCost * (int)Entry.Cost; 2836 }; 2837 2838 if (ST->useGLMDivSqrtCosts()) 2839 if (const auto *Entry = CostTableLookup(GLMCostTbl, ISD, MTy)) 2840 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 2841 2842 if (ST->isSLM()) 2843 if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy)) 2844 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 2845 2846 if (ST->hasCDI()) 2847 if (const auto *Entry = CostTableLookup(AVX512CDCostTbl, ISD, MTy)) 2848 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 2849 2850 if (ST->hasBWI()) 2851 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy)) 2852 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 2853 2854 if (ST->hasAVX512()) 2855 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy)) 2856 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 2857 2858 if (ST->hasXOP()) 2859 if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy)) 2860 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 2861 2862 if (ST->hasAVX2()) 2863 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy)) 2864 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 2865 2866 if (ST->hasAVX()) 2867 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy)) 2868 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 2869 2870 if (ST->hasSSE42()) 2871 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy)) 2872 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 2873 2874 if (ST->hasSSE41()) 2875 if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy)) 2876 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 2877 2878 if (ST->hasSSSE3()) 2879 if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy)) 2880 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 2881 2882 if (ST->hasSSE2()) 2883 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy)) 2884 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 2885 2886 if (ST->hasSSE1()) 2887 if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy)) 2888 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 2889 2890 if (ST->hasBMI()) { 2891 if (ST->is64Bit()) 2892 if (const auto *Entry = CostTableLookup(BMI64CostTbl, ISD, MTy)) 2893 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 2894 2895 if (const auto *Entry = CostTableLookup(BMI32CostTbl, ISD, MTy)) 2896 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 2897 } 2898 2899 if (ST->hasLZCNT()) { 2900 if (ST->is64Bit()) 2901 if (const auto *Entry = CostTableLookup(LZCNT64CostTbl, ISD, MTy)) 2902 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 2903 2904 if (const auto *Entry = CostTableLookup(LZCNT32CostTbl, ISD, MTy)) 2905 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 2906 } 2907 2908 if (ST->hasPOPCNT()) { 2909 if (ST->is64Bit()) 2910 if (const auto *Entry = CostTableLookup(POPCNT64CostTbl, ISD, MTy)) 2911 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 2912 2913 if (const auto *Entry = CostTableLookup(POPCNT32CostTbl, ISD, MTy)) 2914 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 2915 } 2916 2917 // TODO - add BMI (TZCNT) scalar handling 2918 2919 if (ST->is64Bit()) 2920 if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy)) 2921 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 2922 2923 if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy)) 2924 return adjustTableCost(*Entry, LT.first, ICA.getFlags()); 2925 } 2926 2927 return BaseT::getIntrinsicInstrCost(ICA, CostKind); 2928 } 2929 2930 InstructionCost 2931 X86TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, 2932 TTI::TargetCostKind CostKind) { 2933 if (ICA.isTypeBasedOnly()) 2934 return getTypeBasedIntrinsicInstrCost(ICA, CostKind); 2935 2936 static const CostTblEntry AVX512CostTbl[] = { 2937 { ISD::ROTL, MVT::v8i64, 1 }, 2938 { ISD::ROTL, MVT::v4i64, 1 }, 2939 { ISD::ROTL, MVT::v2i64, 1 }, 2940 { ISD::ROTL, MVT::v16i32, 1 }, 2941 { ISD::ROTL, MVT::v8i32, 1 }, 2942 { ISD::ROTL, MVT::v4i32, 1 }, 2943 { ISD::ROTR, MVT::v8i64, 1 }, 2944 { ISD::ROTR, MVT::v4i64, 1 }, 2945 { ISD::ROTR, MVT::v2i64, 1 }, 2946 { ISD::ROTR, MVT::v16i32, 1 }, 2947 { ISD::ROTR, MVT::v8i32, 1 }, 2948 { ISD::ROTR, MVT::v4i32, 1 } 2949 }; 2950 // XOP: ROTL = VPROT(X,Y), ROTR = VPROT(X,SUB(0,Y)) 2951 static const CostTblEntry XOPCostTbl[] = { 2952 { ISD::ROTL, MVT::v4i64, 4 }, 2953 { ISD::ROTL, MVT::v8i32, 4 }, 2954 { ISD::ROTL, MVT::v16i16, 4 }, 2955 { ISD::ROTL, MVT::v32i8, 4 }, 2956 { ISD::ROTL, MVT::v2i64, 1 }, 2957 { ISD::ROTL, MVT::v4i32, 1 }, 2958 { ISD::ROTL, MVT::v8i16, 1 }, 2959 { ISD::ROTL, MVT::v16i8, 1 }, 2960 { ISD::ROTR, MVT::v4i64, 6 }, 2961 { ISD::ROTR, MVT::v8i32, 6 }, 2962 { ISD::ROTR, MVT::v16i16, 6 }, 2963 { ISD::ROTR, MVT::v32i8, 6 }, 2964 { ISD::ROTR, MVT::v2i64, 2 }, 2965 { ISD::ROTR, MVT::v4i32, 2 }, 2966 { ISD::ROTR, MVT::v8i16, 2 }, 2967 { ISD::ROTR, MVT::v16i8, 2 } 2968 }; 2969 static const CostTblEntry X64CostTbl[] = { // 64-bit targets 2970 { ISD::ROTL, MVT::i64, 1 }, 2971 { ISD::ROTR, MVT::i64, 1 }, 2972 { ISD::FSHL, MVT::i64, 4 } 2973 }; 2974 static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets 2975 { ISD::ROTL, MVT::i32, 1 }, 2976 { ISD::ROTL, MVT::i16, 1 }, 2977 { ISD::ROTL, MVT::i8, 1 }, 2978 { ISD::ROTR, MVT::i32, 1 }, 2979 { ISD::ROTR, MVT::i16, 1 }, 2980 { ISD::ROTR, MVT::i8, 1 }, 2981 { ISD::FSHL, MVT::i32, 4 }, 2982 { ISD::FSHL, MVT::i16, 4 }, 2983 { ISD::FSHL, MVT::i8, 4 } 2984 }; 2985 2986 Intrinsic::ID IID = ICA.getID(); 2987 Type *RetTy = ICA.getReturnType(); 2988 const SmallVectorImpl<const Value *> &Args = ICA.getArgs(); 2989 unsigned ISD = ISD::DELETED_NODE; 2990 switch (IID) { 2991 default: 2992 break; 2993 case Intrinsic::fshl: 2994 ISD = ISD::FSHL; 2995 if (Args[0] == Args[1]) 2996 ISD = ISD::ROTL; 2997 break; 2998 case Intrinsic::fshr: 2999 // FSHR has same costs so don't duplicate. 3000 ISD = ISD::FSHL; 3001 if (Args[0] == Args[1]) 3002 ISD = ISD::ROTR; 3003 break; 3004 } 3005 3006 if (ISD != ISD::DELETED_NODE) { 3007 // Legalize the type. 3008 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy); 3009 MVT MTy = LT.second; 3010 3011 // Attempt to lookup cost. 3012 if (ST->hasAVX512()) 3013 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy)) 3014 return LT.first * Entry->Cost; 3015 3016 if (ST->hasXOP()) 3017 if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy)) 3018 return LT.first * Entry->Cost; 3019 3020 if (ST->is64Bit()) 3021 if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy)) 3022 return LT.first * Entry->Cost; 3023 3024 if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy)) 3025 return LT.first * Entry->Cost; 3026 } 3027 3028 return BaseT::getIntrinsicInstrCost(ICA, CostKind); 3029 } 3030 3031 InstructionCost X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, 3032 unsigned Index) { 3033 static const CostTblEntry SLMCostTbl[] = { 3034 { ISD::EXTRACT_VECTOR_ELT, MVT::i8, 4 }, 3035 { ISD::EXTRACT_VECTOR_ELT, MVT::i16, 4 }, 3036 { ISD::EXTRACT_VECTOR_ELT, MVT::i32, 4 }, 3037 { ISD::EXTRACT_VECTOR_ELT, MVT::i64, 7 } 3038 }; 3039 3040 assert(Val->isVectorTy() && "This must be a vector type"); 3041 Type *ScalarType = Val->getScalarType(); 3042 int RegisterFileMoveCost = 0; 3043 3044 if (Index != -1U && (Opcode == Instruction::ExtractElement || 3045 Opcode == Instruction::InsertElement)) { 3046 // Legalize the type. 3047 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val); 3048 3049 // This type is legalized to a scalar type. 3050 if (!LT.second.isVector()) 3051 return 0; 3052 3053 // The type may be split. Normalize the index to the new type. 3054 unsigned NumElts = LT.second.getVectorNumElements(); 3055 unsigned SubNumElts = NumElts; 3056 Index = Index % NumElts; 3057 3058 // For >128-bit vectors, we need to extract higher 128-bit subvectors. 3059 // For inserts, we also need to insert the subvector back. 3060 if (LT.second.getSizeInBits() > 128) { 3061 assert((LT.second.getSizeInBits() % 128) == 0 && "Illegal vector"); 3062 unsigned NumSubVecs = LT.second.getSizeInBits() / 128; 3063 SubNumElts = NumElts / NumSubVecs; 3064 if (SubNumElts <= Index) { 3065 RegisterFileMoveCost += (Opcode == Instruction::InsertElement ? 2 : 1); 3066 Index %= SubNumElts; 3067 } 3068 } 3069 3070 if (Index == 0) { 3071 // Floating point scalars are already located in index #0. 3072 // Many insertions to #0 can fold away for scalar fp-ops, so let's assume 3073 // true for all. 3074 if (ScalarType->isFloatingPointTy()) 3075 return RegisterFileMoveCost; 3076 3077 // Assume movd/movq XMM -> GPR is relatively cheap on all targets. 3078 if (ScalarType->isIntegerTy() && Opcode == Instruction::ExtractElement) 3079 return 1 + RegisterFileMoveCost; 3080 } 3081 3082 int ISD = TLI->InstructionOpcodeToISD(Opcode); 3083 assert(ISD && "Unexpected vector opcode"); 3084 MVT MScalarTy = LT.second.getScalarType(); 3085 if (ST->isSLM()) 3086 if (auto *Entry = CostTableLookup(SLMCostTbl, ISD, MScalarTy)) 3087 return Entry->Cost + RegisterFileMoveCost; 3088 3089 // Assume pinsr/pextr XMM <-> GPR is relatively cheap on all targets. 3090 if ((MScalarTy == MVT::i16 && ST->hasSSE2()) || 3091 (MScalarTy.isInteger() && ST->hasSSE41())) 3092 return 1 + RegisterFileMoveCost; 3093 3094 // Assume insertps is relatively cheap on all targets. 3095 if (MScalarTy == MVT::f32 && ST->hasSSE41() && 3096 Opcode == Instruction::InsertElement) 3097 return 1 + RegisterFileMoveCost; 3098 3099 // For extractions we just need to shuffle the element to index 0, which 3100 // should be very cheap (assume cost = 1). For insertions we need to shuffle 3101 // the elements to its destination. In both cases we must handle the 3102 // subvector move(s). 3103 // If the vector type is already less than 128-bits then don't reduce it. 3104 // TODO: Under what circumstances should we shuffle using the full width? 3105 InstructionCost ShuffleCost = 1; 3106 if (Opcode == Instruction::InsertElement) { 3107 auto *SubTy = cast<VectorType>(Val); 3108 EVT VT = TLI->getValueType(DL, Val); 3109 if (VT.getScalarType() != MScalarTy || VT.getSizeInBits() >= 128) 3110 SubTy = FixedVectorType::get(ScalarType, SubNumElts); 3111 ShuffleCost = 3112 getShuffleCost(TTI::SK_PermuteTwoSrc, SubTy, None, 0, SubTy); 3113 } 3114 int IntOrFpCost = ScalarType->isFloatingPointTy() ? 0 : 1; 3115 return ShuffleCost + IntOrFpCost + RegisterFileMoveCost; 3116 } 3117 3118 // Add to the base cost if we know that the extracted element of a vector is 3119 // destined to be moved to and used in the integer register file. 3120 if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy()) 3121 RegisterFileMoveCost += 1; 3122 3123 return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost; 3124 } 3125 3126 InstructionCost X86TTIImpl::getScalarizationOverhead(VectorType *Ty, 3127 const APInt &DemandedElts, 3128 bool Insert, 3129 bool Extract) { 3130 InstructionCost Cost = 0; 3131 3132 // For insertions, a ISD::BUILD_VECTOR style vector initialization can be much 3133 // cheaper than an accumulation of ISD::INSERT_VECTOR_ELT. 3134 if (Insert) { 3135 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty); 3136 MVT MScalarTy = LT.second.getScalarType(); 3137 3138 if ((MScalarTy == MVT::i16 && ST->hasSSE2()) || 3139 (MScalarTy.isInteger() && ST->hasSSE41()) || 3140 (MScalarTy == MVT::f32 && ST->hasSSE41())) { 3141 // For types we can insert directly, insertion into 128-bit sub vectors is 3142 // cheap, followed by a cheap chain of concatenations. 3143 if (LT.second.getSizeInBits() <= 128) { 3144 Cost += 3145 BaseT::getScalarizationOverhead(Ty, DemandedElts, Insert, false); 3146 } else { 3147 // In each 128-lane, if at least one index is demanded but not all 3148 // indices are demanded and this 128-lane is not the first 128-lane of 3149 // the legalized-vector, then this 128-lane needs a extracti128; If in 3150 // each 128-lane, there is at least one demanded index, this 128-lane 3151 // needs a inserti128. 3152 3153 // The following cases will help you build a better understanding: 3154 // Assume we insert several elements into a v8i32 vector in avx2, 3155 // Case#1: inserting into 1th index needs vpinsrd + inserti128. 3156 // Case#2: inserting into 5th index needs extracti128 + vpinsrd + 3157 // inserti128. 3158 // Case#3: inserting into 4,5,6,7 index needs 4*vpinsrd + inserti128. 3159 unsigned Num128Lanes = LT.second.getSizeInBits() / 128 * LT.first; 3160 unsigned NumElts = LT.second.getVectorNumElements() * LT.first; 3161 APInt WidenedDemandedElts = DemandedElts.zextOrSelf(NumElts); 3162 unsigned Scale = NumElts / Num128Lanes; 3163 // We iterate each 128-lane, and check if we need a 3164 // extracti128/inserti128 for this 128-lane. 3165 for (unsigned I = 0; I < NumElts; I += Scale) { 3166 APInt Mask = WidenedDemandedElts.getBitsSet(NumElts, I, I + Scale); 3167 APInt MaskedDE = Mask & WidenedDemandedElts; 3168 unsigned Population = MaskedDE.countPopulation(); 3169 Cost += (Population > 0 && Population != Scale && 3170 I % LT.second.getVectorNumElements() != 0); 3171 Cost += Population > 0; 3172 } 3173 Cost += DemandedElts.countPopulation(); 3174 3175 // For vXf32 cases, insertion into the 0'th index in each v4f32 3176 // 128-bit vector is free. 3177 // NOTE: This assumes legalization widens vXf32 vectors. 3178 if (MScalarTy == MVT::f32) 3179 for (unsigned i = 0, e = cast<FixedVectorType>(Ty)->getNumElements(); 3180 i < e; i += 4) 3181 if (DemandedElts[i]) 3182 Cost--; 3183 } 3184 } else if (LT.second.isVector()) { 3185 // Without fast insertion, we need to use MOVD/MOVQ to pass each demanded 3186 // integer element as a SCALAR_TO_VECTOR, then we build the vector as a 3187 // series of UNPCK followed by CONCAT_VECTORS - all of these can be 3188 // considered cheap. 3189 if (Ty->isIntOrIntVectorTy()) 3190 Cost += DemandedElts.countPopulation(); 3191 3192 // Get the smaller of the legalized or original pow2-extended number of 3193 // vector elements, which represents the number of unpacks we'll end up 3194 // performing. 3195 unsigned NumElts = LT.second.getVectorNumElements(); 3196 unsigned Pow2Elts = 3197 PowerOf2Ceil(cast<FixedVectorType>(Ty)->getNumElements()); 3198 Cost += (std::min<unsigned>(NumElts, Pow2Elts) - 1) * LT.first; 3199 } 3200 } 3201 3202 // TODO: Use default extraction for now, but we should investigate extending this 3203 // to handle repeated subvector extraction. 3204 if (Extract) 3205 Cost += BaseT::getScalarizationOverhead(Ty, DemandedElts, false, Extract); 3206 3207 return Cost; 3208 } 3209 3210 InstructionCost X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, 3211 MaybeAlign Alignment, 3212 unsigned AddressSpace, 3213 TTI::TargetCostKind CostKind, 3214 const Instruction *I) { 3215 // TODO: Handle other cost kinds. 3216 if (CostKind != TTI::TCK_RecipThroughput) { 3217 if (auto *SI = dyn_cast_or_null<StoreInst>(I)) { 3218 // Store instruction with index and scale costs 2 Uops. 3219 // Check the preceding GEP to identify non-const indices. 3220 if (auto *GEP = dyn_cast<GetElementPtrInst>(SI->getPointerOperand())) { 3221 if (!all_of(GEP->indices(), [](Value *V) { return isa<Constant>(V); })) 3222 return TTI::TCC_Basic * 2; 3223 } 3224 } 3225 return TTI::TCC_Basic; 3226 } 3227 3228 assert((Opcode == Instruction::Load || Opcode == Instruction::Store) && 3229 "Invalid Opcode"); 3230 // Type legalization can't handle structs 3231 if (TLI->getValueType(DL, Src, true) == MVT::Other) 3232 return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, 3233 CostKind); 3234 3235 // Handle non-power-of-two vectors such as <3 x float> and <48 x i16> 3236 if (auto *VTy = dyn_cast<FixedVectorType>(Src)) { 3237 const unsigned NumElem = VTy->getNumElements(); 3238 if (!isPowerOf2_32(NumElem)) { 3239 // Factorize NumElem into sum of power-of-two. 3240 InstructionCost Cost = 0; 3241 unsigned NumElemDone = 0; 3242 for (unsigned NumElemLeft = NumElem, Factor; 3243 Factor = PowerOf2Floor(NumElemLeft), NumElemLeft > 0; 3244 NumElemLeft -= Factor) { 3245 Type *SubTy = FixedVectorType::get(VTy->getScalarType(), Factor); 3246 unsigned SubTyBytes = SubTy->getPrimitiveSizeInBits() / 8; 3247 3248 Cost += 3249 getMemoryOpCost(Opcode, SubTy, Alignment, AddressSpace, CostKind); 3250 3251 std::pair<int, MVT> LST = TLI->getTypeLegalizationCost(DL, SubTy); 3252 if (!LST.second.isVector()) { 3253 APInt DemandedElts = 3254 APInt::getBitsSet(NumElem, NumElemDone, NumElemDone + Factor); 3255 Cost += getScalarizationOverhead(VTy, DemandedElts, 3256 Opcode == Instruction::Load, 3257 Opcode == Instruction::Store); 3258 } 3259 3260 NumElemDone += Factor; 3261 Alignment = commonAlignment(Alignment.valueOrOne(), SubTyBytes); 3262 } 3263 assert(NumElemDone == NumElem && "Processed wrong element count?"); 3264 return Cost; 3265 } 3266 } 3267 3268 // Legalize the type. 3269 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src); 3270 3271 // Each load/store unit costs 1. 3272 int Cost = LT.first * 1; 3273 3274 // This isn't exactly right. We're using slow unaligned 32-byte accesses as a 3275 // proxy for a double-pumped AVX memory interface such as on Sandybridge. 3276 if (LT.second.getStoreSize() == 32 && ST->isUnalignedMem32Slow()) 3277 Cost *= 2; 3278 3279 return Cost; 3280 } 3281 3282 InstructionCost 3283 X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy, Align Alignment, 3284 unsigned AddressSpace, 3285 TTI::TargetCostKind CostKind) { 3286 bool IsLoad = (Instruction::Load == Opcode); 3287 bool IsStore = (Instruction::Store == Opcode); 3288 3289 auto *SrcVTy = dyn_cast<FixedVectorType>(SrcTy); 3290 if (!SrcVTy) 3291 // To calculate scalar take the regular cost, without mask 3292 return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace, CostKind); 3293 3294 unsigned NumElem = SrcVTy->getNumElements(); 3295 auto *MaskTy = 3296 FixedVectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem); 3297 if ((IsLoad && !isLegalMaskedLoad(SrcVTy, Alignment)) || 3298 (IsStore && !isLegalMaskedStore(SrcVTy, Alignment)) || 3299 !isPowerOf2_32(NumElem)) { 3300 // Scalarization 3301 APInt DemandedElts = APInt::getAllOnesValue(NumElem); 3302 InstructionCost MaskSplitCost = 3303 getScalarizationOverhead(MaskTy, DemandedElts, false, true); 3304 InstructionCost ScalarCompareCost = getCmpSelInstrCost( 3305 Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr, 3306 CmpInst::BAD_ICMP_PREDICATE, CostKind); 3307 InstructionCost BranchCost = getCFInstrCost(Instruction::Br, CostKind); 3308 InstructionCost MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost); 3309 InstructionCost ValueSplitCost = 3310 getScalarizationOverhead(SrcVTy, DemandedElts, IsLoad, IsStore); 3311 InstructionCost MemopCost = 3312 NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(), 3313 Alignment, AddressSpace, CostKind); 3314 return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost; 3315 } 3316 3317 // Legalize the type. 3318 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy); 3319 auto VT = TLI->getValueType(DL, SrcVTy); 3320 InstructionCost Cost = 0; 3321 if (VT.isSimple() && LT.second != VT.getSimpleVT() && 3322 LT.second.getVectorNumElements() == NumElem) 3323 // Promotion requires expand/truncate for data and a shuffle for mask. 3324 Cost += getShuffleCost(TTI::SK_PermuteTwoSrc, SrcVTy, None, 0, nullptr) + 3325 getShuffleCost(TTI::SK_PermuteTwoSrc, MaskTy, None, 0, nullptr); 3326 3327 else if (LT.second.getVectorNumElements() > NumElem) { 3328 auto *NewMaskTy = FixedVectorType::get(MaskTy->getElementType(), 3329 LT.second.getVectorNumElements()); 3330 // Expanding requires fill mask with zeroes 3331 Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, None, 0, MaskTy); 3332 } 3333 3334 // Pre-AVX512 - each maskmov load costs 2 + store costs ~8. 3335 if (!ST->hasAVX512()) 3336 return Cost + LT.first * (IsLoad ? 2 : 8); 3337 3338 // AVX-512 masked load/store is cheapper 3339 return Cost + LT.first; 3340 } 3341 3342 InstructionCost X86TTIImpl::getAddressComputationCost(Type *Ty, 3343 ScalarEvolution *SE, 3344 const SCEV *Ptr) { 3345 // Address computations in vectorized code with non-consecutive addresses will 3346 // likely result in more instructions compared to scalar code where the 3347 // computation can more often be merged into the index mode. The resulting 3348 // extra micro-ops can significantly decrease throughput. 3349 const unsigned NumVectorInstToHideOverhead = 10; 3350 3351 // Cost modeling of Strided Access Computation is hidden by the indexing 3352 // modes of X86 regardless of the stride value. We dont believe that there 3353 // is a difference between constant strided access in gerenal and constant 3354 // strided value which is less than or equal to 64. 3355 // Even in the case of (loop invariant) stride whose value is not known at 3356 // compile time, the address computation will not incur more than one extra 3357 // ADD instruction. 3358 if (Ty->isVectorTy() && SE) { 3359 if (!BaseT::isStridedAccess(Ptr)) 3360 return NumVectorInstToHideOverhead; 3361 if (!BaseT::getConstantStrideStep(SE, Ptr)) 3362 return 1; 3363 } 3364 3365 return BaseT::getAddressComputationCost(Ty, SE, Ptr); 3366 } 3367 3368 InstructionCost 3369 X86TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy, 3370 bool IsPairwise, 3371 TTI::TargetCostKind CostKind) { 3372 // Just use the default implementation for pair reductions. 3373 if (IsPairwise) 3374 return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwise, CostKind); 3375 3376 // We use the Intel Architecture Code Analyzer(IACA) to measure the throughput 3377 // and make it as the cost. 3378 3379 static const CostTblEntry SLMCostTblNoPairWise[] = { 3380 { ISD::FADD, MVT::v2f64, 3 }, 3381 { ISD::ADD, MVT::v2i64, 5 }, 3382 }; 3383 3384 static const CostTblEntry SSE2CostTblNoPairWise[] = { 3385 { ISD::FADD, MVT::v2f64, 2 }, 3386 { ISD::FADD, MVT::v2f32, 2 }, 3387 { ISD::FADD, MVT::v4f32, 4 }, 3388 { ISD::ADD, MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6". 3389 { ISD::ADD, MVT::v2i32, 2 }, // FIXME: chosen to be less than v4i32 3390 { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3". 3391 { ISD::ADD, MVT::v2i16, 2 }, // The data reported by the IACA tool is "4.3". 3392 { ISD::ADD, MVT::v4i16, 3 }, // The data reported by the IACA tool is "4.3". 3393 { ISD::ADD, MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3". 3394 { ISD::ADD, MVT::v2i8, 2 }, 3395 { ISD::ADD, MVT::v4i8, 2 }, 3396 { ISD::ADD, MVT::v8i8, 2 }, 3397 { ISD::ADD, MVT::v16i8, 3 }, 3398 }; 3399 3400 static const CostTblEntry AVX1CostTblNoPairWise[] = { 3401 { ISD::FADD, MVT::v4f64, 3 }, 3402 { ISD::FADD, MVT::v4f32, 3 }, 3403 { ISD::FADD, MVT::v8f32, 4 }, 3404 { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5". 3405 { ISD::ADD, MVT::v4i64, 3 }, 3406 { ISD::ADD, MVT::v8i32, 5 }, 3407 { ISD::ADD, MVT::v16i16, 5 }, 3408 { ISD::ADD, MVT::v32i8, 4 }, 3409 }; 3410 3411 int ISD = TLI->InstructionOpcodeToISD(Opcode); 3412 assert(ISD && "Invalid opcode"); 3413 3414 // Before legalizing the type, give a chance to look up illegal narrow types 3415 // in the table. 3416 // FIXME: Is there a better way to do this? 3417 EVT VT = TLI->getValueType(DL, ValTy); 3418 if (VT.isSimple()) { 3419 MVT MTy = VT.getSimpleVT(); 3420 if (ST->isSLM()) 3421 if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy)) 3422 return Entry->Cost; 3423 3424 if (ST->hasAVX()) 3425 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy)) 3426 return Entry->Cost; 3427 3428 if (ST->hasSSE2()) 3429 if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy)) 3430 return Entry->Cost; 3431 } 3432 3433 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy); 3434 3435 MVT MTy = LT.second; 3436 3437 auto *ValVTy = cast<FixedVectorType>(ValTy); 3438 3439 // Special case: vXi8 mul reductions are performed as vXi16. 3440 if (ISD == ISD::MUL && MTy.getScalarType() == MVT::i8) { 3441 auto *WideSclTy = IntegerType::get(ValVTy->getContext(), 16); 3442 auto *WideVecTy = FixedVectorType::get(WideSclTy, ValVTy->getNumElements()); 3443 return getCastInstrCost(Instruction::ZExt, WideVecTy, ValTy, 3444 TargetTransformInfo::CastContextHint::None, 3445 CostKind) + 3446 getArithmeticReductionCost(Opcode, WideVecTy, IsPairwise, CostKind); 3447 } 3448 3449 InstructionCost ArithmeticCost = 0; 3450 if (LT.first != 1 && MTy.isVector() && 3451 MTy.getVectorNumElements() < ValVTy->getNumElements()) { 3452 // Type needs to be split. We need LT.first - 1 arithmetic ops. 3453 auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(), 3454 MTy.getVectorNumElements()); 3455 ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind); 3456 ArithmeticCost *= LT.first - 1; 3457 } 3458 3459 if (ST->isSLM()) 3460 if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy)) 3461 return ArithmeticCost + Entry->Cost; 3462 3463 if (ST->hasAVX()) 3464 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy)) 3465 return ArithmeticCost + Entry->Cost; 3466 3467 if (ST->hasSSE2()) 3468 if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy)) 3469 return ArithmeticCost + Entry->Cost; 3470 3471 // FIXME: These assume a naive kshift+binop lowering, which is probably 3472 // conservative in most cases. 3473 static const CostTblEntry AVX512BoolReduction[] = { 3474 { ISD::AND, MVT::v2i1, 3 }, 3475 { ISD::AND, MVT::v4i1, 5 }, 3476 { ISD::AND, MVT::v8i1, 7 }, 3477 { ISD::AND, MVT::v16i1, 9 }, 3478 { ISD::AND, MVT::v32i1, 11 }, 3479 { ISD::AND, MVT::v64i1, 13 }, 3480 { ISD::OR, MVT::v2i1, 3 }, 3481 { ISD::OR, MVT::v4i1, 5 }, 3482 { ISD::OR, MVT::v8i1, 7 }, 3483 { ISD::OR, MVT::v16i1, 9 }, 3484 { ISD::OR, MVT::v32i1, 11 }, 3485 { ISD::OR, MVT::v64i1, 13 }, 3486 }; 3487 3488 static const CostTblEntry AVX2BoolReduction[] = { 3489 { ISD::AND, MVT::v16i16, 2 }, // vpmovmskb + cmp 3490 { ISD::AND, MVT::v32i8, 2 }, // vpmovmskb + cmp 3491 { ISD::OR, MVT::v16i16, 2 }, // vpmovmskb + cmp 3492 { ISD::OR, MVT::v32i8, 2 }, // vpmovmskb + cmp 3493 }; 3494 3495 static const CostTblEntry AVX1BoolReduction[] = { 3496 { ISD::AND, MVT::v4i64, 2 }, // vmovmskpd + cmp 3497 { ISD::AND, MVT::v8i32, 2 }, // vmovmskps + cmp 3498 { ISD::AND, MVT::v16i16, 4 }, // vextractf128 + vpand + vpmovmskb + cmp 3499 { ISD::AND, MVT::v32i8, 4 }, // vextractf128 + vpand + vpmovmskb + cmp 3500 { ISD::OR, MVT::v4i64, 2 }, // vmovmskpd + cmp 3501 { ISD::OR, MVT::v8i32, 2 }, // vmovmskps + cmp 3502 { ISD::OR, MVT::v16i16, 4 }, // vextractf128 + vpor + vpmovmskb + cmp 3503 { ISD::OR, MVT::v32i8, 4 }, // vextractf128 + vpor + vpmovmskb + cmp 3504 }; 3505 3506 static const CostTblEntry SSE2BoolReduction[] = { 3507 { ISD::AND, MVT::v2i64, 2 }, // movmskpd + cmp 3508 { ISD::AND, MVT::v4i32, 2 }, // movmskps + cmp 3509 { ISD::AND, MVT::v8i16, 2 }, // pmovmskb + cmp 3510 { ISD::AND, MVT::v16i8, 2 }, // pmovmskb + cmp 3511 { ISD::OR, MVT::v2i64, 2 }, // movmskpd + cmp 3512 { ISD::OR, MVT::v4i32, 2 }, // movmskps + cmp 3513 { ISD::OR, MVT::v8i16, 2 }, // pmovmskb + cmp 3514 { ISD::OR, MVT::v16i8, 2 }, // pmovmskb + cmp 3515 }; 3516 3517 // Handle bool allof/anyof patterns. 3518 if (ValVTy->getElementType()->isIntegerTy(1)) { 3519 InstructionCost ArithmeticCost = 0; 3520 if (LT.first != 1 && MTy.isVector() && 3521 MTy.getVectorNumElements() < ValVTy->getNumElements()) { 3522 // Type needs to be split. We need LT.first - 1 arithmetic ops. 3523 auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(), 3524 MTy.getVectorNumElements()); 3525 ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind); 3526 ArithmeticCost *= LT.first - 1; 3527 } 3528 3529 if (ST->hasAVX512()) 3530 if (const auto *Entry = CostTableLookup(AVX512BoolReduction, ISD, MTy)) 3531 return ArithmeticCost + Entry->Cost; 3532 if (ST->hasAVX2()) 3533 if (const auto *Entry = CostTableLookup(AVX2BoolReduction, ISD, MTy)) 3534 return ArithmeticCost + Entry->Cost; 3535 if (ST->hasAVX()) 3536 if (const auto *Entry = CostTableLookup(AVX1BoolReduction, ISD, MTy)) 3537 return ArithmeticCost + Entry->Cost; 3538 if (ST->hasSSE2()) 3539 if (const auto *Entry = CostTableLookup(SSE2BoolReduction, ISD, MTy)) 3540 return ArithmeticCost + Entry->Cost; 3541 3542 return BaseT::getArithmeticReductionCost(Opcode, ValVTy, IsPairwise, 3543 CostKind); 3544 } 3545 3546 unsigned NumVecElts = ValVTy->getNumElements(); 3547 unsigned ScalarSize = ValVTy->getScalarSizeInBits(); 3548 3549 // Special case power of 2 reductions where the scalar type isn't changed 3550 // by type legalization. 3551 if (!isPowerOf2_32(NumVecElts) || ScalarSize != MTy.getScalarSizeInBits()) 3552 return BaseT::getArithmeticReductionCost(Opcode, ValVTy, IsPairwise, 3553 CostKind); 3554 3555 InstructionCost ReductionCost = 0; 3556 3557 auto *Ty = ValVTy; 3558 if (LT.first != 1 && MTy.isVector() && 3559 MTy.getVectorNumElements() < ValVTy->getNumElements()) { 3560 // Type needs to be split. We need LT.first - 1 arithmetic ops. 3561 Ty = FixedVectorType::get(ValVTy->getElementType(), 3562 MTy.getVectorNumElements()); 3563 ReductionCost = getArithmeticInstrCost(Opcode, Ty, CostKind); 3564 ReductionCost *= LT.first - 1; 3565 NumVecElts = MTy.getVectorNumElements(); 3566 } 3567 3568 // Now handle reduction with the legal type, taking into account size changes 3569 // at each level. 3570 while (NumVecElts > 1) { 3571 // Determine the size of the remaining vector we need to reduce. 3572 unsigned Size = NumVecElts * ScalarSize; 3573 NumVecElts /= 2; 3574 // If we're reducing from 256/512 bits, use an extract_subvector. 3575 if (Size > 128) { 3576 auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts); 3577 ReductionCost += 3578 getShuffleCost(TTI::SK_ExtractSubvector, Ty, None, NumVecElts, SubTy); 3579 Ty = SubTy; 3580 } else if (Size == 128) { 3581 // Reducing from 128 bits is a permute of v2f64/v2i64. 3582 FixedVectorType *ShufTy; 3583 if (ValVTy->isFloatingPointTy()) 3584 ShufTy = 3585 FixedVectorType::get(Type::getDoubleTy(ValVTy->getContext()), 2); 3586 else 3587 ShufTy = 3588 FixedVectorType::get(Type::getInt64Ty(ValVTy->getContext()), 2); 3589 ReductionCost += 3590 getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr); 3591 } else if (Size == 64) { 3592 // Reducing from 64 bits is a shuffle of v4f32/v4i32. 3593 FixedVectorType *ShufTy; 3594 if (ValVTy->isFloatingPointTy()) 3595 ShufTy = 3596 FixedVectorType::get(Type::getFloatTy(ValVTy->getContext()), 4); 3597 else 3598 ShufTy = 3599 FixedVectorType::get(Type::getInt32Ty(ValVTy->getContext()), 4); 3600 ReductionCost += 3601 getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr); 3602 } else { 3603 // Reducing from smaller size is a shift by immediate. 3604 auto *ShiftTy = FixedVectorType::get( 3605 Type::getIntNTy(ValVTy->getContext(), Size), 128 / Size); 3606 ReductionCost += getArithmeticInstrCost( 3607 Instruction::LShr, ShiftTy, CostKind, 3608 TargetTransformInfo::OK_AnyValue, 3609 TargetTransformInfo::OK_UniformConstantValue, 3610 TargetTransformInfo::OP_None, TargetTransformInfo::OP_None); 3611 } 3612 3613 // Add the arithmetic op for this level. 3614 ReductionCost += getArithmeticInstrCost(Opcode, Ty, CostKind); 3615 } 3616 3617 // Add the final extract element to the cost. 3618 return ReductionCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0); 3619 } 3620 3621 InstructionCost X86TTIImpl::getMinMaxCost(Type *Ty, Type *CondTy, 3622 bool IsUnsigned) { 3623 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty); 3624 3625 MVT MTy = LT.second; 3626 3627 int ISD; 3628 if (Ty->isIntOrIntVectorTy()) { 3629 ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN; 3630 } else { 3631 assert(Ty->isFPOrFPVectorTy() && 3632 "Expected float point or integer vector type."); 3633 ISD = ISD::FMINNUM; 3634 } 3635 3636 static const CostTblEntry SSE1CostTbl[] = { 3637 {ISD::FMINNUM, MVT::v4f32, 1}, 3638 }; 3639 3640 static const CostTblEntry SSE2CostTbl[] = { 3641 {ISD::FMINNUM, MVT::v2f64, 1}, 3642 {ISD::SMIN, MVT::v8i16, 1}, 3643 {ISD::UMIN, MVT::v16i8, 1}, 3644 }; 3645 3646 static const CostTblEntry SSE41CostTbl[] = { 3647 {ISD::SMIN, MVT::v4i32, 1}, 3648 {ISD::UMIN, MVT::v4i32, 1}, 3649 {ISD::UMIN, MVT::v8i16, 1}, 3650 {ISD::SMIN, MVT::v16i8, 1}, 3651 }; 3652 3653 static const CostTblEntry SSE42CostTbl[] = { 3654 {ISD::UMIN, MVT::v2i64, 3}, // xor+pcmpgtq+blendvpd 3655 }; 3656 3657 static const CostTblEntry AVX1CostTbl[] = { 3658 {ISD::FMINNUM, MVT::v8f32, 1}, 3659 {ISD::FMINNUM, MVT::v4f64, 1}, 3660 {ISD::SMIN, MVT::v8i32, 3}, 3661 {ISD::UMIN, MVT::v8i32, 3}, 3662 {ISD::SMIN, MVT::v16i16, 3}, 3663 {ISD::UMIN, MVT::v16i16, 3}, 3664 {ISD::SMIN, MVT::v32i8, 3}, 3665 {ISD::UMIN, MVT::v32i8, 3}, 3666 }; 3667 3668 static const CostTblEntry AVX2CostTbl[] = { 3669 {ISD::SMIN, MVT::v8i32, 1}, 3670 {ISD::UMIN, MVT::v8i32, 1}, 3671 {ISD::SMIN, MVT::v16i16, 1}, 3672 {ISD::UMIN, MVT::v16i16, 1}, 3673 {ISD::SMIN, MVT::v32i8, 1}, 3674 {ISD::UMIN, MVT::v32i8, 1}, 3675 }; 3676 3677 static const CostTblEntry AVX512CostTbl[] = { 3678 {ISD::FMINNUM, MVT::v16f32, 1}, 3679 {ISD::FMINNUM, MVT::v8f64, 1}, 3680 {ISD::SMIN, MVT::v2i64, 1}, 3681 {ISD::UMIN, MVT::v2i64, 1}, 3682 {ISD::SMIN, MVT::v4i64, 1}, 3683 {ISD::UMIN, MVT::v4i64, 1}, 3684 {ISD::SMIN, MVT::v8i64, 1}, 3685 {ISD::UMIN, MVT::v8i64, 1}, 3686 {ISD::SMIN, MVT::v16i32, 1}, 3687 {ISD::UMIN, MVT::v16i32, 1}, 3688 }; 3689 3690 static const CostTblEntry AVX512BWCostTbl[] = { 3691 {ISD::SMIN, MVT::v32i16, 1}, 3692 {ISD::UMIN, MVT::v32i16, 1}, 3693 {ISD::SMIN, MVT::v64i8, 1}, 3694 {ISD::UMIN, MVT::v64i8, 1}, 3695 }; 3696 3697 // If we have a native MIN/MAX instruction for this type, use it. 3698 if (ST->hasBWI()) 3699 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy)) 3700 return LT.first * Entry->Cost; 3701 3702 if (ST->hasAVX512()) 3703 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy)) 3704 return LT.first * Entry->Cost; 3705 3706 if (ST->hasAVX2()) 3707 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy)) 3708 return LT.first * Entry->Cost; 3709 3710 if (ST->hasAVX()) 3711 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy)) 3712 return LT.first * Entry->Cost; 3713 3714 if (ST->hasSSE42()) 3715 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy)) 3716 return LT.first * Entry->Cost; 3717 3718 if (ST->hasSSE41()) 3719 if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy)) 3720 return LT.first * Entry->Cost; 3721 3722 if (ST->hasSSE2()) 3723 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy)) 3724 return LT.first * Entry->Cost; 3725 3726 if (ST->hasSSE1()) 3727 if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy)) 3728 return LT.first * Entry->Cost; 3729 3730 unsigned CmpOpcode; 3731 if (Ty->isFPOrFPVectorTy()) { 3732 CmpOpcode = Instruction::FCmp; 3733 } else { 3734 assert(Ty->isIntOrIntVectorTy() && 3735 "expecting floating point or integer type for min/max reduction"); 3736 CmpOpcode = Instruction::ICmp; 3737 } 3738 3739 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 3740 // Otherwise fall back to cmp+select. 3741 InstructionCost Result = 3742 getCmpSelInstrCost(CmpOpcode, Ty, CondTy, CmpInst::BAD_ICMP_PREDICATE, 3743 CostKind) + 3744 getCmpSelInstrCost(Instruction::Select, Ty, CondTy, 3745 CmpInst::BAD_ICMP_PREDICATE, CostKind); 3746 return Result; 3747 } 3748 3749 InstructionCost 3750 X86TTIImpl::getMinMaxReductionCost(VectorType *ValTy, VectorType *CondTy, 3751 bool IsPairwise, bool IsUnsigned, 3752 TTI::TargetCostKind CostKind) { 3753 // Just use the default implementation for pair reductions. 3754 if (IsPairwise) 3755 return BaseT::getMinMaxReductionCost(ValTy, CondTy, IsPairwise, IsUnsigned, 3756 CostKind); 3757 3758 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy); 3759 3760 MVT MTy = LT.second; 3761 3762 int ISD; 3763 if (ValTy->isIntOrIntVectorTy()) { 3764 ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN; 3765 } else { 3766 assert(ValTy->isFPOrFPVectorTy() && 3767 "Expected float point or integer vector type."); 3768 ISD = ISD::FMINNUM; 3769 } 3770 3771 // We use the Intel Architecture Code Analyzer(IACA) to measure the throughput 3772 // and make it as the cost. 3773 3774 static const CostTblEntry SSE2CostTblNoPairWise[] = { 3775 {ISD::UMIN, MVT::v2i16, 5}, // need pxors to use pminsw/pmaxsw 3776 {ISD::UMIN, MVT::v4i16, 7}, // need pxors to use pminsw/pmaxsw 3777 {ISD::UMIN, MVT::v8i16, 9}, // need pxors to use pminsw/pmaxsw 3778 }; 3779 3780 static const CostTblEntry SSE41CostTblNoPairWise[] = { 3781 {ISD::SMIN, MVT::v2i16, 3}, // same as sse2 3782 {ISD::SMIN, MVT::v4i16, 5}, // same as sse2 3783 {ISD::UMIN, MVT::v2i16, 5}, // same as sse2 3784 {ISD::UMIN, MVT::v4i16, 7}, // same as sse2 3785 {ISD::SMIN, MVT::v8i16, 4}, // phminposuw+xor 3786 {ISD::UMIN, MVT::v8i16, 4}, // FIXME: umin is cheaper than umax 3787 {ISD::SMIN, MVT::v2i8, 3}, // pminsb 3788 {ISD::SMIN, MVT::v4i8, 5}, // pminsb 3789 {ISD::SMIN, MVT::v8i8, 7}, // pminsb 3790 {ISD::SMIN, MVT::v16i8, 6}, 3791 {ISD::UMIN, MVT::v2i8, 3}, // same as sse2 3792 {ISD::UMIN, MVT::v4i8, 5}, // same as sse2 3793 {ISD::UMIN, MVT::v8i8, 7}, // same as sse2 3794 {ISD::UMIN, MVT::v16i8, 6}, // FIXME: umin is cheaper than umax 3795 }; 3796 3797 static const CostTblEntry AVX1CostTblNoPairWise[] = { 3798 {ISD::SMIN, MVT::v16i16, 6}, 3799 {ISD::UMIN, MVT::v16i16, 6}, // FIXME: umin is cheaper than umax 3800 {ISD::SMIN, MVT::v32i8, 8}, 3801 {ISD::UMIN, MVT::v32i8, 8}, 3802 }; 3803 3804 static const CostTblEntry AVX512BWCostTblNoPairWise[] = { 3805 {ISD::SMIN, MVT::v32i16, 8}, 3806 {ISD::UMIN, MVT::v32i16, 8}, // FIXME: umin is cheaper than umax 3807 {ISD::SMIN, MVT::v64i8, 10}, 3808 {ISD::UMIN, MVT::v64i8, 10}, 3809 }; 3810 3811 // Before legalizing the type, give a chance to look up illegal narrow types 3812 // in the table. 3813 // FIXME: Is there a better way to do this? 3814 EVT VT = TLI->getValueType(DL, ValTy); 3815 if (VT.isSimple()) { 3816 MVT MTy = VT.getSimpleVT(); 3817 if (ST->hasBWI()) 3818 if (const auto *Entry = CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy)) 3819 return Entry->Cost; 3820 3821 if (ST->hasAVX()) 3822 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy)) 3823 return Entry->Cost; 3824 3825 if (ST->hasSSE41()) 3826 if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy)) 3827 return Entry->Cost; 3828 3829 if (ST->hasSSE2()) 3830 if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy)) 3831 return Entry->Cost; 3832 } 3833 3834 auto *ValVTy = cast<FixedVectorType>(ValTy); 3835 unsigned NumVecElts = ValVTy->getNumElements(); 3836 3837 auto *Ty = ValVTy; 3838 InstructionCost MinMaxCost = 0; 3839 if (LT.first != 1 && MTy.isVector() && 3840 MTy.getVectorNumElements() < ValVTy->getNumElements()) { 3841 // Type needs to be split. We need LT.first - 1 operations ops. 3842 Ty = FixedVectorType::get(ValVTy->getElementType(), 3843 MTy.getVectorNumElements()); 3844 auto *SubCondTy = FixedVectorType::get(CondTy->getElementType(), 3845 MTy.getVectorNumElements()); 3846 MinMaxCost = getMinMaxCost(Ty, SubCondTy, IsUnsigned); 3847 MinMaxCost *= LT.first - 1; 3848 NumVecElts = MTy.getVectorNumElements(); 3849 } 3850 3851 if (ST->hasBWI()) 3852 if (const auto *Entry = CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy)) 3853 return MinMaxCost + Entry->Cost; 3854 3855 if (ST->hasAVX()) 3856 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy)) 3857 return MinMaxCost + Entry->Cost; 3858 3859 if (ST->hasSSE41()) 3860 if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy)) 3861 return MinMaxCost + Entry->Cost; 3862 3863 if (ST->hasSSE2()) 3864 if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy)) 3865 return MinMaxCost + Entry->Cost; 3866 3867 unsigned ScalarSize = ValTy->getScalarSizeInBits(); 3868 3869 // Special case power of 2 reductions where the scalar type isn't changed 3870 // by type legalization. 3871 if (!isPowerOf2_32(ValVTy->getNumElements()) || 3872 ScalarSize != MTy.getScalarSizeInBits()) 3873 return BaseT::getMinMaxReductionCost(ValTy, CondTy, IsPairwise, IsUnsigned, 3874 CostKind); 3875 3876 // Now handle reduction with the legal type, taking into account size changes 3877 // at each level. 3878 while (NumVecElts > 1) { 3879 // Determine the size of the remaining vector we need to reduce. 3880 unsigned Size = NumVecElts * ScalarSize; 3881 NumVecElts /= 2; 3882 // If we're reducing from 256/512 bits, use an extract_subvector. 3883 if (Size > 128) { 3884 auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts); 3885 MinMaxCost += 3886 getShuffleCost(TTI::SK_ExtractSubvector, Ty, None, NumVecElts, SubTy); 3887 Ty = SubTy; 3888 } else if (Size == 128) { 3889 // Reducing from 128 bits is a permute of v2f64/v2i64. 3890 VectorType *ShufTy; 3891 if (ValTy->isFloatingPointTy()) 3892 ShufTy = 3893 FixedVectorType::get(Type::getDoubleTy(ValTy->getContext()), 2); 3894 else 3895 ShufTy = FixedVectorType::get(Type::getInt64Ty(ValTy->getContext()), 2); 3896 MinMaxCost += 3897 getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr); 3898 } else if (Size == 64) { 3899 // Reducing from 64 bits is a shuffle of v4f32/v4i32. 3900 FixedVectorType *ShufTy; 3901 if (ValTy->isFloatingPointTy()) 3902 ShufTy = FixedVectorType::get(Type::getFloatTy(ValTy->getContext()), 4); 3903 else 3904 ShufTy = FixedVectorType::get(Type::getInt32Ty(ValTy->getContext()), 4); 3905 MinMaxCost += 3906 getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr); 3907 } else { 3908 // Reducing from smaller size is a shift by immediate. 3909 auto *ShiftTy = FixedVectorType::get( 3910 Type::getIntNTy(ValTy->getContext(), Size), 128 / Size); 3911 MinMaxCost += getArithmeticInstrCost( 3912 Instruction::LShr, ShiftTy, TTI::TCK_RecipThroughput, 3913 TargetTransformInfo::OK_AnyValue, 3914 TargetTransformInfo::OK_UniformConstantValue, 3915 TargetTransformInfo::OP_None, TargetTransformInfo::OP_None); 3916 } 3917 3918 // Add the arithmetic op for this level. 3919 auto *SubCondTy = 3920 FixedVectorType::get(CondTy->getElementType(), Ty->getNumElements()); 3921 MinMaxCost += getMinMaxCost(Ty, SubCondTy, IsUnsigned); 3922 } 3923 3924 // Add the final extract element to the cost. 3925 return MinMaxCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0); 3926 } 3927 3928 /// Calculate the cost of materializing a 64-bit value. This helper 3929 /// method might only calculate a fraction of a larger immediate. Therefore it 3930 /// is valid to return a cost of ZERO. 3931 InstructionCost X86TTIImpl::getIntImmCost(int64_t Val) { 3932 if (Val == 0) 3933 return TTI::TCC_Free; 3934 3935 if (isInt<32>(Val)) 3936 return TTI::TCC_Basic; 3937 3938 return 2 * TTI::TCC_Basic; 3939 } 3940 3941 InstructionCost X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty, 3942 TTI::TargetCostKind CostKind) { 3943 assert(Ty->isIntegerTy()); 3944 3945 unsigned BitSize = Ty->getPrimitiveSizeInBits(); 3946 if (BitSize == 0) 3947 return ~0U; 3948 3949 // Never hoist constants larger than 128bit, because this might lead to 3950 // incorrect code generation or assertions in codegen. 3951 // Fixme: Create a cost model for types larger than i128 once the codegen 3952 // issues have been fixed. 3953 if (BitSize > 128) 3954 return TTI::TCC_Free; 3955 3956 if (Imm == 0) 3957 return TTI::TCC_Free; 3958 3959 // Sign-extend all constants to a multiple of 64-bit. 3960 APInt ImmVal = Imm; 3961 if (BitSize % 64 != 0) 3962 ImmVal = Imm.sext(alignTo(BitSize, 64)); 3963 3964 // Split the constant into 64-bit chunks and calculate the cost for each 3965 // chunk. 3966 InstructionCost Cost = 0; 3967 for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) { 3968 APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64); 3969 int64_t Val = Tmp.getSExtValue(); 3970 Cost += getIntImmCost(Val); 3971 } 3972 // We need at least one instruction to materialize the constant. 3973 return std::max<InstructionCost>(1, Cost); 3974 } 3975 3976 InstructionCost X86TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx, 3977 const APInt &Imm, Type *Ty, 3978 TTI::TargetCostKind CostKind, 3979 Instruction *Inst) { 3980 assert(Ty->isIntegerTy()); 3981 3982 unsigned BitSize = Ty->getPrimitiveSizeInBits(); 3983 // There is no cost model for constants with a bit size of 0. Return TCC_Free 3984 // here, so that constant hoisting will ignore this constant. 3985 if (BitSize == 0) 3986 return TTI::TCC_Free; 3987 3988 unsigned ImmIdx = ~0U; 3989 switch (Opcode) { 3990 default: 3991 return TTI::TCC_Free; 3992 case Instruction::GetElementPtr: 3993 // Always hoist the base address of a GetElementPtr. This prevents the 3994 // creation of new constants for every base constant that gets constant 3995 // folded with the offset. 3996 if (Idx == 0) 3997 return 2 * TTI::TCC_Basic; 3998 return TTI::TCC_Free; 3999 case Instruction::Store: 4000 ImmIdx = 0; 4001 break; 4002 case Instruction::ICmp: 4003 // This is an imperfect hack to prevent constant hoisting of 4004 // compares that might be trying to check if a 64-bit value fits in 4005 // 32-bits. The backend can optimize these cases using a right shift by 32. 4006 // Ideally we would check the compare predicate here. There also other 4007 // similar immediates the backend can use shifts for. 4008 if (Idx == 1 && Imm.getBitWidth() == 64) { 4009 uint64_t ImmVal = Imm.getZExtValue(); 4010 if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff) 4011 return TTI::TCC_Free; 4012 } 4013 ImmIdx = 1; 4014 break; 4015 case Instruction::And: 4016 // We support 64-bit ANDs with immediates with 32-bits of leading zeroes 4017 // by using a 32-bit operation with implicit zero extension. Detect such 4018 // immediates here as the normal path expects bit 31 to be sign extended. 4019 if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue())) 4020 return TTI::TCC_Free; 4021 ImmIdx = 1; 4022 break; 4023 case Instruction::Add: 4024 case Instruction::Sub: 4025 // For add/sub, we can use the opposite instruction for INT32_MIN. 4026 if (Idx == 1 && Imm.getBitWidth() == 64 && Imm.getZExtValue() == 0x80000000) 4027 return TTI::TCC_Free; 4028 ImmIdx = 1; 4029 break; 4030 case Instruction::UDiv: 4031 case Instruction::SDiv: 4032 case Instruction::URem: 4033 case Instruction::SRem: 4034 // Division by constant is typically expanded later into a different 4035 // instruction sequence. This completely changes the constants. 4036 // Report them as "free" to stop ConstantHoist from marking them as opaque. 4037 return TTI::TCC_Free; 4038 case Instruction::Mul: 4039 case Instruction::Or: 4040 case Instruction::Xor: 4041 ImmIdx = 1; 4042 break; 4043 // Always return TCC_Free for the shift value of a shift instruction. 4044 case Instruction::Shl: 4045 case Instruction::LShr: 4046 case Instruction::AShr: 4047 if (Idx == 1) 4048 return TTI::TCC_Free; 4049 break; 4050 case Instruction::Trunc: 4051 case Instruction::ZExt: 4052 case Instruction::SExt: 4053 case Instruction::IntToPtr: 4054 case Instruction::PtrToInt: 4055 case Instruction::BitCast: 4056 case Instruction::PHI: 4057 case Instruction::Call: 4058 case Instruction::Select: 4059 case Instruction::Ret: 4060 case Instruction::Load: 4061 break; 4062 } 4063 4064 if (Idx == ImmIdx) { 4065 int NumConstants = divideCeil(BitSize, 64); 4066 InstructionCost Cost = X86TTIImpl::getIntImmCost(Imm, Ty, CostKind); 4067 return (Cost <= NumConstants * TTI::TCC_Basic) 4068 ? static_cast<int>(TTI::TCC_Free) 4069 : Cost; 4070 } 4071 4072 return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind); 4073 } 4074 4075 InstructionCost X86TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, 4076 const APInt &Imm, Type *Ty, 4077 TTI::TargetCostKind CostKind) { 4078 assert(Ty->isIntegerTy()); 4079 4080 unsigned BitSize = Ty->getPrimitiveSizeInBits(); 4081 // There is no cost model for constants with a bit size of 0. Return TCC_Free 4082 // here, so that constant hoisting will ignore this constant. 4083 if (BitSize == 0) 4084 return TTI::TCC_Free; 4085 4086 switch (IID) { 4087 default: 4088 return TTI::TCC_Free; 4089 case Intrinsic::sadd_with_overflow: 4090 case Intrinsic::uadd_with_overflow: 4091 case Intrinsic::ssub_with_overflow: 4092 case Intrinsic::usub_with_overflow: 4093 case Intrinsic::smul_with_overflow: 4094 case Intrinsic::umul_with_overflow: 4095 if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue())) 4096 return TTI::TCC_Free; 4097 break; 4098 case Intrinsic::experimental_stackmap: 4099 if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue()))) 4100 return TTI::TCC_Free; 4101 break; 4102 case Intrinsic::experimental_patchpoint_void: 4103 case Intrinsic::experimental_patchpoint_i64: 4104 if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue()))) 4105 return TTI::TCC_Free; 4106 break; 4107 } 4108 return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind); 4109 } 4110 4111 InstructionCost X86TTIImpl::getCFInstrCost(unsigned Opcode, 4112 TTI::TargetCostKind CostKind, 4113 const Instruction *I) { 4114 if (CostKind != TTI::TCK_RecipThroughput) 4115 return Opcode == Instruction::PHI ? 0 : 1; 4116 // Branches are assumed to be predicted. 4117 return 0; 4118 } 4119 4120 int X86TTIImpl::getGatherOverhead() const { 4121 // Some CPUs have more overhead for gather. The specified overhead is relative 4122 // to the Load operation. "2" is the number provided by Intel architects. This 4123 // parameter is used for cost estimation of Gather Op and comparison with 4124 // other alternatives. 4125 // TODO: Remove the explicit hasAVX512()?, That would mean we would only 4126 // enable gather with a -march. 4127 if (ST->hasAVX512() || (ST->hasAVX2() && ST->hasFastGather())) 4128 return 2; 4129 4130 return 1024; 4131 } 4132 4133 int X86TTIImpl::getScatterOverhead() const { 4134 if (ST->hasAVX512()) 4135 return 2; 4136 4137 return 1024; 4138 } 4139 4140 // Return an average cost of Gather / Scatter instruction, maybe improved later. 4141 // FIXME: Add TargetCostKind support. 4142 InstructionCost X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy, 4143 const Value *Ptr, Align Alignment, 4144 unsigned AddressSpace) { 4145 4146 assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost"); 4147 unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements(); 4148 4149 // Try to reduce index size from 64 bit (default for GEP) 4150 // to 32. It is essential for VF 16. If the index can't be reduced to 32, the 4151 // operation will use 16 x 64 indices which do not fit in a zmm and needs 4152 // to split. Also check that the base pointer is the same for all lanes, 4153 // and that there's at most one variable index. 4154 auto getIndexSizeInBits = [](const Value *Ptr, const DataLayout &DL) { 4155 unsigned IndexSize = DL.getPointerSizeInBits(); 4156 const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr); 4157 if (IndexSize < 64 || !GEP) 4158 return IndexSize; 4159 4160 unsigned NumOfVarIndices = 0; 4161 const Value *Ptrs = GEP->getPointerOperand(); 4162 if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs)) 4163 return IndexSize; 4164 for (unsigned i = 1; i < GEP->getNumOperands(); ++i) { 4165 if (isa<Constant>(GEP->getOperand(i))) 4166 continue; 4167 Type *IndxTy = GEP->getOperand(i)->getType(); 4168 if (auto *IndexVTy = dyn_cast<VectorType>(IndxTy)) 4169 IndxTy = IndexVTy->getElementType(); 4170 if ((IndxTy->getPrimitiveSizeInBits() == 64 && 4171 !isa<SExtInst>(GEP->getOperand(i))) || 4172 ++NumOfVarIndices > 1) 4173 return IndexSize; // 64 4174 } 4175 return (unsigned)32; 4176 }; 4177 4178 // Trying to reduce IndexSize to 32 bits for vector 16. 4179 // By default the IndexSize is equal to pointer size. 4180 unsigned IndexSize = (ST->hasAVX512() && VF >= 16) 4181 ? getIndexSizeInBits(Ptr, DL) 4182 : DL.getPointerSizeInBits(); 4183 4184 auto *IndexVTy = FixedVectorType::get( 4185 IntegerType::get(SrcVTy->getContext(), IndexSize), VF); 4186 std::pair<int, MVT> IdxsLT = TLI->getTypeLegalizationCost(DL, IndexVTy); 4187 std::pair<int, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, SrcVTy); 4188 int SplitFactor = std::max(IdxsLT.first, SrcLT.first); 4189 if (SplitFactor > 1) { 4190 // Handle splitting of vector of pointers 4191 auto *SplitSrcTy = 4192 FixedVectorType::get(SrcVTy->getScalarType(), VF / SplitFactor); 4193 return SplitFactor * getGSVectorCost(Opcode, SplitSrcTy, Ptr, Alignment, 4194 AddressSpace); 4195 } 4196 4197 // The gather / scatter cost is given by Intel architects. It is a rough 4198 // number since we are looking at one instruction in a time. 4199 const int GSOverhead = (Opcode == Instruction::Load) 4200 ? getGatherOverhead() 4201 : getScatterOverhead(); 4202 return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(), 4203 MaybeAlign(Alignment), AddressSpace, 4204 TTI::TCK_RecipThroughput); 4205 } 4206 4207 /// Return the cost of full scalarization of gather / scatter operation. 4208 /// 4209 /// Opcode - Load or Store instruction. 4210 /// SrcVTy - The type of the data vector that should be gathered or scattered. 4211 /// VariableMask - The mask is non-constant at compile time. 4212 /// Alignment - Alignment for one element. 4213 /// AddressSpace - pointer[s] address space. 4214 /// 4215 /// FIXME: Add TargetCostKind support. 4216 InstructionCost X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy, 4217 bool VariableMask, Align Alignment, 4218 unsigned AddressSpace) { 4219 unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements(); 4220 APInt DemandedElts = APInt::getAllOnesValue(VF); 4221 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; 4222 4223 InstructionCost MaskUnpackCost = 0; 4224 if (VariableMask) { 4225 auto *MaskTy = 4226 FixedVectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF); 4227 MaskUnpackCost = 4228 getScalarizationOverhead(MaskTy, DemandedElts, false, true); 4229 InstructionCost ScalarCompareCost = getCmpSelInstrCost( 4230 Instruction::ICmp, Type::getInt1Ty(SrcVTy->getContext()), nullptr, 4231 CmpInst::BAD_ICMP_PREDICATE, CostKind); 4232 InstructionCost BranchCost = getCFInstrCost(Instruction::Br, CostKind); 4233 MaskUnpackCost += VF * (BranchCost + ScalarCompareCost); 4234 } 4235 4236 // The cost of the scalar loads/stores. 4237 InstructionCost MemoryOpCost = 4238 VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(), 4239 MaybeAlign(Alignment), AddressSpace, CostKind); 4240 4241 InstructionCost InsertExtractCost = 0; 4242 if (Opcode == Instruction::Load) 4243 for (unsigned i = 0; i < VF; ++i) 4244 // Add the cost of inserting each scalar load into the vector 4245 InsertExtractCost += 4246 getVectorInstrCost(Instruction::InsertElement, SrcVTy, i); 4247 else 4248 for (unsigned i = 0; i < VF; ++i) 4249 // Add the cost of extracting each element out of the data vector 4250 InsertExtractCost += 4251 getVectorInstrCost(Instruction::ExtractElement, SrcVTy, i); 4252 4253 return MemoryOpCost + MaskUnpackCost + InsertExtractCost; 4254 } 4255 4256 /// Calculate the cost of Gather / Scatter operation 4257 InstructionCost X86TTIImpl::getGatherScatterOpCost( 4258 unsigned Opcode, Type *SrcVTy, const Value *Ptr, bool VariableMask, 4259 Align Alignment, TTI::TargetCostKind CostKind, 4260 const Instruction *I = nullptr) { 4261 if (CostKind != TTI::TCK_RecipThroughput) { 4262 if ((Opcode == Instruction::Load && 4263 isLegalMaskedGather(SrcVTy, Align(Alignment))) || 4264 (Opcode == Instruction::Store && 4265 isLegalMaskedScatter(SrcVTy, Align(Alignment)))) 4266 return 1; 4267 return BaseT::getGatherScatterOpCost(Opcode, SrcVTy, Ptr, VariableMask, 4268 Alignment, CostKind, I); 4269 } 4270 4271 assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter"); 4272 unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements(); 4273 PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType()); 4274 if (!PtrTy && Ptr->getType()->isVectorTy()) 4275 PtrTy = dyn_cast<PointerType>( 4276 cast<VectorType>(Ptr->getType())->getElementType()); 4277 assert(PtrTy && "Unexpected type for Ptr argument"); 4278 unsigned AddressSpace = PtrTy->getAddressSpace(); 4279 4280 bool Scalarize = false; 4281 if ((Opcode == Instruction::Load && 4282 !isLegalMaskedGather(SrcVTy, Align(Alignment))) || 4283 (Opcode == Instruction::Store && 4284 !isLegalMaskedScatter(SrcVTy, Align(Alignment)))) 4285 Scalarize = true; 4286 // Gather / Scatter for vector 2 is not profitable on KNL / SKX 4287 // Vector-4 of gather/scatter instruction does not exist on KNL. 4288 // We can extend it to 8 elements, but zeroing upper bits of 4289 // the mask vector will add more instructions. Right now we give the scalar 4290 // cost of vector-4 for KNL. TODO: Check, maybe the gather/scatter instruction 4291 // is better in the VariableMask case. 4292 if (ST->hasAVX512() && (VF == 2 || (VF == 4 && !ST->hasVLX()))) 4293 Scalarize = true; 4294 4295 if (Scalarize) 4296 return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment, 4297 AddressSpace); 4298 4299 return getGSVectorCost(Opcode, SrcVTy, Ptr, Alignment, AddressSpace); 4300 } 4301 4302 bool X86TTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1, 4303 TargetTransformInfo::LSRCost &C2) { 4304 // X86 specific here are "instruction number 1st priority". 4305 return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost, 4306 C1.NumIVMuls, C1.NumBaseAdds, 4307 C1.ScaleCost, C1.ImmCost, C1.SetupCost) < 4308 std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost, 4309 C2.NumIVMuls, C2.NumBaseAdds, 4310 C2.ScaleCost, C2.ImmCost, C2.SetupCost); 4311 } 4312 4313 bool X86TTIImpl::canMacroFuseCmp() { 4314 return ST->hasMacroFusion() || ST->hasBranchFusion(); 4315 } 4316 4317 bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) { 4318 if (!ST->hasAVX()) 4319 return false; 4320 4321 // The backend can't handle a single element vector. 4322 if (isa<VectorType>(DataTy) && 4323 cast<FixedVectorType>(DataTy)->getNumElements() == 1) 4324 return false; 4325 Type *ScalarTy = DataTy->getScalarType(); 4326 4327 if (ScalarTy->isPointerTy()) 4328 return true; 4329 4330 if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy()) 4331 return true; 4332 4333 if (!ScalarTy->isIntegerTy()) 4334 return false; 4335 4336 unsigned IntWidth = ScalarTy->getIntegerBitWidth(); 4337 return IntWidth == 32 || IntWidth == 64 || 4338 ((IntWidth == 8 || IntWidth == 16) && ST->hasBWI()); 4339 } 4340 4341 bool X86TTIImpl::isLegalMaskedStore(Type *DataType, Align Alignment) { 4342 return isLegalMaskedLoad(DataType, Alignment); 4343 } 4344 4345 bool X86TTIImpl::isLegalNTLoad(Type *DataType, Align Alignment) { 4346 unsigned DataSize = DL.getTypeStoreSize(DataType); 4347 // The only supported nontemporal loads are for aligned vectors of 16 or 32 4348 // bytes. Note that 32-byte nontemporal vector loads are supported by AVX2 4349 // (the equivalent stores only require AVX). 4350 if (Alignment >= DataSize && (DataSize == 16 || DataSize == 32)) 4351 return DataSize == 16 ? ST->hasSSE1() : ST->hasAVX2(); 4352 4353 return false; 4354 } 4355 4356 bool X86TTIImpl::isLegalNTStore(Type *DataType, Align Alignment) { 4357 unsigned DataSize = DL.getTypeStoreSize(DataType); 4358 4359 // SSE4A supports nontemporal stores of float and double at arbitrary 4360 // alignment. 4361 if (ST->hasSSE4A() && (DataType->isFloatTy() || DataType->isDoubleTy())) 4362 return true; 4363 4364 // Besides the SSE4A subtarget exception above, only aligned stores are 4365 // available nontemporaly on any other subtarget. And only stores with a size 4366 // of 4..32 bytes (powers of 2, only) are permitted. 4367 if (Alignment < DataSize || DataSize < 4 || DataSize > 32 || 4368 !isPowerOf2_32(DataSize)) 4369 return false; 4370 4371 // 32-byte vector nontemporal stores are supported by AVX (the equivalent 4372 // loads require AVX2). 4373 if (DataSize == 32) 4374 return ST->hasAVX(); 4375 else if (DataSize == 16) 4376 return ST->hasSSE1(); 4377 return true; 4378 } 4379 4380 bool X86TTIImpl::isLegalMaskedExpandLoad(Type *DataTy) { 4381 if (!isa<VectorType>(DataTy)) 4382 return false; 4383 4384 if (!ST->hasAVX512()) 4385 return false; 4386 4387 // The backend can't handle a single element vector. 4388 if (cast<FixedVectorType>(DataTy)->getNumElements() == 1) 4389 return false; 4390 4391 Type *ScalarTy = cast<VectorType>(DataTy)->getElementType(); 4392 4393 if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy()) 4394 return true; 4395 4396 if (!ScalarTy->isIntegerTy()) 4397 return false; 4398 4399 unsigned IntWidth = ScalarTy->getIntegerBitWidth(); 4400 return IntWidth == 32 || IntWidth == 64 || 4401 ((IntWidth == 8 || IntWidth == 16) && ST->hasVBMI2()); 4402 } 4403 4404 bool X86TTIImpl::isLegalMaskedCompressStore(Type *DataTy) { 4405 return isLegalMaskedExpandLoad(DataTy); 4406 } 4407 4408 bool X86TTIImpl::isLegalMaskedGather(Type *DataTy, Align Alignment) { 4409 // Some CPUs have better gather performance than others. 4410 // TODO: Remove the explicit ST->hasAVX512()?, That would mean we would only 4411 // enable gather with a -march. 4412 if (!(ST->hasAVX512() || (ST->hasFastGather() && ST->hasAVX2()))) 4413 return false; 4414 4415 // This function is called now in two cases: from the Loop Vectorizer 4416 // and from the Scalarizer. 4417 // When the Loop Vectorizer asks about legality of the feature, 4418 // the vectorization factor is not calculated yet. The Loop Vectorizer 4419 // sends a scalar type and the decision is based on the width of the 4420 // scalar element. 4421 // Later on, the cost model will estimate usage this intrinsic based on 4422 // the vector type. 4423 // The Scalarizer asks again about legality. It sends a vector type. 4424 // In this case we can reject non-power-of-2 vectors. 4425 // We also reject single element vectors as the type legalizer can't 4426 // scalarize it. 4427 if (auto *DataVTy = dyn_cast<FixedVectorType>(DataTy)) { 4428 unsigned NumElts = DataVTy->getNumElements(); 4429 if (NumElts == 1) 4430 return false; 4431 } 4432 Type *ScalarTy = DataTy->getScalarType(); 4433 if (ScalarTy->isPointerTy()) 4434 return true; 4435 4436 if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy()) 4437 return true; 4438 4439 if (!ScalarTy->isIntegerTy()) 4440 return false; 4441 4442 unsigned IntWidth = ScalarTy->getIntegerBitWidth(); 4443 return IntWidth == 32 || IntWidth == 64; 4444 } 4445 4446 bool X86TTIImpl::isLegalMaskedScatter(Type *DataType, Align Alignment) { 4447 // AVX2 doesn't support scatter 4448 if (!ST->hasAVX512()) 4449 return false; 4450 return isLegalMaskedGather(DataType, Alignment); 4451 } 4452 4453 bool X86TTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) { 4454 EVT VT = TLI->getValueType(DL, DataType); 4455 return TLI->isOperationLegal(IsSigned ? ISD::SDIVREM : ISD::UDIVREM, VT); 4456 } 4457 4458 bool X86TTIImpl::isFCmpOrdCheaperThanFCmpZero(Type *Ty) { 4459 return false; 4460 } 4461 4462 bool X86TTIImpl::areInlineCompatible(const Function *Caller, 4463 const Function *Callee) const { 4464 const TargetMachine &TM = getTLI()->getTargetMachine(); 4465 4466 // Work this as a subsetting of subtarget features. 4467 const FeatureBitset &CallerBits = 4468 TM.getSubtargetImpl(*Caller)->getFeatureBits(); 4469 const FeatureBitset &CalleeBits = 4470 TM.getSubtargetImpl(*Callee)->getFeatureBits(); 4471 4472 FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList; 4473 FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList; 4474 return (RealCallerBits & RealCalleeBits) == RealCalleeBits; 4475 } 4476 4477 bool X86TTIImpl::areFunctionArgsABICompatible( 4478 const Function *Caller, const Function *Callee, 4479 SmallPtrSetImpl<Argument *> &Args) const { 4480 if (!BaseT::areFunctionArgsABICompatible(Caller, Callee, Args)) 4481 return false; 4482 4483 // If we get here, we know the target features match. If one function 4484 // considers 512-bit vectors legal and the other does not, consider them 4485 // incompatible. 4486 const TargetMachine &TM = getTLI()->getTargetMachine(); 4487 4488 if (TM.getSubtarget<X86Subtarget>(*Caller).useAVX512Regs() == 4489 TM.getSubtarget<X86Subtarget>(*Callee).useAVX512Regs()) 4490 return true; 4491 4492 // Consider the arguments compatible if they aren't vectors or aggregates. 4493 // FIXME: Look at the size of vectors. 4494 // FIXME: Look at the element types of aggregates to see if there are vectors. 4495 // FIXME: The API of this function seems intended to allow arguments 4496 // to be removed from the set, but the caller doesn't check if the set 4497 // becomes empty so that may not work in practice. 4498 return llvm::none_of(Args, [](Argument *A) { 4499 auto *EltTy = cast<PointerType>(A->getType())->getElementType(); 4500 return EltTy->isVectorTy() || EltTy->isAggregateType(); 4501 }); 4502 } 4503 4504 X86TTIImpl::TTI::MemCmpExpansionOptions 4505 X86TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const { 4506 TTI::MemCmpExpansionOptions Options; 4507 Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize); 4508 Options.NumLoadsPerBlock = 2; 4509 // All GPR and vector loads can be unaligned. 4510 Options.AllowOverlappingLoads = true; 4511 if (IsZeroCmp) { 4512 // Only enable vector loads for equality comparison. Right now the vector 4513 // version is not as fast for three way compare (see #33329). 4514 const unsigned PreferredWidth = ST->getPreferVectorWidth(); 4515 if (PreferredWidth >= 512 && ST->hasAVX512()) Options.LoadSizes.push_back(64); 4516 if (PreferredWidth >= 256 && ST->hasAVX()) Options.LoadSizes.push_back(32); 4517 if (PreferredWidth >= 128 && ST->hasSSE2()) Options.LoadSizes.push_back(16); 4518 } 4519 if (ST->is64Bit()) { 4520 Options.LoadSizes.push_back(8); 4521 } 4522 Options.LoadSizes.push_back(4); 4523 Options.LoadSizes.push_back(2); 4524 Options.LoadSizes.push_back(1); 4525 return Options; 4526 } 4527 4528 bool X86TTIImpl::enableInterleavedAccessVectorization() { 4529 // TODO: We expect this to be beneficial regardless of arch, 4530 // but there are currently some unexplained performance artifacts on Atom. 4531 // As a temporary solution, disable on Atom. 4532 return !(ST->isAtom()); 4533 } 4534 4535 // Get estimation for interleaved load/store operations for AVX2. 4536 // \p Factor is the interleaved-access factor (stride) - number of 4537 // (interleaved) elements in the group. 4538 // \p Indices contains the indices for a strided load: when the 4539 // interleaved load has gaps they indicate which elements are used. 4540 // If Indices is empty (or if the number of indices is equal to the size 4541 // of the interleaved-access as given in \p Factor) the access has no gaps. 4542 // 4543 // As opposed to AVX-512, AVX2 does not have generic shuffles that allow 4544 // computing the cost using a generic formula as a function of generic 4545 // shuffles. We therefore use a lookup table instead, filled according to 4546 // the instruction sequences that codegen currently generates. 4547 InstructionCost X86TTIImpl::getInterleavedMemoryOpCostAVX2( 4548 unsigned Opcode, FixedVectorType *VecTy, unsigned Factor, 4549 ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace, 4550 TTI::TargetCostKind CostKind, bool UseMaskForCond, bool UseMaskForGaps) { 4551 4552 if (UseMaskForCond || UseMaskForGaps) 4553 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices, 4554 Alignment, AddressSpace, CostKind, 4555 UseMaskForCond, UseMaskForGaps); 4556 4557 // We currently Support only fully-interleaved groups, with no gaps. 4558 // TODO: Support also strided loads (interleaved-groups with gaps). 4559 if (Indices.size() && Indices.size() != Factor) 4560 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices, 4561 Alignment, AddressSpace, 4562 CostKind); 4563 4564 // VecTy for interleave memop is <VF*Factor x Elt>. 4565 // So, for VF=4, Interleave Factor = 3, Element type = i32 we have 4566 // VecTy = <12 x i32>. 4567 MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second; 4568 4569 // This function can be called with VecTy=<6xi128>, Factor=3, in which case 4570 // the VF=2, while v2i128 is an unsupported MVT vector type 4571 // (see MachineValueType.h::getVectorVT()). 4572 if (!LegalVT.isVector()) 4573 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices, 4574 Alignment, AddressSpace, 4575 CostKind); 4576 4577 unsigned VF = VecTy->getNumElements() / Factor; 4578 Type *ScalarTy = VecTy->getElementType(); 4579 4580 // Calculate the number of memory operations (NumOfMemOps), required 4581 // for load/store the VecTy. 4582 unsigned VecTySize = DL.getTypeStoreSize(VecTy); 4583 unsigned LegalVTSize = LegalVT.getStoreSize(); 4584 unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize; 4585 4586 // Get the cost of one memory operation. 4587 auto *SingleMemOpTy = FixedVectorType::get(VecTy->getElementType(), 4588 LegalVT.getVectorNumElements()); 4589 InstructionCost MemOpCost = getMemoryOpCost( 4590 Opcode, SingleMemOpTy, MaybeAlign(Alignment), AddressSpace, CostKind); 4591 4592 auto *VT = FixedVectorType::get(ScalarTy, VF); 4593 EVT ETy = TLI->getValueType(DL, VT); 4594 if (!ETy.isSimple()) 4595 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices, 4596 Alignment, AddressSpace, 4597 CostKind); 4598 4599 // TODO: Complete for other data-types and strides. 4600 // Each combination of Stride, ElementTy and VF results in a different 4601 // sequence; The cost tables are therefore accessed with: 4602 // Factor (stride) and VectorType=VFxElemType. 4603 // The Cost accounts only for the shuffle sequence; 4604 // The cost of the loads/stores is accounted for separately. 4605 // 4606 static const CostTblEntry AVX2InterleavedLoadTbl[] = { 4607 { 2, MVT::v4i64, 6 }, //(load 8i64 and) deinterleave into 2 x 4i64 4608 { 2, MVT::v4f64, 6 }, //(load 8f64 and) deinterleave into 2 x 4f64 4609 4610 { 3, MVT::v2i8, 10 }, //(load 6i8 and) deinterleave into 3 x 2i8 4611 { 3, MVT::v4i8, 4 }, //(load 12i8 and) deinterleave into 3 x 4i8 4612 { 3, MVT::v8i8, 9 }, //(load 24i8 and) deinterleave into 3 x 8i8 4613 { 3, MVT::v16i8, 11}, //(load 48i8 and) deinterleave into 3 x 16i8 4614 { 3, MVT::v32i8, 13}, //(load 96i8 and) deinterleave into 3 x 32i8 4615 { 3, MVT::v8f32, 17 }, //(load 24f32 and)deinterleave into 3 x 8f32 4616 4617 { 4, MVT::v2i8, 12 }, //(load 8i8 and) deinterleave into 4 x 2i8 4618 { 4, MVT::v4i8, 4 }, //(load 16i8 and) deinterleave into 4 x 4i8 4619 { 4, MVT::v8i8, 20 }, //(load 32i8 and) deinterleave into 4 x 8i8 4620 { 4, MVT::v16i8, 39 }, //(load 64i8 and) deinterleave into 4 x 16i8 4621 { 4, MVT::v32i8, 80 }, //(load 128i8 and) deinterleave into 4 x 32i8 4622 4623 { 8, MVT::v8f32, 40 } //(load 64f32 and)deinterleave into 8 x 8f32 4624 }; 4625 4626 static const CostTblEntry AVX2InterleavedStoreTbl[] = { 4627 { 2, MVT::v4i64, 6 }, //interleave into 2 x 4i64 into 8i64 (and store) 4628 { 2, MVT::v4f64, 6 }, //interleave into 2 x 4f64 into 8f64 (and store) 4629 4630 { 3, MVT::v2i8, 7 }, //interleave 3 x 2i8 into 6i8 (and store) 4631 { 3, MVT::v4i8, 8 }, //interleave 3 x 4i8 into 12i8 (and store) 4632 { 3, MVT::v8i8, 11 }, //interleave 3 x 8i8 into 24i8 (and store) 4633 { 3, MVT::v16i8, 11 }, //interleave 3 x 16i8 into 48i8 (and store) 4634 { 3, MVT::v32i8, 13 }, //interleave 3 x 32i8 into 96i8 (and store) 4635 4636 { 4, MVT::v2i8, 12 }, //interleave 4 x 2i8 into 8i8 (and store) 4637 { 4, MVT::v4i8, 9 }, //interleave 4 x 4i8 into 16i8 (and store) 4638 { 4, MVT::v8i8, 10 }, //interleave 4 x 8i8 into 32i8 (and store) 4639 { 4, MVT::v16i8, 10 }, //interleave 4 x 16i8 into 64i8 (and store) 4640 { 4, MVT::v32i8, 12 } //interleave 4 x 32i8 into 128i8 (and store) 4641 }; 4642 4643 if (Opcode == Instruction::Load) { 4644 if (const auto *Entry = 4645 CostTableLookup(AVX2InterleavedLoadTbl, Factor, ETy.getSimpleVT())) 4646 return NumOfMemOps * MemOpCost + Entry->Cost; 4647 } else { 4648 assert(Opcode == Instruction::Store && 4649 "Expected Store Instruction at this point"); 4650 if (const auto *Entry = 4651 CostTableLookup(AVX2InterleavedStoreTbl, Factor, ETy.getSimpleVT())) 4652 return NumOfMemOps * MemOpCost + Entry->Cost; 4653 } 4654 4655 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices, 4656 Alignment, AddressSpace, CostKind); 4657 } 4658 4659 // Get estimation for interleaved load/store operations and strided load. 4660 // \p Indices contains indices for strided load. 4661 // \p Factor - the factor of interleaving. 4662 // AVX-512 provides 3-src shuffles that significantly reduces the cost. 4663 InstructionCost X86TTIImpl::getInterleavedMemoryOpCostAVX512( 4664 unsigned Opcode, FixedVectorType *VecTy, unsigned Factor, 4665 ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace, 4666 TTI::TargetCostKind CostKind, bool UseMaskForCond, bool UseMaskForGaps) { 4667 4668 if (UseMaskForCond || UseMaskForGaps) 4669 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices, 4670 Alignment, AddressSpace, CostKind, 4671 UseMaskForCond, UseMaskForGaps); 4672 4673 // VecTy for interleave memop is <VF*Factor x Elt>. 4674 // So, for VF=4, Interleave Factor = 3, Element type = i32 we have 4675 // VecTy = <12 x i32>. 4676 4677 // Calculate the number of memory operations (NumOfMemOps), required 4678 // for load/store the VecTy. 4679 MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second; 4680 unsigned VecTySize = DL.getTypeStoreSize(VecTy); 4681 unsigned LegalVTSize = LegalVT.getStoreSize(); 4682 unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize; 4683 4684 // Get the cost of one memory operation. 4685 auto *SingleMemOpTy = FixedVectorType::get(VecTy->getElementType(), 4686 LegalVT.getVectorNumElements()); 4687 InstructionCost MemOpCost = getMemoryOpCost( 4688 Opcode, SingleMemOpTy, MaybeAlign(Alignment), AddressSpace, CostKind); 4689 4690 unsigned VF = VecTy->getNumElements() / Factor; 4691 MVT VT = MVT::getVectorVT(MVT::getVT(VecTy->getScalarType()), VF); 4692 4693 if (Opcode == Instruction::Load) { 4694 // The tables (AVX512InterleavedLoadTbl and AVX512InterleavedStoreTbl) 4695 // contain the cost of the optimized shuffle sequence that the 4696 // X86InterleavedAccess pass will generate. 4697 // The cost of loads and stores are computed separately from the table. 4698 4699 // X86InterleavedAccess support only the following interleaved-access group. 4700 static const CostTblEntry AVX512InterleavedLoadTbl[] = { 4701 {3, MVT::v16i8, 12}, //(load 48i8 and) deinterleave into 3 x 16i8 4702 {3, MVT::v32i8, 14}, //(load 96i8 and) deinterleave into 3 x 32i8 4703 {3, MVT::v64i8, 22}, //(load 96i8 and) deinterleave into 3 x 32i8 4704 }; 4705 4706 if (const auto *Entry = 4707 CostTableLookup(AVX512InterleavedLoadTbl, Factor, VT)) 4708 return NumOfMemOps * MemOpCost + Entry->Cost; 4709 //If an entry does not exist, fallback to the default implementation. 4710 4711 // Kind of shuffle depends on number of loaded values. 4712 // If we load the entire data in one register, we can use a 1-src shuffle. 4713 // Otherwise, we'll merge 2 sources in each operation. 4714 TTI::ShuffleKind ShuffleKind = 4715 (NumOfMemOps > 1) ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc; 4716 4717 InstructionCost ShuffleCost = 4718 getShuffleCost(ShuffleKind, SingleMemOpTy, None, 0, nullptr); 4719 4720 unsigned NumOfLoadsInInterleaveGrp = 4721 Indices.size() ? Indices.size() : Factor; 4722 auto *ResultTy = FixedVectorType::get(VecTy->getElementType(), 4723 VecTy->getNumElements() / Factor); 4724 unsigned NumOfResults = 4725 getTLI()->getTypeLegalizationCost(DL, ResultTy).first * 4726 NumOfLoadsInInterleaveGrp; 4727 4728 // About a half of the loads may be folded in shuffles when we have only 4729 // one result. If we have more than one result, we do not fold loads at all. 4730 unsigned NumOfUnfoldedLoads = 4731 NumOfResults > 1 ? NumOfMemOps : NumOfMemOps / 2; 4732 4733 // Get a number of shuffle operations per result. 4734 unsigned NumOfShufflesPerResult = 4735 std::max((unsigned)1, (unsigned)(NumOfMemOps - 1)); 4736 4737 // The SK_MergeTwoSrc shuffle clobbers one of src operands. 4738 // When we have more than one destination, we need additional instructions 4739 // to keep sources. 4740 unsigned NumOfMoves = 0; 4741 if (NumOfResults > 1 && ShuffleKind == TTI::SK_PermuteTwoSrc) 4742 NumOfMoves = NumOfResults * NumOfShufflesPerResult / 2; 4743 4744 InstructionCost Cost = NumOfResults * NumOfShufflesPerResult * ShuffleCost + 4745 NumOfUnfoldedLoads * MemOpCost + NumOfMoves; 4746 4747 return Cost; 4748 } 4749 4750 // Store. 4751 assert(Opcode == Instruction::Store && 4752 "Expected Store Instruction at this point"); 4753 // X86InterleavedAccess support only the following interleaved-access group. 4754 static const CostTblEntry AVX512InterleavedStoreTbl[] = { 4755 {3, MVT::v16i8, 12}, // interleave 3 x 16i8 into 48i8 (and store) 4756 {3, MVT::v32i8, 14}, // interleave 3 x 32i8 into 96i8 (and store) 4757 {3, MVT::v64i8, 26}, // interleave 3 x 64i8 into 96i8 (and store) 4758 4759 {4, MVT::v8i8, 10}, // interleave 4 x 8i8 into 32i8 (and store) 4760 {4, MVT::v16i8, 11}, // interleave 4 x 16i8 into 64i8 (and store) 4761 {4, MVT::v32i8, 14}, // interleave 4 x 32i8 into 128i8 (and store) 4762 {4, MVT::v64i8, 24} // interleave 4 x 32i8 into 256i8 (and store) 4763 }; 4764 4765 if (const auto *Entry = 4766 CostTableLookup(AVX512InterleavedStoreTbl, Factor, VT)) 4767 return NumOfMemOps * MemOpCost + Entry->Cost; 4768 //If an entry does not exist, fallback to the default implementation. 4769 4770 // There is no strided stores meanwhile. And store can't be folded in 4771 // shuffle. 4772 unsigned NumOfSources = Factor; // The number of values to be merged. 4773 InstructionCost ShuffleCost = 4774 getShuffleCost(TTI::SK_PermuteTwoSrc, SingleMemOpTy, None, 0, nullptr); 4775 unsigned NumOfShufflesPerStore = NumOfSources - 1; 4776 4777 // The SK_MergeTwoSrc shuffle clobbers one of src operands. 4778 // We need additional instructions to keep sources. 4779 unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2; 4780 InstructionCost Cost = 4781 NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) + 4782 NumOfMoves; 4783 return Cost; 4784 } 4785 4786 InstructionCost X86TTIImpl::getInterleavedMemoryOpCost( 4787 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices, 4788 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, 4789 bool UseMaskForCond, bool UseMaskForGaps) { 4790 auto isSupportedOnAVX512 = [](Type *VecTy, bool HasBW) { 4791 Type *EltTy = cast<VectorType>(VecTy)->getElementType(); 4792 if (EltTy->isFloatTy() || EltTy->isDoubleTy() || EltTy->isIntegerTy(64) || 4793 EltTy->isIntegerTy(32) || EltTy->isPointerTy()) 4794 return true; 4795 if (EltTy->isIntegerTy(16) || EltTy->isIntegerTy(8)) 4796 return HasBW; 4797 return false; 4798 }; 4799 if (ST->hasAVX512() && isSupportedOnAVX512(VecTy, ST->hasBWI())) 4800 return getInterleavedMemoryOpCostAVX512( 4801 Opcode, cast<FixedVectorType>(VecTy), Factor, Indices, Alignment, 4802 AddressSpace, CostKind, UseMaskForCond, UseMaskForGaps); 4803 if (ST->hasAVX2()) 4804 return getInterleavedMemoryOpCostAVX2( 4805 Opcode, cast<FixedVectorType>(VecTy), Factor, Indices, Alignment, 4806 AddressSpace, CostKind, UseMaskForCond, UseMaskForGaps); 4807 4808 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices, 4809 Alignment, AddressSpace, CostKind, 4810 UseMaskForCond, UseMaskForGaps); 4811 } 4812