//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//
/// A note about the cost model numbers used below: the numbers correspond to
/// some "generic" X86 CPU rather than a concrete CPU model. Usually the
/// numbers correspond to the CPU where the feature first appeared. For
/// example, if we do Subtarget.hasSSE42() in the lookups below, the cost is
/// based on Nehalem, as that was the first CPU to support that feature level
/// and thus most likely has the worst-case cost.
/// Some examples of other technologies/CPUs:
///   SSE 3   - Pentium4 / Athlon64
///   SSE 4.1 - Penryn
///   SSE 4.2 - Nehalem
///   AVX     - Sandy Bridge
///   AVX2    - Haswell
///   AVX-512 - Xeon Phi / Skylake
/// And some examples of instruction target dependent costs (latency):
///                   divss     sqrtss    rsqrtss
///   AMD K7          11-16     19        3
///   Piledriver      9-24      13-15     5
///   Jaguar          14        16        2
///   Pentium II,III  18        30        2
///   Nehalem         7-14      7-18      3
///   Haswell         10-13     11        5
/// TODO: Develop and implement the target dependent cost model and
/// specialize cost numbers for different Cost Model Targets such as throughput,
/// code size, latency and uop count.
//===----------------------------------------------------------------------===//

#include "X86TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

#define DEBUG_TYPE "x86tti"

//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  // instructions is inefficient. Once the problem is fixed, we should
  // call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
}

llvm::Optional<unsigned> X86TTIImpl::getCacheSize(
    TargetTransformInfo::CacheLevel Level) const {
  switch (Level) {
  case TargetTransformInfo::CacheLevel::L1D:
    //   - Penryn
    //   - Nehalem
    //   - Westmere
    //   - Sandy Bridge
    //   - Ivy Bridge
    //   - Haswell
    //   - Broadwell
    //   - Skylake
    //   - Kabylake
    return 32 * 1024; // 32 KByte
  case TargetTransformInfo::CacheLevel::L2D:
    //   - Penryn
    //   - Nehalem
    //   - Westmere
    //   - Sandy Bridge
    //   - Ivy Bridge
    //   - Haswell
    //   - Broadwell
    //   - Skylake
    //   - Kabylake
    return 256 * 1024; // 256 KByte
  }

  llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
}

llvm::Optional<unsigned> X86TTIImpl::getCacheAssociativity(
    TargetTransformInfo::CacheLevel Level) const {
  //   - Penryn
  //   - Nehalem
  //   - Westmere
  //   - Sandy Bridge
  //   - Ivy Bridge
  //   - Haswell
  //   - Broadwell
  //   - Skylake
  //   - Kabylake
  switch (Level) {
  case TargetTransformInfo::CacheLevel::L1D:
    LLVM_FALLTHROUGH;
  case TargetTransformInfo::CacheLevel::L2D:
    return 8;
  }

  llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
}

unsigned X86TTIImpl::getNumberOfRegisters(unsigned ClassID) const {
  bool Vector = (ClassID == 1);
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit()) {
    if (Vector && ST->hasAVX512())
      return 32;
    return 16;
  }
  return 8;
}

unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) const {
  unsigned PreferVectorWidth = ST->getPreferVectorWidth();
  if (Vector) {
    if (ST->hasAVX512() && PreferVectorWidth >= 512)
      return 512;
    if (ST->hasAVX() && PreferVectorWidth >= 256)
      return 256;
    if (ST->hasSSE1() && PreferVectorWidth >= 128)
      return 128;
    return 0;
  }

  if (ST->is64Bit())
    return 64;

  return 32;
}

unsigned X86TTIImpl::getLoadStoreVecRegBitWidth(unsigned) const {
  return getRegisterBitWidth(true);
}

unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // If the loop will not be vectorized, don't interleave the loop.
  // Let the regular unroller unroll the loop instead, which saves the
  // overflow check and memory check cost.
  if (VF == 1)
    return 1;

  if (ST->isAtom())
    return 1;

  // Sandy Bridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}

int X86TTIImpl::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                       TTI::TargetCostKind CostKind,
                                       TTI::OperandValueKind Op1Info,
                                       TTI::OperandValueKind Op2Info,
                                       TTI::OperandValueProperties Opd1PropInfo,
                                       TTI::OperandValueProperties Opd2PropInfo,
                                       ArrayRef<const Value *> Args,
                                       const Instruction *CxtI) {
  // TODO: Handle more cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                         Op2Info, Opd1PropInfo,
                                         Opd2PropInfo, Args, CxtI);
  // Legalize the type.
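  // LT.first is the split factor needed to legalize Ty (e.g. v8i32 becomes
  // 2 x v4i32 on plain SSE2, so LT.first == 2) and LT.second is the legalized
  // MVT; the per-type table costs below are scaled by that split factor.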
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry GLMCostTable[] = {
    { ISD::FDIV,  MVT::f32,   18 }, // divss
    { ISD::FDIV,  MVT::v4f32, 35 }, // divps
    { ISD::FDIV,  MVT::f64,   33 }, // divsd
    { ISD::FDIV,  MVT::v2f64, 65 }, // divpd
  };

  if (ST->useGLMDivSqrtCosts())
    if (const auto *Entry = CostTableLookup(GLMCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SLMCostTable[] = {
    { ISD::MUL,   MVT::v4i32, 11 }, // pmulld
    { ISD::MUL,   MVT::v8i16,  2 }, // pmullw
    { ISD::MUL,   MVT::v16i8, 14 }, // extend/pmullw/trunc sequence.
    { ISD::FMUL,  MVT::f64,    2 }, // mulsd
    { ISD::FMUL,  MVT::v2f64,  4 }, // mulpd
    { ISD::FMUL,  MVT::v4f32,  2 }, // mulps
    { ISD::FDIV,  MVT::f32,   17 }, // divss
    { ISD::FDIV,  MVT::v4f32, 39 }, // divps
    { ISD::FDIV,  MVT::f64,   32 }, // divsd
    { ISD::FDIV,  MVT::v2f64, 69 }, // divpd
    { ISD::FADD,  MVT::v2f64,  2 }, // addpd
    { ISD::FSUB,  MVT::v2f64,  2 }, // subpd
    // v2i64/v4i64 mul is custom lowered as a series of long:
    // multiplies(3), shifts(3) and adds(2)
    // slm muldq version throughput is 2 and addq throughput 4
    // thus: 3X2 (muldq throughput) + 3X1 (shift throughput) +
    // 2X4 (addq throughput) = 17
    { ISD::MUL,   MVT::v2i64, 17 },
    // slm addq\subq throughput is 4
    { ISD::ADD,   MVT::v2i64,  4 },
    { ISD::SUB,   MVT::v2i64,  4 },
  };

  if (ST->isSLM()) {
    if (Args.size() == 2 && ISD == ISD::MUL && LT.second == MVT::v4i32) {
      // Check if the operands can be shrunk into a smaller datatype.
      bool Op1Signed = false;
      unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
      bool Op2Signed = false;
      unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);

      bool signedMode = Op1Signed | Op2Signed;
      unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);

      if (OpMinSize <= 7)
        return LT.first * 3; // pmullw/sext
      if (!signedMode && OpMinSize <= 8)
        return LT.first * 3; // pmullw/zext
      if (OpMinSize <= 15)
        return LT.first * 5; // pmullw/pmulhw/pshuf
      if (!signedMode && OpMinSize <= 16)
        return LT.first * 5; // pmullw/pmulhw/pshuf
    }

    if (const auto *Entry = CostTableLookup(SLMCostTable, ISD,
                                            LT.second)) {
      return LT.first * Entry->Cost;
    }
  }

  if ((ISD == ISD::SDIV || ISD == ISD::SREM || ISD == ISD::UDIV ||
       ISD == ISD::UREM) &&
      (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    if (ISD == ISD::SDIV || ISD == ISD::SREM) {
      // On X86, vector signed division by a constant power of two is
      // normally expanded to the sequence SRA + SRL + ADD + SRA.
      // The OperandValue properties may not be the same as that of the previous
      // operation; conservatively assume OP_None.
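      // The total is thus 2 * SRA + SRL + ADD, plus a MUL and a SUB below
      // when costing SREM as X - (X / C) * C.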
      int Cost =
          2 * getArithmeticInstrCost(Instruction::AShr, Ty, CostKind, Op1Info,
                                     Op2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      Cost += getArithmeticInstrCost(Instruction::LShr, Ty, CostKind, Op1Info,
                                     Op2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      Cost += getArithmeticInstrCost(Instruction::Add, Ty, CostKind, Op1Info,
                                     Op2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);

      if (ISD == ISD::SREM) {
        // For SREM: (X % C) is the equivalent of (X - (X/C)*C)
        Cost += getArithmeticInstrCost(Instruction::Mul, Ty, CostKind, Op1Info,
                                       Op2Info);
        Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind, Op1Info,
                                       Op2Info);
      }

      return Cost;
    }

    // Vector unsigned division/remainder will be simplified to shifts/masks.
    if (ISD == ISD::UDIV)
      return getArithmeticInstrCost(Instruction::LShr, Ty, CostKind,
                                    Op1Info, Op2Info,
                                    TargetTransformInfo::OP_None,
                                    TargetTransformInfo::OP_None);

    else // UREM
      return getArithmeticInstrCost(Instruction::And, Ty, CostKind,
                                    Op1Info, Op2Info,
                                    TargetTransformInfo::OP_None,
                                    TargetTransformInfo::OP_None);
  }

  static const CostTblEntry AVX512BWUniformConstCostTable[] = {
    { ISD::SHL,  MVT::v64i8,   2 }, // psllw + pand.
    { ISD::SRL,  MVT::v64i8,   2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v64i8,   4 }, // psrlw, pand, pxor, psubb.
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasBWI()) {
    if (const auto *Entry = CostTableLookup(AVX512BWUniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512UniformConstCostTable[] = {
    { ISD::SRA,  MVT::v2i64,   1 },
    { ISD::SRA,  MVT::v4i64,   1 },
    { ISD::SRA,  MVT::v8i64,   1 },

    { ISD::SHL,  MVT::v64i8,   4 }, // psllw + pand.
    { ISD::SRL,  MVT::v64i8,   4 }, // psrlw + pand.
    { ISD::SRA,  MVT::v64i8,   8 }, // psrlw, pand, pxor, psubb.
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX512()) {
    if (const auto *Entry = CostTableLookup(AVX512UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX2UniformConstCostTable[] = {
    { ISD::SHL,  MVT::v32i8,   2 }, // psllw + pand.
    { ISD::SRL,  MVT::v32i8,   2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v32i8,   4 }, // psrlw, pand, pxor, psubb.

    { ISD::SRA,  MVT::v4i64,   4 }, // 2 x psrad + shuffle.
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX2()) {
    if (const auto *Entry = CostTableLookup(AVX2UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformConstCostTable[] = {
    { ISD::SHL,  MVT::v16i8,   2 }, // psllw + pand.
    { ISD::SRL,  MVT::v16i8,   2 }, // psrlw + pand.
    { ISD::SRA,  MVT::v16i8,   4 }, // psrlw, pand, pxor, psubb.

    { ISD::SHL,  MVT::v32i8, 4+2 }, // 2*(psllw + pand) + split.
    { ISD::SRL,  MVT::v32i8, 4+2 }, // 2*(psrlw + pand) + split.
    { ISD::SRA,  MVT::v32i8, 8+2 }, // 2*(psrlw, pand, pxor, psubb) + split.
  };

  // XOP has faster vXi8 shifts.
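  // (Those are costed via XOPShiftCostTable further down, so skip the SSE2
  // constant-shift table on XOP targets.)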
  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2() && !ST->hasXOP()) {
    if (const auto *Entry =
            CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512BWConstCostTable[] = {
    { ISD::SDIV, MVT::v64i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::SREM, MVT::v64i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v64i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::UREM, MVT::v64i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v32i16,  6 }, // vpmulhw sequence
    { ISD::SREM, MVT::v32i16,  8 }, // vpmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i16,  6 }, // vpmulhuw sequence
    { ISD::UREM, MVT::v32i16,  8 }, // vpmulhuw+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasBWI()) {
    if (const auto *Entry =
            CostTableLookup(AVX512BWConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512ConstCostTable[] = {
    { ISD::SDIV, MVT::v16i32, 15 }, // vpmuldq sequence
    { ISD::SREM, MVT::v16i32, 17 }, // vpmuldq+mul+sub sequence
    { ISD::UDIV, MVT::v16i32, 15 }, // vpmuludq sequence
    { ISD::UREM, MVT::v16i32, 17 }, // vpmuludq+mul+sub sequence
    { ISD::SDIV, MVT::v64i8,  28 }, // 4*ext+4*pmulhw sequence
    { ISD::SREM, MVT::v64i8,  32 }, // 4*ext+4*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v64i8,  28 }, // 4*ext+4*pmulhw sequence
    { ISD::UREM, MVT::v64i8,  32 }, // 4*ext+4*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v32i16, 12 }, // 2*vpmulhw sequence
    { ISD::SREM, MVT::v32i16, 16 }, // 2*vpmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i16, 12 }, // 2*vpmulhuw sequence
    { ISD::UREM, MVT::v32i16, 16 }, // 2*vpmulhuw+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasAVX512()) {
    if (const auto *Entry =
            CostTableLookup(AVX512ConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX2ConstCostTable[] = {
    { ISD::SDIV, MVT::v32i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::SREM, MVT::v32i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::UREM, MVT::v32i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v16i16,  6 }, // vpmulhw sequence
    { ISD::SREM, MVT::v16i16,  8 }, // vpmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v16i16,  6 }, // vpmulhuw sequence
    { ISD::UREM, MVT::v16i16,  8 }, // vpmulhuw+mul+sub sequence
    { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
    { ISD::SREM, MVT::v8i32,  19 }, // vpmuldq+mul+sub sequence
    { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
    { ISD::UREM, MVT::v8i32,  19 }, // vpmuludq+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasAVX2()) {
    if (const auto *Entry = CostTableLookup(AVX2ConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2ConstCostTable[] = {
    { ISD::SDIV, MVT::v32i8,  28+2 }, // 4*ext+4*pmulhw sequence + split.
    { ISD::SREM, MVT::v32i8,  32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
    { ISD::SDIV, MVT::v16i8,    14 }, // 2*ext+2*pmulhw sequence
    { ISD::SREM, MVT::v16i8,    16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i8,  28+2 }, // 4*ext+4*pmulhw sequence + split.
    { ISD::UREM, MVT::v32i8,  32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
    { ISD::UDIV, MVT::v16i8,    14 }, // 2*ext+2*pmulhw sequence
    { ISD::UREM, MVT::v16i8,    16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v16i16, 12+2 }, // 2*pmulhw sequence + split.
    { ISD::SREM, MVT::v16i16, 16+2 }, // 2*pmulhw+mul+sub sequence + split.
    { ISD::SDIV, MVT::v8i16,     6 }, // pmulhw sequence
    { ISD::SREM, MVT::v8i16,     8 }, // pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v16i16, 12+2 }, // 2*pmulhuw sequence + split.
    { ISD::UREM, MVT::v16i16, 16+2 }, // 2*pmulhuw+mul+sub sequence + split.
    { ISD::UDIV, MVT::v8i16,     6 }, // pmulhuw sequence
    { ISD::UREM, MVT::v8i16,     8 }, // pmulhuw+mul+sub sequence
    { ISD::SDIV, MVT::v8i32,  38+2 }, // 2*pmuludq sequence + split.
    { ISD::SREM, MVT::v8i32,  48+2 }, // 2*pmuludq+mul+sub sequence + split.
    { ISD::SDIV, MVT::v4i32,    19 }, // pmuludq sequence
    { ISD::SREM, MVT::v4i32,    24 }, // pmuludq+mul+sub sequence
    { ISD::UDIV, MVT::v8i32,  30+2 }, // 2*pmuludq sequence + split.
    { ISD::UREM, MVT::v8i32,  40+2 }, // 2*pmuludq+mul+sub sequence + split.
    { ISD::UDIV, MVT::v4i32,    15 }, // pmuludq sequence
    { ISD::UREM, MVT::v4i32,    20 }, // pmuludq+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasSSE2()) {
    // pmuldq sequence.
    if (ISD == ISD::SDIV && LT.second == MVT::v8i32 && ST->hasAVX())
      return LT.first * 32;
    if (ISD == ISD::SREM && LT.second == MVT::v8i32 && ST->hasAVX())
      return LT.first * 38;
    if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 15;
    if (ISD == ISD::SREM && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 20;

    if (const auto *Entry = CostTableLookup(SSE2ConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512BWShiftCostTable[] = {
    { ISD::SHL,   MVT::v8i16,  1 }, // vpsllvw
    { ISD::SRL,   MVT::v8i16,  1 }, // vpsrlvw
    { ISD::SRA,   MVT::v8i16,  1 }, // vpsravw

    { ISD::SHL,   MVT::v16i16, 1 }, // vpsllvw
    { ISD::SRL,   MVT::v16i16, 1 }, // vpsrlvw
    { ISD::SRA,   MVT::v16i16, 1 }, // vpsravw

    { ISD::SHL,   MVT::v32i16, 1 }, // vpsllvw
    { ISD::SRL,   MVT::v32i16, 1 }, // vpsrlvw
    { ISD::SRA,   MVT::v32i16, 1 }, // vpsravw
  };

  if (ST->hasBWI())
    if (const auto *Entry =
            CostTableLookup(AVX512BWShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX2UniformCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i16, 1 }, // psllw.
    { ISD::SRL,  MVT::v16i16, 1 }, // psrlw.
    { ISD::SRA,  MVT::v16i16, 1 }, // psraw.
    { ISD::SHL,  MVT::v32i16, 2 }, // 2*psllw.
    { ISD::SRL,  MVT::v32i16, 2 }, // 2*psrlw.
    { ISD::SRA,  MVT::v32i16, 2 }, // 2*psraw.
  };

  if (ST->hasAVX2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(AVX2UniformCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v8i16, 1 }, // psllw.
    { ISD::SHL,  MVT::v4i32, 1 }, // pslld.
    { ISD::SHL,  MVT::v2i64, 1 }, // psllq.

    { ISD::SRL,  MVT::v8i16, 1 }, // psrlw.
    { ISD::SRL,  MVT::v4i32, 1 }, // psrld.
    { ISD::SRL,  MVT::v2i64, 1 }, // psrlq.

    { ISD::SRA,  MVT::v8i16, 1 }, // psraw.
    { ISD::SRA,  MVT::v4i32, 1 }, // psrad.
  };

  if (ST->hasSSE2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(SSE2UniformCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512DQCostTable[] = {
    { ISD::MUL,  MVT::v2i64, 1 },
    { ISD::MUL,  MVT::v4i64, 1 },
    { ISD::MUL,  MVT::v8i64, 1 }
  };

  // Look for AVX512DQ lowering tricks for custom cases.
  if (ST->hasDQI())
    if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512BWCostTable[] = {
    { ISD::SHL,   MVT::v64i8, 11 }, // vpblendvb sequence.
    { ISD::SRL,   MVT::v64i8, 11 }, // vpblendvb sequence.
    { ISD::SRA,   MVT::v64i8, 24 }, // vpblendvb sequence.

    { ISD::MUL,   MVT::v64i8, 11 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,   MVT::v32i8,  4 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,   MVT::v16i8,  4 }, // extend/pmullw/trunc sequence.
  };

  // Look for AVX512BW lowering tricks for custom cases.
  if (ST->hasBWI())
    if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512CostTable[] = {
    { ISD::SHL,     MVT::v16i32,  1 },
    { ISD::SRL,     MVT::v16i32,  1 },
    { ISD::SRA,     MVT::v16i32,  1 },

    { ISD::SHL,     MVT::v8i64,   1 },
    { ISD::SRL,     MVT::v8i64,   1 },

    { ISD::SRA,     MVT::v2i64,   1 },
    { ISD::SRA,     MVT::v4i64,   1 },
    { ISD::SRA,     MVT::v8i64,   1 },

    { ISD::MUL,     MVT::v64i8,  26 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,     MVT::v32i8,  13 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,     MVT::v16i8,   5 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,     MVT::v16i32,  1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,     MVT::v8i32,   1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,     MVT::v4i32,   1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,     MVT::v8i64,   8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FADD,    MVT::v8f64,   1 }, // Skylake from http://www.agner.org/
    { ISD::FSUB,    MVT::v8f64,   1 }, // Skylake from http://www.agner.org/
    { ISD::FMUL,    MVT::v8f64,   1 }, // Skylake from http://www.agner.org/

    { ISD::FADD,    MVT::v16f32,  1 }, // Skylake from http://www.agner.org/
    { ISD::FSUB,    MVT::v16f32,  1 }, // Skylake from http://www.agner.org/
    { ISD::FMUL,    MVT::v16f32,  1 }, // Skylake from http://www.agner.org/
  };

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX2ShiftCostTable[] = {
    // Shifts on v4i64/v8i32 on AVX2 are legal even though we declare them
    // custom in order to detect the cases where the shift amount is a scalar.
    { ISD::SHL,     MVT::v4i32,   1 },
    { ISD::SRL,     MVT::v4i32,   1 },
    { ISD::SRA,     MVT::v4i32,   1 },
    { ISD::SHL,     MVT::v8i32,   1 },
    { ISD::SRL,     MVT::v8i32,   1 },
    { ISD::SRA,     MVT::v8i32,   1 },
    { ISD::SHL,     MVT::v2i64,   1 },
    { ISD::SRL,     MVT::v2i64,   1 },
    { ISD::SHL,     MVT::v4i64,   1 },
    { ISD::SRL,     MVT::v4i64,   1 },
  };

  if (ST->hasAVX512()) {
    if (ISD == ISD::SHL && LT.second == MVT::v32i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX512, a packed v32i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
                                    Op1Info, Op2Info,
                                    TargetTransformInfo::OP_None,
                                    TargetTransformInfo::OP_None);
  }

  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
                                    Op1Info, Op2Info,
                                    TargetTransformInfo::OP_None,
                                    TargetTransformInfo::OP_None);

    if (const auto *Entry = CostTableLookup(AVX2ShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry XOPShiftCostTable[] = {
    // 128bit shifts take 1cy, but right shifts require negation beforehand.
    { ISD::SHL,     MVT::v16i8,   1 },
    { ISD::SRL,     MVT::v16i8,   2 },
    { ISD::SRA,     MVT::v16i8,   2 },
    { ISD::SHL,     MVT::v8i16,   1 },
    { ISD::SRL,     MVT::v8i16,   2 },
    { ISD::SRA,     MVT::v8i16,   2 },
    { ISD::SHL,     MVT::v4i32,   1 },
    { ISD::SRL,     MVT::v4i32,   2 },
    { ISD::SRA,     MVT::v4i32,   2 },
    { ISD::SHL,     MVT::v2i64,   1 },
    { ISD::SRL,     MVT::v2i64,   2 },
    { ISD::SRA,     MVT::v2i64,   2 },
    // 256bit shifts require splitting if AVX2 didn't catch them above.
    { ISD::SHL,     MVT::v32i8,  2+2 },
    { ISD::SRL,     MVT::v32i8,  4+2 },
    { ISD::SRA,     MVT::v32i8,  4+2 },
    { ISD::SHL,     MVT::v16i16, 2+2 },
    { ISD::SRL,     MVT::v16i16, 4+2 },
    { ISD::SRA,     MVT::v16i16, 4+2 },
    { ISD::SHL,     MVT::v8i32,  2+2 },
    { ISD::SRL,     MVT::v8i32,  4+2 },
    { ISD::SRA,     MVT::v8i32,  4+2 },
    { ISD::SHL,     MVT::v4i64,  2+2 },
    { ISD::SRL,     MVT::v4i64,  4+2 },
    { ISD::SRA,     MVT::v4i64,  4+2 },
  };

  // Look for XOP lowering tricks.
  if (ST->hasXOP()) {
    // If the right shift is constant then we'll fold the negation so
    // it's as cheap as a left shift.
    int ShiftISD = ISD;
    if ((ShiftISD == ISD::SRL || ShiftISD == ISD::SRA) &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      ShiftISD = ISD::SHL;
    if (const auto *Entry =
            CostTableLookup(XOPShiftCostTable, ShiftISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformShiftCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i16, 2+2 }, // 2*psllw + split.
    { ISD::SHL,  MVT::v8i32,  2+2 }, // 2*pslld + split.
    { ISD::SHL,  MVT::v4i64,  2+2 }, // 2*psllq + split.

    { ISD::SRL,  MVT::v16i16, 2+2 }, // 2*psrlw + split.
    { ISD::SRL,  MVT::v8i32,  2+2 }, // 2*psrld + split.
    { ISD::SRL,  MVT::v4i64,  2+2 }, // 2*psrlq + split.

    { ISD::SRA,  MVT::v16i16, 2+2 }, // 2*psraw + split.
    { ISD::SRA,  MVT::v8i32,  2+2 }, // 2*psrad + split.
    { ISD::SRA,  MVT::v2i64,    4 }, // 2*psrad + shuffle.
    { ISD::SRA,  MVT::v4i64,  8+2 }, // 2*(2*psrad + shuffle) + split.
  };

  if (ST->hasSSE2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {

    // Handle AVX2 uniform v4i64 ISD::SRA, it's not worth a table.
    if (ISD == ISD::SRA && LT.second == MVT::v4i64 && ST->hasAVX2())
      return LT.first * 4; // 2*psrad + shuffle.

    if (const auto *Entry =
            CostTableLookup(SSE2UniformShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  if (ISD == ISD::SHL &&
      Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
    MVT VT = LT.second;
    // A vector shift left by a non-uniform constant can be lowered into a
    // vector multiply.
    if (((VT == MVT::v8i16 || VT == MVT::v4i32) && ST->hasSSE2()) ||
        ((VT == MVT::v16i16 || VT == MVT::v8i32) && ST->hasAVX()))
      ISD = ISD::MUL;
  }

  static const CostTblEntry AVX2CostTable[] = {
    { ISD::SHL,  MVT::v32i8,  11 }, // vpblendvb sequence.
    { ISD::SHL,  MVT::v64i8,  22 }, // 2*vpblendvb sequence.
    { ISD::SHL,  MVT::v16i16, 10 }, // extend/vpsllvd/pack sequence.
    { ISD::SHL,  MVT::v32i16, 20 }, // 2*extend/vpsllvd/pack sequence.

    { ISD::SRL,  MVT::v32i8,  11 }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v64i8,  22 }, // 2*vpblendvb sequence.
    { ISD::SRL,  MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence.
    { ISD::SRL,  MVT::v32i16, 20 }, // 2*extend/vpsrlvd/pack sequence.

    { ISD::SRA,  MVT::v32i8,  24 }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v64i8,  48 }, // 2*vpblendvb sequence.
    { ISD::SRA,  MVT::v16i16, 10 }, // extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v32i16, 20 }, // 2*extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v2i64,   4 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,   4 }, // srl/xor/sub sequence.

    { ISD::SUB,  MVT::v32i8,   1 }, // psubb
    { ISD::ADD,  MVT::v32i8,   1 }, // paddb
    { ISD::SUB,  MVT::v16i16,  1 }, // psubw
    { ISD::ADD,  MVT::v16i16,  1 }, // paddw
    { ISD::SUB,  MVT::v8i32,   1 }, // psubd
    { ISD::ADD,  MVT::v8i32,   1 }, // paddd
    { ISD::SUB,  MVT::v4i64,   1 }, // psubq
    { ISD::ADD,  MVT::v4i64,   1 }, // paddq

    { ISD::MUL,  MVT::v32i8,  17 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i8,   7 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i16,  1 }, // pmullw
    { ISD::MUL,  MVT::v8i32,   2 }, // pmulld (Haswell from agner.org)
    { ISD::MUL,  MVT::v4i64,   8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FADD, MVT::v4f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FADD, MVT::v8f32,   1 }, // Haswell from http://www.agner.org/
    { ISD::FSUB, MVT::v4f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FSUB, MVT::v8f32,   1 }, // Haswell from http://www.agner.org/
    { ISD::FMUL, MVT::v4f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FMUL, MVT::v8f32,   1 }, // Haswell from http://www.agner.org/

    { ISD::FDIV, MVT::f32,     7 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,   7 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v8f32,  14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::f64,    14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,  14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v4f64,  28 }, // Haswell from http://www.agner.org/
  };

  // Look for AVX2 lowering tricks for custom cases.
  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL,     MVT::v16i16,  4 },
    { ISD::MUL,     MVT::v8i32,   4 },
    { ISD::SUB,     MVT::v32i8,   4 },
    { ISD::ADD,     MVT::v32i8,   4 },
    { ISD::SUB,     MVT::v16i16,  4 },
    { ISD::ADD,     MVT::v16i16,  4 },
    { ISD::SUB,     MVT::v8i32,   4 },
    { ISD::ADD,     MVT::v8i32,   4 },
    { ISD::SUB,     MVT::v4i64,   4 },
    { ISD::ADD,     MVT::v4i64,   4 },

    // A v4i64 multiply is custom lowered as two split v2i64 multiplies, each
    // of which is lowered as a series of long multiplies(3), shifts(3) and
    // adds(2).
    // Because we believe v4i64 to be a legal type, we must also include the
    // extract+insert in the cost table. Therefore, the cost here is 18
    // instead of 8.
    { ISD::MUL,     MVT::v4i64,  18 },

    { ISD::MUL,     MVT::v32i8,  26 }, // extend/pmullw/trunc sequence.

    { ISD::FDIV,    MVT::f32,    14 }, // SNB from http://www.agner.org/
    { ISD::FDIV,    MVT::v4f32,  14 }, // SNB from http://www.agner.org/
    { ISD::FDIV,    MVT::v8f32,  28 }, // SNB from http://www.agner.org/
    { ISD::FDIV,    MVT::f64,    22 }, // SNB from http://www.agner.org/
    { ISD::FDIV,    MVT::v2f64,  22 }, // SNB from http://www.agner.org/
    { ISD::FDIV,    MVT::v4f64,  44 }, // SNB from http://www.agner.org/
  };

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE42CostTable[] = {
    { ISD::FADD, MVT::f64,    1 }, // Nehalem from http://www.agner.org/
    { ISD::FADD, MVT::f32,    1 }, // Nehalem from http://www.agner.org/
    { ISD::FADD, MVT::v2f64,  1 }, // Nehalem from http://www.agner.org/
    { ISD::FADD, MVT::v4f32,  1 }, // Nehalem from http://www.agner.org/

    { ISD::FSUB, MVT::f64,    1 }, // Nehalem from http://www.agner.org/
    { ISD::FSUB, MVT::f32,    1 }, // Nehalem from http://www.agner.org/
    { ISD::FSUB, MVT::v2f64,  1 }, // Nehalem from http://www.agner.org/
    { ISD::FSUB, MVT::v4f32,  1 }, // Nehalem from http://www.agner.org/

    { ISD::FMUL, MVT::f64,    1 }, // Nehalem from http://www.agner.org/
    { ISD::FMUL, MVT::f32,    1 }, // Nehalem from http://www.agner.org/
    { ISD::FMUL, MVT::v2f64,  1 }, // Nehalem from http://www.agner.org/
    { ISD::FMUL, MVT::v4f32,  1 }, // Nehalem from http://www.agner.org/

    { ISD::FDIV, MVT::f32,   14 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32, 14 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::f64,   22 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64, 22 }, // Nehalem from http://www.agner.org/
  };

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE41CostTable[] = {
    { ISD::SHL,  MVT::v16i8,      11 }, // pblendvb sequence.
    { ISD::SHL,  MVT::v32i8,  2*11+2 }, // pblendvb sequence + split.
    { ISD::SHL,  MVT::v8i16,      14 }, // pblendvb sequence.
    { ISD::SHL,  MVT::v16i16, 2*14+2 }, // pblendvb sequence + split.
    { ISD::SHL,  MVT::v4i32,       4 }, // pslld/paddd/cvttps2dq/pmulld
    { ISD::SHL,  MVT::v8i32,   2*4+2 }, // pslld/paddd/cvttps2dq/pmulld + split

    { ISD::SRL,  MVT::v16i8,      12 }, // pblendvb sequence.
    { ISD::SRL,  MVT::v32i8,  2*12+2 }, // pblendvb sequence + split.
    { ISD::SRL,  MVT::v8i16,      14 }, // pblendvb sequence.
    { ISD::SRL,  MVT::v16i16, 2*14+2 }, // pblendvb sequence + split.
    { ISD::SRL,  MVT::v4i32,      11 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v8i32,  2*11+2 }, // Shift each lane + blend + split.

    { ISD::SRA,  MVT::v16i8,      24 }, // pblendvb sequence.
    { ISD::SRA,  MVT::v32i8,  2*24+2 }, // pblendvb sequence + split.
    { ISD::SRA,  MVT::v8i16,      14 }, // pblendvb sequence.
    { ISD::SRA,  MVT::v16i16, 2*14+2 }, // pblendvb sequence + split.
    { ISD::SRA,  MVT::v4i32,      12 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v8i32,  2*12+2 }, // Shift each lane + blend + split.

    { ISD::MUL,  MVT::v4i32,       2 }  // pmulld (Nehalem from agner.org)
  };

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    { ISD::SHL,  MVT::v16i8,      26 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v8i16,      32 }, // cmpgtw sequence.
    { ISD::SHL,  MVT::v4i32,     2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v2i64,       4 }, // splat+shuffle sequence.
    { ISD::SHL,  MVT::v4i64,   2*4+2 }, // splat+shuffle sequence + split.

    { ISD::SRL,  MVT::v16i8,      26 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v8i16,      32 }, // cmpgtw sequence.
    { ISD::SRL,  MVT::v4i32,      16 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v2i64,       4 }, // splat+shuffle sequence.
    { ISD::SRL,  MVT::v4i64,   2*4+2 }, // splat+shuffle sequence + split.

    { ISD::SRA,  MVT::v16i8,      54 }, // unpacked cmpgtb sequence.
    { ISD::SRA,  MVT::v8i16,      32 }, // cmpgtw sequence.
    { ISD::SRA,  MVT::v4i32,      16 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v2i64,      12 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,  2*12+2 }, // srl/xor/sub sequence+split.

    { ISD::MUL,  MVT::v16i8,      12 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v8i16,       1 }, // pmullw
    { ISD::MUL,  MVT::v4i32,       6 }, // 3*pmuludq/4*shuffle
    { ISD::MUL,  MVT::v2i64,       8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FDIV, MVT::f32,        23 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,      39 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::f64,        38 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,      69 }, // Pentium IV from http://www.agner.org/

    { ISD::FADD, MVT::f32,         2 }, // Pentium IV from http://www.agner.org/
    { ISD::FADD, MVT::f64,         2 }, // Pentium IV from http://www.agner.org/

    { ISD::FSUB, MVT::f32,         2 }, // Pentium IV from http://www.agner.org/
    { ISD::FSUB, MVT::f64,         2 }, // Pentium IV from http://www.agner.org/
  };

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE1CostTable[] = {
    { ISD::FDIV, MVT::f32,   17 }, // Pentium III from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32, 34 }, // Pentium III from http://www.agner.org/

    { ISD::FADD, MVT::f32,    1 }, // Pentium III from http://www.agner.org/
    { ISD::FADD, MVT::v4f32,  2 }, // Pentium III from http://www.agner.org/

    { ISD::FSUB, MVT::f32,    1 }, // Pentium III from http://www.agner.org/
    { ISD::FSUB, MVT::v4f32,  2 }, // Pentium III from http://www.agner.org/

    { ISD::ADD,  MVT::i8,     1 }, // Pentium III from http://www.agner.org/
    { ISD::ADD,  MVT::i16,    1 }, // Pentium III from http://www.agner.org/
    { ISD::ADD,  MVT::i32,    1 }, // Pentium III from http://www.agner.org/

    { ISD::SUB,  MVT::i8,     1 }, // Pentium III from http://www.agner.org/
    { ISD::SUB,  MVT::i16,    1 }, // Pentium III from http://www.agner.org/
    { ISD::SUB,  MVT::i32,    1 }, // Pentium III from http://www.agner.org/
  };

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  // It is not a good idea to vectorize division. We have to scalarize it and
  // in the process we will often end up having to spill regular
  // registers. The overhead of division is going to dominate most kernels
  // anyways so try hard to prevent vectorization of division - it is
  // generally a bad idea. Assume somewhat arbitrarily that we have to be able
  // to hide "20 cycles" for each lane.
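  // For example, an SSE2 v8i32 sdiv by a non-constant therefore costs
  // 20 * LT.first (2 splits) * 4 lanes * the scalar sdiv cost.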
  if (LT.second.isVector() && (ISD == ISD::SDIV || ISD == ISD::SREM ||
                               ISD == ISD::UDIV || ISD == ISD::UREM)) {
    int ScalarCost = getArithmeticInstrCost(
        Opcode, Ty->getScalarType(), CostKind, Op1Info, Op2Info,
        TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
    return 20 * LT.first * LT.second.getVectorNumElements() * ScalarCost;
  }

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info);
}

int X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, VectorType *BaseTp,
                               int Index, VectorType *SubTp) {
  // 64-bit packed float vectors (v2f32) are widened to type v4f32.
  // 64-bit packed integer vectors (v2i32) are widened to type v4i32.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, BaseTp);

  // Treat Transpose as 2-op shuffles - there's no difference in lowering.
  if (Kind == TTI::SK_Transpose)
    Kind = TTI::SK_PermuteTwoSrc;

  // For Broadcasts we are splatting the first element from the first input
  // register, so only need to reference that input and all the output
  // registers are the same.
  if (Kind == TTI::SK_Broadcast)
    LT.first = 1;

  // Subvector extractions are free if they start at the beginning of a
  // vector and cheap if the subvectors are aligned.
  if (Kind == TTI::SK_ExtractSubvector && LT.second.isVector()) {
    int NumElts = LT.second.getVectorNumElements();
    if ((Index % NumElts) == 0)
      return 0;
    std::pair<int, MVT> SubLT = TLI->getTypeLegalizationCost(DL, SubTp);
    if (SubLT.second.isVector()) {
      int NumSubElts = SubLT.second.getVectorNumElements();
      if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
        return SubLT.first;
      // Handle some cases for widening legalization. For now we only handle
      // cases where the original subvector was naturally aligned and evenly
      // fit in its legalized subvector type.
      // FIXME: Remove some of the alignment restrictions.
      // FIXME: We can use permq for 64-bit or larger extracts from 256-bit
      // vectors.
      int OrigSubElts = cast<VectorType>(SubTp)->getNumElements();
      if (NumSubElts > OrigSubElts && (Index % OrigSubElts) == 0 &&
          (NumSubElts % OrigSubElts) == 0 &&
          LT.second.getVectorElementType() ==
              SubLT.second.getVectorElementType() &&
          LT.second.getVectorElementType().getSizeInBits() ==
              BaseTp->getElementType()->getPrimitiveSizeInBits()) {
        assert(NumElts >= NumSubElts && NumElts > OrigSubElts &&
               "Unexpected number of elements!");
        VectorType *VecTy = VectorType::get(BaseTp->getElementType(),
                                            LT.second.getVectorNumElements());
        VectorType *SubTy =
            VectorType::get(BaseTp->getElementType(),
                            SubLT.second.getVectorNumElements());
        int ExtractIndex = alignDown((Index % NumElts), NumSubElts);
        int ExtractCost = getShuffleCost(TTI::SK_ExtractSubvector, VecTy,
                                         ExtractIndex, SubTy);

        // If the original size is 32 bits or more, we can use pshufd.
        // Otherwise, if we have SSSE3 we can use pshufb.
        if (SubTp->getPrimitiveSizeInBits() >= 32 || ST->hasSSSE3())
          return ExtractCost + 1; // pshufd or pshufb

        assert(SubTp->getPrimitiveSizeInBits() == 16 &&
               "Unexpected vector size");

        return ExtractCost + 2; // worst case pshufhw + pshufd
      }
    }
  }

  // Handle some common (illegal) sub-vector types as they are often very cheap
  // to shuffle even on targets without PSHUFB.
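  // Note that the lookup below deliberately uses the original (pre-widening)
  // type, so these sub-128-bit entries are matched before legalization widens
  // them to 128 bits.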
  EVT VT = TLI->getValueType(DL, BaseTp);
  if (VT.isSimple() && VT.isVector() && VT.getSizeInBits() < 128 &&
      !ST->hasSSSE3()) {
    static const CostTblEntry SSE2SubVectorShuffleTbl[] = {
      {TTI::SK_Broadcast,        MVT::v4i16, 1}, // pshuflw
      {TTI::SK_Broadcast,        MVT::v2i16, 1}, // pshuflw
      {TTI::SK_Broadcast,        MVT::v8i8,  2}, // punpck/pshuflw
      {TTI::SK_Broadcast,        MVT::v4i8,  2}, // punpck/pshuflw
      {TTI::SK_Broadcast,        MVT::v2i8,  1}, // punpck

      {TTI::SK_Reverse,          MVT::v4i16, 1}, // pshuflw
      {TTI::SK_Reverse,          MVT::v2i16, 1}, // pshuflw
      {TTI::SK_Reverse,          MVT::v4i8,  3}, // punpck/pshuflw/packus
      {TTI::SK_Reverse,          MVT::v2i8,  1}, // punpck

      {TTI::SK_PermuteTwoSrc,    MVT::v4i16, 2}, // punpck/pshuflw
      {TTI::SK_PermuteTwoSrc,    MVT::v2i16, 2}, // punpck/pshuflw
      {TTI::SK_PermuteTwoSrc,    MVT::v8i8,  7}, // punpck/pshuflw
      {TTI::SK_PermuteTwoSrc,    MVT::v4i8,  4}, // punpck/pshuflw
      {TTI::SK_PermuteTwoSrc,    MVT::v2i8,  2}, // punpck

      {TTI::SK_PermuteSingleSrc, MVT::v4i16, 1}, // pshuflw
      {TTI::SK_PermuteSingleSrc, MVT::v2i16, 1}, // pshuflw
      {TTI::SK_PermuteSingleSrc, MVT::v8i8,  5}, // punpck/pshuflw
      {TTI::SK_PermuteSingleSrc, MVT::v4i8,  3}, // punpck/pshuflw
      {TTI::SK_PermuteSingleSrc, MVT::v2i8,  1}, // punpck
    };

    if (ST->hasSSE2())
      if (const auto *Entry =
              CostTableLookup(SSE2SubVectorShuffleTbl, Kind, VT.getSimpleVT()))
        return Entry->Cost;
  }

  // We are going to permute multiple sources and the result will be in
  // multiple destinations. We provide an accurate cost only for splits where
  // the element type remains the same.
  if (Kind == TTI::SK_PermuteSingleSrc && LT.first != 1) {
    MVT LegalVT = LT.second;
    if (LegalVT.isVector() &&
        LegalVT.getVectorElementType().getSizeInBits() ==
            BaseTp->getElementType()->getPrimitiveSizeInBits() &&
        LegalVT.getVectorNumElements() < BaseTp->getNumElements()) {

      unsigned VecTySize = DL.getTypeStoreSize(BaseTp);
      unsigned LegalVTSize = LegalVT.getStoreSize();
      // Number of source vectors after legalization:
      unsigned NumOfSrcs = (VecTySize + LegalVTSize - 1) / LegalVTSize;
      // Number of destination vectors after legalization:
      unsigned NumOfDests = LT.first;

      VectorType *SingleOpTy =
          VectorType::get(BaseTp->getElementType(),
                          LegalVT.getVectorNumElements());

      unsigned NumOfShuffles = (NumOfSrcs - 1) * NumOfDests;
      return NumOfShuffles *
             getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy, 0, nullptr);
    }

    return BaseT::getShuffleCost(Kind, BaseTp, Index, SubTp);
  }

  // For 2-input shuffles, we must account for splitting the 2 inputs into
  // many.
  if (Kind == TTI::SK_PermuteTwoSrc && LT.first != 1) {
    // We assume that source and destination have the same vector type.
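    // Each destination register may need data from any of the 2 * LT.first
    // legalized source registers, which we model as (2 * LT.first - 1)
    // two-input shuffles per destination.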
    int NumOfDests = LT.first;
    int NumOfShufflesPerDest = LT.first * 2 - 1;
    LT.first = NumOfDests * NumOfShufflesPerDest;
  }

  static const CostTblEntry AVX512VBMIShuffleTbl[] = {
    {TTI::SK_Reverse,          MVT::v64i8, 1}, // vpermb
    {TTI::SK_Reverse,          MVT::v32i8, 1}, // vpermb

    {TTI::SK_PermuteSingleSrc, MVT::v64i8, 1}, // vpermb
    {TTI::SK_PermuteSingleSrc, MVT::v32i8, 1}, // vpermb

    {TTI::SK_PermuteTwoSrc,    MVT::v64i8, 2}, // vpermt2b
    {TTI::SK_PermuteTwoSrc,    MVT::v32i8, 2}, // vpermt2b
    {TTI::SK_PermuteTwoSrc,    MVT::v16i8, 2}  // vpermt2b
  };

  if (ST->hasVBMI())
    if (const auto *Entry =
            CostTableLookup(AVX512VBMIShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512BWShuffleTbl[] = {
    {TTI::SK_Broadcast,        MVT::v32i16, 1}, // vpbroadcastw
    {TTI::SK_Broadcast,        MVT::v64i8,  1}, // vpbroadcastb

    {TTI::SK_Reverse,          MVT::v32i16, 2}, // vpermw
    {TTI::SK_Reverse,          MVT::v16i16, 2}, // vpermw
    {TTI::SK_Reverse,          MVT::v64i8,  2}, // pshufb + vshufi64x2

    {TTI::SK_PermuteSingleSrc, MVT::v32i16, 2}, // vpermw
    {TTI::SK_PermuteSingleSrc, MVT::v16i16, 2}, // vpermw
    {TTI::SK_PermuteSingleSrc, MVT::v64i8,  8}, // extend to v32i16

    {TTI::SK_PermuteTwoSrc,    MVT::v32i16, 2}, // vpermt2w
    {TTI::SK_PermuteTwoSrc,    MVT::v16i16, 2}, // vpermt2w
    {TTI::SK_PermuteTwoSrc,    MVT::v8i16,  2}, // vpermt2w
    {TTI::SK_PermuteTwoSrc,    MVT::v64i8, 19}, // 6 * v32i8 + 1
  };

  if (ST->hasBWI())
    if (const auto *Entry =
            CostTableLookup(AVX512BWShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512ShuffleTbl[] = {
    {TTI::SK_Broadcast,        MVT::v8f64,  1}, // vbroadcastpd
    {TTI::SK_Broadcast,        MVT::v16f32, 1}, // vbroadcastps
    {TTI::SK_Broadcast,        MVT::v8i64,  1}, // vpbroadcastq
    {TTI::SK_Broadcast,        MVT::v16i32, 1}, // vpbroadcastd
    {TTI::SK_Broadcast,        MVT::v32i16, 1}, // vpbroadcastw
    {TTI::SK_Broadcast,        MVT::v64i8,  1}, // vpbroadcastb

    {TTI::SK_Reverse,          MVT::v8f64,  1}, // vpermpd
    {TTI::SK_Reverse,          MVT::v16f32, 1}, // vpermps
    {TTI::SK_Reverse,          MVT::v8i64,  1}, // vpermq
    {TTI::SK_Reverse,          MVT::v16i32, 1}, // vpermd

    {TTI::SK_PermuteSingleSrc, MVT::v8f64,  1}, // vpermpd
    {TTI::SK_PermuteSingleSrc, MVT::v4f64,  1}, // vpermpd
    {TTI::SK_PermuteSingleSrc, MVT::v2f64,  1}, // vpermpd
    {TTI::SK_PermuteSingleSrc, MVT::v16f32, 1}, // vpermps
    {TTI::SK_PermuteSingleSrc, MVT::v8f32,  1}, // vpermps
    {TTI::SK_PermuteSingleSrc, MVT::v4f32,  1}, // vpermps
    {TTI::SK_PermuteSingleSrc, MVT::v8i64,  1}, // vpermq
    {TTI::SK_PermuteSingleSrc, MVT::v4i64,  1}, // vpermq
    {TTI::SK_PermuteSingleSrc, MVT::v2i64,  1}, // vpermq
    {TTI::SK_PermuteSingleSrc, MVT::v16i32, 1}, // vpermd
    {TTI::SK_PermuteSingleSrc, MVT::v8i32,  1}, // vpermd
    {TTI::SK_PermuteSingleSrc, MVT::v4i32,  1}, // vpermd
    {TTI::SK_PermuteSingleSrc, MVT::v16i8,  1}, // pshufb

    {TTI::SK_PermuteTwoSrc,    MVT::v8f64,  1}, // vpermt2pd
    {TTI::SK_PermuteTwoSrc,    MVT::v16f32, 1}, // vpermt2ps
    {TTI::SK_PermuteTwoSrc,    MVT::v8i64,  1}, // vpermt2q
    {TTI::SK_PermuteTwoSrc,    MVT::v16i32, 1}, // vpermt2d
    {TTI::SK_PermuteTwoSrc,    MVT::v4f64,  1}, // vpermt2pd
    {TTI::SK_PermuteTwoSrc,    MVT::v8f32,  1}, // vpermt2ps
    {TTI::SK_PermuteTwoSrc,    MVT::v4i64,  1}, // vpermt2q
    {TTI::SK_PermuteTwoSrc,    MVT::v8i32,  1}, // vpermt2d
    {TTI::SK_PermuteTwoSrc,    MVT::v2f64,  1}, // vpermt2pd
    {TTI::SK_PermuteTwoSrc,    MVT::v4f32,  1}, // vpermt2ps
    {TTI::SK_PermuteTwoSrc,    MVT::v2i64,  1}, // vpermt2q
    {TTI::SK_PermuteTwoSrc,    MVT::v4i32,  1}, // vpermt2d

    // FIXME: This just applies the type legalization cost rules above
    // assuming these completely split.
    {TTI::SK_PermuteSingleSrc, MVT::v32i16, 14},
    {TTI::SK_PermuteSingleSrc, MVT::v64i8,  14},
    {TTI::SK_PermuteTwoSrc,    MVT::v32i16, 42},
    {TTI::SK_PermuteTwoSrc,    MVT::v64i8,  42},
  };

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX2ShuffleTbl[] = {
    {TTI::SK_Broadcast,        MVT::v4f64,  1}, // vbroadcastpd
    {TTI::SK_Broadcast,        MVT::v8f32,  1}, // vbroadcastps
    {TTI::SK_Broadcast,        MVT::v4i64,  1}, // vpbroadcastq
    {TTI::SK_Broadcast,        MVT::v8i32,  1}, // vpbroadcastd
    {TTI::SK_Broadcast,        MVT::v16i16, 1}, // vpbroadcastw
    {TTI::SK_Broadcast,        MVT::v32i8,  1}, // vpbroadcastb

    {TTI::SK_Reverse,          MVT::v4f64,  1}, // vpermpd
    {TTI::SK_Reverse,          MVT::v8f32,  1}, // vpermps
    {TTI::SK_Reverse,          MVT::v4i64,  1}, // vpermq
    {TTI::SK_Reverse,          MVT::v8i32,  1}, // vpermd
    {TTI::SK_Reverse,          MVT::v16i16, 2}, // vperm2i128 + pshufb
    {TTI::SK_Reverse,          MVT::v32i8,  2}, // vperm2i128 + pshufb

    {TTI::SK_Select,           MVT::v16i16, 1}, // vpblendvb
    {TTI::SK_Select,           MVT::v32i8,  1}, // vpblendvb

    {TTI::SK_PermuteSingleSrc, MVT::v4f64,  1}, // vpermpd
    {TTI::SK_PermuteSingleSrc, MVT::v8f32,  1}, // vpermps
    {TTI::SK_PermuteSingleSrc, MVT::v4i64,  1}, // vpermq
    {TTI::SK_PermuteSingleSrc, MVT::v8i32,  1}, // vpermd
    {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vperm2i128 + 2*vpshufb
                                                // + vpblendvb
    {TTI::SK_PermuteSingleSrc, MVT::v32i8,  4}, // vperm2i128 + 2*vpshufb
                                                // + vpblendvb

    {TTI::SK_PermuteTwoSrc,    MVT::v4f64,  3}, // 2*vpermpd + vblendpd
    {TTI::SK_PermuteTwoSrc,    MVT::v8f32,  3}, // 2*vpermps + vblendps
    {TTI::SK_PermuteTwoSrc,    MVT::v4i64,  3}, // 2*vpermq + vpblendd
    {TTI::SK_PermuteTwoSrc,    MVT::v8i32,  3}, // 2*vpermd + vpblendd
    {TTI::SK_PermuteTwoSrc,    MVT::v16i16, 7}, // 2*vperm2i128 + 4*vpshufb
                                                // + vpblendvb
    {TTI::SK_PermuteTwoSrc,    MVT::v32i8,  7}, // 2*vperm2i128 + 4*vpshufb
                                                // + vpblendvb
  };

  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry XOPShuffleTbl[] = {
    {TTI::SK_PermuteSingleSrc, MVT::v4f64,  2}, // vperm2f128 + vpermil2pd
    {TTI::SK_PermuteSingleSrc, MVT::v8f32,  2}, // vperm2f128 + vpermil2ps
    {TTI::SK_PermuteSingleSrc, MVT::v4i64,  2}, // vperm2f128 + vpermil2pd
    {TTI::SK_PermuteSingleSrc, MVT::v8i32,  2}, // vperm2f128 + vpermil2ps
    {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vextractf128 + 2*vpperm
                                                // + vinsertf128
    {TTI::SK_PermuteSingleSrc, MVT::v32i8,  4}, // vextractf128 + 2*vpperm
                                                // + vinsertf128

    {TTI::SK_PermuteTwoSrc,    MVT::v16i16, 9}, // 2*vextractf128 + 6*vpperm
                                                // + vinsertf128
    {TTI::SK_PermuteTwoSrc,    MVT::v8i16,  1}, // vpperm
    {TTI::SK_PermuteTwoSrc,    MVT::v32i8,  9}, // 2*vextractf128 + 6*vpperm
                                                // + vinsertf128
    {TTI::SK_PermuteTwoSrc,    MVT::v16i8,  1}, // vpperm
  };

  if (ST->hasXOP())
    if (const auto *Entry = CostTableLookup(XOPShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX1ShuffleTbl[] = {
    {TTI::SK_Broadcast,        MVT::v4f64,  2}, // vperm2f128 + vpermilpd
    {TTI::SK_Broadcast,        MVT::v8f32,  2}, // vperm2f128 + vpermilps
    {TTI::SK_Broadcast,        MVT::v4i64,  2}, // vperm2f128 + vpermilpd
    {TTI::SK_Broadcast,        MVT::v8i32,  2}, // vperm2f128 + vpermilps
    {TTI::SK_Broadcast,        MVT::v16i16, 3}, // vpshuflw + vpshufd + vinsertf128
    {TTI::SK_Broadcast,        MVT::v32i8,  2}, // vpshufb + vinsertf128

    {TTI::SK_Reverse,          MVT::v4f64,  2}, // vperm2f128 + vpermilpd
    {TTI::SK_Reverse,          MVT::v8f32,  2}, // vperm2f128 + vpermilps
    {TTI::SK_Reverse,          MVT::v4i64,  2}, // vperm2f128 + vpermilpd
    {TTI::SK_Reverse,          MVT::v8i32,  2}, // vperm2f128 + vpermilps
    {TTI::SK_Reverse,          MVT::v16i16, 4}, // vextractf128 + 2*pshufb
                                                // + vinsertf128
    {TTI::SK_Reverse,          MVT::v32i8,  4}, // vextractf128 + 2*pshufb
                                                // + vinsertf128

    {TTI::SK_Select,           MVT::v4i64,  1}, // vblendpd
    {TTI::SK_Select,           MVT::v4f64,  1}, // vblendpd
    {TTI::SK_Select,           MVT::v8i32,  1}, // vblendps
    {TTI::SK_Select,           MVT::v8f32,  1}, // vblendps
    {TTI::SK_Select,           MVT::v16i16, 3}, // vpand + vpandn + vpor
    {TTI::SK_Select,           MVT::v32i8,  3}, // vpand + vpandn + vpor

    {TTI::SK_PermuteSingleSrc, MVT::v4f64,  2}, // vperm2f128 + vshufpd
    {TTI::SK_PermuteSingleSrc, MVT::v4i64,  2}, // vperm2f128 + vshufpd
    {TTI::SK_PermuteSingleSrc, MVT::v8f32,  4}, // 2*vperm2f128 + 2*vshufps
    {TTI::SK_PermuteSingleSrc, MVT::v8i32,  4}, // 2*vperm2f128 + 2*vshufps
    {TTI::SK_PermuteSingleSrc, MVT::v16i16, 8}, // vextractf128 + 4*pshufb
                                                // + 2*por + vinsertf128
    {TTI::SK_PermuteSingleSrc, MVT::v32i8,  8}, // vextractf128 + 4*pshufb
                                                // + 2*por + vinsertf128

    {TTI::SK_PermuteTwoSrc,    MVT::v4f64,   3}, // 2*vperm2f128 + vshufpd
    {TTI::SK_PermuteTwoSrc,    MVT::v4i64,   3}, // 2*vperm2f128 + vshufpd
    {TTI::SK_PermuteTwoSrc,    MVT::v8f32,   4}, // 2*vperm2f128 + 2*vshufps
    {TTI::SK_PermuteTwoSrc,    MVT::v8i32,   4}, // 2*vperm2f128 + 2*vshufps
    {TTI::SK_PermuteTwoSrc,    MVT::v16i16, 15}, // 2*vextractf128 + 8*pshufb
                                                 // + 4*por + vinsertf128
    {TTI::SK_PermuteTwoSrc,    MVT::v32i8,  15}, // 2*vextractf128 + 8*pshufb
                                                 // + 4*por + vinsertf128
  };

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE41ShuffleTbl[] = {
    {TTI::SK_Select,           MVT::v2i64, 1}, // pblendw
    {TTI::SK_Select,           MVT::v2f64, 1}, // movsd
    {TTI::SK_Select,           MVT::v4i32, 1}, // pblendw
    {TTI::SK_Select,           MVT::v4f32, 1}, // blendps
    {TTI::SK_Select,           MVT::v8i16, 1}, // pblendw
    {TTI::SK_Select,           MVT::v16i8, 1}  // pblendvb
  };

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSSE3ShuffleTbl[] = {
    {TTI::SK_Broadcast,        MVT::v8i16, 1}, // pshufb
    {TTI::SK_Broadcast,        MVT::v16i8, 1}, // pshufb

    {TTI::SK_Reverse,          MVT::v8i16, 1}, // pshufb
    {TTI::SK_Reverse,          MVT::v16i8, 1}, // pshufb

    {TTI::SK_Select,           MVT::v8i16, 3}, // 2*pshufb + por
    {TTI::SK_Select,           MVT::v16i8, 3}, // 2*pshufb + por

    {TTI::SK_PermuteSingleSrc, MVT::v8i16, 1}, // pshufb
    {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1}, // pshufb

    {TTI::SK_PermuteTwoSrc,    MVT::v8i16, 3}, // 2*pshufb + por
    {TTI::SK_PermuteTwoSrc,    MVT::v16i8, 3}, // 2*pshufb + por
  };

  if (ST->hasSSSE3())
    if (const auto *Entry = CostTableLookup(SSSE3ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE2ShuffleTbl[] = {
    {TTI::SK_Broadcast,        MVT::v2f64, 1}, // shufpd
    {TTI::SK_Broadcast,        MVT::v2i64, 1}, // pshufd
    {TTI::SK_Broadcast,        MVT::v4i32, 1}, // pshufd
    {TTI::SK_Broadcast,        MVT::v8i16, 2}, // pshuflw + pshufd
    {TTI::SK_Broadcast,        MVT::v16i8, 3}, // unpck + pshuflw + pshufd

    {TTI::SK_Reverse,          MVT::v2f64, 1}, // shufpd
    {TTI::SK_Reverse,          MVT::v2i64, 1}, // pshufd
    {TTI::SK_Reverse,          MVT::v4i32, 1}, // pshufd
    {TTI::SK_Reverse,          MVT::v8i16, 3}, // pshuflw + pshufhw + pshufd
    {TTI::SK_Reverse,          MVT::v16i8, 9}, // 2*pshuflw + 2*pshufhw
                                               // + 2*pshufd + 2*unpck + packus

    {TTI::SK_Select,           MVT::v2i64, 1}, // movsd
    {TTI::SK_Select,           MVT::v2f64, 1}, // movsd
    {TTI::SK_Select,           MVT::v4i32, 2}, // 2*shufps
    {TTI::SK_Select,           MVT::v8i16, 3}, // pand + pandn + por
    {TTI::SK_Select,           MVT::v16i8, 3}, // pand + pandn + por

    {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1}, // shufpd
    {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1}, // pshufd
    {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1}, // pshufd
    {TTI::SK_PermuteSingleSrc, MVT::v8i16, 5}, // 2*pshuflw + 2*pshufhw
                                               // + pshufd/unpck
    {TTI::SK_PermuteSingleSrc, MVT::v16i8, 10}, // 2*pshuflw + 2*pshufhw
                                                // + 2*pshufd + 2*unpck + 2*packus

    {TTI::SK_PermuteTwoSrc,    MVT::v2f64, 1},  // shufpd
    {TTI::SK_PermuteTwoSrc,    MVT::v2i64, 1},  // shufpd
    {TTI::SK_PermuteTwoSrc,    MVT::v4i32, 2},  // 2*{unpck,movsd,pshufd}
    {TTI::SK_PermuteTwoSrc,    MVT::v8i16, 8},  // blend+permute
    {TTI::SK_PermuteTwoSrc,    MVT::v16i8, 13}, // blend+permute
  };

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE1ShuffleTbl[] = {
    { TTI::SK_Broadcast,        MVT::v4f32, 1 }, // shufps
    { TTI::SK_Reverse,          MVT::v4f32, 1 }, // shufps
    { TTI::SK_Select,           MVT::v4f32, 2 }, // 2*shufps
    { TTI::SK_PermuteSingleSrc, MVT::v4f32, 1 }, // shufps
    { TTI::SK_PermuteTwoSrc,    MVT::v4f32, 2 }, // 2*shufps
  };

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  return BaseT::getShuffleCost(Kind, BaseTp, Index, SubTp);
}

int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                 TTI::TargetCostKind CostKind,
                                 const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // TODO: Allow non-throughput costs that aren't binary.
  auto AdjustCost = [&CostKind](int Cost) {
    if (CostKind != TTI::TCK_RecipThroughput)
      return Cost == 0 ? 0 : 1;
    return Cost;
  };

  // FIXME: Need a better design of the cost table to handle non-simple types
  // of potential massive combinations (elem_num x src_type x dst_type).

  static const TypeConversionCostTblEntry AVX512BWConversionTbl[] {
    { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 1 },

    // Mask sign extend has an instruction.
    { ISD::SIGN_EXTEND, MVT::v2i8,   MVT::v2i1,   1 },
    { ISD::SIGN_EXTEND, MVT::v2i16,  MVT::v2i1,   1 },
    { ISD::SIGN_EXTEND, MVT::v4i8,   MVT::v4i1,   1 },
    { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i1,   1 },
    { ISD::SIGN_EXTEND, MVT::v8i8,   MVT::v8i1,   1 },
    { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i1,   1 },
    { ISD::SIGN_EXTEND, MVT::v16i8,  MVT::v16i1,  1 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1,  1 },
    { ISD::SIGN_EXTEND, MVT::v32i8,  MVT::v32i1,  1 },
    { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i1,  1 },
    { ISD::SIGN_EXTEND, MVT::v64i8,  MVT::v64i1,  1 },

    // Mask zero extend is a sext + shift.
    { ISD::ZERO_EXTEND, MVT::v2i8,   MVT::v2i1,   2 },
    { ISD::ZERO_EXTEND, MVT::v2i16,  MVT::v2i1,   2 },
    { ISD::ZERO_EXTEND, MVT::v4i8,   MVT::v4i1,   2 },
    { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i1,   2 },
    { ISD::ZERO_EXTEND, MVT::v8i8,   MVT::v8i1,   2 },
    { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i1,   2 },
    { ISD::ZERO_EXTEND, MVT::v16i8,  MVT::v16i1,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1,  2 },
    { ISD::ZERO_EXTEND, MVT::v32i8,  MVT::v32i1,  2 },
    { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i1,  2 },
    { ISD::ZERO_EXTEND, MVT::v64i8,  MVT::v64i1,  2 },

    { ISD::TRUNCATE,    MVT::v32i8,  MVT::v32i16, 2 },
    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 2 }, // widen to zmm
    { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i8,   2 }, // widen to zmm
    { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i16,  2 }, // widen to zmm
    { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i8,   2 }, // widen to zmm
    { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i16,  2 }, // widen to zmm
    { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i8,   2 }, // widen to zmm
    { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i16,  2 }, // widen to zmm
    { ISD::TRUNCATE,    MVT::v16i1,  MVT::v16i8,  2 }, // widen to zmm
    { ISD::TRUNCATE,    MVT::v16i1,  MVT::v16i16, 2 }, // widen to zmm
    { ISD::TRUNCATE,    MVT::v32i1,  MVT::v32i8,  2 }, // widen to zmm
    { ISD::TRUNCATE,    MVT::v32i1,  MVT::v32i16, 2 },
    { ISD::TRUNCATE,    MVT::v64i1,  MVT::v64i8,  2 },
  };

  static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = {
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i64,  1 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i64,  1 },

    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i64,  1 },
    { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i64,  1 },

    { ISD::FP_TO_SINT,  MVT::v8i64,  MVT::v8f32,  1 },
    { ISD::FP_TO_SINT,  MVT::v8i64,  MVT::v8f64,  1 },

    { ISD::FP_TO_UINT,  MVT::v8i64,  MVT::v8f32,  1 },
    { ISD::FP_TO_UINT,  MVT::v8i64,  MVT::v8f64,  1 },
  };
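  // (The unit costs above rely on AVX512DQ's direct vcvtqq2ps/vcvtqq2pd and
  // vcvttps2qq/vcvttpd2qq style instructions for i64 <-> fp; without DQ these
  // conversions are multi-instruction sequences, as the AVX512F table below
  // shows.)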
  // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and
  // 256-bit wide vectors.

  static const TypeConversionCostTblEntry AVX512FConversionTbl[] = {
    { ISD::FP_EXTEND, MVT::v8f64,  MVT::v8f32,  1 },
    { ISD::FP_EXTEND, MVT::v8f64,  MVT::v16f32, 3 },
    { ISD::FP_ROUND,  MVT::v8f32,  MVT::v8f64,  1 },

    { ISD::TRUNCATE,  MVT::v2i1,   MVT::v2i8,   3 }, // sext+vpslld+vptestmd
    { ISD::TRUNCATE,  MVT::v4i1,   MVT::v4i8,   3 }, // sext+vpslld+vptestmd
    { ISD::TRUNCATE,  MVT::v8i1,   MVT::v8i8,   3 }, // sext+vpslld+vptestmd
    { ISD::TRUNCATE,  MVT::v16i1,  MVT::v16i8,  3 }, // sext+vpslld+vptestmd
    { ISD::TRUNCATE,  MVT::v2i1,   MVT::v2i16,  3 }, // sext+vpsllq+vptestmq
    { ISD::TRUNCATE,  MVT::v4i1,   MVT::v4i16,  3 }, // sext+vpsllq+vptestmq
    { ISD::TRUNCATE,  MVT::v8i1,   MVT::v8i16,  3 }, // sext+vpsllq+vptestmq
    { ISD::TRUNCATE,  MVT::v16i1,  MVT::v16i16, 3 }, // sext+vpslld+vptestmd
    { ISD::TRUNCATE,  MVT::v2i1,   MVT::v2i32,  2 }, // zmm vpslld+vptestmd
    { ISD::TRUNCATE,  MVT::v4i1,   MVT::v4i32,  2 }, // zmm vpslld+vptestmd
    { ISD::TRUNCATE,  MVT::v8i1,   MVT::v8i32,  2 }, // zmm vpslld+vptestmd
    { ISD::TRUNCATE,  MVT::v16i1,  MVT::v16i32, 2 }, // vpslld+vptestmd
    { ISD::TRUNCATE,  MVT::v2i1,   MVT::v2i64,  2 }, // zmm vpsllq+vptestmq
    { ISD::TRUNCATE,  MVT::v4i1,   MVT::v4i64,  2 }, // zmm vpsllq+vptestmq
    { ISD::TRUNCATE,  MVT::v8i1,   MVT::v8i64,  2 }, // vpsllq+vptestmq
    { ISD::TRUNCATE,  MVT::v16i8,  MVT::v16i32, 2 },
    { ISD::TRUNCATE,  MVT::v16i16, MVT::v16i32, 2 },
    { ISD::TRUNCATE,  MVT::v8i8,   MVT::v8i64,  2 },
    { ISD::TRUNCATE,  MVT::v8i16,  MVT::v8i64,  2 },
    { ISD::TRUNCATE,  MVT::v8i32,  MVT::v8i64,  1 },
    { ISD::TRUNCATE,  MVT::v4i32,  MVT::v4i64,  1 }, // zmm vpmovqd
    { ISD::TRUNCATE,  MVT::v16i8,  MVT::v16i64, 5 }, // 2*vpmovqd+concat+vpmovdb

    { ISD::TRUNCATE,  MVT::v16i8,  MVT::v16i16, 3 }, // extend to v16i32
    { ISD::TRUNCATE,  MVT::v32i8,  MVT::v32i16, 8 },

    // Sign extend is zmm vpternlogd+vptruncdb.
    // Zero extend is zmm broadcast load+vptruncdw.
    { ISD::SIGN_EXTEND, MVT::v2i8,  MVT::v2i1,  3 },
    { ISD::ZERO_EXTEND, MVT::v2i8,  MVT::v2i1,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i8,  MVT::v4i1,  3 },
    { ISD::ZERO_EXTEND, MVT::v4i8,  MVT::v4i1,  4 },
    { ISD::SIGN_EXTEND, MVT::v8i8,  MVT::v8i1,  3 },
    { ISD::ZERO_EXTEND, MVT::v8i8,  MVT::v8i1,  4 },
    { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 3 },
    { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 4 },

    // Sign extend is zmm vpternlogd+vptruncdw.
    // Zero extend is zmm vpternlogd+vptruncdw+vpsrlw.
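    // (vpternlogd with an all-ones immediate under zeroing masking splats -1
    // into the active lanes, which is presumably what keeps the mask sign
    // extends above and below this cheap.)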
    { ISD::SIGN_EXTEND, MVT::v2i16,  MVT::v2i1,  3 },
    { ISD::ZERO_EXTEND, MVT::v2i16,  MVT::v2i1,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i1,  3 },
    { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i1,  4 },
    { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i1,  3 },
    { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i1,  4 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 3 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 4 },

    { ISD::SIGN_EXTEND, MVT::v2i32,  MVT::v2i1,  1 }, // zmm vpternlogd
    { ISD::ZERO_EXTEND, MVT::v2i32,  MVT::v2i1,  2 }, // zmm vpternlogd+psrld
    { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i1,  1 }, // zmm vpternlogd
    { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i1,  2 }, // zmm vpternlogd+psrld
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,  1 }, // zmm vpternlogd
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,  2 }, // zmm vpternlogd+psrld
    { ISD::SIGN_EXTEND, MVT::v2i64,  MVT::v2i1,  1 }, // zmm vpternlogq
    { ISD::ZERO_EXTEND, MVT::v2i64,  MVT::v2i1,  2 }, // zmm vpternlogq+psrlq
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,  1 }, // zmm vpternlogq
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,  2 }, // zmm vpternlogq+psrlq

    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1, 1 }, // vpternlogd
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1, 2 }, // vpternlogd+psrld
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i1,  1 }, // vpternlogq
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i1,  2 }, // vpternlogq+psrlq

    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i8,   1 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i16,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i32,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i32,  1 },

    { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 3 }, // FIXME: May not be right
    { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 3 }, // FIXME: May not be right

    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i1,   4 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i1,  3 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i8,   2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i8,  2 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i16,  2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  1 },

    { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i1,   4 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i1,  3 },
    { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i8,  2 },
    { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  1 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i32, 1 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i64,  26 },
    { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i64,  5 },

    { ISD::FP_TO_SINT,  MVT::v8i8,   MVT::v8f64,  3 },
    { ISD::FP_TO_SINT,  MVT::v8i16,  MVT::v8f64,  3 },
    { ISD::FP_TO_SINT,  MVT::v16i8,  MVT::v16f32, 3 },
    { ISD::FP_TO_SINT,  MVT::v16i16, MVT::v16f32, 3 },

    { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f64,  1 },
    { ISD::FP_TO_UINT,  MVT::v8i16,  MVT::v8f64,  3 },
    { ISD::FP_TO_UINT,  MVT::v8i8,   MVT::v8f64,  3 },
    { ISD::FP_TO_UINT,  MVT::v16i32, MVT::v16f32, 1 },
    { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v16f32, 3 },
    { ISD::FP_TO_UINT,  MVT::v16i8,  MVT::v16f32, 3 },
  };

  static const TypeConversionCostTblEntry AVX512BWVLConversionTbl[] {
    // Mask sign extend has an instruction.
    { ISD::SIGN_EXTEND, MVT::v2i8,   MVT::v2i1,  1 },
    { ISD::SIGN_EXTEND, MVT::v2i16,  MVT::v2i1,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i8,   MVT::v4i1,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i1,  1 },
    { ISD::SIGN_EXTEND, MVT::v8i8,   MVT::v8i1,  1 },
    { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i1,  1 },
    { ISD::SIGN_EXTEND, MVT::v16i8,  MVT::v16i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v32i8,  MVT::v32i1, 1 },

    // Mask zero extend is a sext + shift.
    { ISD::ZERO_EXTEND, MVT::v2i8,   MVT::v2i1,  2 },
    { ISD::ZERO_EXTEND, MVT::v2i16,  MVT::v2i1,  2 },
    { ISD::ZERO_EXTEND, MVT::v4i8,   MVT::v4i1,  2 },
    { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i1,  2 },
    { ISD::ZERO_EXTEND, MVT::v8i8,   MVT::v8i1,  2 },
    { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i1,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i8,  MVT::v16i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v32i8,  MVT::v32i1, 2 },

    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 2 },
    { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i8,   2 }, // vpsllw+vptestmb
    { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i16,  2 }, // vpsllw+vptestmw
    { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i8,   2 }, // vpsllw+vptestmb
    { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i16,  2 }, // vpsllw+vptestmw
    { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i8,   2 }, // vpsllw+vptestmb
    { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i16,  2 }, // vpsllw+vptestmw
    { ISD::TRUNCATE,    MVT::v16i1,  MVT::v16i8,  2 }, // vpsllw+vptestmb
    { ISD::TRUNCATE,    MVT::v16i1,  MVT::v16i16, 2 }, // vpsllw+vptestmw
    { ISD::TRUNCATE,    MVT::v32i1,  MVT::v32i8,  2 }, // vpsllw+vptestmb
  };

  static const TypeConversionCostTblEntry AVX512DQVLConversionTbl[] = {
    { ISD::SINT_TO_FP,  MVT::v2f32,  MVT::v2i64,  1 },
    { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i64,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  1 },

    { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i64,  1 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  1 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i64,  1 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  1 },

    { ISD::FP_TO_SINT,  MVT::v2i64,  MVT::v2f32,  1 },
    { ISD::FP_TO_SINT,  MVT::v4i64,  MVT::v4f32,  1 },
    { ISD::FP_TO_SINT,  MVT::v2i64,  MVT::v2f64,  1 },
    { ISD::FP_TO_SINT,  MVT::v4i64,  MVT::v4f64,  1 },

    { ISD::FP_TO_UINT,  MVT::v2i64,  MVT::v2f32,  1 },
    { ISD::FP_TO_UINT,  MVT::v4i64,  MVT::v4f32,  1 },
    { ISD::FP_TO_UINT,  MVT::v2i64,  MVT::v2f64,  1 },
    { ISD::FP_TO_UINT,  MVT::v4i64,  MVT::v4f64,  1 },
  };

  static const TypeConversionCostTblEntry AVX512VLConversionTbl[] = {
    { ISD::TRUNCATE,  MVT::v2i1,  MVT::v2i8,   3 }, // sext+vpslld+vptestmd
    { ISD::TRUNCATE,  MVT::v4i1,  MVT::v4i8,   3 }, // sext+vpslld+vptestmd
    { ISD::TRUNCATE,  MVT::v8i1,  MVT::v8i8,   3 }, // sext+vpslld+vptestmd
    { ISD::TRUNCATE,  MVT::v16i1, MVT::v16i8,  8 }, // split+2*v8i8
    { ISD::TRUNCATE,  MVT::v2i1,  MVT::v2i16,  3 }, // sext+vpsllq+vptestmq
    { ISD::TRUNCATE,  MVT::v4i1,  MVT::v4i16,  3 }, // sext+vpsllq+vptestmq
    { ISD::TRUNCATE,  MVT::v8i1,  MVT::v8i16,  3 }, // sext+vpsllq+vptestmq
    { ISD::TRUNCATE,  MVT::v16i1, MVT::v16i16, 8 }, // split+2*v8i16
    { ISD::TRUNCATE,  MVT::v2i1,  MVT::v2i32,  2 }, // vpslld+vptestmd
    { ISD::TRUNCATE,  MVT::v4i1,  MVT::v4i32,  2 }, // vpslld+vptestmd
    { ISD::TRUNCATE,  MVT::v8i1,  MVT::v8i32,  2 }, // vpslld+vptestmd
    { ISD::TRUNCATE,  MVT::v2i1,  MVT::v2i64,  2 }, // vpsllq+vptestmq
    { ISD::TRUNCATE,  MVT::v4i1,  MVT::v4i64,  2 }, // vpsllq+vptestmq
    { ISD::TRUNCATE,  MVT::v4i32, MVT::v4i64,  1 }, // vpmovqd

    // Sign extend is vpcmpeq+maskedmove+vpmovdw+vpacksswb.
    // Zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw+vpackuswb.
    { ISD::SIGN_EXTEND, MVT::v2i8,  MVT::v2i1,  5 },
    { ISD::ZERO_EXTEND, MVT::v2i8,  MVT::v2i1,  6 },
    { ISD::SIGN_EXTEND, MVT::v4i8,  MVT::v4i1,  5 },
    { ISD::ZERO_EXTEND, MVT::v4i8,  MVT::v4i1,  6 },
    { ISD::SIGN_EXTEND, MVT::v8i8,  MVT::v8i1,  5 },
    { ISD::ZERO_EXTEND, MVT::v8i8,  MVT::v8i1,  6 },
    { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 10 },
    { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 12 },

    // Sign extend is vpcmpeq+maskedmove+vpmovdw.
    // Zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw.
    { ISD::SIGN_EXTEND, MVT::v2i16,  MVT::v2i1,  4 },
    { ISD::ZERO_EXTEND, MVT::v2i16,  MVT::v2i1,  5 },
    { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i1,  4 },
    { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i1,  5 },
    { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i1,  4 },
    { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i1,  5 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 10 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 12 },

    { ISD::SIGN_EXTEND, MVT::v2i32, MVT::v2i1, 1 }, // vpternlogd
    { ISD::ZERO_EXTEND, MVT::v2i32, MVT::v2i1, 2 }, // vpternlogd+psrld
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 }, // vpternlogd
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 }, // vpternlogd+psrld
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 }, // vpternlogd
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 }, // vpternlogd+psrld
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 }, // vpternlogq
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 }, // vpternlogq+psrlq
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 }, // vpternlogq
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 }, // vpternlogq+psrlq

    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i8,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i8,  2 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i8,  2 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i16, 5 },
    { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i32, 1 },
    { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i64, 5 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i64, 5 },
    { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i64, 5 },

    { ISD::UINT_TO_FP,  MVT::f32,   MVT::i64,   1 },
    { ISD::UINT_TO_FP,  MVT::f64,   MVT::i64,   1 },

    { ISD::FP_TO_SINT,  MVT::v8i8,  MVT::v8f32, 3 },
    { ISD::FP_TO_UINT,  MVT::v8i8,  MVT::v8f32, 3 },

    { ISD::FP_TO_UINT,  MVT::i64,   MVT::f32,   1 },
    { ISD::FP_TO_UINT,  MVT::i64,   MVT::f64,   1 },
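    // (The scalar i64 <-> f32/f64 unit costs above assume AVX512F's
    // vcvtusi2ss/vcvtusi2sd and vcvttss2usi/vcvttsd2usi instructions.)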
    { ISD::FP_TO_UINT,  MVT::v2i32, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT,  MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT,  MVT::v2i32, MVT::v2f64, 1 },
    { ISD::FP_TO_UINT,  MVT::v4i32, MVT::v4f64, 1 },
    { ISD::FP_TO_UINT,  MVT::v8i32, MVT::v8f32, 1 },
  };

  static const TypeConversionCostTblEntry AVX2ConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   1 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1,  1 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  1 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 3 },

    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i32,  2 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  2 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  2 },

    { ISD::FP_EXTEND,   MVT::v8f64,  MVT::v8f32,  3 },
    { ISD::FP_ROUND,    MVT::v8f32,  MVT::v8f64,  3 },

    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  8 },
  };

  static const TypeConversionCostTblEntry AVXConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,  6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,  4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,  7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,  4 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,  4 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 4 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 4 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 4 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16, 4 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16, 4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32, 4 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32, 4 },

    { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i32,  5 },
    { ISD::TRUNCATE,    MVT::v16i1,  MVT::v16i16, 4 },
    { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i64,  9 },
    { ISD::TRUNCATE,    MVT::v16i1,  MVT::v16i64, 11 },

    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 4 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  4 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  5 },
    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i64,  11 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i64,  9 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  3 },
    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i64, 11 },
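    // (AVX1 has no 256-bit integer operations, so the 256-bit truncates above
    // are presumably split with vextractf128 and then lowered as 128-bit
    // pack/shuffle sequences, hence the larger counts.)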
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i1,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i1,  3 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i1,  8 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i8,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i8,  3 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i8,  8 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i16, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i16, 3 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i16, 5 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i32, 1 },

    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i1,  7 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i1,  7 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i1,  6 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i8,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i8,  2 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i8,  5 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16, 5 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i32, 6 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32, 6 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i32, 6 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32, 9 },
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64, 5 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64, 6 },
    // The generic code to compute the scalar overhead is currently broken.
    // Work around this limitation by estimating the scalarization overhead
    // here. We have roughly 10 instructions per scalar element.
    // Multiply that by the vector width.
    // FIXME: remove that when PR19268 is fixed.
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i64, 13 },

    { ISD::FP_TO_SINT,  MVT::v8i8,   MVT::v8f32, 4 },
    { ISD::FP_TO_SINT,  MVT::v4i8,   MVT::v4f64, 3 },
    { ISD::FP_TO_SINT,  MVT::v4i16,  MVT::v4f64, 2 },
    { ISD::FP_TO_SINT,  MVT::v8i16,  MVT::v8f32, 3 },

    { ISD::FP_TO_UINT,  MVT::v4i8,   MVT::v4f64, 3 },
    { ISD::FP_TO_UINT,  MVT::v4i16,  MVT::v4f64, 2 },
    { ISD::FP_TO_UINT,  MVT::v8i8,   MVT::v8f32, 4 },
    { ISD::FP_TO_UINT,  MVT::v8i16,  MVT::v8f32, 3 },
    // This node is expanded into scalarized operations but BasicTTI is overly
    // optimistic estimating its cost. It computes 3 per element (one
    // vector-extract, one scalar conversion and one vector-insert). The
    // problem is that the inserts form a read-modify-write chain so latency
    // should be factored in too. Inflating the cost per element by 1.
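    // Example: the v8f32 -> v8i32 entry below works out to
    // 8 elements * (3 + 1) = 32, written as 8*4.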
    { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f32, 8*4 },
    { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f64, 4*4 },

    { ISD::FP_EXTEND,   MVT::v4f64,  MVT::v4f32, 1 },
    { ISD::FP_ROUND,    MVT::v4f32,  MVT::v4f64, 1 },
  };

  static const TypeConversionCostTblEntry SSE41ConversionTbl[] = {
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   2 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   2 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  2 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  2 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  2 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  2 },

    { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i8,   2 },
    { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i8,   1 },
    { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i8,   1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   2 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   2 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  2 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  4 },
    { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i16,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  2 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 4 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 4 },

    // These truncates end up widening elements.
    { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i8,   1 }, // PMOVXZBQ
    { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i16,  1 }, // PMOVXZWQ
    { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i8,   1 }, // PMOVXZBD

    { ISD::TRUNCATE,    MVT::v2i8,   MVT::v2i16,  1 },
    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i16,  1 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i16,  1 },
    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i32,  1 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i32,  1 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  3 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  3 },
    { ISD::TRUNCATE,    MVT::v16i16, MVT::v16i32, 6 },
    { ISD::TRUNCATE,    MVT::v2i8,   MVT::v2i64,  1 }, // PSHUFB

    { ISD::UINT_TO_FP,  MVT::f32,    MVT::i64,    4 },
    { ISD::UINT_TO_FP,  MVT::f64,    MVT::i64,    4 },

    { ISD::FP_TO_SINT,  MVT::v2i8,   MVT::v2f32,  3 },
    { ISD::FP_TO_SINT,  MVT::v2i8,   MVT::v2f64,  3 },

    { ISD::FP_TO_UINT,  MVT::v2i8,   MVT::v2f32,  3 },
    { ISD::FP_TO_UINT,  MVT::v2i8,   MVT::v2f64,  3 },
    { ISD::FP_TO_UINT,  MVT::v4i16,  MVT::v4f32,  2 },
  };

  static const TypeConversionCostTblEntry SSE2ConversionTbl[] = {
    // These are somewhat magic numbers justified by looking at the output of
    // Intel's IACA, running some kernels and making sure when we take
    // legalization into account the throughput will be overestimated.
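    // (e.g. the 16*10 entries below encode "16 scalar elements at roughly 10
    // instructions each" for conversions that are fully scalarized.)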
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 2*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2*10 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },

    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 8 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 6 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },

    { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f32, 4 },
    { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 2 },
    { ISD::FP_TO_SINT, MVT::v4i8,  MVT::v4f32, 3 },
    { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f64, 4 },

    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 1 },

    { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 6 },
    { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 6 },

    { ISD::FP_TO_UINT, MVT::i64,   MVT::f32,   4 },
    { ISD::FP_TO_UINT, MVT::i64,   MVT::f64,   4 },
    { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f32, 4 },
    { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f64, 4 },
    { ISD::FP_TO_UINT, MVT::v4i8,  MVT::v4f32, 3 },
    { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 2 },
    { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 4 },

    { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i8,   6 },
    { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i8,   2 },
    { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   8 },
    { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i8,   1 },
    { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i8,   2 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   6 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   6 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  3 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  9 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  12 },
    { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i16,  2 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  10 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 6 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 8 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  5 },

    // These truncates are really widening elements.
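    // (The v*i1 results below are promoted to wider vectors, so the lowering
    // only has to shuffle the source elements into the promoted lane
    // positions rather than actually narrow anything.)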
    { ISD::TRUNCATE, MVT::v2i1,   MVT::v2i32,  1 }, // PSHUFD
    { ISD::TRUNCATE, MVT::v2i1,   MVT::v2i16,  2 }, // PUNPCKLWD+DQ
    { ISD::TRUNCATE, MVT::v2i1,   MVT::v2i8,   3 }, // PUNPCKLBW+WD+PSHUFD
    { ISD::TRUNCATE, MVT::v4i1,   MVT::v4i16,  1 }, // PUNPCKLWD
    { ISD::TRUNCATE, MVT::v4i1,   MVT::v4i8,   2 }, // PUNPCKLBW+WD
    { ISD::TRUNCATE, MVT::v8i1,   MVT::v8i8,   1 }, // PUNPCKLBW

    { ISD::TRUNCATE, MVT::v2i8,   MVT::v2i16,  2 }, // PAND+PACKUSWB
    { ISD::TRUNCATE, MVT::v4i8,   MVT::v4i16,  2 }, // PAND+PACKUSWB
    { ISD::TRUNCATE, MVT::v8i8,   MVT::v8i16,  2 }, // PAND+PACKUSWB
    { ISD::TRUNCATE, MVT::v16i8,  MVT::v16i16, 3 },
    { ISD::TRUNCATE, MVT::v2i8,   MVT::v2i32,  3 }, // PAND+2*PACKUSWB
    { ISD::TRUNCATE, MVT::v2i16,  MVT::v2i32,  1 },
    { ISD::TRUNCATE, MVT::v4i8,   MVT::v4i32,  3 },
    { ISD::TRUNCATE, MVT::v4i16,  MVT::v4i32,  3 },
    { ISD::TRUNCATE, MVT::v8i8,   MVT::v8i32,  4 },
    { ISD::TRUNCATE, MVT::v16i8,  MVT::v16i32, 7 },
    { ISD::TRUNCATE, MVT::v8i16,  MVT::v8i32,  5 },
    { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 10 },
    { ISD::TRUNCATE, MVT::v2i8,   MVT::v2i64,  4 }, // PAND+3*PACKUSWB
    { ISD::TRUNCATE, MVT::v2i16,  MVT::v2i64,  2 }, // PSHUFD+PSHUFLW
    { ISD::TRUNCATE, MVT::v2i32,  MVT::v2i64,  1 }, // PSHUFD
  };

  std::pair<int, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
  std::pair<int, MVT> LTDest = TLI->getTypeLegalizationCost(DL, Dst);

  if (ST->hasSSE2() && !ST->hasAVX()) {
    if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return AdjustCost(LTSrc.first * Entry->Cost);
  }

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  // The function getSimpleVT only handles simple value types.
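  // (e.g. an IR-level <48 x i8> has no matching MVT, so it is not "simple"
  // and we fall back to the base implementation below.)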
  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return AdjustCost(BaseT::getCastInstrCost(Opcode, Dst, Src, CostKind));

  MVT SimpleSrcTy = SrcTy.getSimpleVT();
  MVT SimpleDstTy = DstTy.getSimpleVT();

  if (ST->useAVX512Regs()) {
    if (ST->hasBWI())
      if (const auto *Entry = ConvertCostTableLookup(AVX512BWConversionTbl, ISD,
                                                     SimpleDstTy, SimpleSrcTy))
        return AdjustCost(Entry->Cost);

    if (ST->hasDQI())
      if (const auto *Entry = ConvertCostTableLookup(AVX512DQConversionTbl, ISD,
                                                     SimpleDstTy, SimpleSrcTy))
        return AdjustCost(Entry->Cost);

    if (ST->hasAVX512())
      if (const auto *Entry = ConvertCostTableLookup(AVX512FConversionTbl, ISD,
                                                     SimpleDstTy, SimpleSrcTy))
        return AdjustCost(Entry->Cost);
  }

  if (ST->hasBWI())
    if (const auto *Entry = ConvertCostTableLookup(AVX512BWVLConversionTbl, ISD,
                                                   SimpleDstTy, SimpleSrcTy))
      return AdjustCost(Entry->Cost);

  if (ST->hasDQI())
    if (const auto *Entry = ConvertCostTableLookup(AVX512DQVLConversionTbl, ISD,
                                                   SimpleDstTy, SimpleSrcTy))
      return AdjustCost(Entry->Cost);

  if (ST->hasAVX512())
    if (const auto *Entry = ConvertCostTableLookup(AVX512VLConversionTbl, ISD,
                                                   SimpleDstTy, SimpleSrcTy))
      return AdjustCost(Entry->Cost);

  if (ST->hasAVX2()) {
    if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
                                                   SimpleDstTy, SimpleSrcTy))
      return AdjustCost(Entry->Cost);
  }

  if (ST->hasAVX()) {
    if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
                                                   SimpleDstTy, SimpleSrcTy))
      return AdjustCost(Entry->Cost);
  }

  if (ST->hasSSE41()) {
    if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
                                                   SimpleDstTy, SimpleSrcTy))
      return AdjustCost(Entry->Cost);
  }

  if (ST->hasSSE2()) {
    if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
                                                   SimpleDstTy, SimpleSrcTy))
      return AdjustCost(Entry->Cost);
  }

  return AdjustCost(BaseT::getCastInstrCost(Opcode, Dst, Src, CostKind, I));
}

int X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
                                   TTI::TargetCostKind CostKind,
                                   const Instruction *I) {
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, CostKind, I);

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  unsigned ExtraCost = 0;
  if (I && (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)) {
    // Some vector comparison predicates cost extra instructions.
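    // (Plain SSE/AVX only provide pcmpeq and the signed pcmpgt, so e.g. an
    // unsigned less-than is emulated by flipping the sign bits and reusing
    // pcmpgt, as the per-predicate notes below spell out.)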
    if (MTy.isVector() &&
        !((ST->hasXOP() && (!ST->hasAVX2() || MTy.is128BitVector())) ||
          (ST->hasAVX512() && 32 <= MTy.getScalarSizeInBits()) ||
          ST->hasBWI())) {
      switch (cast<CmpInst>(I)->getPredicate()) {
      case CmpInst::Predicate::ICMP_NE:
        // xor(cmpeq(x,y),-1)
        ExtraCost = 1;
        break;
      case CmpInst::Predicate::ICMP_SGE:
      case CmpInst::Predicate::ICMP_SLE:
        // xor(cmpgt(x,y),-1)
        ExtraCost = 1;
        break;
      case CmpInst::Predicate::ICMP_ULT:
      case CmpInst::Predicate::ICMP_UGT:
        // cmpgt(xor(x,signbit),xor(y,signbit))
        // xor(cmpeq(pmaxu(x,y),x),-1)
        ExtraCost = 2;
        break;
      case CmpInst::Predicate::ICMP_ULE:
      case CmpInst::Predicate::ICMP_UGE:
        if ((ST->hasSSE41() && MTy.getScalarSizeInBits() == 32) ||
            (ST->hasSSE2() && MTy.getScalarSizeInBits() < 32)) {
          // cmpeq(psubus(x,y),0)
          // cmpeq(pminu(x,y),x)
          ExtraCost = 1;
        } else {
          // xor(cmpgt(xor(x,signbit),xor(y,signbit)),-1)
          ExtraCost = 3;
        }
        break;
      default:
        break;
      }
    }
  }

  static const CostTblEntry SLMCostTbl[] = {
    // slm pcmpeq/pcmpgt throughput is 2
    { ISD::SETCC, MVT::v2i64, 2 },
  };

  static const CostTblEntry AVX512BWCostTbl[] = {
    { ISD::SETCC, MVT::v32i16, 1 },
    { ISD::SETCC, MVT::v64i8,  1 },

    { ISD::SELECT, MVT::v32i16, 1 },
    { ISD::SELECT, MVT::v64i8,  1 },
  };

  static const CostTblEntry AVX512CostTbl[] = {
    { ISD::SETCC, MVT::v8i64,  1 },
    { ISD::SETCC, MVT::v16i32, 1 },
    { ISD::SETCC, MVT::v8f64,  1 },
    { ISD::SETCC, MVT::v16f32, 1 },

    { ISD::SELECT, MVT::v8i64,  1 },
    { ISD::SELECT, MVT::v16i32, 1 },
    { ISD::SELECT, MVT::v8f64,  1 },
    { ISD::SELECT, MVT::v16f32, 1 },

    { ISD::SETCC, MVT::v32i16, 2 }, // FIXME: should probably be 4
    { ISD::SETCC, MVT::v64i8,  2 }, // FIXME: should probably be 4

    { ISD::SELECT, MVT::v32i16, 2 }, // FIXME: should be 3
    { ISD::SELECT, MVT::v64i8,  2 }, // FIXME: should be 3
  };

  static const CostTblEntry AVX2CostTbl[] = {
    { ISD::SETCC, MVT::v4i64,  1 },
    { ISD::SETCC, MVT::v8i32,  1 },
    { ISD::SETCC, MVT::v16i16, 1 },
    { ISD::SETCC, MVT::v32i8,  1 },

    { ISD::SELECT, MVT::v4i64,  1 }, // pblendvb
    { ISD::SELECT, MVT::v8i32,  1 }, // pblendvb
    { ISD::SELECT, MVT::v16i16, 1 }, // pblendvb
    { ISD::SELECT, MVT::v32i8,  1 }, // pblendvb
  };

  static const CostTblEntry AVX1CostTbl[] = {
    { ISD::SETCC, MVT::v4f64, 1 },
    { ISD::SETCC, MVT::v8f32, 1 },
    // AVX1 does not support 8-wide integer compare.
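    // (A 256-bit integer compare is presumably split into two 128-bit pcmps
    // plus a vextractf128/vinsertf128 pair, hence the cost of 4.)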
    { ISD::SETCC, MVT::v4i64,  4 },
    { ISD::SETCC, MVT::v8i32,  4 },
    { ISD::SETCC, MVT::v16i16, 4 },
    { ISD::SETCC, MVT::v32i8,  4 },

    { ISD::SELECT, MVT::v4f64,  1 }, // vblendvpd
    { ISD::SELECT, MVT::v8f32,  1 }, // vblendvps
    { ISD::SELECT, MVT::v4i64,  1 }, // vblendvpd
    { ISD::SELECT, MVT::v8i32,  1 }, // vblendvps
    { ISD::SELECT, MVT::v16i16, 3 }, // vandps + vandnps + vorps
    { ISD::SELECT, MVT::v32i8,  3 }, // vandps + vandnps + vorps
  };

  static const CostTblEntry SSE42CostTbl[] = {
    { ISD::SETCC, MVT::v2f64, 1 },
    { ISD::SETCC, MVT::v4f32, 1 },
    { ISD::SETCC, MVT::v2i64, 1 },
  };

  static const CostTblEntry SSE41CostTbl[] = {
    { ISD::SELECT, MVT::v2f64, 1 }, // blendvpd
    { ISD::SELECT, MVT::v4f32, 1 }, // blendvps
    { ISD::SELECT, MVT::v2i64, 1 }, // pblendvb
    { ISD::SELECT, MVT::v4i32, 1 }, // pblendvb
    { ISD::SELECT, MVT::v8i16, 1 }, // pblendvb
    { ISD::SELECT, MVT::v16i8, 1 }, // pblendvb
  };

  static const CostTblEntry SSE2CostTbl[] = {
    { ISD::SETCC, MVT::v2f64, 2 },
    { ISD::SETCC, MVT::f64,   1 },
    { ISD::SETCC, MVT::v2i64, 8 },
    { ISD::SETCC, MVT::v4i32, 1 },
    { ISD::SETCC, MVT::v8i16, 1 },
    { ISD::SETCC, MVT::v16i8, 1 },

    { ISD::SELECT, MVT::v2f64, 3 }, // andpd + andnpd + orpd
    { ISD::SELECT, MVT::v2i64, 3 }, // pand + pandn + por
    { ISD::SELECT, MVT::v4i32, 3 }, // pand + pandn + por
    { ISD::SELECT, MVT::v8i16, 3 }, // pand + pandn + por
    { ISD::SELECT, MVT::v16i8, 3 }, // pand + pandn + por
  };

  static const CostTblEntry SSE1CostTbl[] = {
    { ISD::SETCC, MVT::v4f32, 2 },
    { ISD::SETCC, MVT::f32,   1 },

    { ISD::SELECT, MVT::v4f32, 3 }, // andps + andnps + orps
  };

  if (ST->isSLM())
    if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  if (ST->hasBWI())
    if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
      return LT.first * (ExtraCost + Entry->Cost);
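  // Worked example (illustrative): an ult compare of <32 x i8> on an
  // SSE2-only target legalizes to two v16i8 compares (LT.first == 2) with
  // ExtraCost == 2, so the total is 2 * (2 + 1) = 6.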
  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, CostKind, I);
}

unsigned X86TTIImpl::getAtomicMemIntrinsicMaxElementSize() const { return 16; }

int X86TTIImpl::getTypeBasedIntrinsicInstrCost(
    const IntrinsicCostAttributes &ICA, TTI::TargetCostKind CostKind) {

  // Costs should match the codegen from:
  // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll
  // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll
  // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll
  // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll
  // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll
  static const CostTblEntry AVX512CDCostTbl[] = {
    { ISD::CTLZ, MVT::v8i64,  1 },
    { ISD::CTLZ, MVT::v16i32, 1 },
    { ISD::CTLZ, MVT::v32i16, 8 },
    { ISD::CTLZ, MVT::v64i8,  20 },
    { ISD::CTLZ, MVT::v4i64,  1 },
    { ISD::CTLZ, MVT::v8i32,  1 },
    { ISD::CTLZ, MVT::v16i16, 4 },
    { ISD::CTLZ, MVT::v32i8,  10 },
    { ISD::CTLZ, MVT::v2i64,  1 },
    { ISD::CTLZ, MVT::v4i32,  1 },
    { ISD::CTLZ, MVT::v8i16,  4 },
    { ISD::CTLZ, MVT::v16i8,  4 },
  };
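  // (AVX512CD's vplzcntd/vplzcntq handle the i32/i64 element cases directly,
  // which is why those rows are single ops; i8/i16 elements still need extra
  // work.)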
  static const CostTblEntry AVX512BWCostTbl[] = {
    { ISD::BITREVERSE, MVT::v8i64,  5 },
    { ISD::BITREVERSE, MVT::v16i32, 5 },
    { ISD::BITREVERSE, MVT::v32i16, 5 },
    { ISD::BITREVERSE, MVT::v64i8,  5 },
    { ISD::CTLZ,       MVT::v8i64,  23 },
    { ISD::CTLZ,       MVT::v16i32, 22 },
    { ISD::CTLZ,       MVT::v32i16, 18 },
    { ISD::CTLZ,       MVT::v64i8,  17 },
    { ISD::CTPOP,      MVT::v8i64,  7 },
    { ISD::CTPOP,      MVT::v16i32, 11 },
    { ISD::CTPOP,      MVT::v32i16, 9 },
    { ISD::CTPOP,      MVT::v64i8,  6 },
    { ISD::CTTZ,       MVT::v8i64,  10 },
    { ISD::CTTZ,       MVT::v16i32, 14 },
    { ISD::CTTZ,       MVT::v32i16, 12 },
    { ISD::CTTZ,       MVT::v64i8,  9 },
    { ISD::SADDSAT,    MVT::v32i16, 1 },
    { ISD::SADDSAT,    MVT::v64i8,  1 },
    { ISD::SSUBSAT,    MVT::v32i16, 1 },
    { ISD::SSUBSAT,    MVT::v64i8,  1 },
    { ISD::UADDSAT,    MVT::v32i16, 1 },
    { ISD::UADDSAT,    MVT::v64i8,  1 },
    { ISD::USUBSAT,    MVT::v32i16, 1 },
    { ISD::USUBSAT,    MVT::v64i8,  1 },
  };
  static const CostTblEntry AVX512CostTbl[] = {
    { ISD::BITREVERSE, MVT::v8i64,  36 },
    { ISD::BITREVERSE, MVT::v16i32, 24 },
    { ISD::BITREVERSE, MVT::v32i16, 10 },
    { ISD::BITREVERSE, MVT::v64i8,  10 },
    { ISD::CTLZ,       MVT::v8i64,  29 },
    { ISD::CTLZ,       MVT::v16i32, 35 },
    { ISD::CTLZ,       MVT::v32i16, 28 },
    { ISD::CTLZ,       MVT::v64i8,  18 },
    { ISD::CTPOP,      MVT::v8i64,  16 },
    { ISD::CTPOP,      MVT::v16i32, 24 },
    { ISD::CTPOP,      MVT::v32i16, 18 },
    { ISD::CTPOP,      MVT::v64i8,  12 },
    { ISD::CTTZ,       MVT::v8i64,  20 },
    { ISD::CTTZ,       MVT::v16i32, 28 },
    { ISD::CTTZ,       MVT::v32i16, 24 },
    { ISD::CTTZ,       MVT::v64i8,  18 },
    { ISD::USUBSAT,    MVT::v16i32, 2 }, // pmaxud + psubd
    { ISD::USUBSAT,    MVT::v2i64,  2 }, // pmaxuq + psubq
    { ISD::USUBSAT,    MVT::v4i64,  2 }, // pmaxuq + psubq
    { ISD::USUBSAT,    MVT::v8i64,  2 }, // pmaxuq + psubq
    { ISD::UADDSAT,    MVT::v16i32, 3 }, // not + pminud + paddd
    { ISD::UADDSAT,    MVT::v2i64,  3 }, // not + pminuq + paddq
    { ISD::UADDSAT,    MVT::v4i64,  3 }, // not + pminuq + paddq
    { ISD::UADDSAT,    MVT::v8i64,  3 }, // not + pminuq + paddq
    { ISD::SADDSAT,    MVT::v32i16, 2 }, // FIXME: include split
    { ISD::SADDSAT,    MVT::v64i8,  2 }, // FIXME: include split
    { ISD::SSUBSAT,    MVT::v32i16, 2 }, // FIXME: include split
    { ISD::SSUBSAT,    MVT::v64i8,  2 }, // FIXME: include split
    { ISD::UADDSAT,    MVT::v32i16, 2 }, // FIXME: include split
    { ISD::UADDSAT,    MVT::v64i8,  2 }, // FIXME: include split
    { ISD::USUBSAT,    MVT::v32i16, 2 }, // FIXME: include split
    { ISD::USUBSAT,    MVT::v64i8,  2 }, // FIXME: include split
    { ISD::FMAXNUM,    MVT::f32,    2 },
    { ISD::FMAXNUM,    MVT::v4f32,  2 },
    { ISD::FMAXNUM,    MVT::v8f32,  2 },
    { ISD::FMAXNUM,    MVT::v16f32, 2 },
    { ISD::FMAXNUM,    MVT::f64,    2 },
    { ISD::FMAXNUM,    MVT::v2f64,  2 },
    { ISD::FMAXNUM,    MVT::v4f64,  2 },
    { ISD::FMAXNUM,    MVT::v8f64,  2 },
  };
  static const CostTblEntry XOPCostTbl[] = {
    { ISD::BITREVERSE, MVT::v4i64,  4 },
    { ISD::BITREVERSE, MVT::v8i32,  4 },
    { ISD::BITREVERSE, MVT::v16i16, 4 },
    { ISD::BITREVERSE, MVT::v32i8,  4 },
    { ISD::BITREVERSE, MVT::v2i64,  1 },
    { ISD::BITREVERSE, MVT::v4i32,  1 },
    { ISD::BITREVERSE, MVT::v8i16,  1 },
    { ISD::BITREVERSE, MVT::v16i8,  1 },
    { ISD::BITREVERSE, MVT::i64,    3 },
    { ISD::BITREVERSE, MVT::i32,    3 },
    { ISD::BITREVERSE, MVT::i16,    3 },
    { ISD::BITREVERSE, MVT::i8,     3 }
  };
  static const CostTblEntry AVX2CostTbl[] = {
    { ISD::BITREVERSE, MVT::v4i64,  5 },
    { ISD::BITREVERSE, MVT::v8i32,  5 },
    { ISD::BITREVERSE, MVT::v16i16, 5 },
    { ISD::BITREVERSE, MVT::v32i8,  5 },
    { ISD::BSWAP,      MVT::v4i64,  1 },
    { ISD::BSWAP,      MVT::v8i32,  1 },
    { ISD::BSWAP,      MVT::v16i16, 1 },
    { ISD::CTLZ,       MVT::v4i64,  23 },
    { ISD::CTLZ,       MVT::v8i32,  18 },
    { ISD::CTLZ,       MVT::v16i16, 14 },
    { ISD::CTLZ,       MVT::v32i8,  9 },
    { ISD::CTPOP,      MVT::v4i64,  7 },
    { ISD::CTPOP,      MVT::v8i32,  11 },
    { ISD::CTPOP,      MVT::v16i16, 9 },
    { ISD::CTPOP,      MVT::v32i8,  6 },
    { ISD::CTTZ,       MVT::v4i64,  10 },
    { ISD::CTTZ,       MVT::v8i32,  14 },
    { ISD::CTTZ,       MVT::v16i16, 12 },
    { ISD::CTTZ,       MVT::v32i8,  9 },
    { ISD::SADDSAT,    MVT::v16i16, 1 },
    { ISD::SADDSAT,    MVT::v32i8,  1 },
    { ISD::SSUBSAT,    MVT::v16i16, 1 },
    { ISD::SSUBSAT,    MVT::v32i8,  1 },
    { ISD::UADDSAT,    MVT::v16i16, 1 },
    { ISD::UADDSAT,    MVT::v32i8,  1 },
    { ISD::UADDSAT,    MVT::v8i32,  3 }, // not + pminud + paddd
    { ISD::USUBSAT,    MVT::v16i16, 1 },
    { ISD::USUBSAT,    MVT::v32i8,  1 },
    { ISD::USUBSAT,    MVT::v8i32,  2 }, // pmaxud + psubd
    { ISD::FSQRT,      MVT::f32,    7 },  // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::v4f32,  7 },  // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::v8f32,  14 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::f64,    14 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::v2f64,  14 }, // Haswell from http://www.agner.org/
    { ISD::FSQRT,      MVT::v4f64,  28 }, // Haswell from http://www.agner.org/
  };
  static const CostTblEntry AVX1CostTbl[] = {
    { ISD::BITREVERSE, MVT::v4i64,  12 }, // 2 x 128-bit Op + extract/insert
    { ISD::BITREVERSE, MVT::v8i32,  12 }, // 2 x 128-bit Op + extract/insert
    { ISD::BITREVERSE, MVT::v16i16, 12 }, // 2 x 128-bit Op + extract/insert
    { ISD::BITREVERSE, MVT::v32i8,  12 }, // 2 x 128-bit Op + extract/insert
    { ISD::BSWAP,      MVT::v4i64,  4 },
    { ISD::BSWAP,      MVT::v8i32,  4 },
    { ISD::BSWAP,      MVT::v16i16, 4 },
    { ISD::CTLZ,       MVT::v4i64,  48 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTLZ,       MVT::v8i32,  38 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTLZ,       MVT::v16i16, 30 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTLZ,       MVT::v32i8,  20 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTPOP,      MVT::v4i64,  16 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTPOP,      MVT::v8i32,  24 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTPOP,      MVT::v16i16, 20 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTPOP,      MVT::v32i8,  14 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTTZ,       MVT::v4i64,  22 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTTZ,       MVT::v8i32,  30 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTTZ,       MVT::v16i16, 26 }, // 2 x 128-bit Op + extract/insert
    { ISD::CTTZ,       MVT::v32i8,  20 }, // 2 x 128-bit Op + extract/insert
    { ISD::SADDSAT,    MVT::v16i16, 4 },  // 2 x 128-bit Op + extract/insert
    { ISD::SADDSAT,    MVT::v32i8,  4 },  // 2 x 128-bit Op + extract/insert
    { ISD::SSUBSAT,    MVT::v16i16, 4 },  // 2 x 128-bit Op + extract/insert
    { ISD::SSUBSAT,    MVT::v32i8,  4 },  // 2 x 128-bit Op + extract/insert
    { ISD::UADDSAT,    MVT::v16i16, 4 },  // 2 x 128-bit Op + extract/insert
    { ISD::UADDSAT,    MVT::v32i8,  4 },  // 2 x 128-bit Op + extract/insert
    { ISD::UADDSAT,    MVT::v8i32,  8 },  // 2 x 128-bit Op + extract/insert
    { ISD::USUBSAT,    MVT::v16i16, 4 },  // 2 x 128-bit Op + extract/insert
    { ISD::USUBSAT,    MVT::v32i8,  4 },  // 2 x 128-bit Op + extract/insert
    { ISD::USUBSAT,    MVT::v8i32,  6 },  // 2 x 128-bit Op + extract/insert
    { ISD::FMAXNUM,    MVT::f32,    3 },
    { ISD::FMAXNUM,    MVT::v4f32,  3 },
    { ISD::FMAXNUM,    MVT::v8f32,  5 },
    { ISD::FMAXNUM,    MVT::f64,    3 },
    { ISD::FMAXNUM,    MVT::v2f64,  3 },
    { ISD::FMAXNUM,    MVT::v4f64,  5 },
    { ISD::FSQRT,      MVT::f32,    14 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::v4f32,  14 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::v8f32,  28 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::f64,    21 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::v2f64,  21 }, // SNB from http://www.agner.org/
    { ISD::FSQRT,      MVT::v4f64,  43 }, // SNB from http://www.agner.org/
  };
  static const CostTblEntry GLMCostTbl[] = {
    { ISD::FSQRT, MVT::f32,   19 }, // sqrtss
    { ISD::FSQRT, MVT::v4f32, 37 }, // sqrtps
    { ISD::FSQRT, MVT::f64,   34 }, // sqrtsd
    { ISD::FSQRT, MVT::v2f64, 67 }, // sqrtpd
  };
  static const CostTblEntry SLMCostTbl[] = {
    { ISD::FSQRT, MVT::f32,   20 }, // sqrtss
    { ISD::FSQRT, MVT::v4f32, 40 }, // sqrtps
    { ISD::FSQRT, MVT::f64,   35 }, // sqrtsd
    { ISD::FSQRT, MVT::v2f64, 70 }, // sqrtpd
  };
  static const CostTblEntry SSE42CostTbl[] = {
    { ISD::USUBSAT, MVT::v4i32, 2 },  // pmaxud + psubd
    { ISD::UADDSAT, MVT::v4i32, 3 },  // not + pminud + paddd
    { ISD::FSQRT,   MVT::f32,   18 }, // Nehalem from http://www.agner.org/
    { ISD::FSQRT,   MVT::v4f32, 18 }, // Nehalem from http://www.agner.org/
  };
  static const CostTblEntry SSSE3CostTbl[] = {
    { ISD::BITREVERSE, MVT::v2i64, 5 },
    { ISD::BITREVERSE, MVT::v4i32, 5 },
    { ISD::BITREVERSE, MVT::v8i16, 5 },
    { ISD::BITREVERSE, MVT::v16i8, 5 },
    { ISD::BSWAP,      MVT::v2i64, 1 },
    { ISD::BSWAP,      MVT::v4i32, 1 },
    { ISD::BSWAP,      MVT::v8i16, 1 },
    { ISD::CTLZ,       MVT::v2i64, 23 },
    { ISD::CTLZ,       MVT::v4i32, 18 },
    { ISD::CTLZ,       MVT::v8i16, 14 },
    { ISD::CTLZ,       MVT::v16i8, 9 },
    { ISD::CTPOP,      MVT::v2i64, 7 },
    { ISD::CTPOP,      MVT::v4i32, 11 },
    { ISD::CTPOP,      MVT::v8i16, 9 },
    { ISD::CTPOP,      MVT::v16i8, 6 },
    { ISD::CTTZ,       MVT::v2i64, 10 },
    { ISD::CTTZ,       MVT::v4i32, 14 },
    { ISD::CTTZ,       MVT::v8i16, 12 },
    { ISD::CTTZ,       MVT::v16i8, 9 }
  };
  static const CostTblEntry SSE2CostTbl[] = {
    { ISD::BITREVERSE, MVT::v2i64, 29 },
    { ISD::BITREVERSE, MVT::v4i32, 27 },
    { ISD::BITREVERSE, MVT::v8i16, 27 },
    { ISD::BITREVERSE, MVT::v16i8, 20 },
    { ISD::BSWAP,      MVT::v2i64, 7 },
    { ISD::BSWAP,      MVT::v4i32, 7 },
    { ISD::BSWAP,      MVT::v8i16, 7 },
    { ISD::CTLZ,       MVT::v2i64, 25 },
    { ISD::CTLZ,       MVT::v4i32, 26 },
    { ISD::CTLZ,       MVT::v8i16, 20 },
    { ISD::CTLZ,       MVT::v16i8, 17 },
    { ISD::CTPOP,      MVT::v2i64, 12 },
    { ISD::CTPOP,      MVT::v4i32, 15 },
    { ISD::CTPOP,      MVT::v8i16, 13 },
    { ISD::CTPOP,      MVT::v16i8, 10 },
    { ISD::CTTZ,       MVT::v2i64, 14 },
    { ISD::CTTZ,       MVT::v4i32, 18 },
    { ISD::CTTZ,       MVT::v8i16, 16 },
    { ISD::CTTZ,       MVT::v16i8, 13 },
    { ISD::SADDSAT,    MVT::v8i16, 1 },
    { ISD::SADDSAT,    MVT::v16i8, 1 },
    { ISD::SSUBSAT,    MVT::v8i16, 1 },
    { ISD::SSUBSAT,    MVT::v16i8, 1 },
    { ISD::UADDSAT,    MVT::v8i16, 1 },
    { ISD::UADDSAT,    MVT::v16i8, 1 },
    { ISD::USUBSAT,    MVT::v8i16, 1 },
    { ISD::USUBSAT,    MVT::v16i8, 1 },
    { ISD::FMAXNUM,    MVT::f64,   4 },
    { ISD::FMAXNUM,    MVT::v2f64, 4 },
    { ISD::FSQRT,      MVT::f64,   32 }, // Nehalem from http://www.agner.org/
    { ISD::FSQRT,      MVT::v2f64, 32 }, // Nehalem from http://www.agner.org/
  };
  static const CostTblEntry SSE1CostTbl[] = {
    { ISD::FMAXNUM, MVT::f32,   4 },
    { ISD::FMAXNUM, MVT::v4f32, 4 },
    { ISD::FSQRT,   MVT::f32,   28 }, // Pentium III from http://www.agner.org/
    { ISD::FSQRT,   MVT::v4f32, 56 }, // Pentium III from http://www.agner.org/
  };
  static const CostTblEntry BMI64CostTbl[] = { // 64-bit targets
    { ISD::CTTZ, MVT::i64, 1 },
  };
  static const CostTblEntry BMI32CostTbl[] = { // 32 or 64-bit targets
    { ISD::CTTZ, MVT::i32, 1 },
    { ISD::CTTZ, MVT::i16, 1 },
    { ISD::CTTZ, MVT::i8,  1 },
  };
  static const CostTblEntry LZCNT64CostTbl[] = { // 64-bit targets
    { ISD::CTLZ, MVT::i64, 1 },
  };
  static const CostTblEntry LZCNT32CostTbl[] = { // 32 or 64-bit targets
    { ISD::CTLZ, MVT::i32, 1 },
    { ISD::CTLZ, MVT::i16, 1 },
    { ISD::CTLZ, MVT::i8,  1 },
  };
  static const CostTblEntry POPCNT64CostTbl[] = { // 64-bit targets
    { ISD::CTPOP, MVT::i64, 1 },
  };
  static const CostTblEntry POPCNT32CostTbl[] = { // 32 or 64-bit targets
    { ISD::CTPOP, MVT::i32, 1 },
    { ISD::CTPOP, MVT::i16, 1 },
    { ISD::CTPOP, MVT::i8,  1 },
  };
  static const CostTblEntry X64CostTbl[] = { // 64-bit targets
    { ISD::BITREVERSE, MVT::i64, 14 },
    { ISD::CTLZ,       MVT::i64, 4 }, // BSR+XOR or BSR+XOR+CMOV
    { ISD::CTTZ,       MVT::i64, 3 }, // TEST+BSF+CMOV/BRANCH
    { ISD::CTPOP,      MVT::i64, 10 },
    { ISD::SADDO,      MVT::i64, 1 },
    { ISD::UADDO,      MVT::i64, 1 },
  };
  static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
    { ISD::BITREVERSE, MVT::i32, 14 },
    { ISD::BITREVERSE, MVT::i16, 14 },
    { ISD::BITREVERSE, MVT::i8,  11 },
    { ISD::CTLZ,       MVT::i32, 4 }, // BSR+XOR or BSR+XOR+CMOV
    { ISD::CTLZ,       MVT::i16, 4 }, // BSR+XOR or BSR+XOR+CMOV
    { ISD::CTLZ,       MVT::i8,  4 }, // BSR+XOR or BSR+XOR+CMOV
    { ISD::CTTZ,       MVT::i32, 3 }, // TEST+BSF+CMOV/BRANCH
    { ISD::CTTZ,       MVT::i16, 3 }, // TEST+BSF+CMOV/BRANCH
    { ISD::CTTZ,       MVT::i8,  3 }, // TEST+BSF+CMOV/BRANCH
    { ISD::CTPOP,      MVT::i32, 8 },
    { ISD::CTPOP,      MVT::i16, 9 },
    { ISD::CTPOP,      MVT::i8,  7 },
    { ISD::SADDO,      MVT::i32, 1 },
    { ISD::SADDO,      MVT::i16, 1 },
    { ISD::SADDO,      MVT::i8,  1 },
    { ISD::UADDO,      MVT::i32, 1 },
    { ISD::UADDO,      MVT::i16, 1 },
    { ISD::UADDO,      MVT::i8,  1 },
  };

  Type *RetTy = ICA.getReturnType();
  Type *OpTy = RetTy;
  Intrinsic::ID IID = ICA.getID();
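  // ISD::DELETED_NODE is used below as a "no mapping" sentinel; it never
  // appears in any of the cost tables, so an unhandled intrinsic simply falls
  // through to the base implementation.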
  unsigned ISD = ISD::DELETED_NODE;
  switch (IID) {
  default:
    break;
  case Intrinsic::bitreverse:
    ISD = ISD::BITREVERSE;
    break;
  case Intrinsic::bswap:
    ISD = ISD::BSWAP;
    break;
  case Intrinsic::ctlz:
    ISD = ISD::CTLZ;
    break;
  case Intrinsic::ctpop:
    ISD = ISD::CTPOP;
    break;
  case Intrinsic::cttz:
    ISD = ISD::CTTZ;
    break;
  case Intrinsic::maxnum:
  case Intrinsic::minnum:
    // FMINNUM has same costs so don't duplicate.
    ISD = ISD::FMAXNUM;
    break;
  case Intrinsic::sadd_sat:
    ISD = ISD::SADDSAT;
    break;
  case Intrinsic::ssub_sat:
    ISD = ISD::SSUBSAT;
    break;
  case Intrinsic::uadd_sat:
    ISD = ISD::UADDSAT;
    break;
  case Intrinsic::usub_sat:
    ISD = ISD::USUBSAT;
    break;
  case Intrinsic::sqrt:
    ISD = ISD::FSQRT;
    break;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
    // SSUBO has same costs so don't duplicate.
    ISD = ISD::SADDO;
    OpTy = RetTy->getContainedType(0);
    break;
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::usub_with_overflow:
    // USUBO has same costs so don't duplicate.
    ISD = ISD::UADDO;
    OpTy = RetTy->getContainedType(0);
    break;
  }

  if (ISD != ISD::DELETED_NODE) {
    // Legalize the type.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, OpTy);
    MVT MTy = LT.second;

    // Attempt to lookup cost.
    if (ST->useGLMDivSqrtCosts())
      if (const auto *Entry = CostTableLookup(GLMCostTbl, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->isSLM())
      if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasCDI())
      if (const auto *Entry = CostTableLookup(AVX512CDCostTbl, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasBWI())
      if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasAVX512())
      if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasXOP())
      if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasAVX2())
      if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE42())
      if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSSE3())
      if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE2())
      if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE1())
      if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasBMI()) {
      if (ST->is64Bit())
        if (const auto *Entry = CostTableLookup(BMI64CostTbl, ISD, MTy))
          return LT.first * Entry->Cost;

      if (const auto *Entry = CostTableLookup(BMI32CostTbl, ISD, MTy))
        return LT.first * Entry->Cost;
    }

    if (ST->hasLZCNT()) {
      if (ST->is64Bit())
        if (const auto *Entry = CostTableLookup(LZCNT64CostTbl, ISD, MTy))
          return LT.first * Entry->Cost;

      if (const auto *Entry = CostTableLookup(LZCNT32CostTbl, ISD, MTy))
        return LT.first * Entry->Cost;
    }

    if (ST->hasPOPCNT()) {
      if (ST->is64Bit())
        if (const auto *Entry = CostTableLookup(POPCNT64CostTbl, ISD, MTy))
          return LT.first * Entry->Cost;

      if (const auto *Entry = CostTableLookup(POPCNT32CostTbl, ISD, MTy))
        return LT.first * Entry->Cost;
    }
    // TODO - add BMI (TZCNT) scalar handling

    if (ST->is64Bit())
      if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
        return LT.first * Entry->Cost;

    if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;
  }

  return BaseT::getIntrinsicInstrCost(ICA, CostKind);
}

int X86TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                      TTI::TargetCostKind CostKind) {
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getIntrinsicInstrCost(ICA, CostKind);

  if (ICA.isTypeBasedOnly())
    return getTypeBasedIntrinsicInstrCost(ICA, CostKind);

  static const CostTblEntry AVX512CostTbl[] = {
    { ISD::ROTL, MVT::v8i64,  1 },
    { ISD::ROTL, MVT::v4i64,  1 },
    { ISD::ROTL, MVT::v2i64,  1 },
    { ISD::ROTL, MVT::v16i32, 1 },
    { ISD::ROTL, MVT::v8i32,  1 },
    { ISD::ROTL, MVT::v4i32,  1 },
    { ISD::ROTR, MVT::v8i64,  1 },
    { ISD::ROTR, MVT::v4i64,  1 },
    { ISD::ROTR, MVT::v2i64,  1 },
    { ISD::ROTR, MVT::v16i32, 1 },
    { ISD::ROTR, MVT::v8i32,  1 },
    { ISD::ROTR, MVT::v4i32,  1 }
  };
  // XOP: ROTL = VPROT(X,Y), ROTR = VPROT(X,SUB(0,Y))
  static const CostTblEntry XOPCostTbl[] = {
    { ISD::ROTL, MVT::v4i64,  4 },
    { ISD::ROTL, MVT::v8i32,  4 },
    { ISD::ROTL, MVT::v16i16, 4 },
    { ISD::ROTL, MVT::v32i8,  4 },
    { ISD::ROTL, MVT::v2i64,  1 },
    { ISD::ROTL, MVT::v4i32,  1 },
    { ISD::ROTL, MVT::v8i16,  1 },
    { ISD::ROTL, MVT::v16i8,  1 },
    { ISD::ROTR, MVT::v4i64,  6 },
    { ISD::ROTR, MVT::v8i32,  6 },
    { ISD::ROTR, MVT::v16i16, 6 },
    { ISD::ROTR, MVT::v32i8,  6 },
    { ISD::ROTR, MVT::v2i64,  2 },
    { ISD::ROTR, MVT::v4i32,  2 },
    { ISD::ROTR, MVT::v8i16,  2 },
    { ISD::ROTR, MVT::v16i8,  2 }
  };
  static const CostTblEntry X64CostTbl[] = { // 64-bit targets
    { ISD::ROTL, MVT::i64, 1 },
    { ISD::ROTR, MVT::i64, 1 },
    { ISD::FSHL, MVT::i64, 4 }
  };
  static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
    { ISD::ROTL, MVT::i32, 1 },
    { ISD::ROTL, MVT::i16, 1 },
    { ISD::ROTL, MVT::i8,  1 },
    { ISD::ROTR, MVT::i32, 1 },
    { ISD::ROTR, MVT::i16, 1 },
    { ISD::ROTR, MVT::i8,  1 },
    { ISD::FSHL, MVT::i32, 4 },
    { ISD::FSHL, MVT::i16, 4 },
    { ISD::FSHL, MVT::i8,  4 }
  };

  Intrinsic::ID IID = ICA.getID();
  Type *RetTy = ICA.getReturnType();
  const SmallVectorImpl<Value *> &Args = ICA.getArgs();
  unsigned ISD = ISD::DELETED_NODE;
  switch (IID) {
  default:
    break;
  case Intrinsic::fshl:
    ISD = ISD::FSHL;
    if (Args[0] == Args[1])
      ISD = ISD::ROTL;
    break;
  case Intrinsic::fshr:
    // FSHR has same costs so don't duplicate.
    ISD = ISD::FSHL;
    if (Args[0] == Args[1])
      ISD = ISD::ROTR;
    break;
  }

  if (ISD != ISD::DELETED_NODE) {
    // Legalize the type.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);
    MVT MTy = LT.second;

    // Attempt to lookup cost.
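    // (e.g. a rotl of <8 x i32> costs 1 via the AVX512 table below, a single
    // vprold-style op, but 4 via the XOP table when only XOP is available.)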
    if (ST->hasAVX512())
      if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasXOP())
      if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->is64Bit())
      if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
        return LT.first * Entry->Cost;

    if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;
  }

  return BaseT::getIntrinsicInstrCost(ICA, CostKind);
}

int X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  static const CostTblEntry SLMCostTbl[] = {
    { ISD::EXTRACT_VECTOR_ELT, MVT::i8,  4 },
    { ISD::EXTRACT_VECTOR_ELT, MVT::i16, 4 },
    { ISD::EXTRACT_VECTOR_ELT, MVT::i32, 4 },
    { ISD::EXTRACT_VECTOR_ELT, MVT::i64, 7 }
  };

  assert(Val->isVectorTy() && "This must be a vector type");
  Type *ScalarType = Val->getScalarType();
  int RegisterFileMoveCost = 0;

  if (Index != -1U && (Opcode == Instruction::ExtractElement ||
                       Opcode == Instruction::InsertElement)) {
    // Legalize the type.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned NumElts = LT.second.getVectorNumElements();
    unsigned SubNumElts = NumElts;
    Index = Index % NumElts;

    // For >128-bit vectors, we need to extract higher 128-bit subvectors.
    // For inserts, we also need to insert the subvector back.
    if (LT.second.getSizeInBits() > 128) {
      assert((LT.second.getSizeInBits() % 128) == 0 && "Illegal vector");
      unsigned NumSubVecs = LT.second.getSizeInBits() / 128;
      SubNumElts = NumElts / NumSubVecs;
      if (SubNumElts <= Index) {
        RegisterFileMoveCost += (Opcode == Instruction::InsertElement ? 2 : 1);
        Index %= SubNumElts;
      }
    }

    if (Index == 0) {
      // Floating point scalars are already located in index #0.
      // Many insertions to #0 can fold away for scalar fp-ops, so let's assume
      // true for all.
      if (ScalarType->isFloatingPointTy())
        return RegisterFileMoveCost;

      // Assume movd/movq XMM -> GPR is relatively cheap on all targets.
      if (ScalarType->isIntegerTy() && Opcode == Instruction::ExtractElement)
        return 1 + RegisterFileMoveCost;
    }

    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Unexpected vector opcode");
    MVT MScalarTy = LT.second.getScalarType();
    if (ST->isSLM())
      if (auto *Entry = CostTableLookup(SLMCostTbl, ISD, MScalarTy))
        return Entry->Cost + RegisterFileMoveCost;

    // Assume pinsr/pextr XMM <-> GPR is relatively cheap on all targets.
    if ((MScalarTy == MVT::i16 && ST->hasSSE2()) ||
        (MScalarTy.isInteger() && ST->hasSSE41()))
      return 1 + RegisterFileMoveCost;

    // Assume insertps is relatively cheap on all targets.
    if (MScalarTy == MVT::f32 && ST->hasSSE41() &&
        Opcode == Instruction::InsertElement)
      return 1 + RegisterFileMoveCost;

    // For extractions we just need to shuffle the element to index 0, which
    // should be very cheap (assume cost = 1). For insertions we need to
    // shuffle the elements to their destination. In both cases we must handle
    // the subvector move(s).
    // If the vector type is already less than 128 bits then don't reduce it.
    // TODO: Under what circumstances should we shuffle using the full width?
    int ShuffleCost = 1;
    if (Opcode == Instruction::InsertElement) {
      auto *SubTy = cast<VectorType>(Val);
      EVT VT = TLI->getValueType(DL, Val);
      if (VT.getScalarType() != MScalarTy || VT.getSizeInBits() >= 128)
        SubTy = VectorType::get(ScalarType, SubNumElts);
      ShuffleCost = getShuffleCost(TTI::SK_PermuteTwoSrc, SubTy, 0, SubTy);
    }
    int IntOrFpCost = ScalarType->isFloatingPointTy() ? 0 : 1;
    return ShuffleCost + IntOrFpCost + RegisterFileMoveCost;
  }

  // Add to the base cost if we know that the extracted element of a vector is
  // destined to be moved to and used in the integer register file.
  if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy())
    RegisterFileMoveCost += 1;

  return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost;
}

unsigned X86TTIImpl::getScalarizationOverhead(VectorType *Ty,
                                              const APInt &DemandedElts,
                                              bool Insert, bool Extract) {
  unsigned Cost = 0;

  // For insertions, an ISD::BUILD_VECTOR style vector initialization can be
  // much cheaper than an accumulation of ISD::INSERT_VECTOR_ELT.
  if (Insert) {
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
    MVT MScalarTy = LT.second.getScalarType();

    if ((MScalarTy == MVT::i16 && ST->hasSSE2()) ||
        (MScalarTy.isInteger() && ST->hasSSE41()) ||
        (MScalarTy == MVT::f32 && ST->hasSSE41())) {
      // For types we can insert directly, insertion into 128-bit subvectors is
      // cheap, followed by a cheap chain of concatenations.
      if (LT.second.getSizeInBits() <= 128) {
        Cost +=
            BaseT::getScalarizationOverhead(Ty, DemandedElts, Insert, false);
      } else {
        unsigned NumSubVecs = LT.second.getSizeInBits() / 128;
        Cost += (PowerOf2Ceil(NumSubVecs) - 1) * LT.first;
        Cost += DemandedElts.countPopulation();

        // For vXf32 cases, insertion into the 0'th index in each v4f32
        // 128-bit vector is free.
        // NOTE: This assumes legalization widens vXf32 vectors.
        if (MScalarTy == MVT::f32)
          for (unsigned i = 0, e = Ty->getNumElements(); i < e; i += 4)
            if (DemandedElts[i])
              Cost--;
      }
    } else if (LT.second.isVector()) {
      // Without fast insertion, we need to use MOVD/MOVQ to pass each demanded
      // integer element as a SCALAR_TO_VECTOR, then we build the vector as a
      // series of UNPCK followed by CONCAT_VECTORS - all of these can be
      // considered cheap.
      if (Ty->isIntOrIntVectorTy())
        Cost += DemandedElts.countPopulation();

      // Get the smaller of the legalized or original pow2-extended number of
      // vector elements, which represents the number of unpacks we'll end up
      // performing.
      unsigned NumElts = LT.second.getVectorNumElements();
      unsigned Pow2Elts = PowerOf2Ceil(Ty->getNumElements());
      Cost += (std::min<unsigned>(NumElts, Pow2Elts) - 1) * LT.first;
    }
  }

  // TODO: Use default extraction for now, but we should investigate extending
  // this to handle repeated subvector extraction.
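  // Illustrative walkthrough of the insertion paths above (hedged): inserting
  // all eight lanes of a <8 x float> on an AVX target takes the fast path
  // with two 128-bit subvectors: (PowerOf2Ceil(2) - 1) * 1 = 1 concatenation,
  // plus 8 element inserts, minus 2 free inserts at lanes 0 and 4, for a
  // total cost of 7.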
  if (Extract)
    Cost += BaseT::getScalarizationOverhead(Ty, DemandedElts, false, Extract);

  return Cost;
}

int X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                MaybeAlign Alignment, unsigned AddressSpace,
                                TTI::TargetCostKind CostKind,
                                const Instruction *I) {
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput) {
    if (isa_and_nonnull<StoreInst>(I)) {
      Value *Ptr = I->getOperand(1);
      // Store instruction with index and scale costs 2 Uops.
      // Check the preceding GEP to identify non-const indices.
      if (auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
        if (!all_of(GEP->indices(), [](Value *V) { return isa<Constant>(V); }))
          return TTI::TCC_Basic * 2;
      }
    }
    return TTI::TCC_Basic;
  }

  // Handle non-power-of-two vectors such as <3 x float>
  if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
    unsigned NumElem = VTy->getNumElements();

    // Handle a few common cases:
    // <3 x float>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
      // Cost = 64 bit store + extract + 32 bit store.
      return 3;

    // <3 x double>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
      // Cost = 128 bit store + unpack + 64 bit store.
      return 3;

    // Assume that all other non-power-of-two numbers are scalarized.
    if (!isPowerOf2_32(NumElem)) {
      APInt DemandedElts = APInt::getAllOnesValue(NumElem);
      int Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(),
                                        Alignment, AddressSpace, CostKind);
      int SplitCost = getScalarizationOverhead(VTy, DemandedElts,
                                               Opcode == Instruction::Load,
                                               Opcode == Instruction::Store);
      return NumElem * Cost + SplitCost;
    }
  }

  // Type legalization can't handle structs
  if (TLI->getValueType(DL, Src, true) == MVT::Other)
    return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                  CostKind);

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  // Each load/store unit costs 1.
  int Cost = LT.first * 1;

  // This isn't exactly right. We're using slow unaligned 32-byte accesses as a
  // proxy for a double-pumped AVX memory interface such as on Sandybridge.
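  // e.g. (illustrative): a plain load of <8 x float> legalizes to a single
  // 32-byte access (LT.first == 1); on a subtarget where unaligned 32-byte
  // memory ops are slow, the doubling below reports a cost of 2 instead of 1.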
  if (LT.second.getStoreSize() == 32 && ST->isUnalignedMem32Slow())
    Cost *= 2;

  return Cost;
}

int X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
                                      unsigned Alignment,
                                      unsigned AddressSpace,
                                      TTI::TargetCostKind CostKind) {
  bool IsLoad = (Instruction::Load == Opcode);
  bool IsStore = (Instruction::Store == Opcode);

  VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy);
  if (!SrcVTy)
    // To calculate scalar take the regular cost, without mask.
    return getMemoryOpCost(Opcode, SrcTy, MaybeAlign(Alignment), AddressSpace,
                           CostKind);

  unsigned NumElem = SrcVTy->getNumElements();
  VectorType *MaskTy =
      VectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem);
  if ((IsLoad && !isLegalMaskedLoad(SrcVTy, MaybeAlign(Alignment))) ||
      (IsStore && !isLegalMaskedStore(SrcVTy, MaybeAlign(Alignment))) ||
      !isPowerOf2_32(NumElem)) {
    // Scalarization
    APInt DemandedElts = APInt::getAllOnesValue(NumElem);
    int MaskSplitCost =
        getScalarizationOverhead(MaskTy, DemandedElts, false, true);
    int ScalarCompareCost = getCmpSelInstrCost(
        Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr,
        CostKind);
    int BranchCost = getCFInstrCost(Instruction::Br, CostKind);
    int MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);
    int ValueSplitCost =
        getScalarizationOverhead(SrcVTy, DemandedElts, IsLoad, IsStore);
    int MemopCost =
        NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                         MaybeAlign(Alignment), AddressSpace,
                                         CostKind);
    return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
  auto VT = TLI->getValueType(DL, SrcVTy);
  int Cost = 0;
  if (VT.isSimple() && LT.second != VT.getSimpleVT() &&
      LT.second.getVectorNumElements() == NumElem)
    // Promotion requires expand/truncate for data and a shuffle for mask.
    Cost += getShuffleCost(TTI::SK_PermuteTwoSrc, SrcVTy, 0, nullptr) +
            getShuffleCost(TTI::SK_PermuteTwoSrc, MaskTy, 0, nullptr);

  else if (LT.second.getVectorNumElements() > NumElem) {
    VectorType *NewMaskTy = VectorType::get(MaskTy->getElementType(),
                                            LT.second.getVectorNumElements());
    // Expanding requires filling the mask with zeroes.
    Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, 0, MaskTy);
  }

  // Pre-AVX512 - each maskmov load costs 2 + store costs ~8.
  if (!ST->hasAVX512())
    return Cost + LT.first * (IsLoad ? 2 : 8);

  // AVX-512 masked load/store is cheaper.
  return Cost + LT.first;
}

int X86TTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
                                          const SCEV *Ptr) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  const unsigned NumVectorInstToHideOverhead = 10;

  // Cost modeling of strided access computation is hidden by the indexing
  // modes of X86 regardless of the stride value. We don't believe that there
  // is a difference between constant strided access in general and a constant
  // strided value which is less than or equal to 64.
  // Even in the case of (loop invariant) stride whose value is not known at
  // compile time, the address computation will not incur more than one extra
  // ADD instruction.
  if (Ty->isVectorTy() && SE) {
    if (!BaseT::isStridedAccess(Ptr))
      return NumVectorInstToHideOverhead;
    if (!BaseT::getConstantStrideStep(SE, Ptr))
      return 1;
  }

  return BaseT::getAddressComputationCost(Ty, SE, Ptr);
}

int X86TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
                                           bool IsPairwise,
                                           TTI::TargetCostKind CostKind) {
  // Just use the default implementation for pair reductions.
  if (IsPairwise)
    return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwise,
                                             CostKind);

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use it as the cost.

  static const CostTblEntry SLMCostTblNoPairWise[] = {
    { ISD::FADD, MVT::v2f64, 3 },
    { ISD::ADD,  MVT::v2i64, 5 },
  };

  static const CostTblEntry SSE2CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD,  MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,  MVT::v2i32, 2 }, // FIXME: chosen to be less than v4i32
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3".
    { ISD::ADD,  MVT::v2i16, 2 }, // The data reported by the IACA tool is "4.3".
    { ISD::ADD,  MVT::v4i16, 3 }, // The data reported by the IACA tool is "4.3".
    { ISD::ADD,  MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3".
    { ISD::ADD,  MVT::v2i8,  2 },
    { ISD::ADD,  MVT::v4i8,  2 },
    { ISD::ADD,  MVT::v8i8,  2 },
    { ISD::ADD,  MVT::v16i8, 3 },
  };

  static const CostTblEntry AVX1CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v4f64, 3 },
    { ISD::FADD, MVT::v4f32, 3 },
    { ISD::FADD, MVT::v8f32, 4 },
    { ISD::ADD,  MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD,  MVT::v4i64, 3 },
    { ISD::ADD,  MVT::v8i32, 5 },
    { ISD::ADD,  MVT::v16i16, 5 },
    { ISD::ADD,  MVT::v32i8, 4 },
  };

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // Before legalizing the type, give a chance to look up illegal narrow types
  // in the table.
  // FIXME: Is there a better way to do this?
  EVT VT = TLI->getValueType(DL, ValTy);
  if (VT.isSimple()) {
    MVT MTy = VT.getSimpleVT();
    if (ST->isSLM())
      if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy))
        return Entry->Cost;

    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
        return Entry->Cost;

    if (ST->hasSSE2())
      if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
        return Entry->Cost;
  }

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  auto *ValVTy = cast<VectorType>(ValTy);

  unsigned ArithmeticCost = 0;
  if (LT.first != 1 && MTy.isVector() &&
      MTy.getVectorNumElements() < ValVTy->getNumElements()) {
    // Type needs to be split. We need LT.first - 1 arithmetic ops.
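    // e.g. (illustrative): an add reduction of <16 x i32> on an SSE2-only
    // target legalizes to four v4i32 parts (LT.first == 4), so three v4i32
    // adds are charged here to combine the parts before the per-part
    // reduction cost from the tables is added.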
    auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(),
                                            MTy.getVectorNumElements());
    ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind);
    ArithmeticCost *= LT.first - 1;
  }

  if (ST->isSLM())
    if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy))
      return ArithmeticCost + Entry->Cost;

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
      return ArithmeticCost + Entry->Cost;

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
      return ArithmeticCost + Entry->Cost;

  // FIXME: These assume a naive kshift+binop lowering, which is probably
  // conservative in most cases.
  static const CostTblEntry AVX512BoolReduction[] = {
    { ISD::AND, MVT::v2i1,   3 },
    { ISD::AND, MVT::v4i1,   5 },
    { ISD::AND, MVT::v8i1,   7 },
    { ISD::AND, MVT::v16i1,  9 },
    { ISD::AND, MVT::v32i1, 11 },
    { ISD::AND, MVT::v64i1, 13 },
    { ISD::OR,  MVT::v2i1,   3 },
    { ISD::OR,  MVT::v4i1,   5 },
    { ISD::OR,  MVT::v8i1,   7 },
    { ISD::OR,  MVT::v16i1,  9 },
    { ISD::OR,  MVT::v32i1, 11 },
    { ISD::OR,  MVT::v64i1, 13 },
  };

  static const CostTblEntry AVX2BoolReduction[] = {
    { ISD::AND, MVT::v16i16, 2 }, // vpmovmskb + cmp
    { ISD::AND, MVT::v32i8,  2 }, // vpmovmskb + cmp
    { ISD::OR,  MVT::v16i16, 2 }, // vpmovmskb + cmp
    { ISD::OR,  MVT::v32i8,  2 }, // vpmovmskb + cmp
  };

  static const CostTblEntry AVX1BoolReduction[] = {
    { ISD::AND, MVT::v4i64,  2 }, // vmovmskpd + cmp
    { ISD::AND, MVT::v8i32,  2 }, // vmovmskps + cmp
    { ISD::AND, MVT::v16i16, 4 }, // vextractf128 + vpand + vpmovmskb + cmp
    { ISD::AND, MVT::v32i8,  4 }, // vextractf128 + vpand + vpmovmskb + cmp
    { ISD::OR,  MVT::v4i64,  2 }, // vmovmskpd + cmp
    { ISD::OR,  MVT::v8i32,  2 }, // vmovmskps + cmp
    { ISD::OR,  MVT::v16i16, 4 }, // vextractf128 + vpor + vpmovmskb + cmp
    { ISD::OR,  MVT::v32i8,  4 }, // vextractf128 + vpor + vpmovmskb + cmp
  };

  static const CostTblEntry SSE2BoolReduction[] = {
    { ISD::AND, MVT::v2i64, 2 }, // movmskpd + cmp
    { ISD::AND, MVT::v4i32, 2 }, // movmskps + cmp
    { ISD::AND, MVT::v8i16, 2 }, // pmovmskb + cmp
    { ISD::AND, MVT::v16i8, 2 }, // pmovmskb + cmp
    { ISD::OR,  MVT::v2i64, 2 }, // movmskpd + cmp
    { ISD::OR,  MVT::v4i32, 2 }, // movmskps + cmp
    { ISD::OR,  MVT::v8i16, 2 }, // pmovmskb + cmp
    { ISD::OR,  MVT::v16i8, 2 }, // pmovmskb + cmp
  };

  // Handle bool allof/anyof patterns.
  if (ValVTy->getElementType()->isIntegerTy(1)) {
    unsigned ArithmeticCost = 0;
    if (LT.first != 1 && MTy.isVector() &&
        MTy.getVectorNumElements() < ValVTy->getNumElements()) {
      // Type needs to be split. We need LT.first - 1 arithmetic ops.
      auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(),
                                              MTy.getVectorNumElements());
      ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind);
      ArithmeticCost *= LT.first - 1;
    }

    if (ST->hasAVX512())
      if (const auto *Entry = CostTableLookup(AVX512BoolReduction, ISD, MTy))
        return ArithmeticCost + Entry->Cost;
    if (ST->hasAVX2())
      if (const auto *Entry = CostTableLookup(AVX2BoolReduction, ISD, MTy))
        return ArithmeticCost + Entry->Cost;
    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1BoolReduction, ISD, MTy))
        return ArithmeticCost + Entry->Cost;
    if (ST->hasSSE2())
      if (const auto *Entry = CostTableLookup(SSE2BoolReduction, ISD, MTy))
        return ArithmeticCost + Entry->Cost;

    return BaseT::getArithmeticReductionCost(Opcode, ValVTy, IsPairwise,
                                             CostKind);
  }

  unsigned NumVecElts = ValVTy->getNumElements();
  unsigned ScalarSize = ValVTy->getScalarSizeInBits();

  // Special case power of 2 reductions where the scalar type isn't changed
  // by type legalization.
  if (!isPowerOf2_32(NumVecElts) || ScalarSize != MTy.getScalarSizeInBits())
    return BaseT::getArithmeticReductionCost(Opcode, ValVTy, IsPairwise,
                                             CostKind);

  unsigned ReductionCost = 0;

  auto *Ty = ValVTy;
  if (LT.first != 1 && MTy.isVector() &&
      MTy.getVectorNumElements() < ValVTy->getNumElements()) {
    // Type needs to be split. We need LT.first - 1 arithmetic ops.
    Ty = VectorType::get(ValVTy->getElementType(), MTy.getVectorNumElements());
    ReductionCost = getArithmeticInstrCost(Opcode, Ty, CostKind);
    ReductionCost *= LT.first - 1;
    NumVecElts = MTy.getVectorNumElements();
  }

  // Now handle reduction with the legal type, taking into account size changes
  // at each level.
  while (NumVecElts > 1) {
    // Determine the size of the remaining vector we need to reduce.
    unsigned Size = NumVecElts * ScalarSize;
    NumVecElts /= 2;
    // If we're reducing from 256/512 bits, use an extract_subvector.
    if (Size > 128) {
      auto *SubTy = VectorType::get(ValVTy->getElementType(), NumVecElts);
      ReductionCost +=
          getShuffleCost(TTI::SK_ExtractSubvector, Ty, NumVecElts, SubTy);
      Ty = SubTy;
    } else if (Size == 128) {
      // Reducing from 128 bits is a permute of v2f64/v2i64.
      VectorType *ShufTy;
      if (ValVTy->isFloatingPointTy())
        ShufTy = VectorType::get(Type::getDoubleTy(ValVTy->getContext()), 2);
      else
        ShufTy = VectorType::get(Type::getInt64Ty(ValVTy->getContext()), 2);
      ReductionCost +=
          getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, 0, nullptr);
    } else if (Size == 64) {
      // Reducing from 64 bits is a shuffle of v4f32/v4i32.
      VectorType *ShufTy;
      if (ValVTy->isFloatingPointTy())
        ShufTy = VectorType::get(Type::getFloatTy(ValVTy->getContext()), 4);
      else
        ShufTy = VectorType::get(Type::getInt32Ty(ValVTy->getContext()), 4);
      ReductionCost +=
          getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, 0, nullptr);
    } else {
      // Reducing from smaller size is a shift by immediate.
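      // e.g. (illustrative): for a v16i8 add reduction, once the remaining
      // live lanes fit in 32 bits, the step is modeled as an immediate LShr
      // on a <4 x i32> vector (128 / 32 lanes of i32), built just below.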
      auto *ShiftTy = FixedVectorType::get(
          Type::getIntNTy(ValVTy->getContext(), Size), 128 / Size);
      ReductionCost += getArithmeticInstrCost(
          Instruction::LShr, ShiftTy, CostKind,
          TargetTransformInfo::OK_AnyValue,
          TargetTransformInfo::OK_UniformConstantValue,
          TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
    }

    // Add the arithmetic op for this level.
    ReductionCost += getArithmeticInstrCost(Opcode, Ty, CostKind);
  }

  // Add the final extract element to the cost.
  return ReductionCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
}

int X86TTIImpl::getMinMaxCost(Type *Ty, Type *CondTy, bool IsUnsigned) {
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  MVT MTy = LT.second;

  int ISD;
  if (Ty->isIntOrIntVectorTy()) {
    ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
  } else {
    assert(Ty->isFPOrFPVectorTy() &&
           "Expected floating point or integer vector type.");
    ISD = ISD::FMINNUM;
  }

  static const CostTblEntry SSE1CostTbl[] = {
    {ISD::FMINNUM, MVT::v4f32, 1},
  };

  static const CostTblEntry SSE2CostTbl[] = {
    {ISD::FMINNUM, MVT::v2f64, 1},
    {ISD::SMIN,    MVT::v8i16, 1},
    {ISD::UMIN,    MVT::v16i8, 1},
  };

  static const CostTblEntry SSE41CostTbl[] = {
    {ISD::SMIN, MVT::v4i32, 1},
    {ISD::UMIN, MVT::v4i32, 1},
    {ISD::UMIN, MVT::v8i16, 1},
    {ISD::SMIN, MVT::v16i8, 1},
  };

  static const CostTblEntry SSE42CostTbl[] = {
    {ISD::UMIN, MVT::v2i64, 3}, // xor+pcmpgtq+blendvpd
  };

  static const CostTblEntry AVX1CostTbl[] = {
    {ISD::FMINNUM, MVT::v8f32,  1},
    {ISD::FMINNUM, MVT::v4f64,  1},
    {ISD::SMIN,    MVT::v8i32,  3},
    {ISD::UMIN,    MVT::v8i32,  3},
    {ISD::SMIN,    MVT::v16i16, 3},
    {ISD::UMIN,    MVT::v16i16, 3},
    {ISD::SMIN,    MVT::v32i8,  3},
    {ISD::UMIN,    MVT::v32i8,  3},
  };

  static const CostTblEntry AVX2CostTbl[] = {
    {ISD::SMIN, MVT::v8i32,  1},
    {ISD::UMIN, MVT::v8i32,  1},
    {ISD::SMIN, MVT::v16i16, 1},
    {ISD::UMIN, MVT::v16i16, 1},
    {ISD::SMIN, MVT::v32i8,  1},
    {ISD::UMIN, MVT::v32i8,  1},
  };

  static const CostTblEntry AVX512CostTbl[] = {
    {ISD::FMINNUM, MVT::v16f32, 1},
    {ISD::FMINNUM, MVT::v8f64,  1},
    {ISD::SMIN,    MVT::v2i64,  1},
    {ISD::UMIN,    MVT::v2i64,  1},
    {ISD::SMIN,    MVT::v4i64,  1},
    {ISD::UMIN,    MVT::v4i64,  1},
    {ISD::SMIN,    MVT::v8i64,  1},
    {ISD::UMIN,    MVT::v8i64,  1},
    {ISD::SMIN,    MVT::v16i32, 1},
    {ISD::UMIN,    MVT::v16i32, 1},
  };

  static const CostTblEntry AVX512BWCostTbl[] = {
    {ISD::SMIN, MVT::v32i16, 1},
    {ISD::UMIN, MVT::v32i16, 1},
    {ISD::SMIN, MVT::v64i8,  1},
    {ISD::UMIN, MVT::v64i8,  1},
  };

  // If we have a native MIN/MAX instruction for this type, use it.
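  // e.g. (illustrative): smin on <8 x i32> is a single vpminsd on AVX2
  // (cost 1 in the AVX2 table above), while on AVX1 the same operation
  // needs per-128-bit-lane work and is modeled as cost 3.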
  if (ST->hasBWI())
    if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  unsigned CmpOpcode;
  if (Ty->isFPOrFPVectorTy()) {
    CmpOpcode = Instruction::FCmp;
  } else {
    assert(Ty->isIntOrIntVectorTy() &&
           "expecting floating point or integer type for min/max reduction");
    CmpOpcode = Instruction::ICmp;
  }

  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
  // Otherwise fall back to cmp+select.
  return getCmpSelInstrCost(CmpOpcode, Ty, CondTy, CostKind) +
         getCmpSelInstrCost(Instruction::Select, Ty, CondTy, CostKind);
}

int X86TTIImpl::getMinMaxReductionCost(VectorType *ValTy, VectorType *CondTy,
                                       bool IsPairwise, bool IsUnsigned,
                                       TTI::TargetCostKind CostKind) {
  // Just use the default implementation for pair reductions.
  if (IsPairwise)
    return BaseT::getMinMaxReductionCost(ValTy, CondTy, IsPairwise, IsUnsigned,
                                         CostKind);

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD;
  if (ValTy->isIntOrIntVectorTy()) {
    ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
  } else {
    assert(ValTy->isFPOrFPVectorTy() &&
           "Expected floating point or integer vector type.");
    ISD = ISD::FMINNUM;
  }

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use it as the cost.

  static const CostTblEntry SSE2CostTblNoPairWise[] = {
    {ISD::UMIN, MVT::v2i16, 5}, // need pxors to use pminsw/pmaxsw
    {ISD::UMIN, MVT::v4i16, 7}, // need pxors to use pminsw/pmaxsw
    {ISD::UMIN, MVT::v8i16, 9}, // need pxors to use pminsw/pmaxsw
  };

  static const CostTblEntry SSE41CostTblNoPairWise[] = {
    {ISD::SMIN, MVT::v2i16, 3}, // same as sse2
    {ISD::SMIN, MVT::v4i16, 5}, // same as sse2
    {ISD::UMIN, MVT::v2i16, 5}, // same as sse2
    {ISD::UMIN, MVT::v4i16, 7}, // same as sse2
    {ISD::SMIN, MVT::v8i16, 4}, // phminposuw+xor
    {ISD::UMIN, MVT::v8i16, 4}, // FIXME: umin is cheaper than umax
    {ISD::SMIN, MVT::v2i8,  3}, // pminsb
    {ISD::SMIN, MVT::v4i8,  5}, // pminsb
    {ISD::SMIN, MVT::v8i8,  7}, // pminsb
    {ISD::SMIN, MVT::v16i8, 6},
    {ISD::UMIN, MVT::v2i8,  3}, // same as sse2
    {ISD::UMIN, MVT::v4i8,  5}, // same as sse2
    {ISD::UMIN, MVT::v8i8,  7}, // same as sse2
    {ISD::UMIN, MVT::v16i8, 6}, // FIXME: umin is cheaper than umax
  };

  static const CostTblEntry AVX1CostTblNoPairWise[] = {
    {ISD::SMIN, MVT::v16i16, 6},
    {ISD::UMIN, MVT::v16i16, 6}, // FIXME: umin is cheaper than umax
    {ISD::SMIN, MVT::v32i8,  8},
    {ISD::UMIN, MVT::v32i8,  8},
  };

  static const CostTblEntry AVX512BWCostTblNoPairWise[] = {
    {ISD::SMIN, MVT::v32i16, 8},
    {ISD::UMIN, MVT::v32i16, 8}, // FIXME: umin is cheaper than umax
    {ISD::SMIN, MVT::v64i8,  10},
    {ISD::UMIN, MVT::v64i8,  10},
  };

  // Before legalizing the type, give a chance to look up illegal narrow types
  // in the table.
  // FIXME: Is there a better way to do this?
  EVT VT = TLI->getValueType(DL, ValTy);
  if (VT.isSimple()) {
    MVT MTy = VT.getSimpleVT();
    if (ST->hasBWI())
      if (const auto *Entry =
              CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy))
        return Entry->Cost;

    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
        return Entry->Cost;

    if (ST->hasSSE41())
      if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy))
        return Entry->Cost;

    if (ST->hasSSE2())
      if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
        return Entry->Cost;
  }

  auto *ValVTy = cast<VectorType>(ValTy);
  unsigned NumVecElts = ValVTy->getNumElements();

  auto *Ty = ValVTy;
  unsigned MinMaxCost = 0;
  if (LT.first != 1 && MTy.isVector() &&
      MTy.getVectorNumElements() < ValVTy->getNumElements()) {
    // Type needs to be split. We need LT.first - 1 operations.
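    // e.g. (illustrative): a umin reduction of <32 x i8> on SSE4.1
    // legalizes to two v16i8 halves (LT.first == 2), so one v16i8 umin
    // (pminub) is charged here before the v16i8 table entry is added.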
    Ty = VectorType::get(ValVTy->getElementType(), MTy.getVectorNumElements());
    auto *SubCondTy = VectorType::get(
        cast<VectorType>(CondTy)->getElementType(), MTy.getVectorNumElements());
    MinMaxCost = getMinMaxCost(Ty, SubCondTy, IsUnsigned);
    MinMaxCost *= LT.first - 1;
    NumVecElts = MTy.getVectorNumElements();
  }

  if (ST->hasBWI())
    if (const auto *Entry =
            CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy))
      return MinMaxCost + Entry->Cost;

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
      return MinMaxCost + Entry->Cost;

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy))
      return MinMaxCost + Entry->Cost;

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
      return MinMaxCost + Entry->Cost;

  unsigned ScalarSize = ValTy->getScalarSizeInBits();

  // Special case power of 2 reductions where the scalar type isn't changed
  // by type legalization.
  if (!isPowerOf2_32(ValVTy->getNumElements()) ||
      ScalarSize != MTy.getScalarSizeInBits())
    return BaseT::getMinMaxReductionCost(ValTy, CondTy, IsPairwise, IsUnsigned,
                                         CostKind);

  // Now handle reduction with the legal type, taking into account size changes
  // at each level.
  while (NumVecElts > 1) {
    // Determine the size of the remaining vector we need to reduce.
    unsigned Size = NumVecElts * ScalarSize;
    NumVecElts /= 2;
    // If we're reducing from 256/512 bits, use an extract_subvector.
    if (Size > 128) {
      auto *SubTy = VectorType::get(ValVTy->getElementType(), NumVecElts);
      MinMaxCost +=
          getShuffleCost(TTI::SK_ExtractSubvector, Ty, NumVecElts, SubTy);
      Ty = SubTy;
    } else if (Size == 128) {
      // Reducing from 128 bits is a permute of v2f64/v2i64.
      VectorType *ShufTy;
      if (ValTy->isFloatingPointTy())
        ShufTy = VectorType::get(Type::getDoubleTy(ValTy->getContext()), 2);
      else
        ShufTy = VectorType::get(Type::getInt64Ty(ValTy->getContext()), 2);
      MinMaxCost +=
          getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, 0, nullptr);
    } else if (Size == 64) {
      // Reducing from 64 bits is a shuffle of v4f32/v4i32.
      VectorType *ShufTy;
      if (ValTy->isFloatingPointTy())
        ShufTy = VectorType::get(Type::getFloatTy(ValTy->getContext()), 4);
      else
        ShufTy = VectorType::get(Type::getInt32Ty(ValTy->getContext()), 4);
      MinMaxCost +=
          getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, 0, nullptr);
    } else {
      // Reducing from smaller size is a shift by immediate.
      VectorType *ShiftTy = VectorType::get(
          Type::getIntNTy(ValTy->getContext(), Size), 128 / Size);
      MinMaxCost += getArithmeticInstrCost(
          Instruction::LShr, ShiftTy, TTI::TCK_RecipThroughput,
          TargetTransformInfo::OK_AnyValue,
          TargetTransformInfo::OK_UniformConstantValue,
          TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
    }

    // Add the min/max op for this level.
    auto *SubCondTy =
        FixedVectorType::get(CondTy->getElementType(), Ty->getNumElements());
    MinMaxCost += getMinMaxCost(Ty, SubCondTy, IsUnsigned);
  }

  // Add the final extract element to the cost.
  return MinMaxCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
}

/// Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
int X86TTIImpl::getIntImmCost(int64_t Val) {
  if (Val == 0)
    return TTI::TCC_Free;

  if (isInt<32>(Val))
    return TTI::TCC_Basic;

  return 2 * TTI::TCC_Basic;
}

int X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
                              TTI::TargetCostKind CostKind) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Never hoist constants larger than 128 bits, because this might lead to
  // incorrect code generation or assertions in codegen.
  // FIXME: Create a cost model for types larger than i128 once the codegen
  // issues have been fixed.
  if (BitSize > 128)
    return TTI::TCC_Free;

  if (Imm == 0)
    return TTI::TCC_Free;

  // Sign-extend all constants to a multiple of 64-bit.
  APInt ImmVal = Imm;
  if (BitSize % 64 != 0)
    ImmVal = Imm.sext(alignTo(BitSize, 64));

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1, Cost);
}

int X86TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                  const APInt &Imm, Type *Ty,
                                  TTI::TargetCostKind CostKind) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::ICmp:
    // This is an imperfect hack to prevent constant hoisting of
    // compares that might be trying to check if a 64-bit value fits in
    // 32-bits. The backend can optimize these cases using a right shift by 32.
    // Ideally we would check the compare predicate here. There are also other
    // similar immediates the backend can use shifts for.
    if (Idx == 1 && Imm.getBitWidth() == 64) {
      uint64_t ImmVal = Imm.getZExtValue();
      if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff)
        return TTI::TCC_Free;
    }
    ImmIdx = 1;
    break;
  case Instruction::And:
    // We support 64-bit ANDs with immediates with 32-bits of leading zeroes
    // by using a 32-bit operation with implicit zero extension. Detect such
    // immediates here as the normal path expects bit 31 to be sign extended.
    if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
      return TTI::TCC_Free;
    ImmIdx = 1;
    break;
  case Instruction::Add:
  case Instruction::Sub:
    // For add/sub, we can use the opposite instruction for INT32_MIN.
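    // e.g. (illustrative): add i64 %x, 2147483648 cannot encode its
    // immediate as a sign-extended imm32, but the backend can emit
    // sub %x, -2147483648 (INT32_MIN, which does fit) instead, so the
    // constant is reported as free.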
    if (Idx == 1 && Imm.getBitWidth() == 64 && Imm.getZExtValue() == 0x80000000)
      return TTI::TCC_Free;
    ImmIdx = 1;
    break;
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
    // Division by constant is typically expanded later into a different
    // instruction sequence. This completely changes the constants.
    // Report them as "free" to stop ConstantHoist from marking them as opaque.
    return TTI::TCC_Free;
  case Instruction::Mul:
  case Instruction::Or:
  case Instruction::Xor:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    int NumConstants = divideCeil(BitSize, 64);
    int Cost = X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<int>(TTI::TCC_Free)
               : Cost;
  }

  return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
}

int X86TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                    const APInt &Imm, Type *Ty,
                                    TTI::TargetCostKind CostKind) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
}

// Return an average cost of a Gather / Scatter instruction; may be improved
// later.
int X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy, Value *Ptr,
                                unsigned Alignment, unsigned AddressSpace) {

  assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost");
  unsigned VF = cast<VectorType>(SrcVTy)->getNumElements();

  // Try to reduce index size from 64 bit (default for GEP) to 32. It is
  // essential for VF 16. If the index can't be reduced to 32, the operation
  // will use 16 x 64 indices, which do not fit in a zmm and need
  // to be split.
  // Also check that the base pointer is the same for all lanes,
  // and that there's at most one variable index.
  auto getIndexSizeInBits = [](Value *Ptr, const DataLayout &DL) {
    unsigned IndexSize = DL.getPointerSizeInBits();
    GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
    if (IndexSize < 64 || !GEP)
      return IndexSize;

    unsigned NumOfVarIndices = 0;
    Value *Ptrs = GEP->getPointerOperand();
    if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs))
      return IndexSize;
    for (unsigned i = 1; i < GEP->getNumOperands(); ++i) {
      if (isa<Constant>(GEP->getOperand(i)))
        continue;
      Type *IndxTy = GEP->getOperand(i)->getType();
      if (auto *IndexVTy = dyn_cast<VectorType>(IndxTy))
        IndxTy = IndexVTy->getElementType();
      if ((IndxTy->getPrimitiveSizeInBits() == 64 &&
           !isa<SExtInst>(GEP->getOperand(i))) ||
          ++NumOfVarIndices > 1)
        return IndexSize; // 64
    }
    return (unsigned)32;
  };

  // Trying to reduce IndexSize to 32 bits for vector 16.
  // By default the IndexSize is equal to pointer size.
  unsigned IndexSize = (ST->hasAVX512() && VF >= 16)
                           ? getIndexSizeInBits(Ptr, DL)
                           : DL.getPointerSizeInBits();

  auto *IndexVTy = FixedVectorType::get(
      IntegerType::get(SrcVTy->getContext(), IndexSize), VF);
  std::pair<int, MVT> IdxsLT = TLI->getTypeLegalizationCost(DL, IndexVTy);
  std::pair<int, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, SrcVTy);
  int SplitFactor = std::max(IdxsLT.first, SrcLT.first);
  if (SplitFactor > 1) {
    // Handle splitting of vector of pointers.
    auto *SplitSrcTy =
        FixedVectorType::get(SrcVTy->getScalarType(), VF / SplitFactor);
    return SplitFactor * getGSVectorCost(Opcode, SplitSrcTy, Ptr, Alignment,
                                         AddressSpace);
  }

  // The gather / scatter cost is given by Intel architects. It is a rough
  // number since we are looking at one instruction at a time.
  const int GSOverhead = (Opcode == Instruction::Load)
                             ? ST->getGatherOverhead()
                             : ST->getScatterOverhead();
  return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                           MaybeAlign(Alignment), AddressSpace,
                                           TTI::TCK_RecipThroughput);
}

/// Return the cost of full scalarization of gather / scatter operation.
///
/// Opcode - Load or Store instruction.
/// SrcVTy - The type of the data vector that should be gathered or scattered.
/// VariableMask - The mask is non-constant at compile time.
/// Alignment - Alignment for one element.
/// AddressSpace - pointer[s] address space.
///
int X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy,
                                bool VariableMask, unsigned Alignment,
                                unsigned AddressSpace) {
  unsigned VF = cast<VectorType>(SrcVTy)->getNumElements();
  APInt DemandedElts = APInt::getAllOnesValue(VF);
  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;

  int MaskUnpackCost = 0;
  if (VariableMask) {
    VectorType *MaskTy =
        VectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF);
    MaskUnpackCost =
        getScalarizationOverhead(MaskTy, DemandedElts, false, true);
    int ScalarCompareCost = getCmpSelInstrCost(
        Instruction::ICmp, Type::getInt1Ty(SrcVTy->getContext()), nullptr,
        CostKind);
    int BranchCost = getCFInstrCost(Instruction::Br, CostKind);
    MaskUnpackCost += VF * (BranchCost + ScalarCompareCost);
  }

  // The cost of the scalar loads/stores.
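  // Illustrative total (hedged): for a <4 x float> gather with a variable
  // mask, this sums to 4 scalar loads, plus 4 compare+branch pairs from the
  // mask unpacking above, plus the element insert costs added below.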
  int MemoryOpCost = VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                          MaybeAlign(Alignment), AddressSpace,
                                          CostKind);

  int InsertExtractCost = 0;
  if (Opcode == Instruction::Load)
    for (unsigned i = 0; i < VF; ++i)
      // Add the cost of inserting each scalar load into the vector.
      InsertExtractCost +=
          getVectorInstrCost(Instruction::InsertElement, SrcVTy, i);
  else
    for (unsigned i = 0; i < VF; ++i)
      // Add the cost of extracting each element out of the data vector.
      InsertExtractCost +=
          getVectorInstrCost(Instruction::ExtractElement, SrcVTy, i);

  return MemoryOpCost + MaskUnpackCost + InsertExtractCost;
}

/// Calculate the cost of Gather / Scatter operation
int X86TTIImpl::getGatherScatterOpCost(
    unsigned Opcode, Type *SrcVTy, Value *Ptr, bool VariableMask,
    unsigned Alignment, TTI::TargetCostKind CostKind,
    const Instruction *I = nullptr) {

  if (CostKind != TTI::TCK_RecipThroughput)
    return 1;

  assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter");
  unsigned VF = cast<VectorType>(SrcVTy)->getNumElements();
  PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
  if (!PtrTy && Ptr->getType()->isVectorTy())
    PtrTy = dyn_cast<PointerType>(
        cast<VectorType>(Ptr->getType())->getElementType());
  assert(PtrTy && "Unexpected type for Ptr argument");
  unsigned AddressSpace = PtrTy->getAddressSpace();

  bool Scalarize = false;
  if ((Opcode == Instruction::Load &&
       !isLegalMaskedGather(SrcVTy, MaybeAlign(Alignment))) ||
      (Opcode == Instruction::Store &&
       !isLegalMaskedScatter(SrcVTy, MaybeAlign(Alignment))))
    Scalarize = true;
  // Gather / Scatter for vector 2 is not profitable on KNL / SKX.
  // Vector-4 gather/scatter instructions do not exist on KNL.
  // We can extend it to 8 elements, but zeroing upper bits of
  // the mask vector will add more instructions. Right now we give the scalar
  // cost of vector-4 for KNL. TODO: Check, maybe the gather/scatter
  // instruction is better in the VariableMask case.
  if (ST->hasAVX512() && (VF == 2 || (VF == 4 && !ST->hasVLX())))
    Scalarize = true;

  if (Scalarize)
    return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment,
                           AddressSpace);

  return getGSVectorCost(Opcode, SrcVTy, Ptr, Alignment, AddressSpace);
}

bool X86TTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                               TargetTransformInfo::LSRCost &C2) {
  // X86 differs from the generic implementation by making instruction count
  // the first priority.
  return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost, C1.NumIVMuls,
                  C1.NumBaseAdds, C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
         std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost, C2.NumIVMuls,
                  C2.NumBaseAdds, C2.ScaleCost, C2.ImmCost, C2.SetupCost);
}

bool X86TTIImpl::canMacroFuseCmp() {
  return ST->hasMacroFusion() || ST->hasBranchFusion();
}

bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, MaybeAlign Alignment) {
  if (!ST->hasAVX())
    return false;

  // The backend can't handle a single element vector.
  if (isa<VectorType>(DataTy) &&
      cast<VectorType>(DataTy)->getNumElements() == 1)
    return false;

  Type *ScalarTy = DataTy->getScalarType();

  if (ScalarTy->isPointerTy())
    return true;

  if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
    return true;

  if (!ScalarTy->isIntegerTy())
    return false;

  unsigned IntWidth = ScalarTy->getIntegerBitWidth();
  return IntWidth == 32 || IntWidth == 64 ||
         ((IntWidth == 8 || IntWidth == 16) && ST->hasBWI());
}

bool X86TTIImpl::isLegalMaskedStore(Type *DataType, MaybeAlign Alignment) {
  return isLegalMaskedLoad(DataType, Alignment);
}

bool X86TTIImpl::isLegalNTLoad(Type *DataType, Align Alignment) {
  unsigned DataSize = DL.getTypeStoreSize(DataType);
  // The only supported nontemporal loads are for aligned vectors of 16 or 32
  // bytes. Note that 32-byte nontemporal vector loads are supported by AVX2
  // (the equivalent stores only require AVX).
  if (Alignment >= DataSize && (DataSize == 16 || DataSize == 32))
    return DataSize == 16 ? ST->hasSSE1() : ST->hasAVX2();

  return false;
}

bool X86TTIImpl::isLegalNTStore(Type *DataType, Align Alignment) {
  unsigned DataSize = DL.getTypeStoreSize(DataType);

  // SSE4A supports nontemporal stores of float and double at arbitrary
  // alignment.
  if (ST->hasSSE4A() && (DataType->isFloatTy() || DataType->isDoubleTy()))
    return true;

  // Besides the SSE4A subtarget exception above, only aligned stores are
  // available nontemporally on any other subtarget. And only stores with a
  // size of 4..32 bytes (powers of 2, only) are permitted.
  if (Alignment < DataSize || DataSize < 4 || DataSize > 32 ||
      !isPowerOf2_32(DataSize))
    return false;

  // 32-byte vector nontemporal stores are supported by AVX (the equivalent
  // loads require AVX2).
  if (DataSize == 32)
    return ST->hasAVX();
  else if (DataSize == 16)
    return ST->hasSSE1();
  return true;
}

bool X86TTIImpl::isLegalMaskedExpandLoad(Type *DataTy) {
  if (!isa<VectorType>(DataTy))
    return false;

  if (!ST->hasAVX512())
    return false;

  // The backend can't handle a single element vector.
  if (cast<VectorType>(DataTy)->getNumElements() == 1)
    return false;

  Type *ScalarTy = cast<VectorType>(DataTy)->getElementType();

  if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
    return true;

  if (!ScalarTy->isIntegerTy())
    return false;

  unsigned IntWidth = ScalarTy->getIntegerBitWidth();
  return IntWidth == 32 || IntWidth == 64 ||
         ((IntWidth == 8 || IntWidth == 16) && ST->hasVBMI2());
}

bool X86TTIImpl::isLegalMaskedCompressStore(Type *DataTy) {
  return isLegalMaskedExpandLoad(DataTy);
}

bool X86TTIImpl::isLegalMaskedGather(Type *DataTy, MaybeAlign Alignment) {
  // Some CPUs have better gather performance than others.
  // TODO: Remove the explicit ST->hasAVX512()? That would mean we would only
  // enable gather with a -march.
  if (!(ST->hasAVX512() || (ST->hasFastGather() && ST->hasAVX2())))
    return false;

  // This function is called now in two cases: from the Loop Vectorizer
  // and from the Scalarizer.
  // When the Loop Vectorizer asks about legality of the feature,
  // the vectorization factor is not calculated yet. The Loop Vectorizer
  // sends a scalar type and the decision is based on the width of the
  // scalar element.
  // Later on, the cost model will estimate usage of this intrinsic based on
  // the vector type.
  // The Scalarizer asks again about legality. It sends a vector type.
  // In this case we can reject non-power-of-2 vectors.
  // We also reject single element vectors as the type legalizer can't
  // scalarize them.
  if (auto *DataVTy = dyn_cast<VectorType>(DataTy)) {
    unsigned NumElts = DataVTy->getNumElements();
    if (NumElts == 1 || !isPowerOf2_32(NumElts))
      return false;
  }
  Type *ScalarTy = DataTy->getScalarType();
  if (ScalarTy->isPointerTy())
    return true;

  if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
    return true;

  if (!ScalarTy->isIntegerTy())
    return false;

  unsigned IntWidth = ScalarTy->getIntegerBitWidth();
  return IntWidth == 32 || IntWidth == 64;
}

bool X86TTIImpl::isLegalMaskedScatter(Type *DataType, MaybeAlign Alignment) {
  // AVX2 doesn't support scatter.
  if (!ST->hasAVX512())
    return false;
  return isLegalMaskedGather(DataType, Alignment);
}

bool X86TTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
  EVT VT = TLI->getValueType(DL, DataType);
  return TLI->isOperationLegal(IsSigned ? ISD::SDIVREM : ISD::UDIVREM, VT);
}

bool X86TTIImpl::isFCmpOrdCheaperThanFCmpZero(Type *Ty) {
  return false;
}

bool X86TTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();

  // Work this as a subsetting of subtarget features.
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
  FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
  return (RealCallerBits & RealCalleeBits) == RealCalleeBits;
}

bool X86TTIImpl::areFunctionArgsABICompatible(
    const Function *Caller, const Function *Callee,
    SmallPtrSetImpl<Argument *> &Args) const {
  if (!BaseT::areFunctionArgsABICompatible(Caller, Callee, Args))
    return false;

  // If we get here, we know the target features match. If one function
  // considers 512-bit vectors legal and the other does not, consider them
  // incompatible.
  const TargetMachine &TM = getTLI()->getTargetMachine();

  if (TM.getSubtarget<X86Subtarget>(*Caller).useAVX512Regs() ==
      TM.getSubtarget<X86Subtarget>(*Callee).useAVX512Regs())
    return true;

  // Consider the arguments compatible if they aren't vectors or aggregates.
  // FIXME: Look at the size of vectors.
  // FIXME: Look at the element types of aggregates to see if there are vectors.
  // FIXME: The API of this function seems intended to allow arguments
  // to be removed from the set, but the caller doesn't check if the set
  // becomes empty so that may not work in practice.
  return llvm::none_of(Args, [](Argument *A) {
    auto *EltTy = cast<PointerType>(A->getType())->getElementType();
    return EltTy->isVectorTy() || EltTy->isAggregateType();
  });
}

X86TTIImpl::TTI::MemCmpExpansionOptions
X86TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  TTI::MemCmpExpansionOptions Options;
  Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
  Options.NumLoadsPerBlock = 2;
  // All GPR and vector loads can be unaligned.
  Options.AllowOverlappingLoads = true;
  if (IsZeroCmp) {
    // Only enable vector loads for equality comparison. Right now the vector
    // version is not as fast for three way compare (see #33329).
    const unsigned PreferredWidth = ST->getPreferVectorWidth();
    if (PreferredWidth >= 512 && ST->hasAVX512())
      Options.LoadSizes.push_back(64);
    if (PreferredWidth >= 256 && ST->hasAVX())
      Options.LoadSizes.push_back(32);
    if (PreferredWidth >= 128 && ST->hasSSE2())
      Options.LoadSizes.push_back(16);
  }
  if (ST->is64Bit()) {
    Options.LoadSizes.push_back(8);
  }
  Options.LoadSizes.push_back(4);
  Options.LoadSizes.push_back(2);
  Options.LoadSizes.push_back(1);
  return Options;
}

bool X86TTIImpl::enableInterleavedAccessVectorization() {
  // TODO: We expect this to be beneficial regardless of arch,
  // but there are currently some unexplained performance artifacts on Atom.
  // As a temporary solution, disable on Atom.
  return !(ST->isAtom());
}

// Get estimation for interleaved load/store operations for AVX2.
// \p Factor is the interleaved-access factor (stride) - number of
// (interleaved) elements in the group.
// \p Indices contains the indices for a strided load: when the
// interleaved load has gaps they indicate which elements are used.
// If Indices is empty (or if the number of indices is equal to the size
// of the interleaved-access as given in \p Factor) the access has no gaps.
//
// As opposed to AVX-512, AVX2 does not have generic shuffles that allow
// computing the cost using a generic formula as a function of generic
// shuffles. We therefore use a lookup table instead, filled according to
// the instruction sequences that codegen currently generates.
int X86TTIImpl::getInterleavedMemoryOpCostAVX2(unsigned Opcode, Type *VecTy,
                                               unsigned Factor,
                                               ArrayRef<unsigned> Indices,
                                               unsigned Alignment,
                                               unsigned AddressSpace,
                                               TTI::TargetCostKind CostKind,
                                               bool UseMaskForCond,
                                               bool UseMaskForGaps) {

  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind,
                                             UseMaskForCond, UseMaskForGaps);

  // We currently support only fully-interleaved groups, with no gaps.
  // TODO: Support also strided loads (interleaved-groups with gaps).
  if (Indices.size() && Indices.size() != Factor)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace,
                                             CostKind);

  // VecTy for interleave memop is <VF*Factor x Elt>.
  // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
  // VecTy = <12 x i32>.
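  // Illustrative (hedged): for that <12 x i32> example on an AVX2 target the
  // legalized type is a 256-bit vector, so the NumOfMemOps computed below is
  // ceil(48 bytes / 32 bytes) = 2 memory operations.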
  MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;

  // This function can be called with VecTy = <6 x i128> and Factor = 3, in
  // which case VF = 2, while v2i128 is an unsupported MVT vector type
  // (see MachineValueType.h::getVectorVT()).
  if (!LegalVT.isVector())
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace,
                                             CostKind);

  unsigned VF = cast<VectorType>(VecTy)->getNumElements() / Factor;
  Type *ScalarTy = cast<VectorType>(VecTy)->getElementType();

  // Calculate the number of memory operations (NumOfMemOps) required
  // to load/store the VecTy.
  unsigned VecTySize = DL.getTypeStoreSize(VecTy);
  unsigned LegalVTSize = LegalVT.getStoreSize();
  unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;

  // Get the cost of one memory operation.
  auto *SingleMemOpTy =
      FixedVectorType::get(cast<VectorType>(VecTy)->getElementType(),
                           LegalVT.getVectorNumElements());
  unsigned MemOpCost = getMemoryOpCost(Opcode, SingleMemOpTy,
                                       MaybeAlign(Alignment), AddressSpace,
                                       CostKind);

  auto *VT = FixedVectorType::get(ScalarTy, VF);
  EVT ETy = TLI->getValueType(DL, VT);
  if (!ETy.isSimple())
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace,
                                             CostKind);

  // TODO: Complete for other data-types and strides.
  // Each combination of Stride, ElementTy and VF results in a different
  // sequence; the cost tables are therefore accessed with:
  // Factor (stride) and VectorType = VF x ElemType.
  // The Cost accounts only for the shuffle sequence;
  // the cost of the loads/stores is accounted for separately.
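  // For illustration: a stride-3 load of <24 x float> (VF = 8) legalizes to
  // three 32-byte memory ops (NumOfMemOps = 3) and matches the
  // { 3, v8f32, 17 } load entry below, giving a total of 3 * MemOpCost + 17;
  // the exact MemOpCost depends on the subtarget and alignment.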
  //
  static const CostTblEntry AVX2InterleavedLoadTbl[] = {
    { 2, MVT::v4i64,  6 }, // (load 8i64 and) deinterleave into 2 x 4i64
    { 2, MVT::v4f64,  6 }, // (load 8f64 and) deinterleave into 2 x 4f64

    { 3, MVT::v2i8,  10 }, // (load 6i8 and) deinterleave into 3 x 2i8
    { 3, MVT::v4i8,   4 }, // (load 12i8 and) deinterleave into 3 x 4i8
    { 3, MVT::v8i8,   9 }, // (load 24i8 and) deinterleave into 3 x 8i8
    { 3, MVT::v16i8, 11 }, // (load 48i8 and) deinterleave into 3 x 16i8
    { 3, MVT::v32i8, 13 }, // (load 96i8 and) deinterleave into 3 x 32i8
    { 3, MVT::v8f32, 17 }, // (load 24f32 and) deinterleave into 3 x 8f32

    { 4, MVT::v2i8,  12 }, // (load 8i8 and) deinterleave into 4 x 2i8
    { 4, MVT::v4i8,   4 }, // (load 16i8 and) deinterleave into 4 x 4i8
    { 4, MVT::v8i8,  20 }, // (load 32i8 and) deinterleave into 4 x 8i8
    { 4, MVT::v16i8, 39 }, // (load 64i8 and) deinterleave into 4 x 16i8
    { 4, MVT::v32i8, 80 }, // (load 128i8 and) deinterleave into 4 x 32i8

    { 8, MVT::v8f32, 40 }  // (load 64f32 and) deinterleave into 8 x 8f32
  };

  static const CostTblEntry AVX2InterleavedStoreTbl[] = {
    { 2, MVT::v4i64,  6 }, // interleave 2 x 4i64 into 8i64 (and store)
    { 2, MVT::v4f64,  6 }, // interleave 2 x 4f64 into 8f64 (and store)

    { 3, MVT::v2i8,   7 }, // interleave 3 x 2i8 into 6i8 (and store)
    { 3, MVT::v4i8,   8 }, // interleave 3 x 4i8 into 12i8 (and store)
    { 3, MVT::v8i8,  11 }, // interleave 3 x 8i8 into 24i8 (and store)
    { 3, MVT::v16i8, 11 }, // interleave 3 x 16i8 into 48i8 (and store)
    { 3, MVT::v32i8, 13 }, // interleave 3 x 32i8 into 96i8 (and store)

    { 4, MVT::v2i8,  12 }, // interleave 4 x 2i8 into 8i8 (and store)
    { 4, MVT::v4i8,   9 }, // interleave 4 x 4i8 into 16i8 (and store)
    { 4, MVT::v8i8,  10 }, // interleave 4 x 8i8 into 32i8 (and store)
    { 4, MVT::v16i8, 10 }, // interleave 4 x 16i8 into 64i8 (and store)
    { 4, MVT::v32i8, 12 }  // interleave 4 x 32i8 into 128i8 (and store)
  };

  if (Opcode == Instruction::Load) {
    if (const auto *Entry =
            CostTableLookup(AVX2InterleavedLoadTbl, Factor, ETy.getSimpleVT()))
      return NumOfMemOps * MemOpCost + Entry->Cost;
  } else {
    assert(Opcode == Instruction::Store &&
           "Expected Store Instruction at this point");
    if (const auto *Entry =
            CostTableLookup(AVX2InterleavedStoreTbl, Factor,
                            ETy.getSimpleVT()))
      return NumOfMemOps * MemOpCost + Entry->Cost;
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace, CostKind);
}

// Get estimation for interleaved load/store operations and strided load.
// \p Indices contains indices for strided load.
// \p Factor - the factor of interleaving.
// AVX-512 provides 3-src shuffles that significantly reduce the cost.
int X86TTIImpl::getInterleavedMemoryOpCostAVX512(unsigned Opcode, Type *VecTy,
                                                 unsigned Factor,
                                                 ArrayRef<unsigned> Indices,
                                                 unsigned Alignment,
                                                 unsigned AddressSpace,
                                                 TTI::TargetCostKind CostKind,
                                                 bool UseMaskForCond,
                                                 bool UseMaskForGaps) {

  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace, CostKind,
                                             UseMaskForCond, UseMaskForGaps);

  // VecTy for interleave memop is <VF*Factor x Elt>.
  // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
  // VecTy = <12 x i32>.
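  // For illustration: with Factor = 3, VF = 16 and i8 elements
  // (VecTy = <48 x i8>), the whole 48-byte access fits in a single 64-byte
  // zmm memory op on AVX-512BW, so NumOfMemOps = 1 and the {3, v16i8, 12}
  // load-table entry below gives a total of 1 * MemOpCost + 12.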

  // Calculate the number of memory operations (NumOfMemOps) required
  // to load/store the VecTy.
  MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
  unsigned VecTySize = DL.getTypeStoreSize(VecTy);
  unsigned LegalVTSize = LegalVT.getStoreSize();
  unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;

  // Get the cost of one memory operation.
  auto *SingleMemOpTy =
      VectorType::get(cast<VectorType>(VecTy)->getElementType(),
                      LegalVT.getVectorNumElements());
  unsigned MemOpCost = getMemoryOpCost(Opcode, SingleMemOpTy,
                                       MaybeAlign(Alignment), AddressSpace,
                                       CostKind);

  unsigned VF = cast<VectorType>(VecTy)->getNumElements() / Factor;
  MVT VT = MVT::getVectorVT(MVT::getVT(VecTy->getScalarType()), VF);

  if (Opcode == Instruction::Load) {
    // The tables (AVX512InterleavedLoadTbl and AVX512InterleavedStoreTbl)
    // contain the cost of the optimized shuffle sequence that the
    // X86InterleavedAccess pass will generate.
    // The cost of loads and stores is computed separately from the table.

    // X86InterleavedAccess supports only the following interleaved-access
    // groups.
    static const CostTblEntry AVX512InterleavedLoadTbl[] = {
        {3, MVT::v16i8, 12}, // (load 48i8 and) deinterleave into 3 x 16i8
        {3, MVT::v32i8, 14}, // (load 96i8 and) deinterleave into 3 x 32i8
        {3, MVT::v64i8, 22}, // (load 192i8 and) deinterleave into 3 x 64i8
    };

    if (const auto *Entry =
            CostTableLookup(AVX512InterleavedLoadTbl, Factor, VT))
      return NumOfMemOps * MemOpCost + Entry->Cost;
    // If an entry does not exist, fall back to the default implementation.

    // The kind of shuffle depends on the number of loaded values.
    // If we load the entire data in one register, we can use a 1-src shuffle.
    // Otherwise, we'll merge 2 sources in each operation.
    TTI::ShuffleKind ShuffleKind =
        (NumOfMemOps > 1) ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc;

    unsigned ShuffleCost =
        getShuffleCost(ShuffleKind, SingleMemOpTy, 0, nullptr);

    unsigned NumOfLoadsInInterleaveGrp =
        Indices.size() ? Indices.size() : Factor;
    auto *ResultTy = FixedVectorType::get(
        cast<VectorType>(VecTy)->getElementType(),
        cast<VectorType>(VecTy)->getNumElements() / Factor);
    unsigned NumOfResults =
        getTLI()->getTypeLegalizationCost(DL, ResultTy).first *
        NumOfLoadsInInterleaveGrp;

    // About half of the loads may be folded into shuffles when we have only
    // one result. If we have more than one result, we do not fold loads at
    // all.
    unsigned NumOfUnfoldedLoads =
        NumOfResults > 1 ? NumOfMemOps : NumOfMemOps / 2;

    // Get the number of shuffle operations per result.
    unsigned NumOfShufflesPerResult =
        std::max((unsigned)1, (unsigned)(NumOfMemOps - 1));

    // The SK_PermuteTwoSrc shuffle clobbers one of its source operands.
    // When we have more than one destination, we need additional instructions
    // to keep sources.
    unsigned NumOfMoves = 0;
    if (NumOfResults > 1 && ShuffleKind == TTI::SK_PermuteTwoSrc)
      NumOfMoves = NumOfResults * NumOfShufflesPerResult / 2;

    int Cost = NumOfResults * NumOfShufflesPerResult * ShuffleCost +
               NumOfUnfoldedLoads * MemOpCost + NumOfMoves;

    return Cost;
  }

  // Store.
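  // For illustration: storing two interleaved <8 x i32> vectors (Factor = 2,
  // VecTy = <16 x i32>) misses the table below, so the generic formula
  // applies; assuming the store legalizes to a single memory op
  // (NumOfMemOps = 1), the estimate is 1 * (MemOpCost + 1 * ShuffleCost)
  // with no extra moves.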
  assert(Opcode == Instruction::Store &&
         "Expected Store Instruction at this point");
  // X86InterleavedAccess supports only the following interleaved-access
  // groups.
  static const CostTblEntry AVX512InterleavedStoreTbl[] = {
      {3, MVT::v16i8, 12}, // interleave 3 x 16i8 into 48i8 (and store)
      {3, MVT::v32i8, 14}, // interleave 3 x 32i8 into 96i8 (and store)
      {3, MVT::v64i8, 26}, // interleave 3 x 64i8 into 192i8 (and store)

      {4, MVT::v8i8,  10}, // interleave 4 x 8i8 into 32i8 (and store)
      {4, MVT::v16i8, 11}, // interleave 4 x 16i8 into 64i8 (and store)
      {4, MVT::v32i8, 14}, // interleave 4 x 32i8 into 128i8 (and store)
      {4, MVT::v64i8, 24}  // interleave 4 x 64i8 into 256i8 (and store)
  };

  if (const auto *Entry =
          CostTableLookup(AVX512InterleavedStoreTbl, Factor, VT))
    return NumOfMemOps * MemOpCost + Entry->Cost;
  // If an entry does not exist, fall back to the default implementation.

  // There are no strided stores at the moment, and a store can't be folded
  // into a shuffle.
  unsigned NumOfSources = Factor; // The number of values to be merged.
  unsigned ShuffleCost =
      getShuffleCost(TTI::SK_PermuteTwoSrc, SingleMemOpTy, 0, nullptr);
  unsigned NumOfShufflesPerStore = NumOfSources - 1;

  // The SK_PermuteTwoSrc shuffle clobbers one of its source operands.
  // We need additional instructions to keep sources.
  unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2;
  int Cost = NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) +
             NumOfMoves;
  return Cost;
}

int X86TTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                           unsigned Factor,
                                           ArrayRef<unsigned> Indices,
                                           unsigned Alignment,
                                           unsigned AddressSpace,
                                           TTI::TargetCostKind CostKind,
                                           bool UseMaskForCond,
                                           bool UseMaskForGaps) {
  auto isSupportedOnAVX512 = [](Type *VecTy, bool HasBW) {
    Type *EltTy = cast<VectorType>(VecTy)->getElementType();
    if (EltTy->isFloatTy() || EltTy->isDoubleTy() || EltTy->isIntegerTy(64) ||
        EltTy->isIntegerTy(32) || EltTy->isPointerTy())
      return true;
    if (EltTy->isIntegerTy(16) || EltTy->isIntegerTy(8))
      return HasBW;
    return false;
  };
  if (ST->hasAVX512() && isSupportedOnAVX512(VecTy, ST->hasBWI()))
    return getInterleavedMemoryOpCostAVX512(Opcode, VecTy, Factor, Indices,
                                            Alignment, AddressSpace, CostKind,
                                            UseMaskForCond, UseMaskForGaps);
  if (ST->hasAVX2())
    return getInterleavedMemoryOpCostAVX2(Opcode, VecTy, Factor, Indices,
                                          Alignment, AddressSpace, CostKind,
                                          UseMaskForCond, UseMaskForGaps);

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace, CostKind,
                                           UseMaskForCond, UseMaskForGaps);
}
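// Dispatch summary (illustrative): i8/i16 interleaved groups take the
// AVX-512 path only when BWI is available, while f32/f64/i32/i64/pointer
// elements need plain AVX-512; otherwise an AVX2 subtarget uses the lookup
// tables above, and everything else goes to the base implementation. These
// paths can be exercised through the cost-model analysis, e.g. with a
// typical cost-model test invocation such as:
//   opt -cost-model -analyze -mtriple=x86_64-- -mattr=+avx512bw,+avx512vl %s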