//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//
/// A note about the cost model numbers used below: the numbers correspond to
/// some "generic" X86 CPU rather than to a specific CPU model. Usually a
/// number corresponds to the CPU on which the feature first appeared. For
/// example, if we do Subtarget.hasSSE42() in the lookups below, the cost is
/// based on Nehalem as that was the first CPU to support that feature level
/// and thus most likely has the worst-case cost.
/// Some examples of other technologies/CPUs:
///   SSE 3   - Pentium4 / Athlon64
///   SSE 4.1 - Penryn
///   SSE 4.2 - Nehalem
///   AVX     - Sandy Bridge
///   AVX2    - Haswell
///   AVX-512 - Xeon Phi / Skylake
/// And some examples of instruction target dependent costs (latency):
///                   divss     sqrtss     rsqrtss
///   AMD K7          11-16     19         3
///   Piledriver      9-24      13-15      5
///   Jaguar          14        16         2
///   Pentium II,III  18        30         2
///   Nehalem         7-14      7-18       3
///   Haswell         10-13     11         5
///
/// TODO: Develop and implement the target dependent cost model and
/// specialize cost numbers for different Cost Model Targets such as throughput,
/// code size, latency and uop count.
//===----------------------------------------------------------------------===//

#include "X86TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"

using namespace llvm;

#define DEBUG_TYPE "x86tti"

//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  // instructions is inefficient. Once the problem is fixed, we should
  // call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
}

unsigned X86TTIImpl::getNumberOfRegisters(bool Vector) {
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit()) {
    if (Vector && ST->hasAVX512())
      return 32;
    return 16;
  }
  return 8;
}

unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) {
  if (Vector) {
    if (ST->hasAVX512()) return 512;
    if (ST->hasAVX()) return 256;
    if (ST->hasSSE1()) return 128;
    return 0;
  }

  if (ST->is64Bit())
    return 64;

  return 32;
}

unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // If the loop will not be vectorized, don't interleave the loop.
  // Let the regular unroller unroll the loop instead, which saves the
  // overflow check and memory check cost.
  if (VF == 1)
    return 1;

  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}

int X86TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  if (ISD == ISD::SDIV &&
      Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    // On X86, vector signed division by a power-of-two constant is normally
    // expanded to the sequence SRA + SRL + ADD + SRA.
    // The OperandValue properties may not be the same as those of the
    // previous operation; conservatively assume OP_None.
    int Cost = 2 * getArithmeticInstrCost(Instruction::AShr, Ty, Op1Info,
                                          Op2Info, TargetTransformInfo::OP_None,
                                          TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::LShr, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Add, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);

    return Cost;
  }

  static const CostTblEntry AVX512BWUniformConstCostTable[] = {
    { ISD::SDIV, MVT::v32i16,  6 }, // vpmulhw sequence
    { ISD::UDIV, MVT::v32i16,  6 }, // vpmulhuw sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasBWI()) {
    if (const auto *Entry = CostTableLookup(AVX512BWUniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512UniformConstCostTable[] = {
    { ISD::SDIV, MVT::v16i32, 15 }, // vpmuldq sequence
    { ISD::UDIV, MVT::v16i32, 15 }, // vpmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX512()) {
    if (const auto *Entry = CostTableLookup(AVX512UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX2UniformConstCostTable[] = {
    { ISD::SRA,  MVT::v4i64,   4 }, // 2 x psrad + shuffle.
169 170 { ISD::SDIV, MVT::v16i16, 6 }, // vpmulhw sequence 171 { ISD::UDIV, MVT::v16i16, 6 }, // vpmulhuw sequence 172 { ISD::SDIV, MVT::v8i32, 15 }, // vpmuldq sequence 173 { ISD::UDIV, MVT::v8i32, 15 }, // vpmuludq sequence 174 }; 175 176 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue && 177 ST->hasAVX2()) { 178 if (const auto *Entry = CostTableLookup(AVX2UniformConstCostTable, ISD, 179 LT.second)) 180 return LT.first * Entry->Cost; 181 } 182 183 static const CostTblEntry SSE2UniformConstCostTable[] = { 184 { ISD::SDIV, MVT::v16i16, 12 }, // pmulhw sequence 185 { ISD::SDIV, MVT::v8i16, 6 }, // pmulhw sequence 186 { ISD::UDIV, MVT::v16i16, 12 }, // pmulhuw sequence 187 { ISD::UDIV, MVT::v8i16, 6 }, // pmulhuw sequence 188 { ISD::SDIV, MVT::v8i32, 38 }, // pmuludq sequence 189 { ISD::SDIV, MVT::v4i32, 19 }, // pmuludq sequence 190 { ISD::UDIV, MVT::v8i32, 30 }, // pmuludq sequence 191 { ISD::UDIV, MVT::v4i32, 15 }, // pmuludq sequence 192 }; 193 194 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue && 195 ST->hasSSE2()) { 196 // pmuldq sequence. 197 if (ISD == ISD::SDIV && LT.second == MVT::v8i32 && ST->hasAVX()) 198 return LT.first * 30; 199 if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41()) 200 return LT.first * 15; 201 202 if (const auto *Entry = CostTableLookup(SSE2UniformConstCostTable, ISD, 203 LT.second)) 204 return LT.first * Entry->Cost; 205 } 206 207 static const CostTblEntry AVX512BWCostTable[] = { 208 // Vectorizing division is a bad idea. See the SSE2 table for more comments. 209 { ISD::SDIV, MVT::v64i8, 64*20 }, 210 { ISD::SDIV, MVT::v32i16, 32*20 }, 211 { ISD::SDIV, MVT::v16i32, 16*20 }, 212 { ISD::SDIV, MVT::v8i64, 8*20 }, 213 { ISD::UDIV, MVT::v64i8, 64*20 }, 214 { ISD::UDIV, MVT::v32i16, 32*20 }, 215 { ISD::UDIV, MVT::v16i32, 16*20 }, 216 { ISD::UDIV, MVT::v8i64, 8*20 }, 217 }; 218 219 // Look for AVX512BW lowering tricks for custom cases. 220 if (ST->hasBWI()) { 221 if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD, 222 LT.second)) 223 return LT.first * Entry->Cost; 224 } 225 226 static const CostTblEntry AVX512CostTable[] = { 227 { ISD::SHL, MVT::v16i32, 1 }, 228 { ISD::SRL, MVT::v16i32, 1 }, 229 { ISD::SRA, MVT::v16i32, 1 }, 230 { ISD::SHL, MVT::v8i64, 1 }, 231 { ISD::SRL, MVT::v8i64, 1 }, 232 { ISD::SRA, MVT::v8i64, 1 }, 233 }; 234 235 if (ST->hasAVX512()) { 236 if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second)) 237 return LT.first * Entry->Cost; 238 } 239 240 static const CostTblEntry AVX2CostTable[] = { 241 // Shifts on v4i64/v8i32 on AVX2 is legal even though we declare to 242 // customize them to detect the cases where shift amount is a scalar one. 243 { ISD::SHL, MVT::v4i32, 1 }, 244 { ISD::SRL, MVT::v4i32, 1 }, 245 { ISD::SRA, MVT::v4i32, 1 }, 246 { ISD::SHL, MVT::v8i32, 1 }, 247 { ISD::SRL, MVT::v8i32, 1 }, 248 { ISD::SRA, MVT::v8i32, 1 }, 249 { ISD::SHL, MVT::v2i64, 1 }, 250 { ISD::SRL, MVT::v2i64, 1 }, 251 { ISD::SHL, MVT::v4i64, 1 }, 252 { ISD::SRL, MVT::v4i64, 1 }, 253 }; 254 255 // Look for AVX2 lowering tricks. 256 if (ST->hasAVX2()) { 257 if (ISD == ISD::SHL && LT.second == MVT::v16i16 && 258 (Op2Info == TargetTransformInfo::OK_UniformConstantValue || 259 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue)) 260 // On AVX2, a packed v16i16 shift left by a constant build_vector 261 // is lowered into a vector multiply (vpmullw). 
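      // For example, a v16i16 shift left by the constant vector
      // <1, 1, ..., 1> can be costed as a single vpmullw by <2, 2, ..., 2>,
      // since a shift left by C is a multiply by 2^C in each lane.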
262 return LT.first; 263 264 if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second)) 265 return LT.first * Entry->Cost; 266 } 267 268 static const CostTblEntry XOPCostTable[] = { 269 // 128bit shifts take 1cy, but right shifts require negation beforehand. 270 { ISD::SHL, MVT::v16i8, 1 }, 271 { ISD::SRL, MVT::v16i8, 2 }, 272 { ISD::SRA, MVT::v16i8, 2 }, 273 { ISD::SHL, MVT::v8i16, 1 }, 274 { ISD::SRL, MVT::v8i16, 2 }, 275 { ISD::SRA, MVT::v8i16, 2 }, 276 { ISD::SHL, MVT::v4i32, 1 }, 277 { ISD::SRL, MVT::v4i32, 2 }, 278 { ISD::SRA, MVT::v4i32, 2 }, 279 { ISD::SHL, MVT::v2i64, 1 }, 280 { ISD::SRL, MVT::v2i64, 2 }, 281 { ISD::SRA, MVT::v2i64, 2 }, 282 // 256bit shifts require splitting if AVX2 didn't catch them above. 283 { ISD::SHL, MVT::v32i8, 2 }, 284 { ISD::SRL, MVT::v32i8, 4 }, 285 { ISD::SRA, MVT::v32i8, 4 }, 286 { ISD::SHL, MVT::v16i16, 2 }, 287 { ISD::SRL, MVT::v16i16, 4 }, 288 { ISD::SRA, MVT::v16i16, 4 }, 289 { ISD::SHL, MVT::v8i32, 2 }, 290 { ISD::SRL, MVT::v8i32, 4 }, 291 { ISD::SRA, MVT::v8i32, 4 }, 292 { ISD::SHL, MVT::v4i64, 2 }, 293 { ISD::SRL, MVT::v4i64, 4 }, 294 { ISD::SRA, MVT::v4i64, 4 }, 295 }; 296 297 // Look for XOP lowering tricks. 298 if (ST->hasXOP()) { 299 if (const auto *Entry = CostTableLookup(XOPCostTable, ISD, LT.second)) 300 return LT.first * Entry->Cost; 301 } 302 303 static const CostTblEntry AVX2CustomCostTable[] = { 304 { ISD::SHL, MVT::v32i8, 11 }, // vpblendvb sequence. 305 { ISD::SHL, MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence. 306 307 { ISD::SRL, MVT::v32i8, 11 }, // vpblendvb sequence. 308 { ISD::SRL, MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence. 309 310 { ISD::SRA, MVT::v32i8, 24 }, // vpblendvb sequence. 311 { ISD::SRA, MVT::v16i16, 10 }, // extend/vpsravd/pack sequence. 312 { ISD::SRA, MVT::v2i64, 4 }, // srl/xor/sub sequence. 313 { ISD::SRA, MVT::v4i64, 4 }, // srl/xor/sub sequence. 314 }; 315 316 // Look for AVX2 lowering tricks for custom cases. 317 if (ST->hasAVX2()) { 318 if (const auto *Entry = CostTableLookup(AVX2CustomCostTable, ISD, 319 LT.second)) 320 return LT.first * Entry->Cost; 321 } 322 323 static const CostTblEntry AVXCustomCostTable[] = { 324 // Vectorizing division is a bad idea. See the SSE2 table for more comments. 325 { ISD::SDIV, MVT::v32i8, 32*20 }, 326 { ISD::SDIV, MVT::v16i16, 16*20 }, 327 { ISD::SDIV, MVT::v8i32, 8*20 }, 328 { ISD::SDIV, MVT::v4i64, 4*20 }, 329 { ISD::UDIV, MVT::v32i8, 32*20 }, 330 { ISD::UDIV, MVT::v16i16, 16*20 }, 331 { ISD::UDIV, MVT::v8i32, 8*20 }, 332 { ISD::UDIV, MVT::v4i64, 4*20 }, 333 }; 334 335 // Look for AVX2 lowering tricks for custom cases. 336 if (ST->hasAVX()) { 337 if (const auto *Entry = CostTableLookup(AVXCustomCostTable, ISD, 338 LT.second)) 339 return LT.first * Entry->Cost; 340 } 341 342 static const CostTblEntry 343 SSE2UniformCostTable[] = { 344 // Uniform splats are cheaper for the following instructions. 345 { ISD::SHL, MVT::v16i8, 1 }, // psllw. 346 { ISD::SHL, MVT::v32i8, 2 }, // psllw. 347 { ISD::SHL, MVT::v8i16, 1 }, // psllw. 348 { ISD::SHL, MVT::v16i16, 2 }, // psllw. 349 { ISD::SHL, MVT::v4i32, 1 }, // pslld 350 { ISD::SHL, MVT::v8i32, 2 }, // pslld 351 { ISD::SHL, MVT::v2i64, 1 }, // psllq. 352 { ISD::SHL, MVT::v4i64, 2 }, // psllq. 353 354 { ISD::SRL, MVT::v16i8, 1 }, // psrlw. 355 { ISD::SRL, MVT::v32i8, 2 }, // psrlw. 356 { ISD::SRL, MVT::v8i16, 1 }, // psrlw. 357 { ISD::SRL, MVT::v16i16, 2 }, // psrlw. 358 { ISD::SRL, MVT::v4i32, 1 }, // psrld. 359 { ISD::SRL, MVT::v8i32, 2 }, // psrld. 360 { ISD::SRL, MVT::v2i64, 1 }, // psrlq. 
    { ISD::SRL,  MVT::v4i64,   2 }, // psrlq.

    { ISD::SRA,  MVT::v16i8,   4 }, // psrlw, pand, pxor, psubb.
    { ISD::SRA,  MVT::v32i8,   8 }, // psrlw, pand, pxor, psubb.
    { ISD::SRA,  MVT::v8i16,   1 }, // psraw.
    { ISD::SRA,  MVT::v16i16,  2 }, // psraw.
    { ISD::SRA,  MVT::v4i32,   1 }, // psrad.
    { ISD::SRA,  MVT::v8i32,   2 }, // psrad.
    { ISD::SRA,  MVT::v2i64,   4 }, // 2 x psrad + shuffle.
    { ISD::SRA,  MVT::v4i64,   8 }, // 2 x psrad + shuffle.
  };

  if (ST->hasSSE2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(SSE2UniformCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  if (ISD == ISD::SHL &&
      Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
    MVT VT = LT.second;
    // A vector shift left by a non-uniform constant can be lowered into a
    // vector multiply (pmullw/pmulld).
    if ((VT == MVT::v8i16 && ST->hasSSE2()) ||
        (VT == MVT::v4i32 && ST->hasSSE41()))
      return LT.first;

    // v16i16 and v8i32 shifts by non-uniform constants are lowered into a
    // sequence of extract + two vector multiply + insert.
    if ((VT == MVT::v8i32 || VT == MVT::v16i16) &&
        (ST->hasAVX() && !ST->hasAVX2()))
      ISD = ISD::MUL;

    // A vector shift left by a non-uniform constant is converted into a
    // vector multiply; the new multiply is eventually lowered into a
    // sequence of shuffles and 2 x pmuludq.
    if (VT == MVT::v4i32 && ST->hasSSE2())
      ISD = ISD::MUL;
  }

  static const CostTblEntry SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    { ISD::SHL,  MVT::v16i8,    26 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v32i8,  2*26 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v8i16,    32 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v16i16, 2*32 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v4i32,   2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v8i32, 2*2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v2i64,     4 }, // splat+shuffle sequence.
    { ISD::SHL,  MVT::v4i64,   2*4 }, // splat+shuffle sequence.

    { ISD::SRL,  MVT::v16i8,    26 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v32i8,  2*26 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v8i16,    32 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v16i16, 2*32 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v4i32,    16 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v8i32,  2*16 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v2i64,     4 }, // splat+shuffle sequence.
    { ISD::SRL,  MVT::v4i64,   2*4 }, // splat+shuffle sequence.

    { ISD::SRA,  MVT::v16i8,    54 }, // unpacked cmpgtb sequence.
    { ISD::SRA,  MVT::v32i8,  2*54 }, // unpacked cmpgtb sequence.
    { ISD::SRA,  MVT::v8i16,    32 }, // cmpgtb sequence.
    { ISD::SRA,  MVT::v16i16, 2*32 }, // cmpgtb sequence.
    { ISD::SRA,  MVT::v4i32,    16 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v8i32,  2*16 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v2i64,    12 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,  2*12 }, // srl/xor/sub sequence.

    // It is not a good idea to vectorize division. We have to scalarize it
    // and in the process we will often end up spilling regular registers.
    // The overhead of division is going to dominate most kernels anyway, so
    // try hard to prevent vectorization of division - it is generally a bad
    // idea. Assume somewhat arbitrarily that we have to be able to hide
    // "20 cycles" for each lane.
    { ISD::SDIV,  MVT::v16i8,  16*20 },
    { ISD::SDIV,  MVT::v8i16,   8*20 },
    { ISD::SDIV,  MVT::v4i32,   4*20 },
    { ISD::SDIV,  MVT::v2i64,   2*20 },
    { ISD::UDIV,  MVT::v16i8,  16*20 },
    { ISD::UDIV,  MVT::v8i16,   8*20 },
    { ISD::UDIV,  MVT::v4i32,   4*20 },
    { ISD::UDIV,  MVT::v2i64,   2*20 },
  };

  if (ST->hasSSE2()) {
    if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL,     MVT::v16i16,   4 },
    { ISD::MUL,     MVT::v8i32,    4 },
    { ISD::SUB,     MVT::v8i32,    4 },
    { ISD::ADD,     MVT::v8i32,    4 },
    { ISD::SUB,     MVT::v4i64,    4 },
    { ISD::ADD,     MVT::v4i64,    4 },
    // A v4i64 multiply is custom lowered as two split v2i64 vectors that then
    // are lowered as a series of long multiplies(3), shifts(4) and adds(2).
    // Because we believe v4i64 to be a legal type, we must also include the
    // split factor of two in the cost table. Therefore, the cost here is 18
    // instead of 9.
    { ISD::MUL,     MVT::v4i64,   18 },
  };

  // Look for AVX1 lowering tricks.
  if (ST->hasAVX() && !ST->hasAVX2()) {
    MVT VT = LT.second;

    if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, VT))
      return LT.first * Entry->Cost;
  }

  // Custom lowering of vectors.
  static const CostTblEntry CustomLowered[] = {
    // A v2i64/v4i64 multiply is custom lowered as a series of long
    // multiplies(3), shifts(4) and adds(2).
    { ISD::MUL,     MVT::v2i64,    9 },
    { ISD::MUL,     MVT::v4i64,    9 },
  };
  if (const auto *Entry = CostTableLookup(CustomLowered, ISD, LT.second))
    return LT.first * Entry->Cost;

  // Special lowering of v4i32 mul on sse2, sse3: Lower v4i32 mul as 2x shuffle,
  // 2x pmuludq, 2x shuffle.
  if (ISD == ISD::MUL && LT.second == MVT::v4i32 && ST->hasSSE2() &&
      !ST->hasSSE41())
    return LT.first * 6;

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info);
}

int X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // We only estimate the cost of reverse and alternate shuffles.
  if (Kind != TTI::SK_Reverse && Kind != TTI::SK_Alternate)
    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);

  if (Kind == TTI::SK_Reverse) {
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
    int Cost = 1;
    if (LT.second.getSizeInBits() > 128)
      Cost = 3; // Extract + insert + copy.

    // Multiply by the number of parts.
    return Cost * LT.first;
  }

  if (Kind == TTI::SK_Alternate) {
    // 64-bit packed float vectors (v2f32) are widened to type v4f32.
    // 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

    // The backend knows how to generate a single VEX.256 version of the
    // VPBLENDW instruction if the target supports AVX2.
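    // An alternate shuffle selects elements from its two inputs in an
    // alternating pattern, e.g. mask <0, 9, 2, 11, 4, 13, 6, 15> for two
    // v8i16 inputs, which maps directly onto a blend with an immediate mask.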
523 if (ST->hasAVX2() && LT.second == MVT::v16i16) 524 return LT.first; 525 526 static const CostTblEntry AVXAltShuffleTbl[] = { 527 {ISD::VECTOR_SHUFFLE, MVT::v4i64, 1}, // vblendpd 528 {ISD::VECTOR_SHUFFLE, MVT::v4f64, 1}, // vblendpd 529 530 {ISD::VECTOR_SHUFFLE, MVT::v8i32, 1}, // vblendps 531 {ISD::VECTOR_SHUFFLE, MVT::v8f32, 1}, // vblendps 532 533 // This shuffle is custom lowered into a sequence of: 534 // 2x vextractf128 , 2x vpblendw , 1x vinsertf128 535 {ISD::VECTOR_SHUFFLE, MVT::v16i16, 5}, 536 537 // This shuffle is custom lowered into a long sequence of: 538 // 2x vextractf128 , 4x vpshufb , 2x vpor , 1x vinsertf128 539 {ISD::VECTOR_SHUFFLE, MVT::v32i8, 9} 540 }; 541 542 if (ST->hasAVX()) 543 if (const auto *Entry = CostTableLookup(AVXAltShuffleTbl, 544 ISD::VECTOR_SHUFFLE, LT.second)) 545 return LT.first * Entry->Cost; 546 547 static const CostTblEntry SSE41AltShuffleTbl[] = { 548 // These are lowered into movsd. 549 {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1}, 550 {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1}, 551 552 // packed float vectors with four elements are lowered into BLENDI dag 553 // nodes. A v4i32/v4f32 BLENDI generates a single 'blendps'/'blendpd'. 554 {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1}, 555 {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1}, 556 557 // This shuffle generates a single pshufw. 558 {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1}, 559 560 // There is no instruction that matches a v16i8 alternate shuffle. 561 // The backend will expand it into the sequence 'pshufb + pshufb + or'. 562 {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3} 563 }; 564 565 if (ST->hasSSE41()) 566 if (const auto *Entry = CostTableLookup(SSE41AltShuffleTbl, ISD::VECTOR_SHUFFLE, 567 LT.second)) 568 return LT.first * Entry->Cost; 569 570 static const CostTblEntry SSSE3AltShuffleTbl[] = { 571 {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1}, // movsd 572 {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1}, // movsd 573 574 // SSE3 doesn't have 'blendps'. The following shuffles are expanded into 575 // the sequence 'shufps + pshufd' 576 {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2}, 577 {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2}, 578 579 {ISD::VECTOR_SHUFFLE, MVT::v8i16, 3}, // pshufb + pshufb + or 580 {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3} // pshufb + pshufb + or 581 }; 582 583 if (ST->hasSSSE3()) 584 if (const auto *Entry = CostTableLookup(SSSE3AltShuffleTbl, 585 ISD::VECTOR_SHUFFLE, LT.second)) 586 return LT.first * Entry->Cost; 587 588 static const CostTblEntry SSEAltShuffleTbl[] = { 589 {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1}, // movsd 590 {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1}, // movsd 591 592 {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2}, // shufps + pshufd 593 {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2}, // shufps + pshufd 594 595 // This is expanded into a long sequence of four extract + four insert. 596 {ISD::VECTOR_SHUFFLE, MVT::v8i16, 8}, // 4 x pextrw + 4 pinsrw. 597 598 // 8 x (pinsrw + pextrw + and + movb + movzb + or) 599 {ISD::VECTOR_SHUFFLE, MVT::v16i8, 48} 600 }; 601 602 // Fall-back (SSE3 and SSE2). 
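    // (This table is reached when none of the SSSE3/SSE4.1/AVX tables above
    // matched, e.g. on SSE2- or SSE3-only targets.)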
603 if (const auto *Entry = CostTableLookup(SSEAltShuffleTbl, 604 ISD::VECTOR_SHUFFLE, LT.second)) 605 return LT.first * Entry->Cost; 606 return BaseT::getShuffleCost(Kind, Tp, Index, SubTp); 607 } 608 609 return BaseT::getShuffleCost(Kind, Tp, Index, SubTp); 610 } 611 612 int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) { 613 int ISD = TLI->InstructionOpcodeToISD(Opcode); 614 assert(ISD && "Invalid opcode"); 615 616 // FIXME: Need a better design of the cost table to handle non-simple types of 617 // potential massive combinations (elem_num x src_type x dst_type). 618 619 static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = { 620 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 }, 621 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 }, 622 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 }, 623 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 }, 624 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 }, 625 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 }, 626 627 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 1 }, 628 { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f32, 1 }, 629 { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f32, 1 }, 630 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 }, 631 { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f64, 1 }, 632 { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f64, 1 }, 633 }; 634 635 // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and 636 // 256-bit wide vectors. 637 638 static const TypeConversionCostTblEntry AVX512FConversionTbl[] = { 639 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 1 }, 640 { ISD::FP_EXTEND, MVT::v8f64, MVT::v16f32, 3 }, 641 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 1 }, 642 643 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 1 }, 644 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 1 }, 645 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i64, 1 }, 646 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 1 }, 647 648 // v16i1 -> v16i32 - load + broadcast 649 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1, 2 }, 650 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1, 2 }, 651 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 1 }, 652 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 1 }, 653 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 }, 654 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 }, 655 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 1 }, 656 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 1 }, 657 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i32, 1 }, 658 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i32, 1 }, 659 660 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 }, 661 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 }, 662 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i8, 2 }, 663 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 2 }, 664 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 }, 665 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 }, 666 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 }, 667 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 }, 668 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 26 }, 669 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 26 }, 670 671 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 }, 672 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 }, 673 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 2 }, 674 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 }, 675 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 2 }, 676 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i8, 2 }, 677 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 2 }, 678 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 5 }, 679 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 }, 680 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 }, 681 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 }, 682 { 
ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 }, 683 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 2 }, 684 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 1 }, 685 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 }, 686 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 }, 687 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 }, 688 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 }, 689 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 }, 690 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 5 }, 691 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 }, 692 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 12 }, 693 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 26 }, 694 695 { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 }, 696 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 }, 697 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 1 }, 698 { ISD::FP_TO_UINT, MVT::v16i32, MVT::v16f32, 1 }, 699 }; 700 701 static const TypeConversionCostTblEntry AVX2ConversionTbl[] = { 702 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 3 }, 703 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 3 }, 704 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 3 }, 705 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 3 }, 706 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 3 }, 707 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 3 }, 708 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 }, 709 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 }, 710 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 }, 711 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 }, 712 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, 713 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, 714 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 }, 715 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 }, 716 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 1 }, 717 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1 }, 718 719 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 2 }, 720 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 2 }, 721 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 2 }, 722 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 2 }, 723 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 2 }, 724 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 4 }, 725 726 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 3 }, 727 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 3 }, 728 729 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 8 }, 730 }; 731 732 static const TypeConversionCostTblEntry AVXConversionTbl[] = { 733 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 6 }, 734 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 4 }, 735 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 7 }, 736 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 4 }, 737 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 6 }, 738 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 4 }, 739 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 7 }, 740 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 4 }, 741 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 }, 742 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 4 }, 743 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 6 }, 744 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, 745 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 }, 746 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 4 }, 747 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 4 }, 748 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 4 }, 749 750 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 4 }, 751 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 }, 752 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 }, 753 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 4 }, 754 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 4 }, 755 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 4 }, 756 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 9 }, 757 758 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 }, 759 
{ ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i1, 3 }, 760 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i1, 8 }, 761 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 }, 762 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i8, 3 }, 763 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 8 }, 764 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 3 }, 765 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i16, 3 }, 766 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 }, 767 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 }, 768 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 }, 769 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 }, 770 771 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 7 }, 772 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i1, 7 }, 773 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i1, 6 }, 774 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 2 }, 775 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 }, 776 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 5 }, 777 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 }, 778 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 }, 779 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 }, 780 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 6 }, 781 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 6 }, 782 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 6 }, 783 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 9 }, 784 // The generic code to compute the scalar overhead is currently broken. 785 // Workaround this limitation by estimating the scalarization overhead 786 // here. We have roughly 10 instructions per scalar element. 787 // Multiply that by the vector width. 788 // FIXME: remove that when PR19268 is fixed. 789 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 10 }, 790 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 20 }, 791 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 13 }, 792 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 13 }, 793 794 { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 1 }, 795 { ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f32, 7 }, 796 // This node is expanded into scalarized operations but BasicTTI is overly 797 // optimistic estimating its cost. It computes 3 per element (one 798 // vector-extract, one scalar conversion and one vector-insert). The 799 // problem is that the inserts form a read-modify-write chain so latency 800 // should be factored in too. Inflating the cost per element by 1. 
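    // That is, (3 + 1) = 4 per element: 8 * 4 = 32 for v8i32 and 4 * 4 = 16
    // for v4i32 in the two entries below.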
801 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 8*4 }, 802 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 4*4 }, 803 804 { ISD::FP_EXTEND, MVT::v4f64, MVT::v4f32, 1 }, 805 { ISD::FP_ROUND, MVT::v4f32, MVT::v4f64, 1 }, 806 }; 807 808 static const TypeConversionCostTblEntry SSE41ConversionTbl[] = { 809 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 2 }, 810 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 2 }, 811 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 2 }, 812 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 2 }, 813 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 2 }, 814 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 2 }, 815 816 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i8, 1 }, 817 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i8, 2 }, 818 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 1 }, 819 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 1 }, 820 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 }, 821 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 }, 822 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 2 }, 823 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 2 }, 824 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 }, 825 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 }, 826 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 4 }, 827 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 4 }, 828 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 }, 829 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 }, 830 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 2 }, 831 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 2 }, 832 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 4 }, 833 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 4 }, 834 835 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 2 }, 836 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 1 }, 837 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 1 }, 838 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 1 }, 839 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 3 }, 840 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 3 }, 841 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 6 }, 842 843 }; 844 845 static const TypeConversionCostTblEntry SSE2ConversionTbl[] = { 846 // These are somewhat magic numbers justified by looking at the output of 847 // Intel's IACA, running some kernels and making sure when we take 848 // legalization into account the throughput will be overestimated. 
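    // Entries of the form N*10 below model full scalarization of an
    // N-element conversion at roughly 10 cycles per element.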
849 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 }, 850 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 }, 851 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 }, 852 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 }, 853 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 }, 854 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 }, 855 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 }, 856 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 }, 857 858 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 }, 859 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 }, 860 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 }, 861 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 }, 862 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 }, 863 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 8 }, 864 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 }, 865 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 }, 866 867 { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 3 }, 868 869 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i8, 1 }, 870 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i8, 6 }, 871 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 }, 872 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 3 }, 873 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 4 }, 874 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 8 }, 875 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 }, 876 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 2 }, 877 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 6 }, 878 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 6 }, 879 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 3 }, 880 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 }, 881 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 9 }, 882 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 12 }, 883 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 }, 884 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 2 }, 885 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, 886 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 10 }, 887 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 3 }, 888 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 }, 889 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 6 }, 890 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 8 }, 891 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 3 }, 892 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 5 }, 893 894 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 4 }, 895 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 2 }, 896 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 }, 897 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 3 }, 898 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 3 }, 899 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 }, 900 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 7 }, 901 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 }, 902 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 10 }, 903 }; 904 905 std::pair<int, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src); 906 std::pair<int, MVT> LTDest = TLI->getTypeLegalizationCost(DL, Dst); 907 908 if (ST->hasSSE2() && !ST->hasAVX()) { 909 if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD, 910 LTDest.second, LTSrc.second)) 911 return LTSrc.first * Entry->Cost; 912 } 913 914 EVT SrcTy = TLI->getValueType(DL, Src); 915 EVT DstTy = TLI->getValueType(DL, Dst); 916 917 // The function getSimpleVT only handles simple value types. 
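  // If either type has no simple MVT equivalent, defer to the generic
  // implementation below rather than consulting the tables.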
918 if (!SrcTy.isSimple() || !DstTy.isSimple()) 919 return BaseT::getCastInstrCost(Opcode, Dst, Src); 920 921 if (ST->hasDQI()) 922 if (const auto *Entry = ConvertCostTableLookup(AVX512DQConversionTbl, ISD, 923 DstTy.getSimpleVT(), 924 SrcTy.getSimpleVT())) 925 return Entry->Cost; 926 927 if (ST->hasAVX512()) 928 if (const auto *Entry = ConvertCostTableLookup(AVX512FConversionTbl, ISD, 929 DstTy.getSimpleVT(), 930 SrcTy.getSimpleVT())) 931 return Entry->Cost; 932 933 if (ST->hasAVX2()) { 934 if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD, 935 DstTy.getSimpleVT(), 936 SrcTy.getSimpleVT())) 937 return Entry->Cost; 938 } 939 940 if (ST->hasAVX()) { 941 if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD, 942 DstTy.getSimpleVT(), 943 SrcTy.getSimpleVT())) 944 return Entry->Cost; 945 } 946 947 if (ST->hasSSE41()) { 948 if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD, 949 DstTy.getSimpleVT(), 950 SrcTy.getSimpleVT())) 951 return Entry->Cost; 952 } 953 954 if (ST->hasSSE2()) { 955 if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD, 956 DstTy.getSimpleVT(), 957 SrcTy.getSimpleVT())) 958 return Entry->Cost; 959 } 960 961 return BaseT::getCastInstrCost(Opcode, Dst, Src); 962 } 963 964 int X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) { 965 // Legalize the type. 966 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy); 967 968 MVT MTy = LT.second; 969 970 int ISD = TLI->InstructionOpcodeToISD(Opcode); 971 assert(ISD && "Invalid opcode"); 972 973 static const CostTblEntry SSE2CostTbl[] = { 974 { ISD::SETCC, MVT::v2i64, 8 }, 975 { ISD::SETCC, MVT::v4i32, 1 }, 976 { ISD::SETCC, MVT::v8i16, 1 }, 977 { ISD::SETCC, MVT::v16i8, 1 }, 978 }; 979 980 static const CostTblEntry SSE42CostTbl[] = { 981 { ISD::SETCC, MVT::v2f64, 1 }, 982 { ISD::SETCC, MVT::v4f32, 1 }, 983 { ISD::SETCC, MVT::v2i64, 1 }, 984 }; 985 986 static const CostTblEntry AVX1CostTbl[] = { 987 { ISD::SETCC, MVT::v4f64, 1 }, 988 { ISD::SETCC, MVT::v8f32, 1 }, 989 // AVX1 does not support 8-wide integer compare. 
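    // A 256-bit integer compare is therefore split into two 128-bit compares
    // plus an extract and an insert, giving the cost of 4 used below.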
990 { ISD::SETCC, MVT::v4i64, 4 }, 991 { ISD::SETCC, MVT::v8i32, 4 }, 992 { ISD::SETCC, MVT::v16i16, 4 }, 993 { ISD::SETCC, MVT::v32i8, 4 }, 994 }; 995 996 static const CostTblEntry AVX2CostTbl[] = { 997 { ISD::SETCC, MVT::v4i64, 1 }, 998 { ISD::SETCC, MVT::v8i32, 1 }, 999 { ISD::SETCC, MVT::v16i16, 1 }, 1000 { ISD::SETCC, MVT::v32i8, 1 }, 1001 }; 1002 1003 static const CostTblEntry AVX512CostTbl[] = { 1004 { ISD::SETCC, MVT::v8i64, 1 }, 1005 { ISD::SETCC, MVT::v16i32, 1 }, 1006 { ISD::SETCC, MVT::v8f64, 1 }, 1007 { ISD::SETCC, MVT::v16f32, 1 }, 1008 }; 1009 1010 if (ST->hasAVX512()) 1011 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy)) 1012 return LT.first * Entry->Cost; 1013 1014 if (ST->hasAVX2()) 1015 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy)) 1016 return LT.first * Entry->Cost; 1017 1018 if (ST->hasAVX()) 1019 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy)) 1020 return LT.first * Entry->Cost; 1021 1022 if (ST->hasSSE42()) 1023 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy)) 1024 return LT.first * Entry->Cost; 1025 1026 if (ST->hasSSE2()) 1027 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy)) 1028 return LT.first * Entry->Cost; 1029 1030 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy); 1031 } 1032 1033 int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy, 1034 ArrayRef<Type *> Tys, FastMathFlags FMF) { 1035 // Costs should match the codegen from: 1036 // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll 1037 // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll 1038 // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll 1039 // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll 1040 // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll 1041 static const CostTblEntry XOPCostTbl[] = { 1042 { ISD::BITREVERSE, MVT::v4i64, 4 }, 1043 { ISD::BITREVERSE, MVT::v8i32, 4 }, 1044 { ISD::BITREVERSE, MVT::v16i16, 4 }, 1045 { ISD::BITREVERSE, MVT::v32i8, 4 }, 1046 { ISD::BITREVERSE, MVT::v2i64, 1 }, 1047 { ISD::BITREVERSE, MVT::v4i32, 1 }, 1048 { ISD::BITREVERSE, MVT::v8i16, 1 }, 1049 { ISD::BITREVERSE, MVT::v16i8, 1 }, 1050 { ISD::BITREVERSE, MVT::i64, 3 }, 1051 { ISD::BITREVERSE, MVT::i32, 3 }, 1052 { ISD::BITREVERSE, MVT::i16, 3 }, 1053 { ISD::BITREVERSE, MVT::i8, 3 } 1054 }; 1055 static const CostTblEntry AVX2CostTbl[] = { 1056 { ISD::BITREVERSE, MVT::v4i64, 5 }, 1057 { ISD::BITREVERSE, MVT::v8i32, 5 }, 1058 { ISD::BITREVERSE, MVT::v16i16, 5 }, 1059 { ISD::BITREVERSE, MVT::v32i8, 5 }, 1060 { ISD::BSWAP, MVT::v4i64, 1 }, 1061 { ISD::BSWAP, MVT::v8i32, 1 }, 1062 { ISD::BSWAP, MVT::v16i16, 1 }, 1063 { ISD::CTLZ, MVT::v4i64, 23 }, 1064 { ISD::CTLZ, MVT::v8i32, 18 }, 1065 { ISD::CTLZ, MVT::v16i16, 14 }, 1066 { ISD::CTLZ, MVT::v32i8, 9 }, 1067 { ISD::CTPOP, MVT::v4i64, 7 }, 1068 { ISD::CTPOP, MVT::v8i32, 11 }, 1069 { ISD::CTPOP, MVT::v16i16, 9 }, 1070 { ISD::CTPOP, MVT::v32i8, 6 }, 1071 { ISD::CTTZ, MVT::v4i64, 10 }, 1072 { ISD::CTTZ, MVT::v8i32, 14 }, 1073 { ISD::CTTZ, MVT::v16i16, 12 }, 1074 { ISD::CTTZ, MVT::v32i8, 9 } 1075 }; 1076 static const CostTblEntry AVX1CostTbl[] = { 1077 { ISD::BITREVERSE, MVT::v4i64, 10 }, 1078 { ISD::BITREVERSE, MVT::v8i32, 10 }, 1079 { ISD::BITREVERSE, MVT::v16i16, 10 }, 1080 { ISD::BITREVERSE, MVT::v32i8, 10 }, 1081 { ISD::BSWAP, MVT::v4i64, 4 }, 1082 { ISD::BSWAP, MVT::v8i32, 4 }, 1083 { ISD::BSWAP, MVT::v16i16, 4 }, 1084 { ISD::CTLZ, MVT::v4i64, 46 }, 1085 { ISD::CTLZ, MVT::v8i32, 36 }, 1086 { ISD::CTLZ, MVT::v16i16, 28 }, 1087 { ISD::CTLZ, MVT::v32i8, 18 }, 1088 { 
ISD::CTPOP, MVT::v4i64, 14 }, 1089 { ISD::CTPOP, MVT::v8i32, 22 }, 1090 { ISD::CTPOP, MVT::v16i16, 18 }, 1091 { ISD::CTPOP, MVT::v32i8, 12 }, 1092 { ISD::CTTZ, MVT::v4i64, 20 }, 1093 { ISD::CTTZ, MVT::v8i32, 28 }, 1094 { ISD::CTTZ, MVT::v16i16, 24 }, 1095 { ISD::CTTZ, MVT::v32i8, 18 }, 1096 }; 1097 static const CostTblEntry SSSE3CostTbl[] = { 1098 { ISD::BITREVERSE, MVT::v2i64, 5 }, 1099 { ISD::BITREVERSE, MVT::v4i32, 5 }, 1100 { ISD::BITREVERSE, MVT::v8i16, 5 }, 1101 { ISD::BITREVERSE, MVT::v16i8, 5 }, 1102 { ISD::BSWAP, MVT::v2i64, 1 }, 1103 { ISD::BSWAP, MVT::v4i32, 1 }, 1104 { ISD::BSWAP, MVT::v8i16, 1 }, 1105 { ISD::CTLZ, MVT::v2i64, 23 }, 1106 { ISD::CTLZ, MVT::v4i32, 18 }, 1107 { ISD::CTLZ, MVT::v8i16, 14 }, 1108 { ISD::CTLZ, MVT::v16i8, 9 }, 1109 { ISD::CTPOP, MVT::v2i64, 7 }, 1110 { ISD::CTPOP, MVT::v4i32, 11 }, 1111 { ISD::CTPOP, MVT::v8i16, 9 }, 1112 { ISD::CTPOP, MVT::v16i8, 6 }, 1113 { ISD::CTTZ, MVT::v2i64, 10 }, 1114 { ISD::CTTZ, MVT::v4i32, 14 }, 1115 { ISD::CTTZ, MVT::v8i16, 12 }, 1116 { ISD::CTTZ, MVT::v16i8, 9 } 1117 }; 1118 static const CostTblEntry SSE2CostTbl[] = { 1119 { ISD::BSWAP, MVT::v2i64, 7 }, 1120 { ISD::BSWAP, MVT::v4i32, 7 }, 1121 { ISD::BSWAP, MVT::v8i16, 7 }, 1122 /* ISD::CTLZ - currently scalarized pre-SSSE3 */ 1123 { ISD::CTPOP, MVT::v2i64, 12 }, 1124 { ISD::CTPOP, MVT::v4i32, 15 }, 1125 { ISD::CTPOP, MVT::v8i16, 13 }, 1126 { ISD::CTPOP, MVT::v16i8, 10 }, 1127 { ISD::CTTZ, MVT::v2i64, 14 }, 1128 { ISD::CTTZ, MVT::v4i32, 18 }, 1129 { ISD::CTTZ, MVT::v8i16, 16 }, 1130 { ISD::CTTZ, MVT::v16i8, 13 } 1131 }; 1132 1133 unsigned ISD = ISD::DELETED_NODE; 1134 switch (IID) { 1135 default: 1136 break; 1137 case Intrinsic::bitreverse: 1138 ISD = ISD::BITREVERSE; 1139 break; 1140 case Intrinsic::bswap: 1141 ISD = ISD::BSWAP; 1142 break; 1143 case Intrinsic::ctlz: 1144 ISD = ISD::CTLZ; 1145 break; 1146 case Intrinsic::ctpop: 1147 ISD = ISD::CTPOP; 1148 break; 1149 case Intrinsic::cttz: 1150 ISD = ISD::CTTZ; 1151 break; 1152 } 1153 1154 // Legalize the type. 1155 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy); 1156 MVT MTy = LT.second; 1157 1158 // Attempt to lookup cost. 1159 if (ST->hasXOP()) 1160 if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy)) 1161 return LT.first * Entry->Cost; 1162 1163 if (ST->hasAVX2()) 1164 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy)) 1165 return LT.first * Entry->Cost; 1166 1167 if (ST->hasAVX()) 1168 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy)) 1169 return LT.first * Entry->Cost; 1170 1171 if (ST->hasSSSE3()) 1172 if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy)) 1173 return LT.first * Entry->Cost; 1174 1175 if (ST->hasSSE2()) 1176 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy)) 1177 return LT.first * Entry->Cost; 1178 1179 return BaseT::getIntrinsicInstrCost(IID, RetTy, Tys, FMF); 1180 } 1181 1182 int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy, 1183 ArrayRef<Value *> Args, FastMathFlags FMF) { 1184 return BaseT::getIntrinsicInstrCost(IID, RetTy, Args, FMF); 1185 } 1186 1187 int X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) { 1188 assert(Val->isVectorTy() && "This must be a vector type"); 1189 1190 Type *ScalarType = Val->getScalarType(); 1191 1192 if (Index != -1U) { 1193 // Legalize the type. 1194 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val); 1195 1196 // This type is legalized to a scalar type. 
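    // (e.g. a one-element vector); no vector extract or insert instruction
    // is needed in that case, so the cost is 0.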
1197 if (!LT.second.isVector()) 1198 return 0; 1199 1200 // The type may be split. Normalize the index to the new type. 1201 unsigned Width = LT.second.getVectorNumElements(); 1202 Index = Index % Width; 1203 1204 // Floating point scalars are already located in index #0. 1205 if (ScalarType->isFloatingPointTy() && Index == 0) 1206 return 0; 1207 } 1208 1209 // Add to the base cost if we know that the extracted element of a vector is 1210 // destined to be moved to and used in the integer register file. 1211 int RegisterFileMoveCost = 0; 1212 if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy()) 1213 RegisterFileMoveCost = 1; 1214 1215 return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost; 1216 } 1217 1218 int X86TTIImpl::getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) { 1219 assert (Ty->isVectorTy() && "Can only scalarize vectors"); 1220 int Cost = 0; 1221 1222 for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) { 1223 if (Insert) 1224 Cost += getVectorInstrCost(Instruction::InsertElement, Ty, i); 1225 if (Extract) 1226 Cost += getVectorInstrCost(Instruction::ExtractElement, Ty, i); 1227 } 1228 1229 return Cost; 1230 } 1231 1232 int X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment, 1233 unsigned AddressSpace) { 1234 // Handle non-power-of-two vectors such as <3 x float> 1235 if (VectorType *VTy = dyn_cast<VectorType>(Src)) { 1236 unsigned NumElem = VTy->getVectorNumElements(); 1237 1238 // Handle a few common cases: 1239 // <3 x float> 1240 if (NumElem == 3 && VTy->getScalarSizeInBits() == 32) 1241 // Cost = 64 bit store + extract + 32 bit store. 1242 return 3; 1243 1244 // <3 x double> 1245 if (NumElem == 3 && VTy->getScalarSizeInBits() == 64) 1246 // Cost = 128 bit store + unpack + 64 bit store. 1247 return 3; 1248 1249 // Assume that all other non-power-of-two numbers are scalarized. 1250 if (!isPowerOf2_32(NumElem)) { 1251 int Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(), Alignment, 1252 AddressSpace); 1253 int SplitCost = getScalarizationOverhead(Src, Opcode == Instruction::Load, 1254 Opcode == Instruction::Store); 1255 return NumElem * Cost + SplitCost; 1256 } 1257 } 1258 1259 // Legalize the type. 1260 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src); 1261 assert((Opcode == Instruction::Load || Opcode == Instruction::Store) && 1262 "Invalid Opcode"); 1263 1264 // Each load/store unit costs 1. 1265 int Cost = LT.first * 1; 1266 1267 // This isn't exactly right. We're using slow unaligned 32-byte accesses as a 1268 // proxy for a double-pumped AVX memory interface such as on Sandybridge. 
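  // In this model an unaligned 32-byte access is issued as two 16-byte
  // operations, hence the doubling below.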
  if (LT.second.getStoreSize() == 32 && ST->isUnalignedMem32Slow())
    Cost *= 2;

  return Cost;
}

int X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
                                      unsigned Alignment,
                                      unsigned AddressSpace) {
  VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy);
  if (!SrcVTy)
    // For a scalar type, take the regular cost, without the mask.
    return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace);

  unsigned NumElem = SrcVTy->getVectorNumElements();
  VectorType *MaskTy =
      VectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem);
  if ((Opcode == Instruction::Load && !isLegalMaskedLoad(SrcVTy)) ||
      (Opcode == Instruction::Store && !isLegalMaskedStore(SrcVTy)) ||
      !isPowerOf2_32(NumElem)) {
    // Scalarization.
    int MaskSplitCost = getScalarizationOverhead(MaskTy, false, true);
    int ScalarCompareCost = getCmpSelInstrCost(
        Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr);
    int BranchCost = getCFInstrCost(Instruction::Br);
    int MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);

    int ValueSplitCost = getScalarizationOverhead(
        SrcVTy, Opcode == Instruction::Load, Opcode == Instruction::Store);
    int MemopCost =
        NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                         Alignment, AddressSpace);
    return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
  auto VT = TLI->getValueType(DL, SrcVTy);
  int Cost = 0;
  if (VT.isSimple() && LT.second != VT.getSimpleVT() &&
      LT.second.getVectorNumElements() == NumElem)
    // Promotion requires an expand/truncate for the data and a shuffle for
    // the mask.
    Cost += getShuffleCost(TTI::SK_Alternate, SrcVTy, 0, nullptr) +
            getShuffleCost(TTI::SK_Alternate, MaskTy, 0, nullptr);

  else if (LT.second.getVectorNumElements() > NumElem) {
    VectorType *NewMaskTy = VectorType::get(MaskTy->getVectorElementType(),
                                            LT.second.getVectorNumElements());
    // Expanding requires filling the mask with zeroes.
    Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, 0, MaskTy);
  }
  if (!ST->hasAVX512())
    return Cost + LT.first * 4; // Each maskmov costs 4.

  // AVX-512 masked load/store is cheaper.
  return Cost + LT.first;
}

int X86TTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where
  // the computation can more often be merged into the index mode. The
  // resulting extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  if (Ty->isVectorTy() && IsComplex)
    return NumVectorInstToHideOverhead;

  return BaseT::getAddressComputationCost(Ty, IsComplex);
}

int X86TTIImpl::getReductionCost(unsigned Opcode, Type *ValTy,
                                 bool IsPairwise) {

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use that as the cost.
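  // A pairwise reduction combines adjacent pairs of elements at each step
  // ((a0+a1), (a2+a3), ...), while the non-pairwise form repeatedly adds the
  // high half of the vector to the low half.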
1352 1353 static const CostTblEntry SSE42CostTblPairWise[] = { 1354 { ISD::FADD, MVT::v2f64, 2 }, 1355 { ISD::FADD, MVT::v4f32, 4 }, 1356 { ISD::ADD, MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6". 1357 { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5". 1358 { ISD::ADD, MVT::v8i16, 5 }, 1359 }; 1360 1361 static const CostTblEntry AVX1CostTblPairWise[] = { 1362 { ISD::FADD, MVT::v4f32, 4 }, 1363 { ISD::FADD, MVT::v4f64, 5 }, 1364 { ISD::FADD, MVT::v8f32, 7 }, 1365 { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5". 1366 { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5". 1367 { ISD::ADD, MVT::v4i64, 5 }, // The data reported by the IACA tool is "4.8". 1368 { ISD::ADD, MVT::v8i16, 5 }, 1369 { ISD::ADD, MVT::v8i32, 5 }, 1370 }; 1371 1372 static const CostTblEntry SSE42CostTblNoPairWise[] = { 1373 { ISD::FADD, MVT::v2f64, 2 }, 1374 { ISD::FADD, MVT::v4f32, 4 }, 1375 { ISD::ADD, MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6". 1376 { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3". 1377 { ISD::ADD, MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3". 1378 }; 1379 1380 static const CostTblEntry AVX1CostTblNoPairWise[] = { 1381 { ISD::FADD, MVT::v4f32, 3 }, 1382 { ISD::FADD, MVT::v4f64, 3 }, 1383 { ISD::FADD, MVT::v8f32, 4 }, 1384 { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5". 1385 { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "2.8". 1386 { ISD::ADD, MVT::v4i64, 3 }, 1387 { ISD::ADD, MVT::v8i16, 4 }, 1388 { ISD::ADD, MVT::v8i32, 5 }, 1389 }; 1390 1391 if (IsPairwise) { 1392 if (ST->hasAVX()) 1393 if (const auto *Entry = CostTableLookup(AVX1CostTblPairWise, ISD, MTy)) 1394 return LT.first * Entry->Cost; 1395 1396 if (ST->hasSSE42()) 1397 if (const auto *Entry = CostTableLookup(SSE42CostTblPairWise, ISD, MTy)) 1398 return LT.first * Entry->Cost; 1399 } else { 1400 if (ST->hasAVX()) 1401 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy)) 1402 return LT.first * Entry->Cost; 1403 1404 if (ST->hasSSE42()) 1405 if (const auto *Entry = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy)) 1406 return LT.first * Entry->Cost; 1407 } 1408 1409 return BaseT::getReductionCost(Opcode, ValTy, IsPairwise); 1410 } 1411 1412 /// \brief Calculate the cost of materializing a 64-bit value. This helper 1413 /// method might only calculate a fraction of a larger immediate. Therefore it 1414 /// is valid to return a cost of ZERO. 1415 int X86TTIImpl::getIntImmCost(int64_t Val) { 1416 if (Val == 0) 1417 return TTI::TCC_Free; 1418 1419 if (isInt<32>(Val)) 1420 return TTI::TCC_Basic; 1421 1422 return 2 * TTI::TCC_Basic; 1423 } 1424 1425 int X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) { 1426 assert(Ty->isIntegerTy()); 1427 1428 unsigned BitSize = Ty->getPrimitiveSizeInBits(); 1429 if (BitSize == 0) 1430 return ~0U; 1431 1432 // Never hoist constants larger than 128bit, because this might lead to 1433 // incorrect code generation or assertions in codegen. 1434 // Fixme: Create a cost model for types larger than i128 once the codegen 1435 // issues have been fixed. 1436 if (BitSize > 128) 1437 return TTI::TCC_Free; 1438 1439 if (Imm == 0) 1440 return TTI::TCC_Free; 1441 1442 // Sign-extend all constants to a multiple of 64-bit. 
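  // For example, a 96-bit immediate is sign-extended to 128 bits and then
  // costed as two 64-bit chunks by the loop below.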
1443 APInt ImmVal = Imm; 1444 if (BitSize & 0x3f) 1445 ImmVal = Imm.sext((BitSize + 63) & ~0x3fU); 1446 1447 // Split the constant into 64-bit chunks and calculate the cost for each 1448 // chunk. 1449 int Cost = 0; 1450 for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) { 1451 APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64); 1452 int64_t Val = Tmp.getSExtValue(); 1453 Cost += getIntImmCost(Val); 1454 } 1455 // We need at least one instruction to materialize the constant. 1456 return std::max(1, Cost); 1457 } 1458 1459 int X86TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm, 1460 Type *Ty) { 1461 assert(Ty->isIntegerTy()); 1462 1463 unsigned BitSize = Ty->getPrimitiveSizeInBits(); 1464 // There is no cost model for constants with a bit size of 0. Return TCC_Free 1465 // here, so that constant hoisting will ignore this constant. 1466 if (BitSize == 0) 1467 return TTI::TCC_Free; 1468 1469 unsigned ImmIdx = ~0U; 1470 switch (Opcode) { 1471 default: 1472 return TTI::TCC_Free; 1473 case Instruction::GetElementPtr: 1474 // Always hoist the base address of a GetElementPtr. This prevents the 1475 // creation of new constants for every base constant that gets constant 1476 // folded with the offset. 1477 if (Idx == 0) 1478 return 2 * TTI::TCC_Basic; 1479 return TTI::TCC_Free; 1480 case Instruction::Store: 1481 ImmIdx = 0; 1482 break; 1483 case Instruction::ICmp: 1484 // This is an imperfect hack to prevent constant hoisting of 1485 // compares that might be trying to check if a 64-bit value fits in 1486 // 32-bits. The backend can optimize these cases using a right shift by 32. 1487 // Ideally we would check the compare predicate here. There also other 1488 // similar immediates the backend can use shifts for. 1489 if (Idx == 1 && Imm.getBitWidth() == 64) { 1490 uint64_t ImmVal = Imm.getZExtValue(); 1491 if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff) 1492 return TTI::TCC_Free; 1493 } 1494 ImmIdx = 1; 1495 break; 1496 case Instruction::And: 1497 // We support 64-bit ANDs with immediates with 32-bits of leading zeroes 1498 // by using a 32-bit operation with implicit zero extension. Detect such 1499 // immediates here as the normal path expects bit 31 to be sign extended. 1500 if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue())) 1501 return TTI::TCC_Free; 1502 LLVM_FALLTHROUGH; 1503 case Instruction::Add: 1504 case Instruction::Sub: 1505 case Instruction::Mul: 1506 case Instruction::UDiv: 1507 case Instruction::SDiv: 1508 case Instruction::URem: 1509 case Instruction::SRem: 1510 case Instruction::Or: 1511 case Instruction::Xor: 1512 ImmIdx = 1; 1513 break; 1514 // Always return TCC_Free for the shift value of a shift instruction. 1515 case Instruction::Shl: 1516 case Instruction::LShr: 1517 case Instruction::AShr: 1518 if (Idx == 1) 1519 return TTI::TCC_Free; 1520 break; 1521 case Instruction::Trunc: 1522 case Instruction::ZExt: 1523 case Instruction::SExt: 1524 case Instruction::IntToPtr: 1525 case Instruction::PtrToInt: 1526 case Instruction::BitCast: 1527 case Instruction::PHI: 1528 case Instruction::Call: 1529 case Instruction::Select: 1530 case Instruction::Ret: 1531 case Instruction::Load: 1532 break; 1533 } 1534 1535 if (Idx == ImmIdx) { 1536 int NumConstants = (BitSize + 63) / 64; 1537 int Cost = X86TTIImpl::getIntImmCost(Imm, Ty); 1538 return (Cost <= NumConstants * TTI::TCC_Basic) 1539 ? 
               ? static_cast<int>(TTI::TCC_Free)
               : Cost;
  }

  return X86TTIImpl::getIntImmCost(Imm, Ty);
}

int X86TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return X86TTIImpl::getIntImmCost(Imm, Ty);
}

// Return an average cost of a Gather / Scatter instruction; this may be
// improved later.
int X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy, Value *Ptr,
                                unsigned Alignment, unsigned AddressSpace) {
  assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost");
  unsigned VF = SrcVTy->getVectorNumElements();

  // Try to reduce the index size from 64 bits (the default for GEP) to 32
  // bits. This is essential for VF 16: if the index can't be reduced to 32
  // bits, the operation will use 16 x 64-bit indices, which do not fit in a
  // zmm register and need to be split. Also check that the base pointer is
  // the same for all lanes, and that there is at most one variable index.
  auto getIndexSizeInBits = [](Value *Ptr, const DataLayout &DL) {
    unsigned IndexSize = DL.getPointerSizeInBits();
    GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
    if (IndexSize < 64 || !GEP)
      return IndexSize;

    unsigned NumOfVarIndices = 0;
    Value *Ptrs = GEP->getPointerOperand();
    if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs))
      return IndexSize;
    for (unsigned i = 1; i < GEP->getNumOperands(); ++i) {
      if (isa<Constant>(GEP->getOperand(i)))
        continue;
      Type *IndxTy = GEP->getOperand(i)->getType();
      if (IndxTy->isVectorTy())
        IndxTy = IndxTy->getVectorElementType();
      if ((IndxTy->getPrimitiveSizeInBits() == 64 &&
           !isa<SExtInst>(GEP->getOperand(i))) ||
          ++NumOfVarIndices > 1)
        return IndexSize; // 64
    }
    return (unsigned)32;
  };

  // Try to reduce IndexSize to 32 bits for a 16-element vector.
  // By default the IndexSize is equal to the pointer size.
  unsigned IndexSize = (VF >= 16) ? getIndexSizeInBits(Ptr, DL)
                                  : DL.getPointerSizeInBits();

  Type *IndexVTy = VectorType::get(IntegerType::get(SrcVTy->getContext(),
                                                    IndexSize), VF);
  std::pair<int, MVT> IdxsLT = TLI->getTypeLegalizationCost(DL, IndexVTy);
  std::pair<int, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, SrcVTy);
  int SplitFactor = std::max(IdxsLT.first, SrcLT.first);
  if (SplitFactor > 1) {
    // Handle splitting of the vector of pointers.
    Type *SplitSrcTy =
        VectorType::get(SrcVTy->getScalarType(), VF / SplitFactor);
    return SplitFactor * getGSVectorCost(Opcode, SplitSrcTy, Ptr, Alignment,
                                         AddressSpace);
  }

  // The gather / scatter cost is given by Intel architects. It is a rough
  // number since we are looking at one instruction at a time.
  const int GSOverhead = 2;
  return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                           Alignment, AddressSpace);
}

/// Return the cost of full scalarization of a gather / scatter operation.
///
/// Opcode - Load or Store instruction.
/// SrcVTy - The type of the data vector that should be gathered or scattered.
/// VariableMask - True if the mask is not constant at compile time.
/// Alignment - Alignment for one element.
/// AddressSpace - The address space of the pointer(s).
///
int X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy,
                                bool VariableMask, unsigned Alignment,
                                unsigned AddressSpace) {
  unsigned VF = SrcVTy->getVectorNumElements();

  int MaskUnpackCost = 0;
  if (VariableMask) {
    VectorType *MaskTy =
        VectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF);
    MaskUnpackCost = getScalarizationOverhead(MaskTy, false, true);
    int ScalarCompareCost = getCmpSelInstrCost(
        Instruction::ICmp, Type::getInt1Ty(SrcVTy->getContext()), nullptr);
    int BranchCost = getCFInstrCost(Instruction::Br);
    MaskUnpackCost += VF * (BranchCost + ScalarCompareCost);
  }

  // The cost of the scalar loads/stores.
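  // Illustrative breakdown (not a measured number; the element count is only
  // an example): scalarizing a masked gather of <16 x float> with a variable
  // mask pays for unpacking the 16 mask bits, 16 compare + branch pairs,
  // 16 scalar loads, and 16 insertelement operations to rebuild the result
  // vector; the terms accumulated below add up exactly those pieces.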
  int MemoryOpCost = VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                          Alignment, AddressSpace);

  int InsertExtractCost = 0;
  if (Opcode == Instruction::Load)
    for (unsigned i = 0; i < VF; ++i)
      // Add the cost of inserting each scalar load into the vector.
      InsertExtractCost +=
          getVectorInstrCost(Instruction::InsertElement, SrcVTy, i);
  else
    for (unsigned i = 0; i < VF; ++i)
      // Add the cost of extracting each element out of the data vector.
      InsertExtractCost +=
          getVectorInstrCost(Instruction::ExtractElement, SrcVTy, i);

  return MemoryOpCost + MaskUnpackCost + InsertExtractCost;
}

/// Calculate the cost of a Gather / Scatter operation.
int X86TTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *SrcVTy,
                                       Value *Ptr, bool VariableMask,
                                       unsigned Alignment) {
  assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter");
  unsigned VF = SrcVTy->getVectorNumElements();
  PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
  if (!PtrTy && Ptr->getType()->isVectorTy())
    PtrTy = dyn_cast<PointerType>(Ptr->getType()->getVectorElementType());
  assert(PtrTy && "Unexpected type for Ptr argument");
  unsigned AddressSpace = PtrTy->getAddressSpace();

  bool Scalarize = false;
  if ((Opcode == Instruction::Load && !isLegalMaskedGather(SrcVTy)) ||
      (Opcode == Instruction::Store && !isLegalMaskedScatter(SrcVTy)))
    Scalarize = true;
  // A gather / scatter on a 2-element vector is not profitable on KNL / SKX.
  // A 4-element gather / scatter instruction does not exist on KNL. We could
  // widen it to 8 elements, but zeroing the upper bits of the mask vector
  // would add more instructions. Right now we give the scalar cost for a
  // 4-element vector on KNL. TODO: check whether the gather / scatter
  // instruction is better in the VariableMask case.
  if (VF == 2 || (VF == 4 && !ST->hasVLX()))
    Scalarize = true;

  if (Scalarize)
    return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment,
                           AddressSpace);

  return getGSVectorCost(Opcode, SrcVTy, Ptr, Alignment, AddressSpace);
}

bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy) {
  Type *ScalarTy = DataTy->getScalarType();
  int DataWidth = isa<PointerType>(ScalarTy) ?
    DL.getPointerSizeInBits() : ScalarTy->getPrimitiveSizeInBits();

  return ((DataWidth == 32 || DataWidth == 64) && ST->hasAVX()) ||
         ((DataWidth == 8 || DataWidth == 16) && ST->hasBWI());
}

bool X86TTIImpl::isLegalMaskedStore(Type *DataType) {
  return isLegalMaskedLoad(DataType);
}

bool X86TTIImpl::isLegalMaskedGather(Type *DataTy) {
  // This function is currently called in two cases: from the Loop Vectorizer
  // and from the Scalarizer.
  // When the Loop Vectorizer asks about the legality of the feature, the
  // vectorization factor is not calculated yet, so it sends a scalar type and
  // the decision is based on the width of the scalar element.
  // Later on, the cost model will estimate the usage of this intrinsic based
  // on the vector type.
  // The Scalarizer asks again about legality; it sends a vector type, and in
  // this case we can reject non-power-of-2 vectors.
  if (isa<VectorType>(DataTy) && !isPowerOf2_32(DataTy->getVectorNumElements()))
    return false;
  Type *ScalarTy = DataTy->getScalarType();
  int DataWidth = isa<PointerType>(ScalarTy) ?
    DL.getPointerSizeInBits() : ScalarTy->getPrimitiveSizeInBits();

  // AVX-512 provides gather and scatter instructions for 32- and 64-bit
  // elements.
  return (DataWidth == 32 || DataWidth == 64) && ST->hasAVX512();
}

bool X86TTIImpl::isLegalMaskedScatter(Type *DataType) {
  return isLegalMaskedGather(DataType);
}

bool X86TTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();

  // Treat this as a subset check on subtarget features: the callee's features
  // must be a subset of the caller's.
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // FIXME: This is likely too limiting, as it will include subtarget features
  // that we might not care about for inlining, but it is conservatively
  // correct.
  return (CallerBits & CalleeBits) == CalleeBits;
}

bool X86TTIImpl::enableInterleavedAccessVectorization() {
  // TODO: We expect this to be beneficial regardless of architecture,
  // but there are currently some unexplained performance artifacts on Atom.
  // As a temporary solution, disable it on Atom.
  return !(ST->isAtom() || ST->isSLM());
}
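
// Illustrative usage sketch (not part of this file's implementation; `F` and
// `VecTy` are only placeholders): clients normally reach these hooks through
// the generic TargetTransformInfo wrapper rather than calling X86TTIImpl
// directly, e.g.
//
//   const TargetTransformInfo &TTI =
//       getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
//   if (TTI.isLegalMaskedGather(VecTy))
//     ...consider emitting a llvm.masked.gather intrinsic...
//
// The TTI analysis dispatches each query to this implementation when the
// current target is X86.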