1 //===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 /// \file
10 /// This file implements a TargetTransformInfo analysis pass specific to the
11 /// X86 target machine. It uses the target's detailed information to provide
12 /// more precise answers to certain TTI queries, while letting the target
13 /// independent and default TTI implementations handle the rest.
14 ///
15 //===----------------------------------------------------------------------===//
/// A note about the cost-model numbers used below: they correspond to some
/// "generic" X86 CPU rather than to a concrete CPU model. Usually the numbers
/// correspond to the CPU on which the feature first appeared. For example, if
/// we check Subtarget.hasSSE42() in the lookups below, the cost is based on
/// Nehalem, as that was the first CPU to support that feature level and thus
/// most likely has the worst-case cost.
/// Some examples of other technologies/CPUs:
///   SSE 3   - Pentium4 / Athlon64
///   SSE 4.1 - Penryn
///   SSE 4.2 - Nehalem
///   AVX     - Sandy Bridge
///   AVX2    - Haswell
///   AVX-512 - Xeon Phi / Skylake
/// And some examples of target-dependent instruction costs (latency):
///                   divss     sqrtss          rsqrtss
///   AMD K7            11-16     19              3
///   Piledriver        9-24      13-15           5
///   Jaguar            14        16              2
///   Pentium II,III    18        30              2
///   Nehalem           7-14      7-18            3
///   Haswell           10-13     11              5
/// TODO: Develop and implement the target-dependent cost model and specialize
/// cost numbers for different cost-model targets such as throughput, code
/// size, latency and uop count.
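///
/// A worked example of how to read the tables below (a rough sketch, assuming
/// a generic CPU for each feature level): an arithmetic shift right of
/// <8 x i32> by a variable amount has cost 1 with AVX2 (the type is legal and
/// AVX2CostTable lists cost 1), whereas an SSE2-only target legalizes the
/// type into two v4i32 halves, so the SSE2CostTable entry of 16 is scaled by
/// the split factor LT.first == 2, giving 32.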
40 //===----------------------------------------------------------------------===//
41 
42 #include "X86TargetTransformInfo.h"
43 #include "llvm/Analysis/TargetTransformInfo.h"
44 #include "llvm/CodeGen/BasicTTIImpl.h"
45 #include "llvm/IR/IntrinsicInst.h"
46 #include "llvm/Support/Debug.h"
47 #include "llvm/Target/CostTable.h"
48 #include "llvm/Target/TargetLowering.h"
49 
50 using namespace llvm;
51 
52 #define DEBUG_TYPE "x86tti"
53 
54 //===----------------------------------------------------------------------===//
55 //
56 // X86 cost model.
57 //
58 //===----------------------------------------------------------------------===//
59 
60 TargetTransformInfo::PopcntSupportKind
61 X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
62   assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
63   // TODO: Currently the __builtin_popcount() implementation using SSE3
64   //   instructions is inefficient. Once the problem is fixed, we should
65   //   call ST->hasSSE3() instead of ST->hasPOPCNT().
66   return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
67 }
68 
69 unsigned X86TTIImpl::getNumberOfRegisters(bool Vector) {
70   if (Vector && !ST->hasSSE1())
71     return 0;
72 
73   if (ST->is64Bit()) {
74     if (Vector && ST->hasAVX512())
75       return 32;
76     return 16;
77   }
78   return 8;
79 }
80 
81 unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) {
82   if (Vector) {
83     if (ST->hasAVX512()) return 512;
84     if (ST->hasAVX()) return 256;
85     if (ST->hasSSE1()) return 128;
86     return 0;
87   }
88 
89   if (ST->is64Bit())
90     return 64;
91 
92   return 32;
93 }
94 
95 unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // If the loop will not be vectorized, don't interleave the loop.
  // Let the regular unroller unroll it instead, which saves the overflow
  // check and memory check cost.
99   if (VF == 1)
100     return 1;
101 
102   if (ST->isAtom())
103     return 1;
104 
105   // Sandybridge and Haswell have multiple execution ports and pipelined
106   // vector units.
107   if (ST->hasAVX())
108     return 4;
109 
110   return 2;
111 }
112 
113 int X86TTIImpl::getArithmeticInstrCost(
114     unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
115     TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
116     TTI::OperandValueProperties Opd2PropInfo) {
117   // Legalize the type.
118   std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
119 
120   int ISD = TLI->InstructionOpcodeToISD(Opcode);
121   assert(ISD && "Invalid opcode");
122 
123   if (ISD == ISD::SDIV &&
124       Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
125       Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    // On X86, vector signed division by a constant power of two is normally
    // expanded to the sequence SRA + SRL + ADD + SRA.
    // The OperandValue properties may not be the same as those of the
    // previous operation; conservatively assume OP_None.
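    // For example (illustrative IR, hypothetical value names), for
    //   %d = sdiv <4 x i32> %x, <i32 4, i32 4, i32 4, i32 4>
    // the returned cost is the sum of one Add, one LShr and two AShr costs
    // for <4 x i32>, mirroring the expansion described above.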
130     int Cost = 2 * getArithmeticInstrCost(Instruction::AShr, Ty, Op1Info,
131                                           Op2Info, TargetTransformInfo::OP_None,
132                                           TargetTransformInfo::OP_None);
133     Cost += getArithmeticInstrCost(Instruction::LShr, Ty, Op1Info, Op2Info,
134                                    TargetTransformInfo::OP_None,
135                                    TargetTransformInfo::OP_None);
136     Cost += getArithmeticInstrCost(Instruction::Add, Ty, Op1Info, Op2Info,
137                                    TargetTransformInfo::OP_None,
138                                    TargetTransformInfo::OP_None);
139 
140     return Cost;
141   }
142 
143   static const CostTblEntry AVX2UniformConstCostTable[] = {
144     { ISD::SRA,  MVT::v4i64,   4 }, // 2 x psrad + shuffle.
145 
146     { ISD::SDIV, MVT::v16i16,  6 }, // vpmulhw sequence
147     { ISD::UDIV, MVT::v16i16,  6 }, // vpmulhuw sequence
148     { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
149     { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
150   };
151 
152   if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
153       ST->hasAVX2()) {
154     if (const auto *Entry = CostTableLookup(AVX2UniformConstCostTable, ISD,
155                                             LT.second))
156       return LT.first * Entry->Cost;
157   }
158 
159   static const CostTblEntry AVX512CostTable[] = {
160     { ISD::SHL,     MVT::v16i32,    1 },
161     { ISD::SRL,     MVT::v16i32,    1 },
162     { ISD::SRA,     MVT::v16i32,    1 },
163     { ISD::SHL,     MVT::v8i64,    1 },
164     { ISD::SRL,     MVT::v8i64,    1 },
165     { ISD::SRA,     MVT::v8i64,    1 },
166   };
167 
168   if (ST->hasAVX512()) {
169     if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
170       return LT.first * Entry->Cost;
171   }
172 
173   static const CostTblEntry AVX2CostTable[] = {
    // Shifts on v4i64/v8i32 are legal on AVX2 even though we mark them as
    // custom, so that we can detect the cases where the shift amount is a
    // scalar splat.
176     { ISD::SHL,     MVT::v4i32,    1 },
177     { ISD::SRL,     MVT::v4i32,    1 },
178     { ISD::SRA,     MVT::v4i32,    1 },
179     { ISD::SHL,     MVT::v8i32,    1 },
180     { ISD::SRL,     MVT::v8i32,    1 },
181     { ISD::SRA,     MVT::v8i32,    1 },
182     { ISD::SHL,     MVT::v2i64,    1 },
183     { ISD::SRL,     MVT::v2i64,    1 },
184     { ISD::SHL,     MVT::v4i64,    1 },
185     { ISD::SRL,     MVT::v4i64,    1 },
186   };
187 
188   // Look for AVX2 lowering tricks.
189   if (ST->hasAVX2()) {
190     if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
191         (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
192          Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
193       // On AVX2, a packed v16i16 shift left by a constant build_vector
194       // is lowered into a vector multiply (vpmullw).
195       return LT.first;
196 
197     if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second))
198       return LT.first * Entry->Cost;
199   }
200 
201   static const CostTblEntry XOPCostTable[] = {
    // 128-bit shifts take 1 cycle, but right shifts require the shift amount
    // to be negated beforehand.
203     { ISD::SHL,     MVT::v16i8,    1 },
204     { ISD::SRL,     MVT::v16i8,    2 },
205     { ISD::SRA,     MVT::v16i8,    2 },
206     { ISD::SHL,     MVT::v8i16,    1 },
207     { ISD::SRL,     MVT::v8i16,    2 },
208     { ISD::SRA,     MVT::v8i16,    2 },
209     { ISD::SHL,     MVT::v4i32,    1 },
210     { ISD::SRL,     MVT::v4i32,    2 },
211     { ISD::SRA,     MVT::v4i32,    2 },
212     { ISD::SHL,     MVT::v2i64,    1 },
213     { ISD::SRL,     MVT::v2i64,    2 },
214     { ISD::SRA,     MVT::v2i64,    2 },
    // 256-bit shifts require splitting if AVX2 didn't catch them above.
216     { ISD::SHL,     MVT::v32i8,    2 },
217     { ISD::SRL,     MVT::v32i8,    4 },
218     { ISD::SRA,     MVT::v32i8,    4 },
219     { ISD::SHL,     MVT::v16i16,   2 },
220     { ISD::SRL,     MVT::v16i16,   4 },
221     { ISD::SRA,     MVT::v16i16,   4 },
222     { ISD::SHL,     MVT::v8i32,    2 },
223     { ISD::SRL,     MVT::v8i32,    4 },
224     { ISD::SRA,     MVT::v8i32,    4 },
225     { ISD::SHL,     MVT::v4i64,    2 },
226     { ISD::SRL,     MVT::v4i64,    4 },
227     { ISD::SRA,     MVT::v4i64,    4 },
228   };
229 
230   // Look for XOP lowering tricks.
231   if (ST->hasXOP()) {
232     if (const auto *Entry = CostTableLookup(XOPCostTable, ISD, LT.second))
233       return LT.first * Entry->Cost;
234   }
235 
236   static const CostTblEntry AVX2CustomCostTable[] = {
237     { ISD::SHL,  MVT::v32i8,      11 }, // vpblendvb sequence.
238     { ISD::SHL,  MVT::v16i16,     10 }, // extend/vpsrlvd/pack sequence.
239 
240     { ISD::SRL,  MVT::v32i8,      11 }, // vpblendvb sequence.
241     { ISD::SRL,  MVT::v16i16,     10 }, // extend/vpsrlvd/pack sequence.
242 
243     { ISD::SRA,  MVT::v32i8,      24 }, // vpblendvb sequence.
244     { ISD::SRA,  MVT::v16i16,     10 }, // extend/vpsravd/pack sequence.
245     { ISD::SRA,  MVT::v2i64,       4 }, // srl/xor/sub sequence.
246     { ISD::SRA,  MVT::v4i64,       4 }, // srl/xor/sub sequence.
247 
248     // Vectorizing division is a bad idea. See the SSE2 table for more comments.
249     { ISD::SDIV,  MVT::v32i8,  32*20 },
250     { ISD::SDIV,  MVT::v16i16, 16*20 },
251     { ISD::SDIV,  MVT::v8i32,  8*20 },
252     { ISD::SDIV,  MVT::v4i64,  4*20 },
253     { ISD::UDIV,  MVT::v32i8,  32*20 },
254     { ISD::UDIV,  MVT::v16i16, 16*20 },
255     { ISD::UDIV,  MVT::v8i32,  8*20 },
256     { ISD::UDIV,  MVT::v4i64,  4*20 },
257   };
258 
259   // Look for AVX2 lowering tricks for custom cases.
260   if (ST->hasAVX2()) {
261     if (const auto *Entry = CostTableLookup(AVX2CustomCostTable, ISD,
262                                             LT.second))
263       return LT.first * Entry->Cost;
264   }
265 
  static const CostTblEntry SSE2UniformConstCostTable[] = {
268     // Constant splats are cheaper for the following instructions.
269     { ISD::SDIV, MVT::v8i16,  6 }, // pmulhw sequence
270     { ISD::UDIV, MVT::v8i16,  6 }, // pmulhuw sequence
271     { ISD::SDIV, MVT::v4i32, 19 }, // pmuludq sequence
272     { ISD::UDIV, MVT::v4i32, 15 }, // pmuludq sequence
273   };
274 
  static const CostTblEntry SSE2UniformCostTable[] = {
277     // Uniform splats are cheaper for the following instructions.
278     { ISD::SHL,  MVT::v16i8,  1 }, // psllw.
279     { ISD::SHL,  MVT::v32i8,  2 }, // psllw.
280     { ISD::SHL,  MVT::v8i16,  1 }, // psllw.
281     { ISD::SHL,  MVT::v16i16, 2 }, // psllw.
282     { ISD::SHL,  MVT::v4i32,  1 }, // pslld
283     { ISD::SHL,  MVT::v8i32,  2 }, // pslld
284     { ISD::SHL,  MVT::v2i64,  1 }, // psllq.
285     { ISD::SHL,  MVT::v4i64,  2 }, // psllq.
286 
287     { ISD::SRL,  MVT::v16i8,  1 }, // psrlw.
288     { ISD::SRL,  MVT::v32i8,  2 }, // psrlw.
289     { ISD::SRL,  MVT::v8i16,  1 }, // psrlw.
290     { ISD::SRL,  MVT::v16i16, 2 }, // psrlw.
291     { ISD::SRL,  MVT::v4i32,  1 }, // psrld.
292     { ISD::SRL,  MVT::v8i32,  2 }, // psrld.
293     { ISD::SRL,  MVT::v2i64,  1 }, // psrlq.
294     { ISD::SRL,  MVT::v4i64,  2 }, // psrlq.
295 
296     { ISD::SRA,  MVT::v16i8,  4 }, // psrlw, pand, pxor, psubb.
297     { ISD::SRA,  MVT::v32i8,  8 }, // psrlw, pand, pxor, psubb.
298     { ISD::SRA,  MVT::v8i16,  1 }, // psraw.
299     { ISD::SRA,  MVT::v16i16, 2 }, // psraw.
300     { ISD::SRA,  MVT::v4i32,  1 }, // psrad.
301     { ISD::SRA,  MVT::v8i32,  2 }, // psrad.
302     { ISD::SRA,  MVT::v2i64,  4 }, // 2 x psrad + shuffle.
303     { ISD::SRA,  MVT::v4i64,  8 }, // 2 x psrad + shuffle.
304   };
305 
306   if (ST->hasSSE2() &&
307       ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
308        (Op2Info == TargetTransformInfo::OK_UniformValue))) {
309     if (Op2Info == TargetTransformInfo::OK_UniformConstantValue) {
310       // pmuldq sequence.
311       if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
312         return LT.first * 15;
313       if (const auto *Entry =
314               CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second))
315         return LT.first * Entry->Cost;
316     }
317     if (const auto *Entry =
318             CostTableLookup(SSE2UniformCostTable, ISD, LT.second))
319       return LT.first * Entry->Cost;
320   }
321 
322   if (ISD == ISD::SHL &&
323       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
324     MVT VT = LT.second;
    // A vector shift left by a non-uniform constant can be lowered into a
    // vector multiply (pmullw/pmulld).
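    // For example (illustrative IR), a shift such as
    //   %r = shl <8 x i16> %a, <i16 0, i16 1, i16 2, ..., i16 7>
    // becomes a single pmullw by the constant vector <1, 2, 4, ..., 128>.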
327     if ((VT == MVT::v8i16 && ST->hasSSE2()) ||
328         (VT == MVT::v4i32 && ST->hasSSE41()))
329       return LT.first;
330 
    // v16i16 and v8i32 shifts by non-uniform constants are lowered into a
    // sequence of extract + two vector multiplies + insert.
333     if ((VT == MVT::v8i32 || VT == MVT::v16i16) &&
334        (ST->hasAVX() && !ST->hasAVX2()))
335       ISD = ISD::MUL;
336 
    // A vector shift left by a non-uniform constant is converted into a
    // vector multiply; the new multiply is eventually lowered into a
    // sequence of shuffles and 2 x pmuludq.
340     if (VT == MVT::v4i32 && ST->hasSSE2())
341       ISD = ISD::MUL;
342   }
343 
344   static const CostTblEntry SSE2CostTable[] = {
345     // We don't correctly identify costs of casts because they are marked as
346     // custom.
347     { ISD::SHL,  MVT::v16i8,    26 }, // cmpgtb sequence.
348     { ISD::SHL,  MVT::v32i8,  2*26 }, // cmpgtb sequence.
349     { ISD::SHL,  MVT::v8i16,    32 }, // cmpgtb sequence.
350     { ISD::SHL,  MVT::v16i16, 2*32 }, // cmpgtb sequence.
351     { ISD::SHL,  MVT::v4i32,   2*5 }, // We optimized this using mul.
352     { ISD::SHL,  MVT::v8i32, 2*2*5 }, // We optimized this using mul.
353     { ISD::SHL,  MVT::v2i64,     4 }, // splat+shuffle sequence.
354     { ISD::SHL,  MVT::v4i64,   2*4 }, // splat+shuffle sequence.
355 
356     { ISD::SRL,  MVT::v16i8,    26 }, // cmpgtb sequence.
357     { ISD::SRL,  MVT::v32i8,  2*26 }, // cmpgtb sequence.
358     { ISD::SRL,  MVT::v8i16,    32 }, // cmpgtb sequence.
359     { ISD::SRL,  MVT::v16i16, 2*32 }, // cmpgtb sequence.
360     { ISD::SRL,  MVT::v4i32,    16 }, // Shift each lane + blend.
361     { ISD::SRL,  MVT::v8i32,  2*16 }, // Shift each lane + blend.
362     { ISD::SRL,  MVT::v2i64,     4 }, // splat+shuffle sequence.
363     { ISD::SRL,  MVT::v4i64,   2*4 }, // splat+shuffle sequence.
364 
365     { ISD::SRA,  MVT::v16i8,    54 }, // unpacked cmpgtb sequence.
366     { ISD::SRA,  MVT::v32i8,  2*54 }, // unpacked cmpgtb sequence.
367     { ISD::SRA,  MVT::v8i16,    32 }, // cmpgtb sequence.
368     { ISD::SRA,  MVT::v16i16, 2*32 }, // cmpgtb sequence.
369     { ISD::SRA,  MVT::v4i32,    16 }, // Shift each lane + blend.
370     { ISD::SRA,  MVT::v8i32,  2*16 }, // Shift each lane + blend.
371     { ISD::SRA,  MVT::v2i64,    12 }, // srl/xor/sub sequence.
372     { ISD::SRA,  MVT::v4i64,  2*12 }, // srl/xor/sub sequence.
373 
    // It is not a good idea to vectorize division. We have to scalarize it and
    // in the process we will often end up having to spill regular registers.
    // The overhead of division is going to dominate most kernels anyway, so
    // try hard to prevent vectorization of division - it is generally a bad
    // idea. Assume somewhat arbitrarily that we have to be able to hide
    // "20 cycles" for each lane.
380     { ISD::SDIV,  MVT::v16i8,  16*20 },
381     { ISD::SDIV,  MVT::v8i16,  8*20 },
382     { ISD::SDIV,  MVT::v4i32,  4*20 },
383     { ISD::SDIV,  MVT::v2i64,  2*20 },
384     { ISD::UDIV,  MVT::v16i8,  16*20 },
385     { ISD::UDIV,  MVT::v8i16,  8*20 },
386     { ISD::UDIV,  MVT::v4i32,  4*20 },
387     { ISD::UDIV,  MVT::v2i64,  2*20 },
388   };
389 
390   if (ST->hasSSE2()) {
391     if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
392       return LT.first * Entry->Cost;
393   }
394 
395   static const CostTblEntry AVX1CostTable[] = {
396     // We don't have to scalarize unsupported ops. We can issue two half-sized
397     // operations and we only need to extract the upper YMM half.
398     // Two ops + 1 extract + 1 insert = 4.
399     { ISD::MUL,     MVT::v16i16,   4 },
400     { ISD::MUL,     MVT::v8i32,    4 },
401     { ISD::SUB,     MVT::v8i32,    4 },
402     { ISD::ADD,     MVT::v8i32,    4 },
403     { ISD::SUB,     MVT::v4i64,    4 },
404     { ISD::ADD,     MVT::v4i64,    4 },
    // A v4i64 multiply is custom lowered as two split v2i64 vectors that are
    // then lowered as a series of long multiplies (3), shifts (4) and adds (2).
    // Because we believe v4i64 to be a legal type, we must also include the
    // split factor of two in the cost table. Therefore, the cost here is 18
    // instead of 9.
410     { ISD::MUL,     MVT::v4i64,    18 },
411   };
412 
413   // Look for AVX1 lowering tricks.
414   if (ST->hasAVX() && !ST->hasAVX2()) {
415     MVT VT = LT.second;
416 
417     if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, VT))
418       return LT.first * Entry->Cost;
419   }
420 
421   // Custom lowering of vectors.
422   static const CostTblEntry CustomLowered[] = {
    // A v2i64/v4i64 multiply is custom lowered as a series of long
    // multiplies (3), shifts (4) and adds (2).
425     { ISD::MUL,     MVT::v2i64,    9 },
426     { ISD::MUL,     MVT::v4i64,    9 },
427   };
428   if (const auto *Entry = CostTableLookup(CustomLowered, ISD, LT.second))
429     return LT.first * Entry->Cost;
430 
  // Special lowering of v4i32 mul on SSE2/SSE3: lower a v4i32 mul as
  // 2x shuffle, 2x pmuludq, 2x shuffle.
433   if (ISD == ISD::MUL && LT.second == MVT::v4i32 && ST->hasSSE2() &&
434       !ST->hasSSE41())
435     return LT.first * 6;
436 
437   // Fallback to the default implementation.
438   return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info);
439 }
440 
441 int X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
442                                Type *SubTp) {
443   // We only estimate the cost of reverse and alternate shuffles.
444   if (Kind != TTI::SK_Reverse && Kind != TTI::SK_Alternate)
445     return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
446 
447   if (Kind == TTI::SK_Reverse) {
448     std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
449     int Cost = 1;
450     if (LT.second.getSizeInBits() > 128)
451       Cost = 3; // Extract + insert + copy.
452 
    // Multiply by the number of parts.
454     return Cost * LT.first;
455   }
456 
457   if (Kind == TTI::SK_Alternate) {
458     // 64-bit packed float vectors (v2f32) are widened to type v4f32.
459     // 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
460     std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
461 
    // The backend knows how to generate a single VEX.256 version of the
    // VPBLENDW instruction if the target supports AVX2.
464     if (ST->hasAVX2() && LT.second == MVT::v16i16)
465       return LT.first;
466 
467     static const CostTblEntry AVXAltShuffleTbl[] = {
468       {ISD::VECTOR_SHUFFLE, MVT::v4i64, 1},  // vblendpd
469       {ISD::VECTOR_SHUFFLE, MVT::v4f64, 1},  // vblendpd
470 
471       {ISD::VECTOR_SHUFFLE, MVT::v8i32, 1},  // vblendps
472       {ISD::VECTOR_SHUFFLE, MVT::v8f32, 1},  // vblendps
473 
      // This shuffle is custom lowered into a sequence of:
      //  2x vextractf128, 2x vpblendw, 1x vinsertf128
476       {ISD::VECTOR_SHUFFLE, MVT::v16i16, 5},
477 
      // This shuffle is custom lowered into a long sequence of:
      //  2x vextractf128, 4x vpshufb, 2x vpor, 1x vinsertf128
480       {ISD::VECTOR_SHUFFLE, MVT::v32i8, 9}
481     };
482 
483     if (ST->hasAVX())
484       if (const auto *Entry = CostTableLookup(AVXAltShuffleTbl,
485                                               ISD::VECTOR_SHUFFLE, LT.second))
486         return LT.first * Entry->Cost;
487 
488     static const CostTblEntry SSE41AltShuffleTbl[] = {
489       // These are lowered into movsd.
490       {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
491       {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
492 
493       // packed float vectors with four elements are lowered into BLENDI dag
494       // nodes. A v4i32/v4f32 BLENDI generates a single 'blendps'/'blendpd'.
495       {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
496       {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
497 
498       // This shuffle generates a single pshufw.
499       {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
500 
501       // There is no instruction that matches a v16i8 alternate shuffle.
502       // The backend will expand it into the sequence 'pshufb + pshufb + or'.
503       {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3}
504     };
505 
506     if (ST->hasSSE41())
      if (const auto *Entry = CostTableLookup(SSE41AltShuffleTbl,
                                              ISD::VECTOR_SHUFFLE, LT.second))
509         return LT.first * Entry->Cost;
510 
511     static const CostTblEntry SSSE3AltShuffleTbl[] = {
512       {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},  // movsd
513       {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},  // movsd
514 
515       // SSE3 doesn't have 'blendps'. The following shuffles are expanded into
516       // the sequence 'shufps + pshufd'
517       {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
518       {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
519 
520       {ISD::VECTOR_SHUFFLE, MVT::v8i16, 3}, // pshufb + pshufb + or
521       {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3}  // pshufb + pshufb + or
522     };
523 
524     if (ST->hasSSSE3())
525       if (const auto *Entry = CostTableLookup(SSSE3AltShuffleTbl,
526                                               ISD::VECTOR_SHUFFLE, LT.second))
527         return LT.first * Entry->Cost;
528 
529     static const CostTblEntry SSEAltShuffleTbl[] = {
530       {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},  // movsd
531       {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},  // movsd
532 
533       {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2}, // shufps + pshufd
534       {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2}, // shufps + pshufd
535 
536       // This is expanded into a long sequence of four extract + four insert.
537       {ISD::VECTOR_SHUFFLE, MVT::v8i16, 8}, // 4 x pextrw + 4 pinsrw.
538 
539       // 8 x (pinsrw + pextrw + and + movb + movzb + or)
540       {ISD::VECTOR_SHUFFLE, MVT::v16i8, 48}
541     };
542 
543     // Fall-back (SSE3 and SSE2).
544     if (const auto *Entry = CostTableLookup(SSEAltShuffleTbl,
545                                             ISD::VECTOR_SHUFFLE, LT.second))
546       return LT.first * Entry->Cost;
547     return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
548   }
549 
550   return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
551 }
552 
553 int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
554   int ISD = TLI->InstructionOpcodeToISD(Opcode);
555   assert(ISD && "Invalid opcode");
556 
  // FIXME: Need a better design of the cost table to handle non-simple types
  // and the potentially massive number of combinations
  // (elem_num x src_type x dst_type).
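  // The lookups below use the SSE2 table directly (on the legalized types)
  // for pre-AVX targets, and otherwise try the most feature-specific table
  // first. For example, a sitofp from <8 x i32> to <8 x float> has cost 1
  // with AVX (a single 256-bit convert per AVXConversionTbl), while a plain
  // SSE2 target splits it into two v4i32 -> v4f32 halves at cost 5 each.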
559 
560   static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = {
561     { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i64,  1 },
562     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  1 },
563     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i64,  1 },
564     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  1 },
565     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i64,  1 },
566     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i64,  1 },
567 
568     { ISD::FP_TO_UINT,  MVT::v2i64, MVT::v2f32, 1 },
569     { ISD::FP_TO_UINT,  MVT::v4i64, MVT::v4f32, 1 },
570     { ISD::FP_TO_UINT,  MVT::v8i64, MVT::v8f32, 1 },
571     { ISD::FP_TO_UINT,  MVT::v2i64, MVT::v2f64, 1 },
572     { ISD::FP_TO_UINT,  MVT::v4i64, MVT::v4f64, 1 },
573     { ISD::FP_TO_UINT,  MVT::v8i64, MVT::v8f64, 1 },
574   };
575 
576   // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and
577   // 256-bit wide vectors.
578 
579   static const TypeConversionCostTblEntry AVX512FConversionTbl[] = {
580     { ISD::FP_EXTEND, MVT::v8f64,   MVT::v8f32,  1 },
581     { ISD::FP_EXTEND, MVT::v8f64,   MVT::v16f32, 3 },
582     { ISD::FP_ROUND,  MVT::v8f32,   MVT::v8f64,  1 },
583 
584     { ISD::TRUNCATE,  MVT::v16i8,   MVT::v16i32, 1 },
585     { ISD::TRUNCATE,  MVT::v16i16,  MVT::v16i32, 1 },
586     { ISD::TRUNCATE,  MVT::v8i16,   MVT::v8i64,  1 },
587     { ISD::TRUNCATE,  MVT::v8i32,   MVT::v8i64,  1 },
588 
589     // v16i1 -> v16i32 - load + broadcast
590     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1,  2 },
591     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1,  2 },
592     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
593     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
594     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
595     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
596     { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i16,  1 },
597     { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i16,  1 },
598     { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i32,  1 },
599     { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i32,  1 },
600 
601     { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i1,   4 },
602     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i1,  3 },
603     { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i8,   2 },
604     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i8,  2 },
605     { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i16,  2 },
606     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 2 },
607     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 1 },
608     { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  1 },
609     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i64, 26 },
610     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i64, 26 },
611 
612     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i1,   4 },
613     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i1,  3 },
614     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i8,   2 },
615     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   2 },
616     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   2 },
617     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i8,   2 },
618     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i8,  2 },
619     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i16,  5 },
620     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  2 },
621     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  2 },
622     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i16,  2 },
623     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i16, 2 },
624     { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i32,  2 },
625     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i32,  1 },
626     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  1 },
627     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  1 },
628     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  1 },
629     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  1 },
630     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i32, 1 },
631     { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i64,  5 },
632     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  5 },
633     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64, 12 },
634     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i64, 26 },
635 
636     { ISD::FP_TO_UINT,  MVT::v2i32,  MVT::v2f32,  1 },
637     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f32,  1 },
638     { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f32,  1 },
639     { ISD::FP_TO_UINT,  MVT::v16i32, MVT::v16f32, 1 },
640   };
641 
642   static const TypeConversionCostTblEntry AVX2ConversionTbl[] = {
643     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
644     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
645     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
646     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
647     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
648     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
649     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
650     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
651     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
652     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
653     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
654     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
655     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
656     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
657     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },
658     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },
659 
660     { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64,  2 },
661     { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64,  2 },
662     { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  2 },
663     { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  2 },
664     { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  2 },
665     { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  4 },
666 
667     { ISD::FP_EXTEND,   MVT::v8f64,  MVT::v8f32,  3 },
668     { ISD::FP_ROUND,    MVT::v8f32,  MVT::v8f64,  3 },
669 
670     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  8 },
671   };
672 
673   static const TypeConversionCostTblEntry AVXConversionTbl[] = {
674     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,  6 },
675     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,  4 },
676     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,  7 },
677     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,  4 },
678     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,  6 },
679     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,  4 },
680     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,  7 },
681     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,  4 },
682     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
683     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
684     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 6 },
685     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
686     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16, 4 },
687     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16, 4 },
688     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32, 4 },
689     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32, 4 },
690 
691     { ISD::TRUNCATE,    MVT::v16i8, MVT::v16i16, 4 },
692     { ISD::TRUNCATE,    MVT::v8i8,  MVT::v8i32,  4 },
693     { ISD::TRUNCATE,    MVT::v8i16, MVT::v8i32,  5 },
694     { ISD::TRUNCATE,    MVT::v4i8,  MVT::v4i64,  4 },
695     { ISD::TRUNCATE,    MVT::v4i16, MVT::v4i64,  4 },
696     { ISD::TRUNCATE,    MVT::v4i32, MVT::v4i64,  4 },
697     { ISD::TRUNCATE,    MVT::v8i32, MVT::v8i64,  9 },
698 
699     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i1,  3 },
700     { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i1,  3 },
701     { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i1,  8 },
702     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i8,  3 },
703     { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i8,  3 },
704     { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i8,  8 },
705     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i16, 3 },
706     { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i16, 3 },
707     { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i16, 5 },
708     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
709     { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i32, 1 },
710     { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i32, 1 },
711 
712     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i1,  7 },
713     { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i1,  7 },
714     { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i1,  6 },
715     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i8,  2 },
716     { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i8,  2 },
717     { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i8,  5 },
718     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
719     { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i16, 2 },
720     { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i16, 5 },
721     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 6 },
722     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i32, 6 },
723     { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i32, 6 },
724     { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i32, 9 },
    // The generic code to compute the scalar overhead is currently broken.
    // Work around this limitation by estimating the scalarization overhead
    // here. We have roughly 10 instructions per scalar element.
    // Multiply that by the vector width.
    // FIXME: remove this when PR19268 is fixed.
730     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i64, 10 },
731     { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i64, 20 },
732     { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i64, 13 },
734 
735     { ISD::FP_TO_SINT,  MVT::v4i8,  MVT::v4f32, 1 },
736     { ISD::FP_TO_SINT,  MVT::v8i8,  MVT::v8f32, 7 },
    // This node is expanded into scalarized operations but BasicTTI is overly
    // optimistic in estimating its cost. It computes 3 per element (one
    // vector-extract, one scalar conversion and one vector-insert). The
    // problem is that the inserts form a read-modify-write chain, so latency
    // should be factored in too. Inflate the cost per element by 1.
742     { ISD::FP_TO_UINT,  MVT::v8i32, MVT::v8f32, 8*4 },
743     { ISD::FP_TO_UINT,  MVT::v4i32, MVT::v4f64, 4*4 },
744 
745     { ISD::FP_EXTEND,   MVT::v4f64,  MVT::v4f32,  1 },
746     { ISD::FP_ROUND,    MVT::v4f32,  MVT::v4f64,  1 },
747   };
748 
749   static const TypeConversionCostTblEntry SSE41ConversionTbl[] = {
750     { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8,    2 },
751     { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8,    2 },
752     { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16,   2 },
753     { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16,   2 },
754     { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32,   2 },
755     { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32,   2 },
756 
757     { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i8,   1 },
758     { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i8,   2 },
759     { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i8,   1 },
760     { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i8,   1 },
761     { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i8,   1 },
762     { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i8,   1 },
763     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   2 },
764     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   2 },
765     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  2 },
766     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  2 },
767     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  4 },
768     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  4 },
769     { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i16,  1 },
770     { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i16,  1 },
771     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  2 },
772     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  2 },
773     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 4 },
774     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 4 },
775 
776     { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i16,  2 },
777     { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i16,  1 },
778     { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i32,  1 },
779     { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i32,  1 },
780     { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  3 },
781     { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  3 },
782     { ISD::TRUNCATE,    MVT::v16i16, MVT::v16i32, 6 },
783 
784   };
785 
786   static const TypeConversionCostTblEntry SSE2ConversionTbl[] = {
    // These are somewhat magic numbers justified by looking at the output of
    // Intel's IACA, running some kernels and making sure that, when we take
    // legalization into account, the throughput will be overestimated.
790     { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
791     { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
792     { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
793     { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
794     { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 },
795     { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
796     { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
797     { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
798 
799     { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
800     { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
801     { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
802     { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
803     { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
804     { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 8 },
805     { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
806     { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
807 
808     { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i8,   1 },
809     { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i8,   6 },
810     { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i8,   2 },
811     { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i8,   3 },
812     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   4 },
813     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   8 },
814     { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i8,   1 },
815     { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i8,   2 },
816     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   6 },
817     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   6 },
818     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  3 },
819     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
820     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  9 },
821     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  12 },
822     { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i16,  1 },
823     { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i16,  2 },
824     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
825     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  10 },
826     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  3 },
827     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
828     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 6 },
829     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 8 },
830     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  3 },
831     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  5 },
832 
833     { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i16,  4 },
834     { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i16,  2 },
835     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 3 },
836     { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i32,  3 },
837     { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i32,  3 },
838     { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  4 },
839     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i32, 7 },
840     { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  5 },
841     { ISD::TRUNCATE,    MVT::v16i16, MVT::v16i32, 10 },
842   };
843 
844   std::pair<int, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
845   std::pair<int, MVT> LTDest = TLI->getTypeLegalizationCost(DL, Dst);
846 
847   if (ST->hasSSE2() && !ST->hasAVX()) {
848     if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
849                                                    LTDest.second, LTSrc.second))
850       return LTSrc.first * Entry->Cost;
851   }
852 
853   EVT SrcTy = TLI->getValueType(DL, Src);
854   EVT DstTy = TLI->getValueType(DL, Dst);
855 
856   // The function getSimpleVT only handles simple value types.
857   if (!SrcTy.isSimple() || !DstTy.isSimple())
858     return BaseT::getCastInstrCost(Opcode, Dst, Src);
859 
860   if (ST->hasDQI())
861     if (const auto *Entry = ConvertCostTableLookup(AVX512DQConversionTbl, ISD,
862                                                    DstTy.getSimpleVT(),
863                                                    SrcTy.getSimpleVT()))
864       return Entry->Cost;
865 
866   if (ST->hasAVX512())
867     if (const auto *Entry = ConvertCostTableLookup(AVX512FConversionTbl, ISD,
868                                                    DstTy.getSimpleVT(),
869                                                    SrcTy.getSimpleVT()))
870       return Entry->Cost;
871 
872   if (ST->hasAVX2()) {
873     if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
874                                                    DstTy.getSimpleVT(),
875                                                    SrcTy.getSimpleVT()))
876       return Entry->Cost;
877   }
878 
879   if (ST->hasAVX()) {
880     if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
881                                                    DstTy.getSimpleVT(),
882                                                    SrcTy.getSimpleVT()))
883       return Entry->Cost;
884   }
885 
886   if (ST->hasSSE41()) {
887     if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
888                                                    DstTy.getSimpleVT(),
889                                                    SrcTy.getSimpleVT()))
890       return Entry->Cost;
891   }
892 
893   if (ST->hasSSE2()) {
894     if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
895                                                    DstTy.getSimpleVT(),
896                                                    SrcTy.getSimpleVT()))
897       return Entry->Cost;
898   }
899 
900   return BaseT::getCastInstrCost(Opcode, Dst, Src);
901 }
902 
903 int X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) {
904   // Legalize the type.
905   std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
906 
907   MVT MTy = LT.second;
908 
909   int ISD = TLI->InstructionOpcodeToISD(Opcode);
910   assert(ISD && "Invalid opcode");
911 
912   static const CostTblEntry SSE2CostTbl[] = {
913     { ISD::SETCC,   MVT::v2i64,   8 },
914     { ISD::SETCC,   MVT::v4i32,   1 },
915     { ISD::SETCC,   MVT::v8i16,   1 },
916     { ISD::SETCC,   MVT::v16i8,   1 },
917   };
918 
919   static const CostTblEntry SSE42CostTbl[] = {
920     { ISD::SETCC,   MVT::v2f64,   1 },
921     { ISD::SETCC,   MVT::v4f32,   1 },
922     { ISD::SETCC,   MVT::v2i64,   1 },
923   };
924 
925   static const CostTblEntry AVX1CostTbl[] = {
926     { ISD::SETCC,   MVT::v4f64,   1 },
927     { ISD::SETCC,   MVT::v8f32,   1 },
928     // AVX1 does not support 8-wide integer compare.
929     { ISD::SETCC,   MVT::v4i64,   4 },
930     { ISD::SETCC,   MVT::v8i32,   4 },
931     { ISD::SETCC,   MVT::v16i16,  4 },
932     { ISD::SETCC,   MVT::v32i8,   4 },
933   };
934 
935   static const CostTblEntry AVX2CostTbl[] = {
936     { ISD::SETCC,   MVT::v4i64,   1 },
937     { ISD::SETCC,   MVT::v8i32,   1 },
938     { ISD::SETCC,   MVT::v16i16,  1 },
939     { ISD::SETCC,   MVT::v32i8,   1 },
940   };
941 
942   static const CostTblEntry AVX512CostTbl[] = {
943     { ISD::SETCC,   MVT::v8i64,   1 },
944     { ISD::SETCC,   MVT::v16i32,  1 },
945     { ISD::SETCC,   MVT::v8f64,   1 },
946     { ISD::SETCC,   MVT::v16f32,  1 },
947   };
948 
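  // For example, per the tables above, a <4 x i64> integer compare costs 4
  // with plain AVX (it is effectively split into 128-bit halves) but only 1
  // with AVX2.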
949   if (ST->hasAVX512())
950     if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
951       return LT.first * Entry->Cost;
952 
953   if (ST->hasAVX2())
954     if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
955       return LT.first * Entry->Cost;
956 
957   if (ST->hasAVX())
958     if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
959       return LT.first * Entry->Cost;
960 
961   if (ST->hasSSE42())
962     if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
963       return LT.first * Entry->Cost;
964 
965   if (ST->hasSSE2())
966     if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
967       return LT.first * Entry->Cost;
968 
969   return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
970 }
971 
972 int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
973                                       ArrayRef<Type *> Tys, FastMathFlags FMF) {
974   // Costs should match the codegen from:
975   // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll
976   // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll
977   // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll
978   // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll
979   // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll
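  // For example, a ctpop of <8 x i32> costs 11 with AVX2 (handled in a single
  // 256-bit register) versus 22 with plain AVX, where the vector is processed
  // as two 128-bit halves.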
980   static const CostTblEntry XOPCostTbl[] = {
981     { ISD::BITREVERSE, MVT::v4i64,   4 },
982     { ISD::BITREVERSE, MVT::v8i32,   4 },
983     { ISD::BITREVERSE, MVT::v16i16,  4 },
984     { ISD::BITREVERSE, MVT::v32i8,   4 },
985     { ISD::BITREVERSE, MVT::v2i64,   1 },
986     { ISD::BITREVERSE, MVT::v4i32,   1 },
987     { ISD::BITREVERSE, MVT::v8i16,   1 },
988     { ISD::BITREVERSE, MVT::v16i8,   1 },
989     { ISD::BITREVERSE, MVT::i64,     3 },
990     { ISD::BITREVERSE, MVT::i32,     3 },
991     { ISD::BITREVERSE, MVT::i16,     3 },
992     { ISD::BITREVERSE, MVT::i8,      3 }
993   };
994   static const CostTblEntry AVX2CostTbl[] = {
995     { ISD::BITREVERSE, MVT::v4i64,   5 },
996     { ISD::BITREVERSE, MVT::v8i32,   5 },
997     { ISD::BITREVERSE, MVT::v16i16,  5 },
998     { ISD::BITREVERSE, MVT::v32i8,   5 },
999     { ISD::BSWAP,      MVT::v4i64,   1 },
1000     { ISD::BSWAP,      MVT::v8i32,   1 },
1001     { ISD::BSWAP,      MVT::v16i16,  1 },
1002     { ISD::CTLZ,       MVT::v4i64,  23 },
1003     { ISD::CTLZ,       MVT::v8i32,  18 },
1004     { ISD::CTLZ,       MVT::v16i16, 14 },
1005     { ISD::CTLZ,       MVT::v32i8,   9 },
1006     { ISD::CTPOP,      MVT::v4i64,   7 },
1007     { ISD::CTPOP,      MVT::v8i32,  11 },
1008     { ISD::CTPOP,      MVT::v16i16,  9 },
1009     { ISD::CTPOP,      MVT::v32i8,   6 },
1010     { ISD::CTTZ,       MVT::v4i64,  10 },
1011     { ISD::CTTZ,       MVT::v8i32,  14 },
1012     { ISD::CTTZ,       MVT::v16i16, 12 },
1013     { ISD::CTTZ,       MVT::v32i8,   9 }
1014   };
1015   static const CostTblEntry AVX1CostTbl[] = {
1016     { ISD::BITREVERSE, MVT::v4i64,  10 },
1017     { ISD::BITREVERSE, MVT::v8i32,  10 },
1018     { ISD::BITREVERSE, MVT::v16i16, 10 },
1019     { ISD::BITREVERSE, MVT::v32i8,  10 },
1020     { ISD::BSWAP,      MVT::v4i64,   4 },
1021     { ISD::BSWAP,      MVT::v8i32,   4 },
1022     { ISD::BSWAP,      MVT::v16i16,  4 },
1023     { ISD::CTLZ,       MVT::v4i64,  46 },
1024     { ISD::CTLZ,       MVT::v8i32,  36 },
1025     { ISD::CTLZ,       MVT::v16i16, 28 },
1026     { ISD::CTLZ,       MVT::v32i8,  18 },
1027     { ISD::CTPOP,      MVT::v4i64,  14 },
1028     { ISD::CTPOP,      MVT::v8i32,  22 },
1029     { ISD::CTPOP,      MVT::v16i16, 18 },
1030     { ISD::CTPOP,      MVT::v32i8,  12 },
1031     { ISD::CTTZ,       MVT::v4i64,  20 },
1032     { ISD::CTTZ,       MVT::v8i32,  28 },
1033     { ISD::CTTZ,       MVT::v16i16, 24 },
1034     { ISD::CTTZ,       MVT::v32i8,  18 },
1035   };
1036   static const CostTblEntry SSSE3CostTbl[] = {
1037     { ISD::BITREVERSE, MVT::v2i64,   5 },
1038     { ISD::BITREVERSE, MVT::v4i32,   5 },
1039     { ISD::BITREVERSE, MVT::v8i16,   5 },
1040     { ISD::BITREVERSE, MVT::v16i8,   5 },
1041     { ISD::BSWAP,      MVT::v2i64,   1 },
1042     { ISD::BSWAP,      MVT::v4i32,   1 },
1043     { ISD::BSWAP,      MVT::v8i16,   1 },
1044     { ISD::CTLZ,       MVT::v2i64,  23 },
1045     { ISD::CTLZ,       MVT::v4i32,  18 },
1046     { ISD::CTLZ,       MVT::v8i16,  14 },
1047     { ISD::CTLZ,       MVT::v16i8,   9 },
1048     { ISD::CTPOP,      MVT::v2i64,   7 },
1049     { ISD::CTPOP,      MVT::v4i32,  11 },
1050     { ISD::CTPOP,      MVT::v8i16,   9 },
1051     { ISD::CTPOP,      MVT::v16i8,   6 },
1052     { ISD::CTTZ,       MVT::v2i64,  10 },
1053     { ISD::CTTZ,       MVT::v4i32,  14 },
1054     { ISD::CTTZ,       MVT::v8i16,  12 },
1055     { ISD::CTTZ,       MVT::v16i8,   9 }
1056   };
1057   static const CostTblEntry SSE2CostTbl[] = {
1058     { ISD::BSWAP,      MVT::v2i64,   7 },
1059     { ISD::BSWAP,      MVT::v4i32,   7 },
1060     { ISD::BSWAP,      MVT::v8i16,   7 },
1061     /* ISD::CTLZ - currently scalarized pre-SSSE3 */
1062     { ISD::CTPOP,      MVT::v2i64,  12 },
1063     { ISD::CTPOP,      MVT::v4i32,  15 },
1064     { ISD::CTPOP,      MVT::v8i16,  13 },
1065     { ISD::CTPOP,      MVT::v16i8,  10 },
1066     { ISD::CTTZ,       MVT::v2i64,  14 },
1067     { ISD::CTTZ,       MVT::v4i32,  18 },
1068     { ISD::CTTZ,       MVT::v8i16,  16 },
1069     { ISD::CTTZ,       MVT::v16i8,  13 }
1070   };
1071 
1072   unsigned ISD = ISD::DELETED_NODE;
1073   switch (IID) {
1074   default:
1075     break;
1076   case Intrinsic::bitreverse:
1077     ISD = ISD::BITREVERSE;
1078     break;
1079   case Intrinsic::bswap:
1080     ISD = ISD::BSWAP;
1081     break;
1082   case Intrinsic::ctlz:
1083     ISD = ISD::CTLZ;
1084     break;
1085   case Intrinsic::ctpop:
1086     ISD = ISD::CTPOP;
1087     break;
1088   case Intrinsic::cttz:
1089     ISD = ISD::CTTZ;
1090     break;
1091   }
1092 
1093   // Legalize the type.
1094   std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);
1095   MVT MTy = LT.second;
1096 
  // Attempt to look up the cost.
1098   if (ST->hasXOP())
1099     if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
1100       return LT.first * Entry->Cost;
1101 
1102   if (ST->hasAVX2())
1103     if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
1104       return LT.first * Entry->Cost;
1105 
1106   if (ST->hasAVX())
1107     if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
1108       return LT.first * Entry->Cost;
1109 
1110   if (ST->hasSSSE3())
1111     if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy))
1112       return LT.first * Entry->Cost;
1113 
1114   if (ST->hasSSE2())
1115     if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
1116       return LT.first * Entry->Cost;
1117 
1118   return BaseT::getIntrinsicInstrCost(IID, RetTy, Tys, FMF);
1119 }
1120 
1121 int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
1122                                       ArrayRef<Value *> Args, FastMathFlags FMF) {
1123   return BaseT::getIntrinsicInstrCost(IID, RetTy, Args, FMF);
1124 }
1125 
1126 int X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
1127   assert(Val->isVectorTy() && "This must be a vector type");
1128 
1129   Type *ScalarType = Val->getScalarType();
1130 
1131   if (Index != -1U) {
1132     // Legalize the type.
1133     std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);
1134 
1135     // This type is legalized to a scalar type.
1136     if (!LT.second.isVector())
1137       return 0;
1138 
1139     // The type may be split. Normalize the index to the new type.
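    // For example, on an SSE2-only target a <8 x i32> is split into two
    // v4i32 halves, so extracting element 6 becomes extracting element
    // 6 % 4 == 2 of one of the halves.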
1140     unsigned Width = LT.second.getVectorNumElements();
1141     Index = Index % Width;
1142 
1143     // Floating point scalars are already located in index #0.
1144     if (ScalarType->isFloatingPointTy() && Index == 0)
1145       return 0;
1146   }
1147 
1148   // Add to the base cost if we know that the extracted element of a vector is
1149   // destined to be moved to and used in the integer register file.
1150   int RegisterFileMoveCost = 0;
1151   if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy())
1152     RegisterFileMoveCost = 1;
1153 
1154   return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost;
1155 }
1156 
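// Estimate the overhead of scalarizing a vector value: the summed cost of an
// insertelement and/or extractelement for every element of the vector.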
1157 int X86TTIImpl::getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
  assert(Ty->isVectorTy() && "Can only scalarize vectors");
1159   int Cost = 0;
1160 
1161   for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
1162     if (Insert)
1163       Cost += getVectorInstrCost(Instruction::InsertElement, Ty, i);
1164     if (Extract)
1165       Cost += getVectorInstrCost(Instruction::ExtractElement, Ty, i);
1166   }
1167 
1168   return Cost;
1169 }
1170 
1171 int X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
1172                                 unsigned AddressSpace) {
1173   // Handle non-power-of-two vectors such as <3 x float>
1174   if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
1175     unsigned NumElem = VTy->getVectorNumElements();
1176 
1177     // Handle a few common cases:
1178     // <3 x float>
1179     if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
1180       // Cost = 64 bit store + extract + 32 bit store.
1181       return 3;
1182 
1183     // <3 x double>
1184     if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
1185       // Cost = 128 bit store + unpack + 64 bit store.
1186       return 3;
1187 
1188     // Assume that all other non-power-of-two numbers are scalarized.
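    // For example, a load of <5 x float> is costed as five scalar loads plus
    // the overhead of inserting each element back into a vector.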
1189     if (!isPowerOf2_32(NumElem)) {
1190       int Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(), Alignment,
1191                                         AddressSpace);
1192       int SplitCost = getScalarizationOverhead(Src, Opcode == Instruction::Load,
1193                                                Opcode == Instruction::Store);
1194       return NumElem * Cost + SplitCost;
1195     }
1196   }
1197 
1198   // Legalize the type.
1199   std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
1200   assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
1201          "Invalid Opcode");
1202 
1203   // Each load/store unit costs 1.
1204   int Cost = LT.first * 1;
1205 
1206   // This isn't exactly right. We're using slow unaligned 32-byte accesses as a
1207   // proxy for a double-pumped AVX memory interface such as on Sandybridge.
1208   if (LT.second.getStoreSize() == 32 && ST->isUnalignedMem32Slow())
1209     Cost *= 2;
1210 
1211   return Cost;
1212 }
1213 
1214 int X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
1215                                       unsigned Alignment,
1216                                       unsigned AddressSpace) {
1217   VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy);
1218   if (!SrcVTy)
    // For a scalar, take the regular memory-op cost without a mask.
1220     return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace);
1221 
1222   unsigned NumElem = SrcVTy->getVectorNumElements();
1223   VectorType *MaskTy =
1224     VectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem);
1225   if ((Opcode == Instruction::Load && !isLegalMaskedLoad(SrcVTy)) ||
1226       (Opcode == Instruction::Store && !isLegalMaskedStore(SrcVTy)) ||
1227       !isPowerOf2_32(NumElem)) {
1228     // Scalarization
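    // Each lane is costed as: extract its mask element, compare it against
    // zero, branch, and perform a scalar load/store, plus the overhead of
    // unpacking/repacking the value vector itself.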
1229     int MaskSplitCost = getScalarizationOverhead(MaskTy, false, true);
1230     int ScalarCompareCost = getCmpSelInstrCost(
1231         Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr);
1232     int BranchCost = getCFInstrCost(Instruction::Br);
1233     int MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);
1234 
1235     int ValueSplitCost = getScalarizationOverhead(
1236         SrcVTy, Opcode == Instruction::Load, Opcode == Instruction::Store);
1237     int MemopCost =
1238         NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
1239                                          Alignment, AddressSpace);
1240     return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
1241   }
1242 
1243   // Legalize the type.
1244   std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
1245   auto VT = TLI->getValueType(DL, SrcVTy);
1246   int Cost = 0;
1247   if (VT.isSimple() && LT.second != VT.getSimpleVT() &&
1248       LT.second.getVectorNumElements() == NumElem)
1249     // Promotion requires expand/truncate for data and a shuffle for mask.
1250     Cost += getShuffleCost(TTI::SK_Alternate, SrcVTy, 0, nullptr) +
1251             getShuffleCost(TTI::SK_Alternate, MaskTy, 0, nullptr);
1252 
1253   else if (LT.second.getVectorNumElements() > NumElem) {
1254     VectorType *NewMaskTy = VectorType::get(MaskTy->getVectorElementType(),
1255                                             LT.second.getVectorNumElements());
    // Expanding requires filling the mask with zeroes.
1257     Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, 0, MaskTy);
1258   }
  if (!ST->hasAVX512())
    return Cost + LT.first * 4; // Each maskmov costs 4.

  // AVX-512 masked load/store is cheaper.
  return Cost + LT.first;
1264 }
1265 
1266 int X86TTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
1267   // Address computations in vectorized code with non-consecutive addresses will
1268   // likely result in more instructions compared to scalar code where the
1269   // computation can more often be merged into the index mode. The resulting
1270   // extra micro-ops can significantly decrease throughput.
1271   unsigned NumVectorInstToHideOverhead = 10;
1272 
1273   if (Ty->isVectorTy() && IsComplex)
1274     return NumVectorInstToHideOverhead;
1275 
1276   return BaseT::getAddressComputationCost(Ty, IsComplex);
1277 }
1278 
1279 int X86TTIImpl::getReductionCost(unsigned Opcode, Type *ValTy,
1280                                  bool IsPairwise) {
1281 
1282   std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
1283 
1284   MVT MTy = LT.second;
1285 
1286   int ISD = TLI->InstructionOpcodeToISD(Opcode);
1287   assert(ISD && "Invalid opcode");
1288 
  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use it as the cost.
1291 
1292   static const CostTblEntry SSE42CostTblPairWise[] = {
1293     { ISD::FADD,  MVT::v2f64,   2 },
1294     { ISD::FADD,  MVT::v4f32,   4 },
1295     { ISD::ADD,   MVT::v2i64,   2 },      // The data reported by the IACA tool is "1.6".
1296     { ISD::ADD,   MVT::v4i32,   3 },      // The data reported by the IACA tool is "3.5".
1297     { ISD::ADD,   MVT::v8i16,   5 },
1298   };
1299 
1300   static const CostTblEntry AVX1CostTblPairWise[] = {
1301     { ISD::FADD,  MVT::v4f32,   4 },
1302     { ISD::FADD,  MVT::v4f64,   5 },
1303     { ISD::FADD,  MVT::v8f32,   7 },
1304     { ISD::ADD,   MVT::v2i64,   1 },      // The data reported by the IACA tool is "1.5".
1305     { ISD::ADD,   MVT::v4i32,   3 },      // The data reported by the IACA tool is "3.5".
1306     { ISD::ADD,   MVT::v4i64,   5 },      // The data reported by the IACA tool is "4.8".
1307     { ISD::ADD,   MVT::v8i16,   5 },
1308     { ISD::ADD,   MVT::v8i32,   5 },
1309   };
1310 
1311   static const CostTblEntry SSE42CostTblNoPairWise[] = {
1312     { ISD::FADD,  MVT::v2f64,   2 },
1313     { ISD::FADD,  MVT::v4f32,   4 },
1314     { ISD::ADD,   MVT::v2i64,   2 },      // The data reported by the IACA tool is "1.6".
1315     { ISD::ADD,   MVT::v4i32,   3 },      // The data reported by the IACA tool is "3.3".
1316     { ISD::ADD,   MVT::v8i16,   4 },      // The data reported by the IACA tool is "4.3".
1317   };
1318 
1319   static const CostTblEntry AVX1CostTblNoPairWise[] = {
1320     { ISD::FADD,  MVT::v4f32,   3 },
1321     { ISD::FADD,  MVT::v4f64,   3 },
1322     { ISD::FADD,  MVT::v8f32,   4 },
1323     { ISD::ADD,   MVT::v2i64,   1 },      // The data reported by the IACA tool is "1.5".
1324     { ISD::ADD,   MVT::v4i32,   3 },      // The data reported by the IACA tool is "2.8".
1325     { ISD::ADD,   MVT::v4i64,   3 },
1326     { ISD::ADD,   MVT::v8i16,   4 },
1327     { ISD::ADD,   MVT::v8i32,   5 },
1328   };
1329 
1330   if (IsPairwise) {
1331     if (ST->hasAVX())
1332       if (const auto *Entry = CostTableLookup(AVX1CostTblPairWise, ISD, MTy))
1333         return LT.first * Entry->Cost;
1334 
1335     if (ST->hasSSE42())
1336       if (const auto *Entry = CostTableLookup(SSE42CostTblPairWise, ISD, MTy))
1337         return LT.first * Entry->Cost;
1338   } else {
1339     if (ST->hasAVX())
1340       if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
1341         return LT.first * Entry->Cost;
1342 
1343     if (ST->hasSSE42())
1344       if (const auto *Entry = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy))
1345         return LT.first * Entry->Cost;
1346   }
1347 
1348   return BaseT::getReductionCost(Opcode, ValTy, IsPairwise);
1349 }
1350 
1351 /// \brief Calculate the cost of materializing a 64-bit value. This helper
1352 /// method might only calculate a fraction of a larger immediate. Therefore it
1353 /// is valid to return a cost of ZERO.
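/// For example, materializing 0 is reported as free, a value that fits in a
/// signed 32-bit immediate costs TCC_Basic, and a full 64-bit value is costed
/// as 2 * TCC_Basic.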
1354 int X86TTIImpl::getIntImmCost(int64_t Val) {
1355   if (Val == 0)
1356     return TTI::TCC_Free;
1357 
1358   if (isInt<32>(Val))
1359     return TTI::TCC_Basic;
1360 
1361   return 2 * TTI::TCC_Basic;
1362 }
1363 
1364 int X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
1365   assert(Ty->isIntegerTy());
1366 
1367   unsigned BitSize = Ty->getPrimitiveSizeInBits();
1368   if (BitSize == 0)
1369     return ~0U;
1370 
  // Never hoist constants larger than 128 bits, because this might lead to
  // incorrect code generation or assertions in codegen.
  // FIXME: Create a cost model for types larger than i128 once the codegen
  // issues have been fixed.
1375   if (BitSize > 128)
1376     return TTI::TCC_Free;
1377 
1378   if (Imm == 0)
1379     return TTI::TCC_Free;
1380 
1381   // Sign-extend all constants to a multiple of 64-bit.
1382   APInt ImmVal = Imm;
1383   if (BitSize & 0x3f)
1384     ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);
1385 
1386   // Split the constant into 64-bit chunks and calculate the cost for each
1387   // chunk.
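  // For example, an i96 immediate is sign-extended to 128 bits and costed as
  // two 64-bit chunks.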
1388   int Cost = 0;
1389   for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
1390     APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
1391     int64_t Val = Tmp.getSExtValue();
1392     Cost += getIntImmCost(Val);
1393   }
1394   // We need at least one instruction to materialize the constant.
1395   return std::max(1, Cost);
1396 }
1397 
1398 int X86TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
1399                               Type *Ty) {
1400   assert(Ty->isIntegerTy());
1401 
1402   unsigned BitSize = Ty->getPrimitiveSizeInBits();
1403   // There is no cost model for constants with a bit size of 0. Return TCC_Free
1404   // here, so that constant hoisting will ignore this constant.
1405   if (BitSize == 0)
1406     return TTI::TCC_Free;
1407 
1408   unsigned ImmIdx = ~0U;
1409   switch (Opcode) {
1410   default:
1411     return TTI::TCC_Free;
1412   case Instruction::GetElementPtr:
1413     // Always hoist the base address of a GetElementPtr. This prevents the
1414     // creation of new constants for every base constant that gets constant
1415     // folded with the offset.
1416     if (Idx == 0)
1417       return 2 * TTI::TCC_Basic;
1418     return TTI::TCC_Free;
1419   case Instruction::Store:
1420     ImmIdx = 0;
1421     break;
1422   case Instruction::ICmp:
    // This is an imperfect hack to prevent constant hoisting of
    // compares that might be trying to check if a 64-bit value fits in
    // 32 bits. The backend can optimize these cases using a right shift by 32.
    // Ideally we would check the compare predicate here. There are also other
    // similar immediates the backend can use shifts for.
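    // For example (illustrative): 'icmp ult i64 %x, 4294967296' checks whether
    // %x fits in 32 bits, and the backend can lower it with a shift instead of
    // materializing the 64-bit immediate.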
1428     if (Idx == 1 && Imm.getBitWidth() == 64) {
1429       uint64_t ImmVal = Imm.getZExtValue();
1430       if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff)
1431         return TTI::TCC_Free;
1432     }
1433     ImmIdx = 1;
1434     break;
1435   case Instruction::And:
1436     // We support 64-bit ANDs with immediates with 32-bits of leading zeroes
1437     // by using a 32-bit operation with implicit zero extension. Detect such
1438     // immediates here as the normal path expects bit 31 to be sign extended.
1439     if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
1440       return TTI::TCC_Free;
1441     LLVM_FALLTHROUGH;
1442   case Instruction::Add:
1443   case Instruction::Sub:
1444   case Instruction::Mul:
1445   case Instruction::UDiv:
1446   case Instruction::SDiv:
1447   case Instruction::URem:
1448   case Instruction::SRem:
1449   case Instruction::Or:
1450   case Instruction::Xor:
1451     ImmIdx = 1;
1452     break;
1453   // Always return TCC_Free for the shift value of a shift instruction.
1454   case Instruction::Shl:
1455   case Instruction::LShr:
1456   case Instruction::AShr:
1457     if (Idx == 1)
1458       return TTI::TCC_Free;
1459     break;
1460   case Instruction::Trunc:
1461   case Instruction::ZExt:
1462   case Instruction::SExt:
1463   case Instruction::IntToPtr:
1464   case Instruction::PtrToInt:
1465   case Instruction::BitCast:
1466   case Instruction::PHI:
1467   case Instruction::Call:
1468   case Instruction::Select:
1469   case Instruction::Ret:
1470   case Instruction::Load:
1471     break;
1472   }
1473 
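  // If the immediate appears in the operand slot that the instruction can
  // encode directly, treat it as free when it costs no more than one basic
  // instruction per 64-bit chunk; otherwise return its full materialization
  // cost so that constant hoisting can consider it.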
1474   if (Idx == ImmIdx) {
1475     int NumConstants = (BitSize + 63) / 64;
1476     int Cost = X86TTIImpl::getIntImmCost(Imm, Ty);
1477     return (Cost <= NumConstants * TTI::TCC_Basic)
1478                ? static_cast<int>(TTI::TCC_Free)
1479                : Cost;
1480   }
1481 
1482   return X86TTIImpl::getIntImmCost(Imm, Ty);
1483 }
1484 
1485 int X86TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
1486                               Type *Ty) {
1487   assert(Ty->isIntegerTy());
1488 
1489   unsigned BitSize = Ty->getPrimitiveSizeInBits();
1490   // There is no cost model for constants with a bit size of 0. Return TCC_Free
1491   // here, so that constant hoisting will ignore this constant.
1492   if (BitSize == 0)
1493     return TTI::TCC_Free;
1494 
1495   switch (IID) {
1496   default:
1497     return TTI::TCC_Free;
1498   case Intrinsic::sadd_with_overflow:
1499   case Intrinsic::uadd_with_overflow:
1500   case Intrinsic::ssub_with_overflow:
1501   case Intrinsic::usub_with_overflow:
1502   case Intrinsic::smul_with_overflow:
1503   case Intrinsic::umul_with_overflow:
1504     if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
1505       return TTI::TCC_Free;
1506     break;
1507   case Intrinsic::experimental_stackmap:
1508     if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
1509       return TTI::TCC_Free;
1510     break;
1511   case Intrinsic::experimental_patchpoint_void:
1512   case Intrinsic::experimental_patchpoint_i64:
1513     if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
1514       return TTI::TCC_Free;
1515     break;
1516   }
1517   return X86TTIImpl::getIntImmCost(Imm, Ty);
1518 }
1519 
// Return an average cost of a Gather / Scatter instruction; this may be
// improved later.
1521 int X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy, Value *Ptr,
1522                                 unsigned Alignment, unsigned AddressSpace) {
1523 
1524   assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost");
1525   unsigned VF = SrcVTy->getVectorNumElements();
1526 
  // Try to reduce the index size from 64 bits (the default for GEP) to 32
  // bits. This is essential for VF == 16: if the index can't be reduced to 32
  // bits, the operation will use 16 x 64-bit indices, which do not fit in a
  // zmm register and need to be split. Also check that the base pointer is the
  // same for all lanes, and that there's at most one variable index.
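  // For example (illustrative), with VF == 16 and i64 indices the gather needs
  // 16 x 64 = 1024 bits of index, i.e. two zmm registers, whereas i32 indices
  // fit in a single zmm register.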
1532   auto getIndexSizeInBits = [](Value *Ptr, const DataLayout& DL) {
1533     unsigned IndexSize = DL.getPointerSizeInBits();
1534     GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
1535     if (IndexSize < 64 || !GEP)
1536       return IndexSize;
1537 
1538     unsigned NumOfVarIndices = 0;
1539     Value *Ptrs = GEP->getPointerOperand();
1540     if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs))
1541       return IndexSize;
1542     for (unsigned i = 1; i < GEP->getNumOperands(); ++i) {
1543       if (isa<Constant>(GEP->getOperand(i)))
1544         continue;
1545       Type *IndxTy = GEP->getOperand(i)->getType();
1546       if (IndxTy->isVectorTy())
1547         IndxTy = IndxTy->getVectorElementType();
1548       if ((IndxTy->getPrimitiveSizeInBits() == 64 &&
1549           !isa<SExtInst>(GEP->getOperand(i))) ||
1550          ++NumOfVarIndices > 1)
1551         return IndexSize; // 64
1552     }
1553     return (unsigned)32;
1554   };
  // Try to reduce IndexSize to 32 bits for vectors of 16 or more elements.
  // By default the IndexSize equals the pointer size.
1559   unsigned IndexSize = (VF >= 16) ? getIndexSizeInBits(Ptr, DL) :
1560     DL.getPointerSizeInBits();
1561 
1562   Type *IndexVTy = VectorType::get(IntegerType::get(SrcVTy->getContext(),
1563                                                     IndexSize), VF);
1564   std::pair<int, MVT> IdxsLT = TLI->getTypeLegalizationCost(DL, IndexVTy);
1565   std::pair<int, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, SrcVTy);
1566   int SplitFactor = std::max(IdxsLT.first, SrcLT.first);
1567   if (SplitFactor > 1) {
    // The data or index vector requires splitting: cost one split part and
    // scale by the split factor.
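    // For example (illustrative), a v32i32 gather on AVX-512 legalizes into
    // two v16i32 operations, so we cost one v16i32 gather and scale by a
    // split factor of 2.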
    Type *SplitSrcTy =
        VectorType::get(SrcVTy->getScalarType(), VF / SplitFactor);
1570     return SplitFactor * getGSVectorCost(Opcode, SplitSrcTy, Ptr, Alignment,
1571                                          AddressSpace);
1572   }
1573 
  // The gather / scatter cost is given by Intel architects. It is a rough
  // number since we are looking at one instruction at a time.
1576   const int GSOverhead = 2;
1577   return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
1578                                            Alignment, AddressSpace);
1579 }
1580 
1581 /// Return the cost of full scalarization of gather / scatter operation.
1582 ///
1583 /// Opcode - Load or Store instruction.
1584 /// SrcVTy - The type of the data vector that should be gathered or scattered.
1585 /// VariableMask - The mask is non-constant at compile time.
1586 /// Alignment - Alignment for one element.
/// AddressSpace - The address space of the pointer(s).
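///
/// The result is the sum of the scalar load/store costs, the mask unpacking
/// cost (extract, compare and branch per lane) when the mask is variable, and
/// the cost of inserting/extracting each element between the vector and the
/// scalar domain.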
1588 ///
1589 int X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy,
1590                                 bool VariableMask, unsigned Alignment,
1591                                 unsigned AddressSpace) {
1592   unsigned VF = SrcVTy->getVectorNumElements();
1593 
1594   int MaskUnpackCost = 0;
1595   if (VariableMask) {
1596     VectorType *MaskTy =
1597       VectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF);
1598     MaskUnpackCost = getScalarizationOverhead(MaskTy, false, true);
1599     int ScalarCompareCost =
1600       getCmpSelInstrCost(Instruction::ICmp, Type::getInt1Ty(SrcVTy->getContext()),
1601                          nullptr);
1602     int BranchCost = getCFInstrCost(Instruction::Br);
1603     MaskUnpackCost += VF * (BranchCost + ScalarCompareCost);
1604   }
1605 
1606   // The cost of the scalar loads/stores.
1607   int MemoryOpCost = VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
1608                                           Alignment, AddressSpace);
1609 
1610   int InsertExtractCost = 0;
1611   if (Opcode == Instruction::Load)
1612     for (unsigned i = 0; i < VF; ++i)
1613       // Add the cost of inserting each scalar load into the vector
1614       InsertExtractCost +=
1615         getVectorInstrCost(Instruction::InsertElement, SrcVTy, i);
1616   else
1617     for (unsigned i = 0; i < VF; ++i)
1618       // Add the cost of extracting each element out of the data vector
1619       InsertExtractCost +=
1620         getVectorInstrCost(Instruction::ExtractElement, SrcVTy, i);
1621 
1622   return MemoryOpCost + MaskUnpackCost + InsertExtractCost;
1623 }
1624 
1625 /// Calculate the cost of Gather / Scatter operation
1626 int X86TTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *SrcVTy,
1627                                        Value *Ptr, bool VariableMask,
1628                                        unsigned Alignment) {
1629   assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter");
1630   unsigned VF = SrcVTy->getVectorNumElements();
1631   PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
1632   if (!PtrTy && Ptr->getType()->isVectorTy())
1633     PtrTy = dyn_cast<PointerType>(Ptr->getType()->getVectorElementType());
1634   assert(PtrTy && "Unexpected type for Ptr argument");
1635   unsigned AddressSpace = PtrTy->getAddressSpace();
1636 
1637   bool Scalarize = false;
1638   if ((Opcode == Instruction::Load && !isLegalMaskedGather(SrcVTy)) ||
1639       (Opcode == Instruction::Store && !isLegalMaskedScatter(SrcVTy)))
1640     Scalarize = true;
  // Gather / Scatter for vectors of 2 elements is not profitable on KNL / SKX.
  // A gather/scatter instruction for 4-element vectors does not exist on KNL.
  // We could widen it to 8 elements, but zeroing the upper bits of the mask
  // vector adds more instructions. For now we report the scalar cost for
  // 4-element vectors on KNL.
  // TODO: Check whether the gather/scatter instruction is better in the
  // VariableMask case.
1647   if (VF == 2 || (VF == 4 && !ST->hasVLX()))
1648     Scalarize = true;
1649 
1650   if (Scalarize)
    return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment,
                           AddressSpace);
1652 
1653   return getGSVectorCost(Opcode, SrcVTy, Ptr, Alignment, AddressSpace);
1654 }
1655 
1656 bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy) {
1657   Type *ScalarTy = DataTy->getScalarType();
1658   int DataWidth = isa<PointerType>(ScalarTy) ?
1659     DL.getPointerSizeInBits() : ScalarTy->getPrimitiveSizeInBits();
1660 
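  // Masked loads/stores of 32- and 64-bit elements are available starting with
  // AVX (vmaskmovps/vmaskmovpd and the AVX2 integer forms); 8- and 16-bit
  // elements additionally require AVX-512 BWI.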
1661   return ((DataWidth == 32 || DataWidth == 64) && ST->hasAVX()) ||
1662          ((DataWidth == 8 || DataWidth == 16) && ST->hasBWI());
1663 }
1664 
1665 bool X86TTIImpl::isLegalMaskedStore(Type *DataType) {
1666   return isLegalMaskedLoad(DataType);
1667 }
1668 
1669 bool X86TTIImpl::isLegalMaskedGather(Type *DataTy) {
  // This function is currently called in two cases: from the Loop Vectorizer
  // and from the Scalarizer.
  // When the Loop Vectorizer asks about the legality of the feature, the
  // vectorization factor is not calculated yet, so it sends a scalar type and
  // the decision is based on the width of the scalar element.
  // Later on, the cost model will estimate the usage of this intrinsic based
  // on the vector type.
  // The Scalarizer asks again about legality, this time with a vector type.
  // In this case we can reject non-power-of-2 vectors.
1680   if (isa<VectorType>(DataTy) && !isPowerOf2_32(DataTy->getVectorNumElements()))
1681     return false;
1682   Type *ScalarTy = DataTy->getScalarType();
1683   int DataWidth = isa<PointerType>(ScalarTy) ?
1684     DL.getPointerSizeInBits() : ScalarTy->getPrimitiveSizeInBits();
1685 
1686   // AVX-512 allows gather and scatter
1687   return (DataWidth == 32 || DataWidth == 64) && ST->hasAVX512();
1688 }
1689 
1690 bool X86TTIImpl::isLegalMaskedScatter(Type *DataType) {
1691   return isLegalMaskedGather(DataType);
1692 }
1693 
1694 bool X86TTIImpl::areInlineCompatible(const Function *Caller,
1695                                      const Function *Callee) const {
1696   const TargetMachine &TM = getTLI()->getTargetMachine();
1697 
  // Treat this as a subset check on the subtarget features.
1699   const FeatureBitset &CallerBits =
1700       TM.getSubtargetImpl(*Caller)->getFeatureBits();
1701   const FeatureBitset &CalleeBits =
1702       TM.getSubtargetImpl(*Callee)->getFeatureBits();
1703 
1704   // FIXME: This is likely too limiting as it will include subtarget features
1705   // that we might not care about for inlining, but it is conservatively
1706   // correct.
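  // For example, a callee compiled with AVX2 enabled is not considered
  // inline-compatible with a caller that only guarantees SSE4.2, because the
  // callee's feature bits are not a subset of the caller's.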
1707   return (CallerBits & CalleeBits) == CalleeBits;
1708 }
1709