1 //===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements a TargetTransformInfo analysis pass specific to the
10 /// X86 target machine. It uses the target's detailed information to provide
11 /// more precise answers to certain TTI queries, while letting the target
12 /// independent and default TTI implementations handle the rest.
13 ///
14 //===----------------------------------------------------------------------===//
15 /// About Cost Model numbers used below it's necessary to say the following:
16 /// the numbers correspond to some "generic" X86 CPU instead of usage of
17 /// concrete CPU model. Usually the numbers correspond to CPU where the feature
18 /// apeared at the first time. For example, if we do Subtarget.hasSSE42() in
19 /// the lookups below the cost is based on Nehalem as that was the first CPU
20 /// to support that feature level and thus has most likely the worst case cost.
21 /// Some examples of other technologies/CPUs:
22 ///   SSE 3   - Pentium4 / Athlon64
23 ///   SSE 4.1 - Penryn
24 ///   SSE 4.2 - Nehalem
25 ///   AVX     - Sandy Bridge
26 ///   AVX2    - Haswell
27 ///   AVX-512 - Xeon Phi / Skylake
28 /// And some examples of instruction target dependent costs (latency)
29 ///                   divss     sqrtss          rsqrtss
30 ///   AMD K7            11-16     19              3
31 ///   Piledriver        9-24      13-15           5
32 ///   Jaguar            14        16              2
33 ///   Pentium II,III    18        30              2
34 ///   Nehalem           7-14      7-18            3
35 ///   Haswell           10-13     11              5
36 /// TODO: Develop and implement  the target dependent cost model and
37 /// specialize cost numbers for different Cost Model Targets such as throughput,
38 /// code size, latency and uop count.
39 //===----------------------------------------------------------------------===//
40 
41 #include "X86TargetTransformInfo.h"
42 #include "llvm/Analysis/TargetTransformInfo.h"
43 #include "llvm/CodeGen/BasicTTIImpl.h"
44 #include "llvm/CodeGen/CostTable.h"
45 #include "llvm/CodeGen/TargetLowering.h"
46 #include "llvm/IR/IntrinsicInst.h"
47 #include "llvm/Support/Debug.h"
48 
49 using namespace llvm;
50 
51 #define DEBUG_TYPE "x86tti"
52 
53 //===----------------------------------------------------------------------===//
54 //
55 // X86 cost model.
56 //
57 //===----------------------------------------------------------------------===//
58 
59 TargetTransformInfo::PopcntSupportKind
60 X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
61   assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
62   // TODO: Currently the __builtin_popcount() implementation using SSE3
63   //   instructions is inefficient. Once the problem is fixed, we should
64   //   call ST->hasSSE3() instead of ST->hasPOPCNT().
65   return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
66 }
67 
68 llvm::Optional<unsigned> X86TTIImpl::getCacheSize(
69   TargetTransformInfo::CacheLevel Level) const {
70   switch (Level) {
71   case TargetTransformInfo::CacheLevel::L1D:
72     //   - Penryn
73     //   - Nehalem
74     //   - Westmere
75     //   - Sandy Bridge
76     //   - Ivy Bridge
77     //   - Haswell
78     //   - Broadwell
79     //   - Skylake
80     //   - Kabylake
81     return 32 * 1024;  //  32 KByte
82   case TargetTransformInfo::CacheLevel::L2D:
83     //   - Penryn
84     //   - Nehalem
85     //   - Westmere
86     //   - Sandy Bridge
87     //   - Ivy Bridge
88     //   - Haswell
89     //   - Broadwell
90     //   - Skylake
91     //   - Kabylake
92     return 256 * 1024; // 256 KByte
93   }
94 
95   llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
96 }
97 
98 llvm::Optional<unsigned> X86TTIImpl::getCacheAssociativity(
99   TargetTransformInfo::CacheLevel Level) const {
100   //   - Penryn
101   //   - Nehalem
102   //   - Westmere
103   //   - Sandy Bridge
104   //   - Ivy Bridge
105   //   - Haswell
106   //   - Broadwell
107   //   - Skylake
108   //   - Kabylake
109   switch (Level) {
110   case TargetTransformInfo::CacheLevel::L1D:
111     LLVM_FALLTHROUGH;
112   case TargetTransformInfo::CacheLevel::L2D:
113     return 8;
114   }
115 
116   llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
117 }
118 
119 unsigned X86TTIImpl::getNumberOfRegisters(unsigned ClassID) const {
120   bool Vector = (ClassID == 1);
121   if (Vector && !ST->hasSSE1())
122     return 0;
123 
124   if (ST->is64Bit()) {
125     if (Vector && ST->hasAVX512())
126       return 32;
127     return 16;
128   }
129   return 8;
130 }
131 
132 TypeSize
133 X86TTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
134   unsigned PreferVectorWidth = ST->getPreferVectorWidth();
135   switch (K) {
136   case TargetTransformInfo::RGK_Scalar:
137     return TypeSize::getFixed(ST->is64Bit() ? 64 : 32);
138   case TargetTransformInfo::RGK_FixedWidthVector:
139     if (ST->hasAVX512() && PreferVectorWidth >= 512)
140       return TypeSize::getFixed(512);
141     if (ST->hasAVX() && PreferVectorWidth >= 256)
142       return TypeSize::getFixed(256);
143     if (ST->hasSSE1() && PreferVectorWidth >= 128)
144       return TypeSize::getFixed(128);
145     return TypeSize::getFixed(0);
146   case TargetTransformInfo::RGK_ScalableVector:
147     return TypeSize::getScalable(0);
148   }
149 
150   llvm_unreachable("Unsupported register kind");
151 }
152 
153 unsigned X86TTIImpl::getLoadStoreVecRegBitWidth(unsigned) const {
154   return getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
155       .getFixedSize();
156 }
157 
158 unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
159   // If the loop will not be vectorized, don't interleave the loop.
160   // Let regular unroll to unroll the loop, which saves the overflow
161   // check and memory check cost.
162   if (VF == 1)
163     return 1;
164 
165   if (ST->isAtom())
166     return 1;
167 
168   // Sandybridge and Haswell have multiple execution ports and pipelined
169   // vector units.
170   if (ST->hasAVX())
171     return 4;
172 
173   return 2;
174 }
175 
176 InstructionCost X86TTIImpl::getArithmeticInstrCost(
177     unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
178     TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
179     TTI::OperandValueProperties Opd1PropInfo,
180     TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
181     const Instruction *CxtI) {
182   // TODO: Handle more cost kinds.
183   if (CostKind != TTI::TCK_RecipThroughput)
184     return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
185                                          Op2Info, Opd1PropInfo,
186                                          Opd2PropInfo, Args, CxtI);
187 
188   // vXi8 multiplications are always promoted to vXi16.
189   if (Opcode == Instruction::Mul && Ty->isVectorTy() &&
190       Ty->getScalarSizeInBits() == 8) {
191     Type *WideVecTy =
192         VectorType::getExtendedElementVectorType(cast<VectorType>(Ty));
193     return getCastInstrCost(Instruction::ZExt, WideVecTy, Ty,
194                             TargetTransformInfo::CastContextHint::None,
195                             CostKind) +
196            getCastInstrCost(Instruction::Trunc, Ty, WideVecTy,
197                             TargetTransformInfo::CastContextHint::None,
198                             CostKind) +
199            getArithmeticInstrCost(Opcode, WideVecTy, CostKind, Op1Info, Op2Info,
200                                   Opd1PropInfo, Opd2PropInfo);
201   }
202 
203   // Legalize the type.
204   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
205 
206   int ISD = TLI->InstructionOpcodeToISD(Opcode);
207   assert(ISD && "Invalid opcode");
208 
209   static const CostTblEntry GLMCostTable[] = {
210     { ISD::FDIV,  MVT::f32,   18 }, // divss
211     { ISD::FDIV,  MVT::v4f32, 35 }, // divps
212     { ISD::FDIV,  MVT::f64,   33 }, // divsd
213     { ISD::FDIV,  MVT::v2f64, 65 }, // divpd
214   };
215 
216   if (ST->useGLMDivSqrtCosts())
217     if (const auto *Entry = CostTableLookup(GLMCostTable, ISD,
218                                             LT.second))
219       return LT.first * Entry->Cost;
220 
221   static const CostTblEntry SLMCostTable[] = {
222     { ISD::MUL,   MVT::v4i32, 11 }, // pmulld
223     { ISD::MUL,   MVT::v8i16, 2  }, // pmullw
224     { ISD::FMUL,  MVT::f64,   2  }, // mulsd
225     { ISD::FMUL,  MVT::v2f64, 4  }, // mulpd
226     { ISD::FMUL,  MVT::v4f32, 2  }, // mulps
227     { ISD::FDIV,  MVT::f32,   17 }, // divss
228     { ISD::FDIV,  MVT::v4f32, 39 }, // divps
229     { ISD::FDIV,  MVT::f64,   32 }, // divsd
230     { ISD::FDIV,  MVT::v2f64, 69 }, // divpd
231     { ISD::FADD,  MVT::v2f64, 2  }, // addpd
232     { ISD::FSUB,  MVT::v2f64, 2  }, // subpd
233     // v2i64/v4i64 mul is custom lowered as a series of long:
234     // multiplies(3), shifts(3) and adds(2)
235     // slm muldq version throughput is 2 and addq throughput 4
236     // thus: 3X2 (muldq throughput) + 3X1 (shift throughput) +
237     //       3X4 (addq throughput) = 17
238     { ISD::MUL,   MVT::v2i64, 17 },
239     // slm addq\subq throughput is 4
240     { ISD::ADD,   MVT::v2i64, 4  },
241     { ISD::SUB,   MVT::v2i64, 4  },
242   };
243 
244   if (ST->isSLM()) {
245     if (Args.size() == 2 && ISD == ISD::MUL && LT.second == MVT::v4i32) {
246       // Check if the operands can be shrinked into a smaller datatype.
247       bool Op1Signed = false;
248       unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
249       bool Op2Signed = false;
250       unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);
251 
252       bool SignedMode = Op1Signed || Op2Signed;
253       unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);
254 
255       if (OpMinSize <= 7)
256         return LT.first * 3; // pmullw/sext
257       if (!SignedMode && OpMinSize <= 8)
258         return LT.first * 3; // pmullw/zext
259       if (OpMinSize <= 15)
260         return LT.first * 5; // pmullw/pmulhw/pshuf
261       if (!SignedMode && OpMinSize <= 16)
262         return LT.first * 5; // pmullw/pmulhw/pshuf
263     }
264 
265     if (const auto *Entry = CostTableLookup(SLMCostTable, ISD,
266                                             LT.second)) {
267       return LT.first * Entry->Cost;
268     }
269   }
270 
271   if ((ISD == ISD::SDIV || ISD == ISD::SREM || ISD == ISD::UDIV ||
272        ISD == ISD::UREM) &&
273       (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
274        Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
275       Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
276     if (ISD == ISD::SDIV || ISD == ISD::SREM) {
277       // On X86, vector signed division by constants power-of-two are
278       // normally expanded to the sequence SRA + SRL + ADD + SRA.
279       // The OperandValue properties may not be the same as that of the previous
280       // operation; conservatively assume OP_None.
281       InstructionCost Cost =
282           2 * getArithmeticInstrCost(Instruction::AShr, Ty, CostKind, Op1Info,
283                                      Op2Info, TargetTransformInfo::OP_None,
284                                      TargetTransformInfo::OP_None);
285       Cost += getArithmeticInstrCost(Instruction::LShr, Ty, CostKind, Op1Info,
286                                      Op2Info,
287                                      TargetTransformInfo::OP_None,
288                                      TargetTransformInfo::OP_None);
289       Cost += getArithmeticInstrCost(Instruction::Add, Ty, CostKind, Op1Info,
290                                      Op2Info,
291                                      TargetTransformInfo::OP_None,
292                                      TargetTransformInfo::OP_None);
293 
294       if (ISD == ISD::SREM) {
295         // For SREM: (X % C) is the equivalent of (X - (X/C)*C)
296         Cost += getArithmeticInstrCost(Instruction::Mul, Ty, CostKind, Op1Info,
297                                        Op2Info);
298         Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind, Op1Info,
299                                        Op2Info);
300       }
301 
302       return Cost;
303     }
304 
305     // Vector unsigned division/remainder will be simplified to shifts/masks.
306     if (ISD == ISD::UDIV)
307       return getArithmeticInstrCost(Instruction::LShr, Ty, CostKind,
308                                     Op1Info, Op2Info,
309                                     TargetTransformInfo::OP_None,
310                                     TargetTransformInfo::OP_None);
311 
312     else // UREM
313       return getArithmeticInstrCost(Instruction::And, Ty, CostKind,
314                                     Op1Info, Op2Info,
315                                     TargetTransformInfo::OP_None,
316                                     TargetTransformInfo::OP_None);
317   }
318 
319   static const CostTblEntry AVX512BWUniformConstCostTable[] = {
320     { ISD::SHL,  MVT::v64i8,   2 }, // psllw + pand.
321     { ISD::SRL,  MVT::v64i8,   2 }, // psrlw + pand.
322     { ISD::SRA,  MVT::v64i8,   4 }, // psrlw, pand, pxor, psubb.
323   };
324 
325   if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
326       ST->hasBWI()) {
327     if (const auto *Entry = CostTableLookup(AVX512BWUniformConstCostTable, ISD,
328                                             LT.second))
329       return LT.first * Entry->Cost;
330   }
331 
332   static const CostTblEntry AVX512UniformConstCostTable[] = {
333     { ISD::SRA,  MVT::v2i64,   1 },
334     { ISD::SRA,  MVT::v4i64,   1 },
335     { ISD::SRA,  MVT::v8i64,   1 },
336 
337     { ISD::SHL,  MVT::v64i8,   4 }, // psllw + pand.
338     { ISD::SRL,  MVT::v64i8,   4 }, // psrlw + pand.
339     { ISD::SRA,  MVT::v64i8,   8 }, // psrlw, pand, pxor, psubb.
340 
341     { ISD::SDIV, MVT::v16i32,  6 }, // pmuludq sequence
342     { ISD::SREM, MVT::v16i32,  8 }, // pmuludq+mul+sub sequence
343     { ISD::UDIV, MVT::v16i32,  5 }, // pmuludq sequence
344     { ISD::UREM, MVT::v16i32,  7 }, // pmuludq+mul+sub sequence
345   };
346 
347   if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
348       ST->hasAVX512()) {
349     if (const auto *Entry = CostTableLookup(AVX512UniformConstCostTable, ISD,
350                                             LT.second))
351       return LT.first * Entry->Cost;
352   }
353 
354   static const CostTblEntry AVX2UniformConstCostTable[] = {
355     { ISD::SHL,  MVT::v32i8,   2 }, // psllw + pand.
356     { ISD::SRL,  MVT::v32i8,   2 }, // psrlw + pand.
357     { ISD::SRA,  MVT::v32i8,   4 }, // psrlw, pand, pxor, psubb.
358 
359     { ISD::SRA,  MVT::v4i64,   4 }, // 2 x psrad + shuffle.
360 
361     { ISD::SDIV, MVT::v8i32,   6 }, // pmuludq sequence
362     { ISD::SREM, MVT::v8i32,   8 }, // pmuludq+mul+sub sequence
363     { ISD::UDIV, MVT::v8i32,   5 }, // pmuludq sequence
364     { ISD::UREM, MVT::v8i32,   7 }, // pmuludq+mul+sub sequence
365   };
366 
367   if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
368       ST->hasAVX2()) {
369     if (const auto *Entry = CostTableLookup(AVX2UniformConstCostTable, ISD,
370                                             LT.second))
371       return LT.first * Entry->Cost;
372   }
373 
374   static const CostTblEntry SSE2UniformConstCostTable[] = {
375     { ISD::SHL,  MVT::v16i8,     2 }, // psllw + pand.
376     { ISD::SRL,  MVT::v16i8,     2 }, // psrlw + pand.
377     { ISD::SRA,  MVT::v16i8,     4 }, // psrlw, pand, pxor, psubb.
378 
379     { ISD::SHL,  MVT::v32i8,   4+2 }, // 2*(psllw + pand) + split.
380     { ISD::SRL,  MVT::v32i8,   4+2 }, // 2*(psrlw + pand) + split.
381     { ISD::SRA,  MVT::v32i8,   8+2 }, // 2*(psrlw, pand, pxor, psubb) + split.
382 
383     { ISD::SDIV, MVT::v8i32,  12+2 }, // 2*pmuludq sequence + split.
384     { ISD::SREM, MVT::v8i32,  16+2 }, // 2*pmuludq+mul+sub sequence + split.
385     { ISD::SDIV, MVT::v4i32,     6 }, // pmuludq sequence
386     { ISD::SREM, MVT::v4i32,     8 }, // pmuludq+mul+sub sequence
387     { ISD::UDIV, MVT::v8i32,  10+2 }, // 2*pmuludq sequence + split.
388     { ISD::UREM, MVT::v8i32,  14+2 }, // 2*pmuludq+mul+sub sequence + split.
389     { ISD::UDIV, MVT::v4i32,     5 }, // pmuludq sequence
390     { ISD::UREM, MVT::v4i32,     7 }, // pmuludq+mul+sub sequence
391   };
392 
393   // XOP has faster vXi8 shifts.
394   if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
395       ST->hasSSE2() && !ST->hasXOP()) {
396     if (const auto *Entry =
397             CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second))
398       return LT.first * Entry->Cost;
399   }
400 
401   static const CostTblEntry AVX512BWConstCostTable[] = {
402     { ISD::SDIV, MVT::v64i8,  14 }, // 2*ext+2*pmulhw sequence
403     { ISD::SREM, MVT::v64i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
404     { ISD::UDIV, MVT::v64i8,  14 }, // 2*ext+2*pmulhw sequence
405     { ISD::UREM, MVT::v64i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
406     { ISD::SDIV, MVT::v32i16,  6 }, // vpmulhw sequence
407     { ISD::SREM, MVT::v32i16,  8 }, // vpmulhw+mul+sub sequence
408     { ISD::UDIV, MVT::v32i16,  6 }, // vpmulhuw sequence
409     { ISD::UREM, MVT::v32i16,  8 }, // vpmulhuw+mul+sub sequence
410   };
411 
412   if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
413        Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
414       ST->hasBWI()) {
415     if (const auto *Entry =
416             CostTableLookup(AVX512BWConstCostTable, ISD, LT.second))
417       return LT.first * Entry->Cost;
418   }
419 
420   static const CostTblEntry AVX512ConstCostTable[] = {
421     { ISD::SDIV, MVT::v16i32, 15 }, // vpmuldq sequence
422     { ISD::SREM, MVT::v16i32, 17 }, // vpmuldq+mul+sub sequence
423     { ISD::UDIV, MVT::v16i32, 15 }, // vpmuludq sequence
424     { ISD::UREM, MVT::v16i32, 17 }, // vpmuludq+mul+sub sequence
425     { ISD::SDIV, MVT::v64i8,  28 }, // 4*ext+4*pmulhw sequence
426     { ISD::SREM, MVT::v64i8,  32 }, // 4*ext+4*pmulhw+mul+sub sequence
427     { ISD::UDIV, MVT::v64i8,  28 }, // 4*ext+4*pmulhw sequence
428     { ISD::UREM, MVT::v64i8,  32 }, // 4*ext+4*pmulhw+mul+sub sequence
429     { ISD::SDIV, MVT::v32i16, 12 }, // 2*vpmulhw sequence
430     { ISD::SREM, MVT::v32i16, 16 }, // 2*vpmulhw+mul+sub sequence
431     { ISD::UDIV, MVT::v32i16, 12 }, // 2*vpmulhuw sequence
432     { ISD::UREM, MVT::v32i16, 16 }, // 2*vpmulhuw+mul+sub sequence
433   };
434 
435   if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
436        Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
437       ST->hasAVX512()) {
438     if (const auto *Entry =
439             CostTableLookup(AVX512ConstCostTable, ISD, LT.second))
440       return LT.first * Entry->Cost;
441   }
442 
443   static const CostTblEntry AVX2ConstCostTable[] = {
444     { ISD::SDIV, MVT::v32i8,  14 }, // 2*ext+2*pmulhw sequence
445     { ISD::SREM, MVT::v32i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
446     { ISD::UDIV, MVT::v32i8,  14 }, // 2*ext+2*pmulhw sequence
447     { ISD::UREM, MVT::v32i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
448     { ISD::SDIV, MVT::v16i16,  6 }, // vpmulhw sequence
449     { ISD::SREM, MVT::v16i16,  8 }, // vpmulhw+mul+sub sequence
450     { ISD::UDIV, MVT::v16i16,  6 }, // vpmulhuw sequence
451     { ISD::UREM, MVT::v16i16,  8 }, // vpmulhuw+mul+sub sequence
452     { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
453     { ISD::SREM, MVT::v8i32,  19 }, // vpmuldq+mul+sub sequence
454     { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
455     { ISD::UREM, MVT::v8i32,  19 }, // vpmuludq+mul+sub sequence
456   };
457 
458   if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
459        Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
460       ST->hasAVX2()) {
461     if (const auto *Entry = CostTableLookup(AVX2ConstCostTable, ISD, LT.second))
462       return LT.first * Entry->Cost;
463   }
464 
465   static const CostTblEntry SSE2ConstCostTable[] = {
466     { ISD::SDIV, MVT::v32i8,  28+2 }, // 4*ext+4*pmulhw sequence + split.
467     { ISD::SREM, MVT::v32i8,  32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
468     { ISD::SDIV, MVT::v16i8,    14 }, // 2*ext+2*pmulhw sequence
469     { ISD::SREM, MVT::v16i8,    16 }, // 2*ext+2*pmulhw+mul+sub sequence
470     { ISD::UDIV, MVT::v32i8,  28+2 }, // 4*ext+4*pmulhw sequence + split.
471     { ISD::UREM, MVT::v32i8,  32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
472     { ISD::UDIV, MVT::v16i8,    14 }, // 2*ext+2*pmulhw sequence
473     { ISD::UREM, MVT::v16i8,    16 }, // 2*ext+2*pmulhw+mul+sub sequence
474     { ISD::SDIV, MVT::v16i16, 12+2 }, // 2*pmulhw sequence + split.
475     { ISD::SREM, MVT::v16i16, 16+2 }, // 2*pmulhw+mul+sub sequence + split.
476     { ISD::SDIV, MVT::v8i16,     6 }, // pmulhw sequence
477     { ISD::SREM, MVT::v8i16,     8 }, // pmulhw+mul+sub sequence
478     { ISD::UDIV, MVT::v16i16, 12+2 }, // 2*pmulhuw sequence + split.
479     { ISD::UREM, MVT::v16i16, 16+2 }, // 2*pmulhuw+mul+sub sequence + split.
480     { ISD::UDIV, MVT::v8i16,     6 }, // pmulhuw sequence
481     { ISD::UREM, MVT::v8i16,     8 }, // pmulhuw+mul+sub sequence
482     { ISD::SDIV, MVT::v8i32,  38+2 }, // 2*pmuludq sequence + split.
483     { ISD::SREM, MVT::v8i32,  48+2 }, // 2*pmuludq+mul+sub sequence + split.
484     { ISD::SDIV, MVT::v4i32,    19 }, // pmuludq sequence
485     { ISD::SREM, MVT::v4i32,    24 }, // pmuludq+mul+sub sequence
486     { ISD::UDIV, MVT::v8i32,  30+2 }, // 2*pmuludq sequence + split.
487     { ISD::UREM, MVT::v8i32,  40+2 }, // 2*pmuludq+mul+sub sequence + split.
488     { ISD::UDIV, MVT::v4i32,    15 }, // pmuludq sequence
489     { ISD::UREM, MVT::v4i32,    20 }, // pmuludq+mul+sub sequence
490   };
491 
492   if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
493        Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
494       ST->hasSSE2()) {
495     // pmuldq sequence.
496     if (ISD == ISD::SDIV && LT.second == MVT::v8i32 && ST->hasAVX())
497       return LT.first * 32;
498     if (ISD == ISD::SREM && LT.second == MVT::v8i32 && ST->hasAVX())
499       return LT.first * 38;
500     if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
501       return LT.first * 15;
502     if (ISD == ISD::SREM && LT.second == MVT::v4i32 && ST->hasSSE41())
503       return LT.first * 20;
504 
505     if (const auto *Entry = CostTableLookup(SSE2ConstCostTable, ISD, LT.second))
506       return LT.first * Entry->Cost;
507   }
508 
509   static const CostTblEntry AVX512BWShiftCostTable[] = {
510     { ISD::SHL,   MVT::v16i8,      4 }, // extend/vpsllvw/pack sequence.
511     { ISD::SRL,   MVT::v16i8,      4 }, // extend/vpsrlvw/pack sequence.
512     { ISD::SRA,   MVT::v16i8,      4 }, // extend/vpsravw/pack sequence.
513     { ISD::SHL,   MVT::v32i8,      4 }, // extend/vpsllvw/pack sequence.
514     { ISD::SRL,   MVT::v32i8,      4 }, // extend/vpsrlvw/pack sequence.
515     { ISD::SRA,   MVT::v32i8,      6 }, // extend/vpsravw/pack sequence.
516     { ISD::SHL,   MVT::v64i8,      6 }, // extend/vpsllvw/pack sequence.
517     { ISD::SRL,   MVT::v64i8,      7 }, // extend/vpsrlvw/pack sequence.
518     { ISD::SRA,   MVT::v64i8,     15 }, // extend/vpsravw/pack sequence.
519 
520     { ISD::SHL,   MVT::v8i16,      1 }, // vpsllvw
521     { ISD::SRL,   MVT::v8i16,      1 }, // vpsrlvw
522     { ISD::SRA,   MVT::v8i16,      1 }, // vpsravw
523     { ISD::SHL,   MVT::v16i16,     1 }, // vpsllvw
524     { ISD::SRL,   MVT::v16i16,     1 }, // vpsrlvw
525     { ISD::SRA,   MVT::v16i16,     1 }, // vpsravw
526     { ISD::SHL,   MVT::v32i16,     1 }, // vpsllvw
527     { ISD::SRL,   MVT::v32i16,     1 }, // vpsrlvw
528     { ISD::SRA,   MVT::v32i16,     1 }, // vpsravw
529   };
530 
531   if (ST->hasBWI())
532     if (const auto *Entry = CostTableLookup(AVX512BWShiftCostTable, ISD, LT.second))
533       return LT.first * Entry->Cost;
534 
535   static const CostTblEntry AVX2UniformCostTable[] = {
536     // Uniform splats are cheaper for the following instructions.
537     { ISD::SHL,  MVT::v16i16, 1 }, // psllw.
538     { ISD::SRL,  MVT::v16i16, 1 }, // psrlw.
539     { ISD::SRA,  MVT::v16i16, 1 }, // psraw.
540     { ISD::SHL,  MVT::v32i16, 2 }, // 2*psllw.
541     { ISD::SRL,  MVT::v32i16, 2 }, // 2*psrlw.
542     { ISD::SRA,  MVT::v32i16, 2 }, // 2*psraw.
543 
544     { ISD::SHL,  MVT::v8i32,  1 }, // pslld
545     { ISD::SRL,  MVT::v8i32,  1 }, // psrld
546     { ISD::SRA,  MVT::v8i32,  1 }, // psrad
547     { ISD::SHL,  MVT::v4i64,  1 }, // psllq
548     { ISD::SRL,  MVT::v4i64,  1 }, // psrlq
549   };
550 
551   if (ST->hasAVX2() &&
552       ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
553        (Op2Info == TargetTransformInfo::OK_UniformValue))) {
554     if (const auto *Entry =
555             CostTableLookup(AVX2UniformCostTable, ISD, LT.second))
556       return LT.first * Entry->Cost;
557   }
558 
559   static const CostTblEntry SSE2UniformCostTable[] = {
560     // Uniform splats are cheaper for the following instructions.
561     { ISD::SHL,  MVT::v8i16,  1 }, // psllw.
562     { ISD::SHL,  MVT::v4i32,  1 }, // pslld
563     { ISD::SHL,  MVT::v2i64,  1 }, // psllq.
564 
565     { ISD::SRL,  MVT::v8i16,  1 }, // psrlw.
566     { ISD::SRL,  MVT::v4i32,  1 }, // psrld.
567     { ISD::SRL,  MVT::v2i64,  1 }, // psrlq.
568 
569     { ISD::SRA,  MVT::v8i16,  1 }, // psraw.
570     { ISD::SRA,  MVT::v4i32,  1 }, // psrad.
571   };
572 
573   if (ST->hasSSE2() &&
574       ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
575        (Op2Info == TargetTransformInfo::OK_UniformValue))) {
576     if (const auto *Entry =
577             CostTableLookup(SSE2UniformCostTable, ISD, LT.second))
578       return LT.first * Entry->Cost;
579   }
580 
581   static const CostTblEntry AVX512DQCostTable[] = {
582     { ISD::MUL,  MVT::v2i64, 2 }, // pmullq
583     { ISD::MUL,  MVT::v4i64, 2 }, // pmullq
584     { ISD::MUL,  MVT::v8i64, 2 }  // pmullq
585   };
586 
587   // Look for AVX512DQ lowering tricks for custom cases.
588   if (ST->hasDQI())
589     if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD, LT.second))
590       return LT.first * Entry->Cost;
591 
592   static const CostTblEntry AVX512BWCostTable[] = {
593     { ISD::SHL,   MVT::v64i8,     11 }, // vpblendvb sequence.
594     { ISD::SRL,   MVT::v64i8,     11 }, // vpblendvb sequence.
595     { ISD::SRA,   MVT::v64i8,     24 }, // vpblendvb sequence.
596   };
597 
598   // Look for AVX512BW lowering tricks for custom cases.
599   if (ST->hasBWI())
600     if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD, LT.second))
601       return LT.first * Entry->Cost;
602 
603   static const CostTblEntry AVX512CostTable[] = {
604     { ISD::SHL,     MVT::v4i32,      1 },
605     { ISD::SRL,     MVT::v4i32,      1 },
606     { ISD::SRA,     MVT::v4i32,      1 },
607     { ISD::SHL,     MVT::v8i32,      1 },
608     { ISD::SRL,     MVT::v8i32,      1 },
609     { ISD::SRA,     MVT::v8i32,      1 },
610     { ISD::SHL,     MVT::v16i32,     1 },
611     { ISD::SRL,     MVT::v16i32,     1 },
612     { ISD::SRA,     MVT::v16i32,     1 },
613 
614     { ISD::SHL,     MVT::v2i64,      1 },
615     { ISD::SRL,     MVT::v2i64,      1 },
616     { ISD::SHL,     MVT::v4i64,      1 },
617     { ISD::SRL,     MVT::v4i64,      1 },
618     { ISD::SHL,     MVT::v8i64,      1 },
619     { ISD::SRL,     MVT::v8i64,      1 },
620 
621     { ISD::SRA,     MVT::v2i64,      1 },
622     { ISD::SRA,     MVT::v4i64,      1 },
623     { ISD::SRA,     MVT::v8i64,      1 },
624 
625     { ISD::MUL,     MVT::v16i32,     1 }, // pmulld (Skylake from agner.org)
626     { ISD::MUL,     MVT::v8i32,      1 }, // pmulld (Skylake from agner.org)
627     { ISD::MUL,     MVT::v4i32,      1 }, // pmulld (Skylake from agner.org)
628     { ISD::MUL,     MVT::v8i64,      6 }, // 3*pmuludq/3*shift/2*add
629 
630     { ISD::FNEG,    MVT::v8f64,      1 }, // Skylake from http://www.agner.org/
631     { ISD::FADD,    MVT::v8f64,      1 }, // Skylake from http://www.agner.org/
632     { ISD::FSUB,    MVT::v8f64,      1 }, // Skylake from http://www.agner.org/
633     { ISD::FMUL,    MVT::v8f64,      1 }, // Skylake from http://www.agner.org/
634 
635     { ISD::FNEG,    MVT::v16f32,     1 }, // Skylake from http://www.agner.org/
636     { ISD::FADD,    MVT::v16f32,     1 }, // Skylake from http://www.agner.org/
637     { ISD::FSUB,    MVT::v16f32,     1 }, // Skylake from http://www.agner.org/
638     { ISD::FMUL,    MVT::v16f32,     1 }, // Skylake from http://www.agner.org/
639   };
640 
641   if (ST->hasAVX512())
642     if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
643       return LT.first * Entry->Cost;
644 
645   static const CostTblEntry AVX2ShiftCostTable[] = {
646     // Shifts on vXi64/vXi32 on AVX2 is legal even though we declare to
647     // customize them to detect the cases where shift amount is a scalar one.
648     { ISD::SHL,     MVT::v4i32,    2 }, // vpsllvd (Haswell from agner.org)
649     { ISD::SRL,     MVT::v4i32,    2 }, // vpsrlvd (Haswell from agner.org)
650     { ISD::SRA,     MVT::v4i32,    2 }, // vpsravd (Haswell from agner.org)
651     { ISD::SHL,     MVT::v8i32,    2 }, // vpsllvd (Haswell from agner.org)
652     { ISD::SRL,     MVT::v8i32,    2 }, // vpsrlvd (Haswell from agner.org)
653     { ISD::SRA,     MVT::v8i32,    2 }, // vpsravd (Haswell from agner.org)
654     { ISD::SHL,     MVT::v2i64,    1 }, // vpsllvq (Haswell from agner.org)
655     { ISD::SRL,     MVT::v2i64,    1 }, // vpsrlvq (Haswell from agner.org)
656     { ISD::SHL,     MVT::v4i64,    1 }, // vpsllvq (Haswell from agner.org)
657     { ISD::SRL,     MVT::v4i64,    1 }, // vpsrlvq (Haswell from agner.org)
658   };
659 
660   if (ST->hasAVX512()) {
661     if (ISD == ISD::SHL && LT.second == MVT::v32i16 &&
662         (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
663          Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
664       // On AVX512, a packed v32i16 shift left by a constant build_vector
665       // is lowered into a vector multiply (vpmullw).
666       return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
667                                     Op1Info, Op2Info,
668                                     TargetTransformInfo::OP_None,
669                                     TargetTransformInfo::OP_None);
670   }
671 
672   // Look for AVX2 lowering tricks (XOP is always better at v4i32 shifts).
673   if (ST->hasAVX2() && !(ST->hasXOP() && LT.second == MVT::v4i32)) {
674     if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
675         (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
676          Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
677       // On AVX2, a packed v16i16 shift left by a constant build_vector
678       // is lowered into a vector multiply (vpmullw).
679       return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
680                                     Op1Info, Op2Info,
681                                     TargetTransformInfo::OP_None,
682                                     TargetTransformInfo::OP_None);
683 
684     if (const auto *Entry = CostTableLookup(AVX2ShiftCostTable, ISD, LT.second))
685       return LT.first * Entry->Cost;
686   }
687 
688   static const CostTblEntry XOPShiftCostTable[] = {
689     // 128bit shifts take 1cy, but right shifts require negation beforehand.
690     { ISD::SHL,     MVT::v16i8,    1 },
691     { ISD::SRL,     MVT::v16i8,    2 },
692     { ISD::SRA,     MVT::v16i8,    2 },
693     { ISD::SHL,     MVT::v8i16,    1 },
694     { ISD::SRL,     MVT::v8i16,    2 },
695     { ISD::SRA,     MVT::v8i16,    2 },
696     { ISD::SHL,     MVT::v4i32,    1 },
697     { ISD::SRL,     MVT::v4i32,    2 },
698     { ISD::SRA,     MVT::v4i32,    2 },
699     { ISD::SHL,     MVT::v2i64,    1 },
700     { ISD::SRL,     MVT::v2i64,    2 },
701     { ISD::SRA,     MVT::v2i64,    2 },
702     // 256bit shifts require splitting if AVX2 didn't catch them above.
703     { ISD::SHL,     MVT::v32i8,  2+2 },
704     { ISD::SRL,     MVT::v32i8,  4+2 },
705     { ISD::SRA,     MVT::v32i8,  4+2 },
706     { ISD::SHL,     MVT::v16i16, 2+2 },
707     { ISD::SRL,     MVT::v16i16, 4+2 },
708     { ISD::SRA,     MVT::v16i16, 4+2 },
709     { ISD::SHL,     MVT::v8i32,  2+2 },
710     { ISD::SRL,     MVT::v8i32,  4+2 },
711     { ISD::SRA,     MVT::v8i32,  4+2 },
712     { ISD::SHL,     MVT::v4i64,  2+2 },
713     { ISD::SRL,     MVT::v4i64,  4+2 },
714     { ISD::SRA,     MVT::v4i64,  4+2 },
715   };
716 
717   // Look for XOP lowering tricks.
718   if (ST->hasXOP()) {
719     // If the right shift is constant then we'll fold the negation so
720     // it's as cheap as a left shift.
721     int ShiftISD = ISD;
722     if ((ShiftISD == ISD::SRL || ShiftISD == ISD::SRA) &&
723         (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
724          Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
725       ShiftISD = ISD::SHL;
726     if (const auto *Entry =
727             CostTableLookup(XOPShiftCostTable, ShiftISD, LT.second))
728       return LT.first * Entry->Cost;
729   }
730 
731   static const CostTblEntry SSE2UniformShiftCostTable[] = {
732     // Uniform splats are cheaper for the following instructions.
733     { ISD::SHL,  MVT::v16i16, 2+2 }, // 2*psllw + split.
734     { ISD::SHL,  MVT::v8i32,  2+2 }, // 2*pslld + split.
735     { ISD::SHL,  MVT::v4i64,  2+2 }, // 2*psllq + split.
736 
737     { ISD::SRL,  MVT::v16i16, 2+2 }, // 2*psrlw + split.
738     { ISD::SRL,  MVT::v8i32,  2+2 }, // 2*psrld + split.
739     { ISD::SRL,  MVT::v4i64,  2+2 }, // 2*psrlq + split.
740 
741     { ISD::SRA,  MVT::v16i16, 2+2 }, // 2*psraw + split.
742     { ISD::SRA,  MVT::v8i32,  2+2 }, // 2*psrad + split.
743     { ISD::SRA,  MVT::v2i64,    4 }, // 2*psrad + shuffle.
744     { ISD::SRA,  MVT::v4i64,  8+2 }, // 2*(2*psrad + shuffle) + split.
745   };
746 
747   if (ST->hasSSE2() &&
748       ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
749        (Op2Info == TargetTransformInfo::OK_UniformValue))) {
750 
751     // Handle AVX2 uniform v4i64 ISD::SRA, it's not worth a table.
752     if (ISD == ISD::SRA && LT.second == MVT::v4i64 && ST->hasAVX2())
753       return LT.first * 4; // 2*psrad + shuffle.
754 
755     if (const auto *Entry =
756             CostTableLookup(SSE2UniformShiftCostTable, ISD, LT.second))
757       return LT.first * Entry->Cost;
758   }
759 
760   if (ISD == ISD::SHL &&
761       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
762     MVT VT = LT.second;
763     // Vector shift left by non uniform constant can be lowered
764     // into vector multiply.
765     if (((VT == MVT::v8i16 || VT == MVT::v4i32) && ST->hasSSE2()) ||
766         ((VT == MVT::v16i16 || VT == MVT::v8i32) && ST->hasAVX()))
767       ISD = ISD::MUL;
768   }
769 
770   static const CostTblEntry AVX2CostTable[] = {
771     { ISD::SHL,  MVT::v16i8,      6 }, // vpblendvb sequence.
772     { ISD::SHL,  MVT::v32i8,      6 }, // vpblendvb sequence.
773     { ISD::SHL,  MVT::v64i8,     12 }, // 2*vpblendvb sequence.
774     { ISD::SHL,  MVT::v8i16,      5 }, // extend/vpsrlvd/pack sequence.
775     { ISD::SHL,  MVT::v16i16,     7 }, // extend/vpsrlvd/pack sequence.
776     { ISD::SHL,  MVT::v32i16,    14 }, // 2*extend/vpsrlvd/pack sequence.
777 
778     { ISD::SRL,  MVT::v16i8,      6 }, // vpblendvb sequence.
779     { ISD::SRL,  MVT::v32i8,      6 }, // vpblendvb sequence.
780     { ISD::SRL,  MVT::v64i8,     12 }, // 2*vpblendvb sequence.
781     { ISD::SRL,  MVT::v8i16,      5 }, // extend/vpsrlvd/pack sequence.
782     { ISD::SRL,  MVT::v16i16,     7 }, // extend/vpsrlvd/pack sequence.
783     { ISD::SRL,  MVT::v32i16,    14 }, // 2*extend/vpsrlvd/pack sequence.
784 
785     { ISD::SRA,  MVT::v16i8,     17 }, // vpblendvb sequence.
786     { ISD::SRA,  MVT::v32i8,     17 }, // vpblendvb sequence.
787     { ISD::SRA,  MVT::v64i8,     34 }, // 2*vpblendvb sequence.
788     { ISD::SRA,  MVT::v8i16,      5 }, // extend/vpsravd/pack sequence.
789     { ISD::SRA,  MVT::v16i16,     7 }, // extend/vpsravd/pack sequence.
790     { ISD::SRA,  MVT::v32i16,    14 }, // 2*extend/vpsravd/pack sequence.
791     { ISD::SRA,  MVT::v2i64,      2 }, // srl/xor/sub sequence.
792     { ISD::SRA,  MVT::v4i64,      2 }, // srl/xor/sub sequence.
793 
794     { ISD::SUB,  MVT::v32i8,      1 }, // psubb
795     { ISD::ADD,  MVT::v32i8,      1 }, // paddb
796     { ISD::SUB,  MVT::v16i16,     1 }, // psubw
797     { ISD::ADD,  MVT::v16i16,     1 }, // paddw
798     { ISD::SUB,  MVT::v8i32,      1 }, // psubd
799     { ISD::ADD,  MVT::v8i32,      1 }, // paddd
800     { ISD::SUB,  MVT::v4i64,      1 }, // psubq
801     { ISD::ADD,  MVT::v4i64,      1 }, // paddq
802 
803     { ISD::MUL,  MVT::v16i16,     1 }, // pmullw
804     { ISD::MUL,  MVT::v8i32,      2 }, // pmulld (Haswell from agner.org)
805     { ISD::MUL,  MVT::v4i64,      6 }, // 3*pmuludq/3*shift/2*add
806 
807     { ISD::FNEG, MVT::v4f64,      1 }, // Haswell from http://www.agner.org/
808     { ISD::FNEG, MVT::v8f32,      1 }, // Haswell from http://www.agner.org/
809     { ISD::FADD, MVT::v4f64,      1 }, // Haswell from http://www.agner.org/
810     { ISD::FADD, MVT::v8f32,      1 }, // Haswell from http://www.agner.org/
811     { ISD::FSUB, MVT::v4f64,      1 }, // Haswell from http://www.agner.org/
812     { ISD::FSUB, MVT::v8f32,      1 }, // Haswell from http://www.agner.org/
813     { ISD::FMUL, MVT::f64,        1 }, // Haswell from http://www.agner.org/
814     { ISD::FMUL, MVT::v2f64,      1 }, // Haswell from http://www.agner.org/
815     { ISD::FMUL, MVT::v4f64,      1 }, // Haswell from http://www.agner.org/
816     { ISD::FMUL, MVT::v8f32,      1 }, // Haswell from http://www.agner.org/
817 
818     { ISD::FDIV, MVT::f32,        7 }, // Haswell from http://www.agner.org/
819     { ISD::FDIV, MVT::v4f32,      7 }, // Haswell from http://www.agner.org/
820     { ISD::FDIV, MVT::v8f32,     14 }, // Haswell from http://www.agner.org/
821     { ISD::FDIV, MVT::f64,       14 }, // Haswell from http://www.agner.org/
822     { ISD::FDIV, MVT::v2f64,     14 }, // Haswell from http://www.agner.org/
823     { ISD::FDIV, MVT::v4f64,     28 }, // Haswell from http://www.agner.org/
824   };
825 
826   // Look for AVX2 lowering tricks for custom cases.
827   if (ST->hasAVX2())
828     if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second))
829       return LT.first * Entry->Cost;
830 
831   static const CostTblEntry AVX1CostTable[] = {
832     // We don't have to scalarize unsupported ops. We can issue two half-sized
833     // operations and we only need to extract the upper YMM half.
834     // Two ops + 1 extract + 1 insert = 4.
835     { ISD::MUL,     MVT::v16i16,     4 },
836     { ISD::MUL,     MVT::v8i32,      5 }, // BTVER2 from http://www.agner.org/
837     { ISD::MUL,     MVT::v4i64,     12 },
838 
839     { ISD::SUB,     MVT::v32i8,      4 },
840     { ISD::ADD,     MVT::v32i8,      4 },
841     { ISD::SUB,     MVT::v16i16,     4 },
842     { ISD::ADD,     MVT::v16i16,     4 },
843     { ISD::SUB,     MVT::v8i32,      4 },
844     { ISD::ADD,     MVT::v8i32,      4 },
845     { ISD::SUB,     MVT::v4i64,      4 },
846     { ISD::ADD,     MVT::v4i64,      4 },
847 
848     { ISD::SHL,     MVT::v16i8,     10 }, // pblendvb sequence .
849     { ISD::SHL,     MVT::v32i8,     22 }, // pblendvb sequence + split.
850     { ISD::SHL,     MVT::v8i16,      6 }, // pblendvb sequence.
851     { ISD::SHL,     MVT::v16i16,    13 }, // pblendvb sequence + split.
852     { ISD::SHL,     MVT::v4i32,      3 }, // pslld/paddd/cvttps2dq/pmulld
853     { ISD::SHL,     MVT::v8i32,      9 }, // pslld/paddd/cvttps2dq/pmulld + split
854     { ISD::SHL,     MVT::v2i64,      2 }, // Shift each lane + blend.
855     { ISD::SHL,     MVT::v4i64,      6 }, // Shift each lane + blend + split.
856 
857     { ISD::SRL,     MVT::v16i8,     11 }, // pblendvb sequence.
858     { ISD::SRL,     MVT::v32i8,     23 }, // pblendvb sequence + split.
859     { ISD::SRL,     MVT::v8i16,     13 }, // pblendvb sequence.
860     { ISD::SRL,     MVT::v16i16,    28 }, // pblendvb sequence + split.
861     { ISD::SRL,     MVT::v4i32,      6 }, // Shift each lane + blend.
862     { ISD::SRL,     MVT::v8i32,     14 }, // Shift each lane + blend + split.
863     { ISD::SRL,     MVT::v2i64,      2 }, // Shift each lane + blend.
864     { ISD::SRL,     MVT::v4i64,      6 }, // Shift each lane + blend + split.
865 
866     { ISD::SRA,     MVT::v16i8,     21 }, // pblendvb sequence.
867     { ISD::SRA,     MVT::v32i8,     44 }, // pblendvb sequence + split.
868     { ISD::SRA,     MVT::v8i16,     13 }, // pblendvb sequence.
869     { ISD::SRA,     MVT::v16i16,    28 }, // pblendvb sequence + split.
870     { ISD::SRA,     MVT::v4i32,      6 }, // Shift each lane + blend.
871     { ISD::SRA,     MVT::v8i32,     14 }, // Shift each lane + blend + split.
872     { ISD::SRA,     MVT::v2i64,      5 }, // Shift each lane + blend.
873     { ISD::SRA,     MVT::v4i64,     12 }, // Shift each lane + blend + split.
874 
875     { ISD::FNEG,    MVT::v4f64,      2 }, // BTVER2 from http://www.agner.org/
876     { ISD::FNEG,    MVT::v8f32,      2 }, // BTVER2 from http://www.agner.org/
877 
878     { ISD::FMUL,    MVT::f64,        2 }, // BTVER2 from http://www.agner.org/
879     { ISD::FMUL,    MVT::v2f64,      2 }, // BTVER2 from http://www.agner.org/
880     { ISD::FMUL,    MVT::v4f64,      4 }, // BTVER2 from http://www.agner.org/
881 
882     { ISD::FDIV,    MVT::f32,       14 }, // SNB from http://www.agner.org/
883     { ISD::FDIV,    MVT::v4f32,     14 }, // SNB from http://www.agner.org/
884     { ISD::FDIV,    MVT::v8f32,     28 }, // SNB from http://www.agner.org/
885     { ISD::FDIV,    MVT::f64,       22 }, // SNB from http://www.agner.org/
886     { ISD::FDIV,    MVT::v2f64,     22 }, // SNB from http://www.agner.org/
887     { ISD::FDIV,    MVT::v4f64,     44 }, // SNB from http://www.agner.org/
888   };
889 
890   if (ST->hasAVX())
891     if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, LT.second))
892       return LT.first * Entry->Cost;
893 
894   static const CostTblEntry SSE42CostTable[] = {
895     { ISD::FADD, MVT::f64,     1 }, // Nehalem from http://www.agner.org/
896     { ISD::FADD, MVT::f32,     1 }, // Nehalem from http://www.agner.org/
897     { ISD::FADD, MVT::v2f64,   1 }, // Nehalem from http://www.agner.org/
898     { ISD::FADD, MVT::v4f32,   1 }, // Nehalem from http://www.agner.org/
899 
900     { ISD::FSUB, MVT::f64,     1 }, // Nehalem from http://www.agner.org/
901     { ISD::FSUB, MVT::f32 ,    1 }, // Nehalem from http://www.agner.org/
902     { ISD::FSUB, MVT::v2f64,   1 }, // Nehalem from http://www.agner.org/
903     { ISD::FSUB, MVT::v4f32,   1 }, // Nehalem from http://www.agner.org/
904 
905     { ISD::FMUL, MVT::f64,     1 }, // Nehalem from http://www.agner.org/
906     { ISD::FMUL, MVT::f32,     1 }, // Nehalem from http://www.agner.org/
907     { ISD::FMUL, MVT::v2f64,   1 }, // Nehalem from http://www.agner.org/
908     { ISD::FMUL, MVT::v4f32,   1 }, // Nehalem from http://www.agner.org/
909 
910     { ISD::FDIV,  MVT::f32,   14 }, // Nehalem from http://www.agner.org/
911     { ISD::FDIV,  MVT::v4f32, 14 }, // Nehalem from http://www.agner.org/
912     { ISD::FDIV,  MVT::f64,   22 }, // Nehalem from http://www.agner.org/
913     { ISD::FDIV,  MVT::v2f64, 22 }, // Nehalem from http://www.agner.org/
914 
915     { ISD::MUL,   MVT::v2i64,  6 }  // 3*pmuludq/3*shift/2*add
916   };
917 
918   if (ST->hasSSE42())
919     if (const auto *Entry = CostTableLookup(SSE42CostTable, ISD, LT.second))
920       return LT.first * Entry->Cost;
921 
922   static const CostTblEntry SSE41CostTable[] = {
923     { ISD::SHL,  MVT::v16i8,      11 }, // pblendvb sequence.
924     { ISD::SHL,  MVT::v8i16,      14 }, // pblendvb sequence.
925     { ISD::SHL,  MVT::v4i32,       4 }, // pslld/paddd/cvttps2dq/pmulld
926 
927     { ISD::SRL,  MVT::v16i8,      12 }, // pblendvb sequence.
928     { ISD::SRL,  MVT::v8i16,      14 }, // pblendvb sequence.
929     { ISD::SRL,  MVT::v4i32,      11 }, // Shift each lane + blend.
930 
931     { ISD::SRA,  MVT::v16i8,      24 }, // pblendvb sequence.
932     { ISD::SRA,  MVT::v8i16,      14 }, // pblendvb sequence.
933     { ISD::SRA,  MVT::v4i32,      12 }, // Shift each lane + blend.
934 
935     { ISD::MUL,  MVT::v4i32,       2 }  // pmulld (Nehalem from agner.org)
936   };
937 
938   if (ST->hasSSE41())
939     if (const auto *Entry = CostTableLookup(SSE41CostTable, ISD, LT.second))
940       return LT.first * Entry->Cost;
941 
942   static const CostTblEntry SSE2CostTable[] = {
943     // We don't correctly identify costs of casts because they are marked as
944     // custom.
945     { ISD::SHL,  MVT::v16i8,      26 }, // cmpgtb sequence.
946     { ISD::SHL,  MVT::v8i16,      32 }, // cmpgtb sequence.
947     { ISD::SHL,  MVT::v4i32,     2*5 }, // We optimized this using mul.
948     { ISD::SHL,  MVT::v2i64,       4 }, // splat+shuffle sequence.
949 
950     { ISD::SRL,  MVT::v16i8,      26 }, // cmpgtb sequence.
951     { ISD::SRL,  MVT::v8i16,      32 }, // cmpgtb sequence.
952     { ISD::SRL,  MVT::v4i32,      16 }, // Shift each lane + blend.
953     { ISD::SRL,  MVT::v2i64,       4 }, // splat+shuffle sequence.
954 
955     { ISD::SRA,  MVT::v16i8,      54 }, // unpacked cmpgtb sequence.
956     { ISD::SRA,  MVT::v8i16,      32 }, // cmpgtb sequence.
957     { ISD::SRA,  MVT::v4i32,      16 }, // Shift each lane + blend.
958     { ISD::SRA,  MVT::v2i64,      12 }, // srl/xor/sub sequence.
959 
960     { ISD::MUL,  MVT::v8i16,       1 }, // pmullw
961     { ISD::MUL,  MVT::v4i32,       6 }, // 3*pmuludq/4*shuffle
962     { ISD::MUL,  MVT::v2i64,       8 }, // 3*pmuludq/3*shift/2*add
963 
964     { ISD::FDIV, MVT::f32,        23 }, // Pentium IV from http://www.agner.org/
965     { ISD::FDIV, MVT::v4f32,      39 }, // Pentium IV from http://www.agner.org/
966     { ISD::FDIV, MVT::f64,        38 }, // Pentium IV from http://www.agner.org/
967     { ISD::FDIV, MVT::v2f64,      69 }, // Pentium IV from http://www.agner.org/
968 
969     { ISD::FNEG, MVT::f32,         1 }, // Pentium IV from http://www.agner.org/
970     { ISD::FNEG, MVT::f64,         1 }, // Pentium IV from http://www.agner.org/
971     { ISD::FNEG, MVT::v4f32,       1 }, // Pentium IV from http://www.agner.org/
972     { ISD::FNEG, MVT::v2f64,       1 }, // Pentium IV from http://www.agner.org/
973 
974     { ISD::FADD, MVT::f32,         2 }, // Pentium IV from http://www.agner.org/
975     { ISD::FADD, MVT::f64,         2 }, // Pentium IV from http://www.agner.org/
976 
977     { ISD::FSUB, MVT::f32,         2 }, // Pentium IV from http://www.agner.org/
978     { ISD::FSUB, MVT::f64,         2 }, // Pentium IV from http://www.agner.org/
979   };
980 
981   if (ST->hasSSE2())
982     if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
983       return LT.first * Entry->Cost;
984 
985   static const CostTblEntry SSE1CostTable[] = {
986     { ISD::FDIV, MVT::f32,   17 }, // Pentium III from http://www.agner.org/
987     { ISD::FDIV, MVT::v4f32, 34 }, // Pentium III from http://www.agner.org/
988 
989     { ISD::FNEG, MVT::f32,    2 }, // Pentium III from http://www.agner.org/
990     { ISD::FNEG, MVT::v4f32,  2 }, // Pentium III from http://www.agner.org/
991 
992     { ISD::FADD, MVT::f32,    1 }, // Pentium III from http://www.agner.org/
993     { ISD::FADD, MVT::v4f32,  2 }, // Pentium III from http://www.agner.org/
994 
995     { ISD::FSUB, MVT::f32,    1 }, // Pentium III from http://www.agner.org/
996     { ISD::FSUB, MVT::v4f32,  2 }, // Pentium III from http://www.agner.org/
997   };
998 
999   if (ST->hasSSE1())
1000     if (const auto *Entry = CostTableLookup(SSE1CostTable, ISD, LT.second))
1001       return LT.first * Entry->Cost;
1002 
1003   static const CostTblEntry X64CostTbl[] = { // 64-bit targets
1004     { ISD::ADD,  MVT::i64,    1 }, // Core (Merom) from http://www.agner.org/
1005     { ISD::SUB,  MVT::i64,    1 }, // Core (Merom) from http://www.agner.org/
1006   };
1007 
1008   if (ST->is64Bit())
1009     if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, LT.second))
1010       return LT.first * Entry->Cost;
1011 
1012   static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
1013     { ISD::ADD,  MVT::i8,    1 }, // Pentium III from http://www.agner.org/
1014     { ISD::ADD,  MVT::i16,   1 }, // Pentium III from http://www.agner.org/
1015     { ISD::ADD,  MVT::i32,   1 }, // Pentium III from http://www.agner.org/
1016 
1017     { ISD::SUB,  MVT::i8,    1 }, // Pentium III from http://www.agner.org/
1018     { ISD::SUB,  MVT::i16,   1 }, // Pentium III from http://www.agner.org/
1019     { ISD::SUB,  MVT::i32,   1 }, // Pentium III from http://www.agner.org/
1020   };
1021 
1022   if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, LT.second))
1023     return LT.first * Entry->Cost;
1024 
1025   // It is not a good idea to vectorize division. We have to scalarize it and
1026   // in the process we will often end up having to spilling regular
1027   // registers. The overhead of division is going to dominate most kernels
1028   // anyways so try hard to prevent vectorization of division - it is
1029   // generally a bad idea. Assume somewhat arbitrarily that we have to be able
1030   // to hide "20 cycles" for each lane.
1031   if (LT.second.isVector() && (ISD == ISD::SDIV || ISD == ISD::SREM ||
1032                                ISD == ISD::UDIV || ISD == ISD::UREM)) {
1033     InstructionCost ScalarCost = getArithmeticInstrCost(
1034         Opcode, Ty->getScalarType(), CostKind, Op1Info, Op2Info,
1035         TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
1036     return 20 * LT.first * LT.second.getVectorNumElements() * ScalarCost;
1037   }
1038 
1039   // Fallback to the default implementation.
1040   return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info);
1041 }
1042 
1043 InstructionCost X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
1044                                            VectorType *BaseTp,
1045                                            ArrayRef<int> Mask, int Index,
1046                                            VectorType *SubTp) {
1047   // 64-bit packed float vectors (v2f32) are widened to type v4f32.
1048   // 64-bit packed integer vectors (v2i32) are widened to type v4i32.
1049   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, BaseTp);
1050 
1051   Kind = improveShuffleKindFromMask(Kind, Mask);
1052   // Treat Transpose as 2-op shuffles - there's no difference in lowering.
1053   if (Kind == TTI::SK_Transpose)
1054     Kind = TTI::SK_PermuteTwoSrc;
1055 
1056   // For Broadcasts we are splatting the first element from the first input
1057   // register, so only need to reference that input and all the output
1058   // registers are the same.
1059   if (Kind == TTI::SK_Broadcast)
1060     LT.first = 1;
1061 
1062   // Subvector extractions are free if they start at the beginning of a
1063   // vector and cheap if the subvectors are aligned.
1064   if (Kind == TTI::SK_ExtractSubvector && LT.second.isVector()) {
1065     int NumElts = LT.second.getVectorNumElements();
1066     if ((Index % NumElts) == 0)
1067       return 0;
1068     std::pair<InstructionCost, MVT> SubLT =
1069         TLI->getTypeLegalizationCost(DL, SubTp);
1070     if (SubLT.second.isVector()) {
1071       int NumSubElts = SubLT.second.getVectorNumElements();
1072       if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
1073         return SubLT.first;
1074       // Handle some cases for widening legalization. For now we only handle
1075       // cases where the original subvector was naturally aligned and evenly
1076       // fit in its legalized subvector type.
1077       // FIXME: Remove some of the alignment restrictions.
1078       // FIXME: We can use permq for 64-bit or larger extracts from 256-bit
1079       // vectors.
1080       int OrigSubElts = cast<FixedVectorType>(SubTp)->getNumElements();
1081       if (NumSubElts > OrigSubElts && (Index % OrigSubElts) == 0 &&
1082           (NumSubElts % OrigSubElts) == 0 &&
1083           LT.second.getVectorElementType() ==
1084               SubLT.second.getVectorElementType() &&
1085           LT.second.getVectorElementType().getSizeInBits() ==
1086               BaseTp->getElementType()->getPrimitiveSizeInBits()) {
1087         assert(NumElts >= NumSubElts && NumElts > OrigSubElts &&
1088                "Unexpected number of elements!");
1089         auto *VecTy = FixedVectorType::get(BaseTp->getElementType(),
1090                                            LT.second.getVectorNumElements());
1091         auto *SubTy = FixedVectorType::get(BaseTp->getElementType(),
1092                                            SubLT.second.getVectorNumElements());
1093         int ExtractIndex = alignDown((Index % NumElts), NumSubElts);
1094         InstructionCost ExtractCost = getShuffleCost(
1095             TTI::SK_ExtractSubvector, VecTy, None, ExtractIndex, SubTy);
1096 
1097         // If the original size is 32-bits or more, we can use pshufd. Otherwise
1098         // if we have SSSE3 we can use pshufb.
1099         if (SubTp->getPrimitiveSizeInBits() >= 32 || ST->hasSSSE3())
1100           return ExtractCost + 1; // pshufd or pshufb
1101 
1102         assert(SubTp->getPrimitiveSizeInBits() == 16 &&
1103                "Unexpected vector size");
1104 
1105         return ExtractCost + 2; // worst case pshufhw + pshufd
1106       }
1107     }
1108   }
1109 
1110   // Subvector insertions are cheap if the subvectors are aligned.
1111   // Note that in general, the insertion starting at the beginning of a vector
1112   // isn't free, because we need to preserve the rest of the wide vector.
1113   if (Kind == TTI::SK_InsertSubvector && LT.second.isVector()) {
1114     int NumElts = LT.second.getVectorNumElements();
1115     std::pair<InstructionCost, MVT> SubLT =
1116         TLI->getTypeLegalizationCost(DL, SubTp);
1117     if (SubLT.second.isVector()) {
1118       int NumSubElts = SubLT.second.getVectorNumElements();
1119       if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
1120         return SubLT.first;
1121     }
1122   }
1123 
1124   // Handle some common (illegal) sub-vector types as they are often very cheap
1125   // to shuffle even on targets without PSHUFB.
1126   EVT VT = TLI->getValueType(DL, BaseTp);
1127   if (VT.isSimple() && VT.isVector() && VT.getSizeInBits() < 128 &&
1128       !ST->hasSSSE3()) {
1129      static const CostTblEntry SSE2SubVectorShuffleTbl[] = {
1130       {TTI::SK_Broadcast,        MVT::v4i16, 1}, // pshuflw
1131       {TTI::SK_Broadcast,        MVT::v2i16, 1}, // pshuflw
1132       {TTI::SK_Broadcast,        MVT::v8i8,  2}, // punpck/pshuflw
1133       {TTI::SK_Broadcast,        MVT::v4i8,  2}, // punpck/pshuflw
1134       {TTI::SK_Broadcast,        MVT::v2i8,  1}, // punpck
1135 
1136       {TTI::SK_Reverse,          MVT::v4i16, 1}, // pshuflw
1137       {TTI::SK_Reverse,          MVT::v2i16, 1}, // pshuflw
1138       {TTI::SK_Reverse,          MVT::v4i8,  3}, // punpck/pshuflw/packus
1139       {TTI::SK_Reverse,          MVT::v2i8,  1}, // punpck
1140 
1141       {TTI::SK_PermuteTwoSrc,    MVT::v4i16, 2}, // punpck/pshuflw
1142       {TTI::SK_PermuteTwoSrc,    MVT::v2i16, 2}, // punpck/pshuflw
1143       {TTI::SK_PermuteTwoSrc,    MVT::v8i8,  7}, // punpck/pshuflw
1144       {TTI::SK_PermuteTwoSrc,    MVT::v4i8,  4}, // punpck/pshuflw
1145       {TTI::SK_PermuteTwoSrc,    MVT::v2i8,  2}, // punpck
1146 
1147       {TTI::SK_PermuteSingleSrc, MVT::v4i16, 1}, // pshuflw
1148       {TTI::SK_PermuteSingleSrc, MVT::v2i16, 1}, // pshuflw
1149       {TTI::SK_PermuteSingleSrc, MVT::v8i8,  5}, // punpck/pshuflw
1150       {TTI::SK_PermuteSingleSrc, MVT::v4i8,  3}, // punpck/pshuflw
1151       {TTI::SK_PermuteSingleSrc, MVT::v2i8,  1}, // punpck
1152     };
1153 
1154     if (ST->hasSSE2())
1155       if (const auto *Entry =
1156               CostTableLookup(SSE2SubVectorShuffleTbl, Kind, VT.getSimpleVT()))
1157         return Entry->Cost;
1158   }
1159 
1160   // We are going to permute multiple sources and the result will be in multiple
1161   // destinations. Providing an accurate cost only for splits where the element
1162   // type remains the same.
1163   if (Kind == TTI::SK_PermuteSingleSrc && LT.first != 1) {
1164     MVT LegalVT = LT.second;
1165     if (LegalVT.isVector() &&
1166         LegalVT.getVectorElementType().getSizeInBits() ==
1167             BaseTp->getElementType()->getPrimitiveSizeInBits() &&
1168         LegalVT.getVectorNumElements() <
1169             cast<FixedVectorType>(BaseTp)->getNumElements()) {
1170 
1171       unsigned VecTySize = DL.getTypeStoreSize(BaseTp);
1172       unsigned LegalVTSize = LegalVT.getStoreSize();
1173       // Number of source vectors after legalization:
1174       unsigned NumOfSrcs = (VecTySize + LegalVTSize - 1) / LegalVTSize;
1175       // Number of destination vectors after legalization:
1176       InstructionCost NumOfDests = LT.first;
1177 
1178       auto *SingleOpTy = FixedVectorType::get(BaseTp->getElementType(),
1179                                               LegalVT.getVectorNumElements());
1180 
1181       InstructionCost NumOfShuffles = (NumOfSrcs - 1) * NumOfDests;
1182       return NumOfShuffles * getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy,
1183                                             None, 0, nullptr);
1184     }
1185 
1186     return BaseT::getShuffleCost(Kind, BaseTp, Mask, Index, SubTp);
1187   }
1188 
1189   // For 2-input shuffles, we must account for splitting the 2 inputs into many.
1190   if (Kind == TTI::SK_PermuteTwoSrc && LT.first != 1) {
1191     // We assume that source and destination have the same vector type.
1192     InstructionCost NumOfDests = LT.first;
1193     InstructionCost NumOfShufflesPerDest = LT.first * 2 - 1;
1194     LT.first = NumOfDests * NumOfShufflesPerDest;
1195   }
1196 
1197   static const CostTblEntry AVX512VBMIShuffleTbl[] = {
1198       {TTI::SK_Reverse, MVT::v64i8, 1}, // vpermb
1199       {TTI::SK_Reverse, MVT::v32i8, 1}, // vpermb
1200 
1201       {TTI::SK_PermuteSingleSrc, MVT::v64i8, 1}, // vpermb
1202       {TTI::SK_PermuteSingleSrc, MVT::v32i8, 1}, // vpermb
1203 
1204       {TTI::SK_PermuteTwoSrc, MVT::v64i8, 2}, // vpermt2b
1205       {TTI::SK_PermuteTwoSrc, MVT::v32i8, 2}, // vpermt2b
1206       {TTI::SK_PermuteTwoSrc, MVT::v16i8, 2}  // vpermt2b
1207   };
1208 
1209   if (ST->hasVBMI())
1210     if (const auto *Entry =
1211             CostTableLookup(AVX512VBMIShuffleTbl, Kind, LT.second))
1212       return LT.first * Entry->Cost;
1213 
1214   static const CostTblEntry AVX512BWShuffleTbl[] = {
1215       {TTI::SK_Broadcast, MVT::v32i16, 1}, // vpbroadcastw
1216       {TTI::SK_Broadcast, MVT::v64i8, 1},  // vpbroadcastb
1217 
1218       {TTI::SK_Reverse, MVT::v32i16, 2}, // vpermw
1219       {TTI::SK_Reverse, MVT::v16i16, 2}, // vpermw
1220       {TTI::SK_Reverse, MVT::v64i8, 2},  // pshufb + vshufi64x2
1221 
1222       {TTI::SK_PermuteSingleSrc, MVT::v32i16, 2}, // vpermw
1223       {TTI::SK_PermuteSingleSrc, MVT::v16i16, 2}, // vpermw
1224       {TTI::SK_PermuteSingleSrc, MVT::v64i8, 8},  // extend to v32i16
1225 
1226       {TTI::SK_PermuteTwoSrc, MVT::v32i16, 2}, // vpermt2w
1227       {TTI::SK_PermuteTwoSrc, MVT::v16i16, 2}, // vpermt2w
1228       {TTI::SK_PermuteTwoSrc, MVT::v8i16, 2},  // vpermt2w
1229       {TTI::SK_PermuteTwoSrc, MVT::v64i8, 19}, // 6 * v32i8 + 1
1230 
1231       {TTI::SK_Select, MVT::v32i16, 1}, // vblendmw
1232       {TTI::SK_Select, MVT::v64i8,  1}, // vblendmb
1233   };
1234 
1235   if (ST->hasBWI())
1236     if (const auto *Entry =
1237             CostTableLookup(AVX512BWShuffleTbl, Kind, LT.second))
1238       return LT.first * Entry->Cost;
1239 
1240   static const CostTblEntry AVX512ShuffleTbl[] = {
1241       {TTI::SK_Broadcast, MVT::v8f64, 1},  // vbroadcastpd
1242       {TTI::SK_Broadcast, MVT::v16f32, 1}, // vbroadcastps
1243       {TTI::SK_Broadcast, MVT::v8i64, 1},  // vpbroadcastq
1244       {TTI::SK_Broadcast, MVT::v16i32, 1}, // vpbroadcastd
1245       {TTI::SK_Broadcast, MVT::v32i16, 1}, // vpbroadcastw
1246       {TTI::SK_Broadcast, MVT::v64i8, 1},  // vpbroadcastb
1247 
1248       {TTI::SK_Reverse, MVT::v8f64, 1},  // vpermpd
1249       {TTI::SK_Reverse, MVT::v16f32, 1}, // vpermps
1250       {TTI::SK_Reverse, MVT::v8i64, 1},  // vpermq
1251       {TTI::SK_Reverse, MVT::v16i32, 1}, // vpermd
1252       {TTI::SK_Reverse, MVT::v32i16, 7}, // per mca
1253       {TTI::SK_Reverse, MVT::v64i8,  7}, // per mca
1254 
1255       {TTI::SK_PermuteSingleSrc, MVT::v8f64, 1},  // vpermpd
1256       {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1},  // vpermpd
1257       {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1},  // vpermpd
1258       {TTI::SK_PermuteSingleSrc, MVT::v16f32, 1}, // vpermps
1259       {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1},  // vpermps
1260       {TTI::SK_PermuteSingleSrc, MVT::v4f32, 1},  // vpermps
1261       {TTI::SK_PermuteSingleSrc, MVT::v8i64, 1},  // vpermq
1262       {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1},  // vpermq
1263       {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1},  // vpermq
1264       {TTI::SK_PermuteSingleSrc, MVT::v16i32, 1}, // vpermd
1265       {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1},  // vpermd
1266       {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1},  // vpermd
1267       {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1},  // pshufb
1268 
1269       {TTI::SK_PermuteTwoSrc, MVT::v8f64, 1},  // vpermt2pd
1270       {TTI::SK_PermuteTwoSrc, MVT::v16f32, 1}, // vpermt2ps
1271       {TTI::SK_PermuteTwoSrc, MVT::v8i64, 1},  // vpermt2q
1272       {TTI::SK_PermuteTwoSrc, MVT::v16i32, 1}, // vpermt2d
1273       {TTI::SK_PermuteTwoSrc, MVT::v4f64, 1},  // vpermt2pd
1274       {TTI::SK_PermuteTwoSrc, MVT::v8f32, 1},  // vpermt2ps
1275       {TTI::SK_PermuteTwoSrc, MVT::v4i64, 1},  // vpermt2q
1276       {TTI::SK_PermuteTwoSrc, MVT::v8i32, 1},  // vpermt2d
1277       {TTI::SK_PermuteTwoSrc, MVT::v2f64, 1},  // vpermt2pd
1278       {TTI::SK_PermuteTwoSrc, MVT::v4f32, 1},  // vpermt2ps
1279       {TTI::SK_PermuteTwoSrc, MVT::v2i64, 1},  // vpermt2q
1280       {TTI::SK_PermuteTwoSrc, MVT::v4i32, 1},  // vpermt2d
1281 
1282       // FIXME: This just applies the type legalization cost rules above
1283       // assuming these completely split.
1284       {TTI::SK_PermuteSingleSrc, MVT::v32i16, 14},
1285       {TTI::SK_PermuteSingleSrc, MVT::v64i8,  14},
1286       {TTI::SK_PermuteTwoSrc,    MVT::v32i16, 42},
1287       {TTI::SK_PermuteTwoSrc,    MVT::v64i8,  42},
1288 
1289       {TTI::SK_Select, MVT::v32i16, 1}, // vpternlogq
1290       {TTI::SK_Select, MVT::v64i8,  1}, // vpternlogq
1291       {TTI::SK_Select, MVT::v8f64,  1}, // vblendmpd
1292       {TTI::SK_Select, MVT::v16f32, 1}, // vblendmps
1293       {TTI::SK_Select, MVT::v8i64,  1}, // vblendmq
1294       {TTI::SK_Select, MVT::v16i32, 1}, // vblendmd
1295   };
1296 
1297   if (ST->hasAVX512())
1298     if (const auto *Entry = CostTableLookup(AVX512ShuffleTbl, Kind, LT.second))
1299       return LT.first * Entry->Cost;
1300 
1301   static const CostTblEntry AVX2ShuffleTbl[] = {
1302       {TTI::SK_Broadcast, MVT::v4f64, 1},  // vbroadcastpd
1303       {TTI::SK_Broadcast, MVT::v8f32, 1},  // vbroadcastps
1304       {TTI::SK_Broadcast, MVT::v4i64, 1},  // vpbroadcastq
1305       {TTI::SK_Broadcast, MVT::v8i32, 1},  // vpbroadcastd
1306       {TTI::SK_Broadcast, MVT::v16i16, 1}, // vpbroadcastw
1307       {TTI::SK_Broadcast, MVT::v32i8, 1},  // vpbroadcastb
1308 
1309       {TTI::SK_Reverse, MVT::v4f64, 1},  // vpermpd
1310       {TTI::SK_Reverse, MVT::v8f32, 1},  // vpermps
1311       {TTI::SK_Reverse, MVT::v4i64, 1},  // vpermq
1312       {TTI::SK_Reverse, MVT::v8i32, 1},  // vpermd
1313       {TTI::SK_Reverse, MVT::v16i16, 2}, // vperm2i128 + pshufb
1314       {TTI::SK_Reverse, MVT::v32i8, 2},  // vperm2i128 + pshufb
1315 
1316       {TTI::SK_Select, MVT::v16i16, 1}, // vpblendvb
1317       {TTI::SK_Select, MVT::v32i8, 1},  // vpblendvb
1318 
1319       {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1},  // vpermpd
1320       {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1},  // vpermps
1321       {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1},  // vpermq
1322       {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1},  // vpermd
1323       {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vperm2i128 + 2*vpshufb
1324                                                   // + vpblendvb
1325       {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4},  // vperm2i128 + 2*vpshufb
1326                                                   // + vpblendvb
1327 
1328       {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3},  // 2*vpermpd + vblendpd
1329       {TTI::SK_PermuteTwoSrc, MVT::v8f32, 3},  // 2*vpermps + vblendps
1330       {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3},  // 2*vpermq + vpblendd
1331       {TTI::SK_PermuteTwoSrc, MVT::v8i32, 3},  // 2*vpermd + vpblendd
1332       {TTI::SK_PermuteTwoSrc, MVT::v16i16, 7}, // 2*vperm2i128 + 4*vpshufb
1333                                                // + vpblendvb
1334       {TTI::SK_PermuteTwoSrc, MVT::v32i8, 7},  // 2*vperm2i128 + 4*vpshufb
1335                                                // + vpblendvb
1336   };
1337 
1338   if (ST->hasAVX2())
1339     if (const auto *Entry = CostTableLookup(AVX2ShuffleTbl, Kind, LT.second))
1340       return LT.first * Entry->Cost;
1341 
1342   static const CostTblEntry XOPShuffleTbl[] = {
1343       {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2},  // vperm2f128 + vpermil2pd
1344       {TTI::SK_PermuteSingleSrc, MVT::v8f32, 2},  // vperm2f128 + vpermil2ps
1345       {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2},  // vperm2f128 + vpermil2pd
1346       {TTI::SK_PermuteSingleSrc, MVT::v8i32, 2},  // vperm2f128 + vpermil2ps
1347       {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vextractf128 + 2*vpperm
1348                                                   // + vinsertf128
1349       {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4},  // vextractf128 + 2*vpperm
1350                                                   // + vinsertf128
1351 
1352       {TTI::SK_PermuteTwoSrc, MVT::v16i16, 9}, // 2*vextractf128 + 6*vpperm
1353                                                // + vinsertf128
1354       {TTI::SK_PermuteTwoSrc, MVT::v8i16, 1},  // vpperm
1355       {TTI::SK_PermuteTwoSrc, MVT::v32i8, 9},  // 2*vextractf128 + 6*vpperm
1356                                                // + vinsertf128
1357       {TTI::SK_PermuteTwoSrc, MVT::v16i8, 1},  // vpperm
1358   };
1359 
1360   if (ST->hasXOP())
1361     if (const auto *Entry = CostTableLookup(XOPShuffleTbl, Kind, LT.second))
1362       return LT.first * Entry->Cost;
1363 
1364   static const CostTblEntry AVX1ShuffleTbl[] = {
1365       {TTI::SK_Broadcast, MVT::v4f64, 2},  // vperm2f128 + vpermilpd
1366       {TTI::SK_Broadcast, MVT::v8f32, 2},  // vperm2f128 + vpermilps
1367       {TTI::SK_Broadcast, MVT::v4i64, 2},  // vperm2f128 + vpermilpd
1368       {TTI::SK_Broadcast, MVT::v8i32, 2},  // vperm2f128 + vpermilps
1369       {TTI::SK_Broadcast, MVT::v16i16, 3}, // vpshuflw + vpshufd + vinsertf128
1370       {TTI::SK_Broadcast, MVT::v32i8, 2},  // vpshufb + vinsertf128
1371 
1372       {TTI::SK_Reverse, MVT::v4f64, 2},  // vperm2f128 + vpermilpd
1373       {TTI::SK_Reverse, MVT::v8f32, 2},  // vperm2f128 + vpermilps
1374       {TTI::SK_Reverse, MVT::v4i64, 2},  // vperm2f128 + vpermilpd
1375       {TTI::SK_Reverse, MVT::v8i32, 2},  // vperm2f128 + vpermilps
1376       {TTI::SK_Reverse, MVT::v16i16, 4}, // vextractf128 + 2*pshufb
1377                                          // + vinsertf128
1378       {TTI::SK_Reverse, MVT::v32i8, 4},  // vextractf128 + 2*pshufb
1379                                          // + vinsertf128
1380 
1381       {TTI::SK_Select, MVT::v4i64, 1},  // vblendpd
1382       {TTI::SK_Select, MVT::v4f64, 1},  // vblendpd
1383       {TTI::SK_Select, MVT::v8i32, 1},  // vblendps
1384       {TTI::SK_Select, MVT::v8f32, 1},  // vblendps
1385       {TTI::SK_Select, MVT::v16i16, 3}, // vpand + vpandn + vpor
1386       {TTI::SK_Select, MVT::v32i8, 3},  // vpand + vpandn + vpor
1387 
1388       {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2},  // vperm2f128 + vshufpd
1389       {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2},  // vperm2f128 + vshufpd
1390       {TTI::SK_PermuteSingleSrc, MVT::v8f32, 4},  // 2*vperm2f128 + 2*vshufps
1391       {TTI::SK_PermuteSingleSrc, MVT::v8i32, 4},  // 2*vperm2f128 + 2*vshufps
1392       {TTI::SK_PermuteSingleSrc, MVT::v16i16, 8}, // vextractf128 + 4*pshufb
1393                                                   // + 2*por + vinsertf128
1394       {TTI::SK_PermuteSingleSrc, MVT::v32i8, 8},  // vextractf128 + 4*pshufb
1395                                                   // + 2*por + vinsertf128
1396 
1397       {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3},   // 2*vperm2f128 + vshufpd
1398       {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3},   // 2*vperm2f128 + vshufpd
1399       {TTI::SK_PermuteTwoSrc, MVT::v8f32, 4},   // 2*vperm2f128 + 2*vshufps
1400       {TTI::SK_PermuteTwoSrc, MVT::v8i32, 4},   // 2*vperm2f128 + 2*vshufps
1401       {TTI::SK_PermuteTwoSrc, MVT::v16i16, 15}, // 2*vextractf128 + 8*pshufb
1402                                                 // + 4*por + vinsertf128
1403       {TTI::SK_PermuteTwoSrc, MVT::v32i8, 15},  // 2*vextractf128 + 8*pshufb
1404                                                 // + 4*por + vinsertf128
1405   };
1406 
1407   if (ST->hasAVX())
1408     if (const auto *Entry = CostTableLookup(AVX1ShuffleTbl, Kind, LT.second))
1409       return LT.first * Entry->Cost;
1410 
1411   static const CostTblEntry SSE41ShuffleTbl[] = {
1412       {TTI::SK_Select, MVT::v2i64, 1}, // pblendw
1413       {TTI::SK_Select, MVT::v2f64, 1}, // movsd
1414       {TTI::SK_Select, MVT::v4i32, 1}, // pblendw
1415       {TTI::SK_Select, MVT::v4f32, 1}, // blendps
1416       {TTI::SK_Select, MVT::v8i16, 1}, // pblendw
1417       {TTI::SK_Select, MVT::v16i8, 1}  // pblendvb
1418   };
1419 
1420   if (ST->hasSSE41())
1421     if (const auto *Entry = CostTableLookup(SSE41ShuffleTbl, Kind, LT.second))
1422       return LT.first * Entry->Cost;
1423 
1424   static const CostTblEntry SSSE3ShuffleTbl[] = {
1425       {TTI::SK_Broadcast, MVT::v8i16, 1}, // pshufb
1426       {TTI::SK_Broadcast, MVT::v16i8, 1}, // pshufb
1427 
1428       {TTI::SK_Reverse, MVT::v8i16, 1}, // pshufb
1429       {TTI::SK_Reverse, MVT::v16i8, 1}, // pshufb
1430 
1431       {TTI::SK_Select, MVT::v8i16, 3}, // 2*pshufb + por
1432       {TTI::SK_Select, MVT::v16i8, 3}, // 2*pshufb + por
1433 
1434       {TTI::SK_PermuteSingleSrc, MVT::v8i16, 1}, // pshufb
1435       {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1}, // pshufb
1436 
1437       {TTI::SK_PermuteTwoSrc, MVT::v8i16, 3}, // 2*pshufb + por
1438       {TTI::SK_PermuteTwoSrc, MVT::v16i8, 3}, // 2*pshufb + por
1439   };
1440 
1441   if (ST->hasSSSE3())
1442     if (const auto *Entry = CostTableLookup(SSSE3ShuffleTbl, Kind, LT.second))
1443       return LT.first * Entry->Cost;
1444 
1445   static const CostTblEntry SSE2ShuffleTbl[] = {
1446       {TTI::SK_Broadcast, MVT::v2f64, 1}, // shufpd
1447       {TTI::SK_Broadcast, MVT::v2i64, 1}, // pshufd
1448       {TTI::SK_Broadcast, MVT::v4i32, 1}, // pshufd
1449       {TTI::SK_Broadcast, MVT::v8i16, 2}, // pshuflw + pshufd
1450       {TTI::SK_Broadcast, MVT::v16i8, 3}, // unpck + pshuflw + pshufd
1451 
1452       {TTI::SK_Reverse, MVT::v2f64, 1}, // shufpd
1453       {TTI::SK_Reverse, MVT::v2i64, 1}, // pshufd
1454       {TTI::SK_Reverse, MVT::v4i32, 1}, // pshufd
1455       {TTI::SK_Reverse, MVT::v8i16, 3}, // pshuflw + pshufhw + pshufd
1456       {TTI::SK_Reverse, MVT::v16i8, 9}, // 2*pshuflw + 2*pshufhw
1457                                         // + 2*pshufd + 2*unpck + packus
1458 
1459       {TTI::SK_Select, MVT::v2i64, 1}, // movsd
1460       {TTI::SK_Select, MVT::v2f64, 1}, // movsd
1461       {TTI::SK_Select, MVT::v4i32, 2}, // 2*shufps
1462       {TTI::SK_Select, MVT::v8i16, 3}, // pand + pandn + por
1463       {TTI::SK_Select, MVT::v16i8, 3}, // pand + pandn + por
1464 
1465       {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1}, // shufpd
1466       {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1}, // pshufd
1467       {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1}, // pshufd
1468       {TTI::SK_PermuteSingleSrc, MVT::v8i16, 5}, // 2*pshuflw + 2*pshufhw
1469                                                   // + pshufd/unpck
1470     { TTI::SK_PermuteSingleSrc, MVT::v16i8, 10 }, // 2*pshuflw + 2*pshufhw
1471                                                   // + 2*pshufd + 2*unpck + 2*packus
1472 
1473     { TTI::SK_PermuteTwoSrc,    MVT::v2f64,  1 }, // shufpd
1474     { TTI::SK_PermuteTwoSrc,    MVT::v2i64,  1 }, // shufpd
1475     { TTI::SK_PermuteTwoSrc,    MVT::v4i32,  2 }, // 2*{unpck,movsd,pshufd}
1476     { TTI::SK_PermuteTwoSrc,    MVT::v8i16,  8 }, // blend+permute
1477     { TTI::SK_PermuteTwoSrc,    MVT::v16i8, 13 }, // blend+permute
1478   };
1479 
1480   if (ST->hasSSE2())
1481     if (const auto *Entry = CostTableLookup(SSE2ShuffleTbl, Kind, LT.second))
1482       return LT.first * Entry->Cost;
1483 
1484   static const CostTblEntry SSE1ShuffleTbl[] = {
1485     { TTI::SK_Broadcast,        MVT::v4f32, 1 }, // shufps
1486     { TTI::SK_Reverse,          MVT::v4f32, 1 }, // shufps
1487     { TTI::SK_Select,           MVT::v4f32, 2 }, // 2*shufps
1488     { TTI::SK_PermuteSingleSrc, MVT::v4f32, 1 }, // shufps
1489     { TTI::SK_PermuteTwoSrc,    MVT::v4f32, 2 }, // 2*shufps
1490   };
1491 
1492   if (ST->hasSSE1())
1493     if (const auto *Entry = CostTableLookup(SSE1ShuffleTbl, Kind, LT.second))
1494       return LT.first * Entry->Cost;
1495 
1496   return BaseT::getShuffleCost(Kind, BaseTp, Mask, Index, SubTp);
1497 }
1498 
1499 InstructionCost X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
1500                                              Type *Src,
1501                                              TTI::CastContextHint CCH,
1502                                              TTI::TargetCostKind CostKind,
1503                                              const Instruction *I) {
1504   int ISD = TLI->InstructionOpcodeToISD(Opcode);
1505   assert(ISD && "Invalid opcode");
1506 
1507   // TODO: Allow non-throughput costs that aren't binary.
1508   auto AdjustCost = [&CostKind](InstructionCost Cost) -> InstructionCost {
1509     if (CostKind != TTI::TCK_RecipThroughput)
1510       return Cost == 0 ? 0 : 1;
1511     return Cost;
1512   };
1513 
1514   // FIXME: Need a better design of the cost table to handle non-simple types of
1515   // potential massive combinations (elem_num x src_type x dst_type).
1516 
1517   static const TypeConversionCostTblEntry AVX512BWConversionTbl[] {
1518     { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 1 },
1519     { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 1 },
1520 
1521     // Mask sign extend has an instruction.
1522     { ISD::SIGN_EXTEND, MVT::v2i8,   MVT::v2i1,  1 },
1523     { ISD::SIGN_EXTEND, MVT::v2i16,  MVT::v2i1,  1 },
1524     { ISD::SIGN_EXTEND, MVT::v4i8,   MVT::v4i1,  1 },
1525     { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i1,  1 },
1526     { ISD::SIGN_EXTEND, MVT::v8i8,   MVT::v8i1,  1 },
1527     { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i1,  1 },
1528     { ISD::SIGN_EXTEND, MVT::v16i8,  MVT::v16i1, 1 },
1529     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
1530     { ISD::SIGN_EXTEND, MVT::v32i8,  MVT::v32i1, 1 },
1531     { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i1, 1 },
1532     { ISD::SIGN_EXTEND, MVT::v64i8,  MVT::v64i1, 1 },
1533 
1534     // Mask zero extend is a sext + shift.
1535     { ISD::ZERO_EXTEND, MVT::v2i8,   MVT::v2i1,  2 },
1536     { ISD::ZERO_EXTEND, MVT::v2i16,  MVT::v2i1,  2 },
1537     { ISD::ZERO_EXTEND, MVT::v4i8,   MVT::v4i1,  2 },
1538     { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i1,  2 },
1539     { ISD::ZERO_EXTEND, MVT::v8i8,   MVT::v8i1,  2 },
1540     { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i1,  2 },
1541     { ISD::ZERO_EXTEND, MVT::v16i8,  MVT::v16i1, 2 },
1542     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 },
1543     { ISD::ZERO_EXTEND, MVT::v32i8,  MVT::v32i1, 2 },
1544     { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i1, 2 },
1545     { ISD::ZERO_EXTEND, MVT::v64i8,  MVT::v64i1, 2 },
1546 
1547     { ISD::TRUNCATE,    MVT::v32i8,  MVT::v32i16, 2 },
1548     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 2 }, // widen to zmm
1549     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i8,   2 }, // widen to zmm
1550     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i16,  2 }, // widen to zmm
1551     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i8,   2 }, // widen to zmm
1552     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i16,  2 }, // widen to zmm
1553     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i8,   2 }, // widen to zmm
1554     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i16,  2 }, // widen to zmm
1555     { ISD::TRUNCATE,    MVT::v16i1,  MVT::v16i8,  2 }, // widen to zmm
1556     { ISD::TRUNCATE,    MVT::v16i1,  MVT::v16i16, 2 }, // widen to zmm
1557     { ISD::TRUNCATE,    MVT::v32i1,  MVT::v32i8,  2 }, // widen to zmm
1558     { ISD::TRUNCATE,    MVT::v32i1,  MVT::v32i16, 2 },
1559     { ISD::TRUNCATE,    MVT::v64i1,  MVT::v64i8,  2 },
1560   };
1561 
1562   static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = {
1563     { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i64,  1 },
1564     { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i64,  1 },
1565 
1566     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i64,  1 },
1567     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i64,  1 },
1568 
1569     { ISD::FP_TO_SINT,  MVT::v8i64,  MVT::v8f32,  1 },
1570     { ISD::FP_TO_SINT,  MVT::v8i64,  MVT::v8f64,  1 },
1571 
1572     { ISD::FP_TO_UINT,  MVT::v8i64,  MVT::v8f32,  1 },
1573     { ISD::FP_TO_UINT,  MVT::v8i64,  MVT::v8f64,  1 },
1574   };
1575 
1576   // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and
1577   // 256-bit wide vectors.
1578 
1579   static const TypeConversionCostTblEntry AVX512FConversionTbl[] = {
1580     { ISD::FP_EXTEND, MVT::v8f64,   MVT::v8f32,  1 },
1581     { ISD::FP_EXTEND, MVT::v8f64,   MVT::v16f32, 3 },
1582     { ISD::FP_ROUND,  MVT::v8f32,   MVT::v8f64,  1 },
1583 
1584     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i8,   3 }, // sext+vpslld+vptestmd
1585     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i8,   3 }, // sext+vpslld+vptestmd
1586     { ISD::TRUNCATE,  MVT::v8i1,    MVT::v8i8,   3 }, // sext+vpslld+vptestmd
1587     { ISD::TRUNCATE,  MVT::v16i1,   MVT::v16i8,  3 }, // sext+vpslld+vptestmd
1588     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i16,  3 }, // sext+vpsllq+vptestmq
1589     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i16,  3 }, // sext+vpsllq+vptestmq
1590     { ISD::TRUNCATE,  MVT::v8i1,    MVT::v8i16,  3 }, // sext+vpsllq+vptestmq
1591     { ISD::TRUNCATE,  MVT::v16i1,   MVT::v16i16, 3 }, // sext+vpslld+vptestmd
1592     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i32,  2 }, // zmm vpslld+vptestmd
1593     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i32,  2 }, // zmm vpslld+vptestmd
1594     { ISD::TRUNCATE,  MVT::v8i1,    MVT::v8i32,  2 }, // zmm vpslld+vptestmd
1595     { ISD::TRUNCATE,  MVT::v16i1,   MVT::v16i32, 2 }, // vpslld+vptestmd
1596     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i64,  2 }, // zmm vpsllq+vptestmq
1597     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i64,  2 }, // zmm vpsllq+vptestmq
1598     { ISD::TRUNCATE,  MVT::v8i1,    MVT::v8i64,  2 }, // vpsllq+vptestmq
1599     { ISD::TRUNCATE,  MVT::v16i8,   MVT::v16i32, 2 },
1600     { ISD::TRUNCATE,  MVT::v16i16,  MVT::v16i32, 2 },
1601     { ISD::TRUNCATE,  MVT::v8i8,    MVT::v8i64,  2 },
1602     { ISD::TRUNCATE,  MVT::v8i16,   MVT::v8i64,  2 },
1603     { ISD::TRUNCATE,  MVT::v8i32,   MVT::v8i64,  1 },
1604     { ISD::TRUNCATE,  MVT::v4i32,   MVT::v4i64,  1 }, // zmm vpmovqd
1605     { ISD::TRUNCATE,  MVT::v16i8,   MVT::v16i64, 5 },// 2*vpmovqd+concat+vpmovdb
1606 
1607     { ISD::TRUNCATE,  MVT::v16i8,  MVT::v16i16,  3 }, // extend to v16i32
1608     { ISD::TRUNCATE,  MVT::v32i8,  MVT::v32i16,  8 },
1609 
1610     // Sign extend is zmm vpternlogd+vptruncdb.
1611     // Zero extend is zmm broadcast load+vptruncdw.
1612     { ISD::SIGN_EXTEND, MVT::v2i8,   MVT::v2i1,   3 },
1613     { ISD::ZERO_EXTEND, MVT::v2i8,   MVT::v2i1,   4 },
1614     { ISD::SIGN_EXTEND, MVT::v4i8,   MVT::v4i1,   3 },
1615     { ISD::ZERO_EXTEND, MVT::v4i8,   MVT::v4i1,   4 },
1616     { ISD::SIGN_EXTEND, MVT::v8i8,   MVT::v8i1,   3 },
1617     { ISD::ZERO_EXTEND, MVT::v8i8,   MVT::v8i1,   4 },
1618     { ISD::SIGN_EXTEND, MVT::v16i8,  MVT::v16i1,  3 },
1619     { ISD::ZERO_EXTEND, MVT::v16i8,  MVT::v16i1,  4 },
1620 
1621     // Sign extend is zmm vpternlogd+vptruncdw.
1622     // Zero extend is zmm vpternlogd+vptruncdw+vpsrlw.
1623     { ISD::SIGN_EXTEND, MVT::v2i16,  MVT::v2i1,   3 },
1624     { ISD::ZERO_EXTEND, MVT::v2i16,  MVT::v2i1,   4 },
1625     { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i1,   3 },
1626     { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i1,   4 },
1627     { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i1,   3 },
1628     { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i1,   4 },
1629     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1,  3 },
1630     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1,  4 },
1631 
1632     { ISD::SIGN_EXTEND, MVT::v2i32,  MVT::v2i1,   1 }, // zmm vpternlogd
1633     { ISD::ZERO_EXTEND, MVT::v2i32,  MVT::v2i1,   2 }, // zmm vpternlogd+psrld
1634     { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i1,   1 }, // zmm vpternlogd
1635     { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i1,   2 }, // zmm vpternlogd+psrld
1636     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   1 }, // zmm vpternlogd
1637     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   2 }, // zmm vpternlogd+psrld
1638     { ISD::SIGN_EXTEND, MVT::v2i64,  MVT::v2i1,   1 }, // zmm vpternlogq
1639     { ISD::ZERO_EXTEND, MVT::v2i64,  MVT::v2i1,   2 }, // zmm vpternlogq+psrlq
1640     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   1 }, // zmm vpternlogq
1641     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   2 }, // zmm vpternlogq+psrlq
1642 
1643     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1,  1 }, // vpternlogd
1644     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1,  2 }, // vpternlogd+psrld
1645     { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i1,   1 }, // vpternlogq
1646     { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i1,   2 }, // vpternlogq+psrlq
1647 
1648     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
1649     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
1650     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
1651     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
1652     { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i8,   1 },
1653     { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i8,   1 },
1654     { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i16,  1 },
1655     { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i16,  1 },
1656     { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i32,  1 },
1657     { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i32,  1 },
1658 
1659     { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 3 }, // FIXME: May not be right
1660     { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 3 }, // FIXME: May not be right
1661 
1662     { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i1,   4 },
1663     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i1,  3 },
1664     { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i8,   2 },
1665     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i8,  2 },
1666     { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i16,  2 },
1667     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 2 },
1668     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 1 },
1669     { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  1 },
1670 
1671     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i1,   4 },
1672     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i1,  3 },
1673     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i8,   2 },
1674     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i8,  2 },
1675     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i16,  2 },
1676     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i16, 2 },
1677     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  1 },
1678     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i32, 1 },
1679     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i64, 26 },
1680     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i64,  5 },
1681 
1682     { ISD::FP_TO_SINT,  MVT::v8i8,   MVT::v8f64,  3 },
1683     { ISD::FP_TO_SINT,  MVT::v8i16,  MVT::v8f64,  3 },
1684     { ISD::FP_TO_SINT,  MVT::v16i8,  MVT::v16f32, 3 },
1685     { ISD::FP_TO_SINT,  MVT::v16i16, MVT::v16f32, 3 },
1686 
1687     { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f64,  1 },
1688     { ISD::FP_TO_UINT,  MVT::v8i16,  MVT::v8f64,  3 },
1689     { ISD::FP_TO_UINT,  MVT::v8i8,   MVT::v8f64,  3 },
1690     { ISD::FP_TO_UINT,  MVT::v16i32, MVT::v16f32, 1 },
1691     { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v16f32, 3 },
1692     { ISD::FP_TO_UINT,  MVT::v16i8,  MVT::v16f32, 3 },
1693   };
1694 
1695   static const TypeConversionCostTblEntry AVX512BWVLConversionTbl[] {
1696     // Mask sign extend has an instruction.
1697     { ISD::SIGN_EXTEND, MVT::v2i8,   MVT::v2i1,  1 },
1698     { ISD::SIGN_EXTEND, MVT::v2i16,  MVT::v2i1,  1 },
1699     { ISD::SIGN_EXTEND, MVT::v4i8,   MVT::v4i1,  1 },
1700     { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i1,  1 },
1701     { ISD::SIGN_EXTEND, MVT::v8i8,   MVT::v8i1,  1 },
1702     { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i1,  1 },
1703     { ISD::SIGN_EXTEND, MVT::v16i8,  MVT::v16i1, 1 },
1704     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
1705     { ISD::SIGN_EXTEND, MVT::v32i8,  MVT::v32i1, 1 },
1706 
1707     // Mask zero extend is a sext + shift.
1708     { ISD::ZERO_EXTEND, MVT::v2i8,   MVT::v2i1,  2 },
1709     { ISD::ZERO_EXTEND, MVT::v2i16,  MVT::v2i1,  2 },
1710     { ISD::ZERO_EXTEND, MVT::v4i8,   MVT::v4i1,  2 },
1711     { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i1,  2 },
1712     { ISD::ZERO_EXTEND, MVT::v8i8,   MVT::v8i1,  2 },
1713     { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i1,  2 },
1714     { ISD::ZERO_EXTEND, MVT::v16i8,  MVT::v16i1, 2 },
1715     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 },
1716     { ISD::ZERO_EXTEND, MVT::v32i8,  MVT::v32i1, 2 },
1717 
1718     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 2 },
1719     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i8,   2 }, // vpsllw+vptestmb
1720     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i16,  2 }, // vpsllw+vptestmw
1721     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i8,   2 }, // vpsllw+vptestmb
1722     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i16,  2 }, // vpsllw+vptestmw
1723     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i8,   2 }, // vpsllw+vptestmb
1724     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i16,  2 }, // vpsllw+vptestmw
1725     { ISD::TRUNCATE,    MVT::v16i1,  MVT::v16i8,  2 }, // vpsllw+vptestmb
1726     { ISD::TRUNCATE,    MVT::v16i1,  MVT::v16i16, 2 }, // vpsllw+vptestmw
1727     { ISD::TRUNCATE,    MVT::v32i1,  MVT::v32i8,  2 }, // vpsllw+vptestmb
1728   };
1729 
1730   static const TypeConversionCostTblEntry AVX512DQVLConversionTbl[] = {
1731     { ISD::SINT_TO_FP,  MVT::v2f32,  MVT::v2i64,  1 },
1732     { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  1 },
1733     { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i64,  1 },
1734     { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  1 },
1735 
1736     { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i64,  1 },
1737     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  1 },
1738     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i64,  1 },
1739     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  1 },
1740 
1741     { ISD::FP_TO_SINT,  MVT::v2i64,  MVT::v2f32,  1 },
1742     { ISD::FP_TO_SINT,  MVT::v4i64,  MVT::v4f32,  1 },
1743     { ISD::FP_TO_SINT,  MVT::v2i64,  MVT::v2f64,  1 },
1744     { ISD::FP_TO_SINT,  MVT::v4i64,  MVT::v4f64,  1 },
1745 
1746     { ISD::FP_TO_UINT,  MVT::v2i64,  MVT::v2f32,  1 },
1747     { ISD::FP_TO_UINT,  MVT::v4i64,  MVT::v4f32,  1 },
1748     { ISD::FP_TO_UINT,  MVT::v2i64,  MVT::v2f64,  1 },
1749     { ISD::FP_TO_UINT,  MVT::v4i64,  MVT::v4f64,  1 },
1750   };
1751 
1752   static const TypeConversionCostTblEntry AVX512VLConversionTbl[] = {
1753     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i8,   3 }, // sext+vpslld+vptestmd
1754     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i8,   3 }, // sext+vpslld+vptestmd
1755     { ISD::TRUNCATE,  MVT::v8i1,    MVT::v8i8,   3 }, // sext+vpslld+vptestmd
1756     { ISD::TRUNCATE,  MVT::v16i1,   MVT::v16i8,  8 }, // split+2*v8i8
1757     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i16,  3 }, // sext+vpsllq+vptestmq
1758     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i16,  3 }, // sext+vpsllq+vptestmq
1759     { ISD::TRUNCATE,  MVT::v8i1,    MVT::v8i16,  3 }, // sext+vpsllq+vptestmq
1760     { ISD::TRUNCATE,  MVT::v16i1,   MVT::v16i16, 8 }, // split+2*v8i16
1761     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i32,  2 }, // vpslld+vptestmd
1762     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i32,  2 }, // vpslld+vptestmd
1763     { ISD::TRUNCATE,  MVT::v8i1,    MVT::v8i32,  2 }, // vpslld+vptestmd
1764     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i64,  2 }, // vpsllq+vptestmq
1765     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i64,  2 }, // vpsllq+vptestmq
1766     { ISD::TRUNCATE,  MVT::v4i32,   MVT::v4i64,  1 }, // vpmovqd
1767     { ISD::TRUNCATE,  MVT::v4i8,    MVT::v4i64,  2 }, // vpmovqb
1768     { ISD::TRUNCATE,  MVT::v4i16,   MVT::v4i64,  2 }, // vpmovqw
1769     { ISD::TRUNCATE,  MVT::v8i8,    MVT::v8i32,  2 }, // vpmovwb
1770 
1771     // sign extend is vpcmpeq+maskedmove+vpmovdw+vpacksswb
1772     // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw+vpackuswb
1773     { ISD::SIGN_EXTEND, MVT::v2i8,   MVT::v2i1,   5 },
1774     { ISD::ZERO_EXTEND, MVT::v2i8,   MVT::v2i1,   6 },
1775     { ISD::SIGN_EXTEND, MVT::v4i8,   MVT::v4i1,   5 },
1776     { ISD::ZERO_EXTEND, MVT::v4i8,   MVT::v4i1,   6 },
1777     { ISD::SIGN_EXTEND, MVT::v8i8,   MVT::v8i1,   5 },
1778     { ISD::ZERO_EXTEND, MVT::v8i8,   MVT::v8i1,   6 },
1779     { ISD::SIGN_EXTEND, MVT::v16i8,  MVT::v16i1, 10 },
1780     { ISD::ZERO_EXTEND, MVT::v16i8,  MVT::v16i1, 12 },
1781 
1782     // sign extend is vpcmpeq+maskedmove+vpmovdw
1783     // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw
1784     { ISD::SIGN_EXTEND, MVT::v2i16,  MVT::v2i1,   4 },
1785     { ISD::ZERO_EXTEND, MVT::v2i16,  MVT::v2i1,   5 },
1786     { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i1,   4 },
1787     { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i1,   5 },
1788     { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i1,   4 },
1789     { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i1,   5 },
1790     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 10 },
1791     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 12 },
1792 
1793     { ISD::SIGN_EXTEND, MVT::v2i32,  MVT::v2i1,   1 }, // vpternlogd
1794     { ISD::ZERO_EXTEND, MVT::v2i32,  MVT::v2i1,   2 }, // vpternlogd+psrld
1795     { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i1,   1 }, // vpternlogd
1796     { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i1,   2 }, // vpternlogd+psrld
1797     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   1 }, // vpternlogd
1798     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   2 }, // vpternlogd+psrld
1799     { ISD::SIGN_EXTEND, MVT::v2i64,  MVT::v2i1,   1 }, // vpternlogq
1800     { ISD::ZERO_EXTEND, MVT::v2i64,  MVT::v2i1,   2 }, // vpternlogq+psrlq
1801     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   1 }, // vpternlogq
1802     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   2 }, // vpternlogq+psrlq
1803 
1804     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i8,   2 },
1805     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   2 },
1806     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   2 },
1807     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i16,  5 },
1808     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  2 },
1809     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  2 },
1810     { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i32,  2 },
1811     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i32,  1 },
1812     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  1 },
1813     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  1 },
1814     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  1 },
1815     { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i64,  5 },
1816     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  5 },
1817     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  5 },
1818 
1819     { ISD::UINT_TO_FP,  MVT::f32,    MVT::i64,    1 },
1820     { ISD::UINT_TO_FP,  MVT::f64,    MVT::i64,    1 },
1821 
1822     { ISD::FP_TO_SINT,  MVT::v8i8,   MVT::v8f32,  3 },
1823     { ISD::FP_TO_UINT,  MVT::v8i8,   MVT::v8f32,  3 },
1824 
1825     { ISD::FP_TO_UINT,  MVT::i64,    MVT::f32,    1 },
1826     { ISD::FP_TO_UINT,  MVT::i64,    MVT::f64,    1 },
1827 
1828     { ISD::FP_TO_UINT,  MVT::v2i32,  MVT::v2f32,  1 },
1829     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f32,  1 },
1830     { ISD::FP_TO_UINT,  MVT::v2i32,  MVT::v2f64,  1 },
1831     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f64,  1 },
1832     { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f32,  1 },
1833   };
1834 
1835   static const TypeConversionCostTblEntry AVX2ConversionTbl[] = {
1836     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
1837     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
1838     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
1839     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
1840     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   1 },
1841     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   1 },
1842     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   1 },
1843     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   1 },
1844     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1,  1 },
1845     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1,  1 },
1846     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
1847     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
1848     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  1 },
1849     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  1 },
1850     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
1851     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
1852     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },
1853     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },
1854     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 3 },
1855     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 3 },
1856 
1857     { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  2 },
1858     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i32,  2 },
1859     { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  2 },
1860 
1861     { ISD::FP_EXTEND,   MVT::v8f64,  MVT::v8f32,  3 },
1862     { ISD::FP_ROUND,    MVT::v8f32,  MVT::v8f64,  3 },
1863 
1864     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  5 },
1865   };
1866 
1867   static const TypeConversionCostTblEntry AVXConversionTbl[] = {
1868     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,  6 },
1869     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,  4 },
1870     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,  7 },
1871     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,  4 },
1872     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,  4 },
1873     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,  4 },
1874     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,  4 },
1875     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,  4 },
1876     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 4 },
1877     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 4 },
1878     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
1879     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
1880     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 4 },
1881     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
1882     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16, 4 },
1883     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16, 4 },
1884     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32, 4 },
1885     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32, 4 },
1886 
1887     { ISD::TRUNCATE,    MVT::v4i1,  MVT::v4i64,  4 },
1888     { ISD::TRUNCATE,    MVT::v8i1,  MVT::v8i32,  5 },
1889     { ISD::TRUNCATE,    MVT::v16i1, MVT::v16i16, 4 },
1890     { ISD::TRUNCATE,    MVT::v8i1,  MVT::v8i64,  9 },
1891     { ISD::TRUNCATE,    MVT::v16i1, MVT::v16i64, 11 },
1892 
1893     { ISD::TRUNCATE,    MVT::v16i8, MVT::v16i16, 4 },
1894     { ISD::TRUNCATE,    MVT::v8i8,  MVT::v8i32,  4 },
1895     { ISD::TRUNCATE,    MVT::v8i16, MVT::v8i32,  5 },
1896     { ISD::TRUNCATE,    MVT::v4i8,  MVT::v4i64,  4 },
1897     { ISD::TRUNCATE,    MVT::v4i16, MVT::v4i64,  4 },
1898     { ISD::TRUNCATE,    MVT::v4i32, MVT::v4i64,  2 },
1899     { ISD::TRUNCATE,    MVT::v8i8,  MVT::v8i64, 11 },
1900     { ISD::TRUNCATE,    MVT::v8i16, MVT::v8i64,  9 },
1901     { ISD::TRUNCATE,    MVT::v8i32, MVT::v8i64,  3 },
1902     { ISD::TRUNCATE,    MVT::v16i8, MVT::v16i64, 11 },
1903 
1904     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i1,  3 },
1905     { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i1,  3 },
1906     { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i1,  8 },
1907     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i8,  3 },
1908     { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i8,  3 },
1909     { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i8,  8 },
1910     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i16, 3 },
1911     { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i16, 3 },
1912     { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i16, 5 },
1913     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
1914     { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i32, 1 },
1915     { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i32, 1 },
1916 
1917     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i1,  7 },
1918     { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i1,  7 },
1919     { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i1,  6 },
1920     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i8,  2 },
1921     { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i8,  2 },
1922     { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i8,  5 },
1923     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
1924     { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i16, 2 },
1925     { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i16, 5 },
1926     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 6 },
1927     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i32, 6 },
1928     { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i32, 6 },
1929     { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i32, 8 },
1930     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i64, 5 },
1931     { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i64, 6 },
1932     // The generic code to compute the scalar overhead is currently broken.
1933     // Workaround this limitation by estimating the scalarization overhead
1934     // here. We have roughly 10 instructions per scalar element.
1935     // Multiply that by the vector width.
1936     // FIXME: remove that when PR19268 is fixed.
1937     { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i64, 13 },
1938     { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i64, 13 },
1939 
1940     { ISD::FP_TO_SINT,  MVT::v8i8,  MVT::v8f32, 4 },
1941     { ISD::FP_TO_SINT,  MVT::v4i8,  MVT::v4f64, 3 },
1942     { ISD::FP_TO_SINT,  MVT::v4i16, MVT::v4f64, 2 },
1943     { ISD::FP_TO_SINT,  MVT::v8i16, MVT::v8f32, 3 },
1944 
1945     { ISD::FP_TO_UINT,  MVT::v4i8,  MVT::v4f64, 3 },
1946     { ISD::FP_TO_UINT,  MVT::v4i16, MVT::v4f64, 2 },
1947     { ISD::FP_TO_UINT,  MVT::v8i8,  MVT::v8f32, 4 },
1948     { ISD::FP_TO_UINT,  MVT::v8i16, MVT::v8f32, 3 },
1949     { ISD::FP_TO_UINT,  MVT::v8i32, MVT::v8f32, 9 },
1950     // This node is expanded into scalarized operations but BasicTTI is overly
1951     // optimistic estimating its cost.  It computes 3 per element (one
1952     // vector-extract, one scalar conversion and one vector-insert).  The
1953     // problem is that the inserts form a read-modify-write chain so latency
1954     // should be factored in too.  Inflating the cost per element by 1.
1955     { ISD::FP_TO_UINT,  MVT::v4i32, MVT::v4f64, 4*4 },
1956 
1957     { ISD::FP_EXTEND,   MVT::v4f64,  MVT::v4f32,  1 },
1958     { ISD::FP_ROUND,    MVT::v4f32,  MVT::v4f64,  1 },
1959   };
1960 
1961   static const TypeConversionCostTblEntry SSE41ConversionTbl[] = {
1962     { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8,    2 },
1963     { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8,    2 },
1964     { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16,   2 },
1965     { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16,   2 },
1966     { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32,   2 },
1967     { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32,   2 },
1968 
1969     { ISD::ZERO_EXTEND, MVT::v2i16,  MVT::v2i8,   1 },
1970     { ISD::SIGN_EXTEND, MVT::v2i16,  MVT::v2i8,   1 },
1971     { ISD::ZERO_EXTEND, MVT::v2i32,  MVT::v2i8,   1 },
1972     { ISD::SIGN_EXTEND, MVT::v2i32,  MVT::v2i8,   1 },
1973     { ISD::ZERO_EXTEND, MVT::v2i64,  MVT::v2i8,   1 },
1974     { ISD::SIGN_EXTEND, MVT::v2i64,  MVT::v2i8,   1 },
1975     { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i8,   1 },
1976     { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i8,   1 },
1977     { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i8,   1 },
1978     { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i8,   1 },
1979     { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i8,   1 },
1980     { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i8,   1 },
1981     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   2 },
1982     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   2 },
1983     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  2 },
1984     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  2 },
1985     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  4 },
1986     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  4 },
1987     { ISD::ZERO_EXTEND, MVT::v2i32,  MVT::v2i16,  1 },
1988     { ISD::SIGN_EXTEND, MVT::v2i32,  MVT::v2i16,  1 },
1989     { ISD::ZERO_EXTEND, MVT::v2i64,  MVT::v2i16,  1 },
1990     { ISD::SIGN_EXTEND, MVT::v2i64,  MVT::v2i16,  1 },
1991     { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i16,  1 },
1992     { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i16,  1 },
1993     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  2 },
1994     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  2 },
1995     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 4 },
1996     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 4 },
1997     { ISD::ZERO_EXTEND, MVT::v2i64,  MVT::v2i32,  1 },
1998     { ISD::SIGN_EXTEND, MVT::v2i64,  MVT::v2i32,  1 },
1999 
2000     // These truncates end up widening elements.
2001     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i8,   1 }, // PMOVXZBQ
2002     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i16,  1 }, // PMOVXZWQ
2003     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i8,   1 }, // PMOVXZBD
2004 
2005     { ISD::TRUNCATE,    MVT::v2i8,   MVT::v2i16,  1 },
2006     { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i16,  1 },
2007     { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i16,  1 },
2008     { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i32,  1 },
2009     { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i32,  1 },
2010     { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  3 },
2011     { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  3 },
2012     { ISD::TRUNCATE,    MVT::v16i16, MVT::v16i32, 6 },
2013     { ISD::TRUNCATE,    MVT::v2i8,   MVT::v2i64,  1 }, // PSHUFB
2014 
2015     { ISD::UINT_TO_FP,  MVT::f32,    MVT::i64,    4 },
2016     { ISD::UINT_TO_FP,  MVT::f64,    MVT::i64,    4 },
2017 
2018     { ISD::FP_TO_SINT,  MVT::v2i8,   MVT::v2f32,  3 },
2019     { ISD::FP_TO_SINT,  MVT::v2i8,   MVT::v2f64,  3 },
2020 
2021     { ISD::FP_TO_UINT,  MVT::v2i8,   MVT::v2f32,  3 },
2022     { ISD::FP_TO_UINT,  MVT::v2i8,   MVT::v2f64,  3 },
2023     { ISD::FP_TO_UINT,  MVT::v4i16,  MVT::v4f32,  2 },
2024   };
2025 
2026   static const TypeConversionCostTblEntry SSE2ConversionTbl[] = {
2027     // These are somewhat magic numbers justified by looking at the output of
2028     // Intel's IACA, running some kernels and making sure when we take
2029     // legalization into account the throughput will be overestimated.
2030     { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
2031     { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
2032     { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
2033     { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
2034     { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 },
2035     { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 2*10 },
2036     { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2*10 },
2037     { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
2038     { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
2039 
2040     { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
2041     { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
2042     { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
2043     { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
2044     { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
2045     { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 },
2046     { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 6 },
2047     { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
2048 
2049     { ISD::FP_TO_SINT,  MVT::v2i8,   MVT::v2f32,  4 },
2050     { ISD::FP_TO_SINT,  MVT::v2i16,  MVT::v2f32,  2 },
2051     { ISD::FP_TO_SINT,  MVT::v4i8,   MVT::v4f32,  3 },
2052     { ISD::FP_TO_SINT,  MVT::v4i16,  MVT::v4f32,  2 },
2053     { ISD::FP_TO_SINT,  MVT::v2i16,  MVT::v2f64,  2 },
2054     { ISD::FP_TO_SINT,  MVT::v2i8,   MVT::v2f64,  4 },
2055 
2056     { ISD::FP_TO_SINT,  MVT::v2i32,  MVT::v2f64,  1 },
2057 
2058     { ISD::UINT_TO_FP,  MVT::f32,    MVT::i64,    6 },
2059     { ISD::UINT_TO_FP,  MVT::f64,    MVT::i64,    6 },
2060 
2061     { ISD::FP_TO_UINT,  MVT::i64,    MVT::f32,    4 },
2062     { ISD::FP_TO_UINT,  MVT::i64,    MVT::f64,    4 },
2063     { ISD::FP_TO_UINT,  MVT::v2i8,   MVT::v2f32,  4 },
2064     { ISD::FP_TO_UINT,  MVT::v2i8,   MVT::v2f64,  4 },
2065     { ISD::FP_TO_UINT,  MVT::v4i8,   MVT::v4f32,  3 },
2066     { ISD::FP_TO_UINT,  MVT::v2i16,  MVT::v2f32,  2 },
2067     { ISD::FP_TO_UINT,  MVT::v2i16,  MVT::v2f64,  2 },
2068     { ISD::FP_TO_UINT,  MVT::v4i16,  MVT::v4f32,  4 },
2069     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f32,  8 },
2070 
2071     { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i8,   1 },
2072     { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i8,   6 },
2073     { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i8,   2 },
2074     { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i8,   3 },
2075     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   4 },
2076     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   8 },
2077     { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i8,   1 },
2078     { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i8,   2 },
2079     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   6 },
2080     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   6 },
2081     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  3 },
2082     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
2083     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  9 },
2084     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  12 },
2085     { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i16,  1 },
2086     { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i16,  2 },
2087     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
2088     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  10 },
2089     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  3 },
2090     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
2091     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 6 },
2092     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 8 },
2093     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  3 },
2094     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  5 },
2095 
2096     // These truncates are really widening elements.
2097     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i32,  1 }, // PSHUFD
2098     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i16,  2 }, // PUNPCKLWD+DQ
2099     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i8,   3 }, // PUNPCKLBW+WD+PSHUFD
2100     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i16,  1 }, // PUNPCKLWD
2101     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i8,   2 }, // PUNPCKLBW+WD
2102     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i8,   1 }, // PUNPCKLBW
2103 
2104     { ISD::TRUNCATE,    MVT::v2i8,   MVT::v2i16,  2 }, // PAND+PACKUSWB
2105     { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i16,  2 }, // PAND+PACKUSWB
2106     { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i16,  2 }, // PAND+PACKUSWB
2107     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 3 },
2108     { ISD::TRUNCATE,    MVT::v2i8,   MVT::v2i32,  3 }, // PAND+2*PACKUSWB
2109     { ISD::TRUNCATE,    MVT::v2i16,  MVT::v2i32,  1 },
2110     { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i32,  3 },
2111     { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i32,  3 },
2112     { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  4 },
2113     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i32, 7 },
2114     { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  5 },
2115     { ISD::TRUNCATE,    MVT::v16i16, MVT::v16i32, 10 },
2116     { ISD::TRUNCATE,    MVT::v2i8,   MVT::v2i64,  4 }, // PAND+3*PACKUSWB
2117     { ISD::TRUNCATE,    MVT::v2i16,  MVT::v2i64,  2 }, // PSHUFD+PSHUFLW
2118     { ISD::TRUNCATE,    MVT::v2i32,  MVT::v2i64,  1 }, // PSHUFD
2119   };
2120 
2121   std::pair<InstructionCost, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
2122   std::pair<InstructionCost, MVT> LTDest =
2123       TLI->getTypeLegalizationCost(DL, Dst);
2124 
2125   if (ST->hasSSE41() && !ST->hasAVX())
2126     if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
2127                                                    LTDest.second, LTSrc.second))
2128       return AdjustCost(LTSrc.first * Entry->Cost);
2129 
2130   if (ST->hasSSE2() && !ST->hasAVX())
2131     if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
2132                                                    LTDest.second, LTSrc.second))
2133       return AdjustCost(LTSrc.first * Entry->Cost);
2134 
2135   EVT SrcTy = TLI->getValueType(DL, Src);
2136   EVT DstTy = TLI->getValueType(DL, Dst);
2137 
2138   // The function getSimpleVT only handles simple value types.
2139   if (!SrcTy.isSimple() || !DstTy.isSimple())
2140     return AdjustCost(BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind));
2141 
2142   MVT SimpleSrcTy = SrcTy.getSimpleVT();
2143   MVT SimpleDstTy = DstTy.getSimpleVT();
2144 
2145   if (ST->useAVX512Regs()) {
2146     if (ST->hasBWI())
2147       if (const auto *Entry = ConvertCostTableLookup(AVX512BWConversionTbl, ISD,
2148                                                      SimpleDstTy, SimpleSrcTy))
2149         return AdjustCost(Entry->Cost);
2150 
2151     if (ST->hasDQI())
2152       if (const auto *Entry = ConvertCostTableLookup(AVX512DQConversionTbl, ISD,
2153                                                      SimpleDstTy, SimpleSrcTy))
2154         return AdjustCost(Entry->Cost);
2155 
2156     if (ST->hasAVX512())
2157       if (const auto *Entry = ConvertCostTableLookup(AVX512FConversionTbl, ISD,
2158                                                      SimpleDstTy, SimpleSrcTy))
2159         return AdjustCost(Entry->Cost);
2160   }
2161 
2162   if (ST->hasBWI())
2163     if (const auto *Entry = ConvertCostTableLookup(AVX512BWVLConversionTbl, ISD,
2164                                                    SimpleDstTy, SimpleSrcTy))
2165       return AdjustCost(Entry->Cost);
2166 
2167   if (ST->hasDQI())
2168     if (const auto *Entry = ConvertCostTableLookup(AVX512DQVLConversionTbl, ISD,
2169                                                    SimpleDstTy, SimpleSrcTy))
2170       return AdjustCost(Entry->Cost);
2171 
2172   if (ST->hasAVX512())
2173     if (const auto *Entry = ConvertCostTableLookup(AVX512VLConversionTbl, ISD,
2174                                                    SimpleDstTy, SimpleSrcTy))
2175       return AdjustCost(Entry->Cost);
2176 
2177   if (ST->hasAVX2()) {
2178     if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
2179                                                    SimpleDstTy, SimpleSrcTy))
2180       return AdjustCost(Entry->Cost);
2181   }
2182 
2183   if (ST->hasAVX()) {
2184     if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
2185                                                    SimpleDstTy, SimpleSrcTy))
2186       return AdjustCost(Entry->Cost);
2187   }
2188 
2189   if (ST->hasSSE41()) {
2190     if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
2191                                                    SimpleDstTy, SimpleSrcTy))
2192       return AdjustCost(Entry->Cost);
2193   }
2194 
2195   if (ST->hasSSE2()) {
2196     if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
2197                                                    SimpleDstTy, SimpleSrcTy))
2198       return AdjustCost(Entry->Cost);
2199   }
2200 
2201   return AdjustCost(
2202       BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
2203 }
2204 
2205 InstructionCost X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
2206                                                Type *CondTy,
2207                                                CmpInst::Predicate VecPred,
2208                                                TTI::TargetCostKind CostKind,
2209                                                const Instruction *I) {
2210   // TODO: Handle other cost kinds.
2211   if (CostKind != TTI::TCK_RecipThroughput)
2212     return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
2213                                      I);
2214 
2215   // Legalize the type.
2216   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
2217 
2218   MVT MTy = LT.second;
2219 
2220   int ISD = TLI->InstructionOpcodeToISD(Opcode);
2221   assert(ISD && "Invalid opcode");
2222 
2223   unsigned ExtraCost = 0;
2224   if (I && (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)) {
2225     // Some vector comparison predicates cost extra instructions.
2226     if (MTy.isVector() &&
2227         !((ST->hasXOP() && (!ST->hasAVX2() || MTy.is128BitVector())) ||
2228           (ST->hasAVX512() && 32 <= MTy.getScalarSizeInBits()) ||
2229           ST->hasBWI())) {
2230       switch (cast<CmpInst>(I)->getPredicate()) {
2231       case CmpInst::Predicate::ICMP_NE:
2232         // xor(cmpeq(x,y),-1)
2233         ExtraCost = 1;
2234         break;
2235       case CmpInst::Predicate::ICMP_SGE:
2236       case CmpInst::Predicate::ICMP_SLE:
2237         // xor(cmpgt(x,y),-1)
2238         ExtraCost = 1;
2239         break;
2240       case CmpInst::Predicate::ICMP_ULT:
2241       case CmpInst::Predicate::ICMP_UGT:
2242         // cmpgt(xor(x,signbit),xor(y,signbit))
2243         // xor(cmpeq(pmaxu(x,y),x),-1)
2244         ExtraCost = 2;
2245         break;
2246       case CmpInst::Predicate::ICMP_ULE:
2247       case CmpInst::Predicate::ICMP_UGE:
2248         if ((ST->hasSSE41() && MTy.getScalarSizeInBits() == 32) ||
2249             (ST->hasSSE2() && MTy.getScalarSizeInBits() < 32)) {
2250           // cmpeq(psubus(x,y),0)
2251           // cmpeq(pminu(x,y),x)
2252           ExtraCost = 1;
2253         } else {
2254           // xor(cmpgt(xor(x,signbit),xor(y,signbit)),-1)
2255           ExtraCost = 3;
2256         }
2257         break;
2258       default:
2259         break;
2260       }
2261     }
2262   }
2263 
2264   static const CostTblEntry SLMCostTbl[] = {
2265     // slm pcmpeq/pcmpgt throughput is 2
2266     { ISD::SETCC,   MVT::v2i64,   2 },
2267   };
2268 
2269   static const CostTblEntry AVX512BWCostTbl[] = {
2270     { ISD::SETCC,   MVT::v32i16,  1 },
2271     { ISD::SETCC,   MVT::v64i8,   1 },
2272 
2273     { ISD::SELECT,  MVT::v32i16,  1 },
2274     { ISD::SELECT,  MVT::v64i8,   1 },
2275   };
2276 
2277   static const CostTblEntry AVX512CostTbl[] = {
2278     { ISD::SETCC,   MVT::v8i64,   1 },
2279     { ISD::SETCC,   MVT::v16i32,  1 },
2280     { ISD::SETCC,   MVT::v8f64,   1 },
2281     { ISD::SETCC,   MVT::v16f32,  1 },
2282 
2283     { ISD::SELECT,  MVT::v8i64,   1 },
2284     { ISD::SELECT,  MVT::v16i32,  1 },
2285     { ISD::SELECT,  MVT::v8f64,   1 },
2286     { ISD::SELECT,  MVT::v16f32,  1 },
2287 
2288     { ISD::SETCC,   MVT::v32i16,  2 }, // FIXME: should probably be 4
2289     { ISD::SETCC,   MVT::v64i8,   2 }, // FIXME: should probably be 4
2290 
2291     { ISD::SELECT,  MVT::v32i16,  2 }, // FIXME: should be 3
2292     { ISD::SELECT,  MVT::v64i8,   2 }, // FIXME: should be 3
2293   };
2294 
2295   static const CostTblEntry AVX2CostTbl[] = {
2296     { ISD::SETCC,   MVT::v4i64,   1 },
2297     { ISD::SETCC,   MVT::v8i32,   1 },
2298     { ISD::SETCC,   MVT::v16i16,  1 },
2299     { ISD::SETCC,   MVT::v32i8,   1 },
2300 
2301     { ISD::SELECT,  MVT::v4i64,   1 }, // pblendvb
2302     { ISD::SELECT,  MVT::v8i32,   1 }, // pblendvb
2303     { ISD::SELECT,  MVT::v16i16,  1 }, // pblendvb
2304     { ISD::SELECT,  MVT::v32i8,   1 }, // pblendvb
2305   };
2306 
2307   static const CostTblEntry AVX1CostTbl[] = {
2308     { ISD::SETCC,   MVT::v4f64,   1 },
2309     { ISD::SETCC,   MVT::v8f32,   1 },
2310     // AVX1 does not support 8-wide integer compare.
2311     { ISD::SETCC,   MVT::v4i64,   4 },
2312     { ISD::SETCC,   MVT::v8i32,   4 },
2313     { ISD::SETCC,   MVT::v16i16,  4 },
2314     { ISD::SETCC,   MVT::v32i8,   4 },
2315 
2316     { ISD::SELECT,  MVT::v4f64,   1 }, // vblendvpd
2317     { ISD::SELECT,  MVT::v8f32,   1 }, // vblendvps
2318     { ISD::SELECT,  MVT::v4i64,   1 }, // vblendvpd
2319     { ISD::SELECT,  MVT::v8i32,   1 }, // vblendvps
2320     { ISD::SELECT,  MVT::v16i16,  3 }, // vandps + vandnps + vorps
2321     { ISD::SELECT,  MVT::v32i8,   3 }, // vandps + vandnps + vorps
2322   };
2323 
2324   static const CostTblEntry SSE42CostTbl[] = {
2325     { ISD::SETCC,   MVT::v2f64,   1 },
2326     { ISD::SETCC,   MVT::v4f32,   1 },
2327     { ISD::SETCC,   MVT::v2i64,   1 },
2328   };
2329 
2330   static const CostTblEntry SSE41CostTbl[] = {
2331     { ISD::SELECT,  MVT::v2f64,   1 }, // blendvpd
2332     { ISD::SELECT,  MVT::v4f32,   1 }, // blendvps
2333     { ISD::SELECT,  MVT::v2i64,   1 }, // pblendvb
2334     { ISD::SELECT,  MVT::v4i32,   1 }, // pblendvb
2335     { ISD::SELECT,  MVT::v8i16,   1 }, // pblendvb
2336     { ISD::SELECT,  MVT::v16i8,   1 }, // pblendvb
2337   };
2338 
2339   static const CostTblEntry SSE2CostTbl[] = {
2340     { ISD::SETCC,   MVT::v2f64,   2 },
2341     { ISD::SETCC,   MVT::f64,     1 },
2342     { ISD::SETCC,   MVT::v2i64,   8 },
2343     { ISD::SETCC,   MVT::v4i32,   1 },
2344     { ISD::SETCC,   MVT::v8i16,   1 },
2345     { ISD::SETCC,   MVT::v16i8,   1 },
2346 
2347     { ISD::SELECT,  MVT::v2f64,   3 }, // andpd + andnpd + orpd
2348     { ISD::SELECT,  MVT::v2i64,   3 }, // pand + pandn + por
2349     { ISD::SELECT,  MVT::v4i32,   3 }, // pand + pandn + por
2350     { ISD::SELECT,  MVT::v8i16,   3 }, // pand + pandn + por
2351     { ISD::SELECT,  MVT::v16i8,   3 }, // pand + pandn + por
2352   };
2353 
2354   static const CostTblEntry SSE1CostTbl[] = {
2355     { ISD::SETCC,   MVT::v4f32,   2 },
2356     { ISD::SETCC,   MVT::f32,     1 },
2357 
2358     { ISD::SELECT,  MVT::v4f32,   3 }, // andps + andnps + orps
2359   };
2360 
2361   if (ST->isSLM())
2362     if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
2363       return LT.first * (ExtraCost + Entry->Cost);
2364 
2365   if (ST->hasBWI())
2366     if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
2367       return LT.first * (ExtraCost + Entry->Cost);
2368 
2369   if (ST->hasAVX512())
2370     if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
2371       return LT.first * (ExtraCost + Entry->Cost);
2372 
2373   if (ST->hasAVX2())
2374     if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
2375       return LT.first * (ExtraCost + Entry->Cost);
2376 
2377   if (ST->hasAVX())
2378     if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
2379       return LT.first * (ExtraCost + Entry->Cost);
2380 
2381   if (ST->hasSSE42())
2382     if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
2383       return LT.first * (ExtraCost + Entry->Cost);
2384 
2385   if (ST->hasSSE41())
2386     if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
2387       return LT.first * (ExtraCost + Entry->Cost);
2388 
2389   if (ST->hasSSE2())
2390     if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
2391       return LT.first * (ExtraCost + Entry->Cost);
2392 
2393   if (ST->hasSSE1())
2394     if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
2395       return LT.first * (ExtraCost + Entry->Cost);
2396 
2397   return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
2398 }
2399 
2400 unsigned X86TTIImpl::getAtomicMemIntrinsicMaxElementSize() const { return 16; }
2401 
2402 InstructionCost
2403 X86TTIImpl::getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
2404                                            TTI::TargetCostKind CostKind) {
2405 
2406   // Costs should match the codegen from:
2407   // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll
2408   // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll
2409   // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll
2410   // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll
2411   // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll
2412 
2413   // TODO: Overflow intrinsics (*ADDO, *SUBO, *MULO) with vector types are not
2414   //       specialized in these tables yet.
2415   static const CostTblEntry AVX512CDCostTbl[] = {
2416     { ISD::CTLZ,       MVT::v8i64,   1 },
2417     { ISD::CTLZ,       MVT::v16i32,  1 },
2418     { ISD::CTLZ,       MVT::v32i16,  8 },
2419     { ISD::CTLZ,       MVT::v64i8,  20 },
2420     { ISD::CTLZ,       MVT::v4i64,   1 },
2421     { ISD::CTLZ,       MVT::v8i32,   1 },
2422     { ISD::CTLZ,       MVT::v16i16,  4 },
2423     { ISD::CTLZ,       MVT::v32i8,  10 },
2424     { ISD::CTLZ,       MVT::v2i64,   1 },
2425     { ISD::CTLZ,       MVT::v4i32,   1 },
2426     { ISD::CTLZ,       MVT::v8i16,   4 },
2427     { ISD::CTLZ,       MVT::v16i8,   4 },
2428   };
2429   static const CostTblEntry AVX512BWCostTbl[] = {
2430     { ISD::ABS,        MVT::v32i16,  1 },
2431     { ISD::ABS,        MVT::v64i8,   1 },
2432     { ISD::BITREVERSE, MVT::v8i64,   5 },
2433     { ISD::BITREVERSE, MVT::v16i32,  5 },
2434     { ISD::BITREVERSE, MVT::v32i16,  5 },
2435     { ISD::BITREVERSE, MVT::v64i8,   5 },
2436     { ISD::CTLZ,       MVT::v8i64,  23 },
2437     { ISD::CTLZ,       MVT::v16i32, 22 },
2438     { ISD::CTLZ,       MVT::v32i16, 18 },
2439     { ISD::CTLZ,       MVT::v64i8,  17 },
2440     { ISD::CTPOP,      MVT::v8i64,   7 },
2441     { ISD::CTPOP,      MVT::v16i32, 11 },
2442     { ISD::CTPOP,      MVT::v32i16,  9 },
2443     { ISD::CTPOP,      MVT::v64i8,   6 },
2444     { ISD::CTTZ,       MVT::v8i64,  10 },
2445     { ISD::CTTZ,       MVT::v16i32, 14 },
2446     { ISD::CTTZ,       MVT::v32i16, 12 },
2447     { ISD::CTTZ,       MVT::v64i8,   9 },
2448     { ISD::SADDSAT,    MVT::v32i16,  1 },
2449     { ISD::SADDSAT,    MVT::v64i8,   1 },
2450     { ISD::SMAX,       MVT::v32i16,  1 },
2451     { ISD::SMAX,       MVT::v64i8,   1 },
2452     { ISD::SMIN,       MVT::v32i16,  1 },
2453     { ISD::SMIN,       MVT::v64i8,   1 },
2454     { ISD::SSUBSAT,    MVT::v32i16,  1 },
2455     { ISD::SSUBSAT,    MVT::v64i8,   1 },
2456     { ISD::UADDSAT,    MVT::v32i16,  1 },
2457     { ISD::UADDSAT,    MVT::v64i8,   1 },
2458     { ISD::UMAX,       MVT::v32i16,  1 },
2459     { ISD::UMAX,       MVT::v64i8,   1 },
2460     { ISD::UMIN,       MVT::v32i16,  1 },
2461     { ISD::UMIN,       MVT::v64i8,   1 },
2462     { ISD::USUBSAT,    MVT::v32i16,  1 },
2463     { ISD::USUBSAT,    MVT::v64i8,   1 },
2464   };
2465   static const CostTblEntry AVX512CostTbl[] = {
2466     { ISD::ABS,        MVT::v8i64,   1 },
2467     { ISD::ABS,        MVT::v16i32,  1 },
2468     { ISD::ABS,        MVT::v32i16,  2 }, // FIXME: include split
2469     { ISD::ABS,        MVT::v64i8,   2 }, // FIXME: include split
2470     { ISD::ABS,        MVT::v4i64,   1 },
2471     { ISD::ABS,        MVT::v2i64,   1 },
2472     { ISD::BITREVERSE, MVT::v8i64,  36 },
2473     { ISD::BITREVERSE, MVT::v16i32, 24 },
2474     { ISD::BITREVERSE, MVT::v32i16, 10 },
2475     { ISD::BITREVERSE, MVT::v64i8,  10 },
2476     { ISD::CTLZ,       MVT::v8i64,  29 },
2477     { ISD::CTLZ,       MVT::v16i32, 35 },
2478     { ISD::CTLZ,       MVT::v32i16, 28 },
2479     { ISD::CTLZ,       MVT::v64i8,  18 },
2480     { ISD::CTPOP,      MVT::v8i64,  16 },
2481     { ISD::CTPOP,      MVT::v16i32, 24 },
2482     { ISD::CTPOP,      MVT::v32i16, 18 },
2483     { ISD::CTPOP,      MVT::v64i8,  12 },
2484     { ISD::CTTZ,       MVT::v8i64,  20 },
2485     { ISD::CTTZ,       MVT::v16i32, 28 },
2486     { ISD::CTTZ,       MVT::v32i16, 24 },
2487     { ISD::CTTZ,       MVT::v64i8,  18 },
2488     { ISD::SMAX,       MVT::v8i64,   1 },
2489     { ISD::SMAX,       MVT::v16i32,  1 },
2490     { ISD::SMAX,       MVT::v32i16,  2 }, // FIXME: include split
2491     { ISD::SMAX,       MVT::v64i8,   2 }, // FIXME: include split
2492     { ISD::SMAX,       MVT::v4i64,   1 },
2493     { ISD::SMAX,       MVT::v2i64,   1 },
2494     { ISD::SMIN,       MVT::v8i64,   1 },
2495     { ISD::SMIN,       MVT::v16i32,  1 },
2496     { ISD::SMIN,       MVT::v32i16,  2 }, // FIXME: include split
2497     { ISD::SMIN,       MVT::v64i8,   2 }, // FIXME: include split
2498     { ISD::SMIN,       MVT::v4i64,   1 },
2499     { ISD::SMIN,       MVT::v2i64,   1 },
2500     { ISD::UMAX,       MVT::v8i64,   1 },
2501     { ISD::UMAX,       MVT::v16i32,  1 },
2502     { ISD::UMAX,       MVT::v32i16,  2 }, // FIXME: include split
2503     { ISD::UMAX,       MVT::v64i8,   2 }, // FIXME: include split
2504     { ISD::UMAX,       MVT::v4i64,   1 },
2505     { ISD::UMAX,       MVT::v2i64,   1 },
2506     { ISD::UMIN,       MVT::v8i64,   1 },
2507     { ISD::UMIN,       MVT::v16i32,  1 },
2508     { ISD::UMIN,       MVT::v32i16,  2 }, // FIXME: include split
2509     { ISD::UMIN,       MVT::v64i8,   2 }, // FIXME: include split
2510     { ISD::UMIN,       MVT::v4i64,   1 },
2511     { ISD::UMIN,       MVT::v2i64,   1 },
2512     { ISD::USUBSAT,    MVT::v16i32,  2 }, // pmaxud + psubd
2513     { ISD::USUBSAT,    MVT::v2i64,   2 }, // pmaxuq + psubq
2514     { ISD::USUBSAT,    MVT::v4i64,   2 }, // pmaxuq + psubq
2515     { ISD::USUBSAT,    MVT::v8i64,   2 }, // pmaxuq + psubq
2516     { ISD::UADDSAT,    MVT::v16i32,  3 }, // not + pminud + paddd
2517     { ISD::UADDSAT,    MVT::v2i64,   3 }, // not + pminuq + paddq
2518     { ISD::UADDSAT,    MVT::v4i64,   3 }, // not + pminuq + paddq
2519     { ISD::UADDSAT,    MVT::v8i64,   3 }, // not + pminuq + paddq
2520     { ISD::SADDSAT,    MVT::v32i16,  2 }, // FIXME: include split
2521     { ISD::SADDSAT,    MVT::v64i8,   2 }, // FIXME: include split
2522     { ISD::SSUBSAT,    MVT::v32i16,  2 }, // FIXME: include split
2523     { ISD::SSUBSAT,    MVT::v64i8,   2 }, // FIXME: include split
2524     { ISD::UADDSAT,    MVT::v32i16,  2 }, // FIXME: include split
2525     { ISD::UADDSAT,    MVT::v64i8,   2 }, // FIXME: include split
2526     { ISD::USUBSAT,    MVT::v32i16,  2 }, // FIXME: include split
2527     { ISD::USUBSAT,    MVT::v64i8,   2 }, // FIXME: include split
2528     { ISD::FMAXNUM,    MVT::f32,     2 },
2529     { ISD::FMAXNUM,    MVT::v4f32,   2 },
2530     { ISD::FMAXNUM,    MVT::v8f32,   2 },
2531     { ISD::FMAXNUM,    MVT::v16f32,  2 },
2532     { ISD::FMAXNUM,    MVT::f64,     2 },
2533     { ISD::FMAXNUM,    MVT::v2f64,   2 },
2534     { ISD::FMAXNUM,    MVT::v4f64,   2 },
2535     { ISD::FMAXNUM,    MVT::v8f64,   2 },
2536   };
2537   static const CostTblEntry XOPCostTbl[] = {
2538     { ISD::BITREVERSE, MVT::v4i64,   4 },
2539     { ISD::BITREVERSE, MVT::v8i32,   4 },
2540     { ISD::BITREVERSE, MVT::v16i16,  4 },
2541     { ISD::BITREVERSE, MVT::v32i8,   4 },
2542     { ISD::BITREVERSE, MVT::v2i64,   1 },
2543     { ISD::BITREVERSE, MVT::v4i32,   1 },
2544     { ISD::BITREVERSE, MVT::v8i16,   1 },
2545     { ISD::BITREVERSE, MVT::v16i8,   1 },
2546     { ISD::BITREVERSE, MVT::i64,     3 },
2547     { ISD::BITREVERSE, MVT::i32,     3 },
2548     { ISD::BITREVERSE, MVT::i16,     3 },
2549     { ISD::BITREVERSE, MVT::i8,      3 }
2550   };
2551   static const CostTblEntry AVX2CostTbl[] = {
2552     { ISD::ABS,        MVT::v4i64,   2 }, // VBLENDVPD(X,VPSUBQ(0,X),X)
2553     { ISD::ABS,        MVT::v8i32,   1 },
2554     { ISD::ABS,        MVT::v16i16,  1 },
2555     { ISD::ABS,        MVT::v32i8,   1 },
2556     { ISD::BITREVERSE, MVT::v4i64,   5 },
2557     { ISD::BITREVERSE, MVT::v8i32,   5 },
2558     { ISD::BITREVERSE, MVT::v16i16,  5 },
2559     { ISD::BITREVERSE, MVT::v32i8,   5 },
2560     { ISD::BSWAP,      MVT::v4i64,   1 },
2561     { ISD::BSWAP,      MVT::v8i32,   1 },
2562     { ISD::BSWAP,      MVT::v16i16,  1 },
2563     { ISD::CTLZ,       MVT::v4i64,  23 },
2564     { ISD::CTLZ,       MVT::v8i32,  18 },
2565     { ISD::CTLZ,       MVT::v16i16, 14 },
2566     { ISD::CTLZ,       MVT::v32i8,   9 },
2567     { ISD::CTPOP,      MVT::v4i64,   7 },
2568     { ISD::CTPOP,      MVT::v8i32,  11 },
2569     { ISD::CTPOP,      MVT::v16i16,  9 },
2570     { ISD::CTPOP,      MVT::v32i8,   6 },
2571     { ISD::CTTZ,       MVT::v4i64,  10 },
2572     { ISD::CTTZ,       MVT::v8i32,  14 },
2573     { ISD::CTTZ,       MVT::v16i16, 12 },
2574     { ISD::CTTZ,       MVT::v32i8,   9 },
2575     { ISD::SADDSAT,    MVT::v16i16,  1 },
2576     { ISD::SADDSAT,    MVT::v32i8,   1 },
2577     { ISD::SMAX,       MVT::v8i32,   1 },
2578     { ISD::SMAX,       MVT::v16i16,  1 },
2579     { ISD::SMAX,       MVT::v32i8,   1 },
2580     { ISD::SMIN,       MVT::v8i32,   1 },
2581     { ISD::SMIN,       MVT::v16i16,  1 },
2582     { ISD::SMIN,       MVT::v32i8,   1 },
2583     { ISD::SSUBSAT,    MVT::v16i16,  1 },
2584     { ISD::SSUBSAT,    MVT::v32i8,   1 },
2585     { ISD::UADDSAT,    MVT::v16i16,  1 },
2586     { ISD::UADDSAT,    MVT::v32i8,   1 },
2587     { ISD::UADDSAT,    MVT::v8i32,   3 }, // not + pminud + paddd
2588     { ISD::UMAX,       MVT::v8i32,   1 },
2589     { ISD::UMAX,       MVT::v16i16,  1 },
2590     { ISD::UMAX,       MVT::v32i8,   1 },
2591     { ISD::UMIN,       MVT::v8i32,   1 },
2592     { ISD::UMIN,       MVT::v16i16,  1 },
2593     { ISD::UMIN,       MVT::v32i8,   1 },
2594     { ISD::USUBSAT,    MVT::v16i16,  1 },
2595     { ISD::USUBSAT,    MVT::v32i8,   1 },
2596     { ISD::USUBSAT,    MVT::v8i32,   2 }, // pmaxud + psubd
2597     { ISD::FMAXNUM,    MVT::v8f32,   3 }, // MAXPS + CMPUNORDPS + BLENDVPS
2598     { ISD::FMAXNUM,    MVT::v4f64,   3 }, // MAXPD + CMPUNORDPD + BLENDVPD
2599     { ISD::FSQRT,      MVT::f32,     7 }, // Haswell from http://www.agner.org/
2600     { ISD::FSQRT,      MVT::v4f32,   7 }, // Haswell from http://www.agner.org/
2601     { ISD::FSQRT,      MVT::v8f32,  14 }, // Haswell from http://www.agner.org/
2602     { ISD::FSQRT,      MVT::f64,    14 }, // Haswell from http://www.agner.org/
2603     { ISD::FSQRT,      MVT::v2f64,  14 }, // Haswell from http://www.agner.org/
2604     { ISD::FSQRT,      MVT::v4f64,  28 }, // Haswell from http://www.agner.org/
2605   };
2606   static const CostTblEntry AVX1CostTbl[] = {
2607     { ISD::ABS,        MVT::v4i64,   5 }, // VBLENDVPD(X,VPSUBQ(0,X),X)
2608     { ISD::ABS,        MVT::v8i32,   3 },
2609     { ISD::ABS,        MVT::v16i16,  3 },
2610     { ISD::ABS,        MVT::v32i8,   3 },
2611     { ISD::BITREVERSE, MVT::v4i64,  12 }, // 2 x 128-bit Op + extract/insert
2612     { ISD::BITREVERSE, MVT::v8i32,  12 }, // 2 x 128-bit Op + extract/insert
2613     { ISD::BITREVERSE, MVT::v16i16, 12 }, // 2 x 128-bit Op + extract/insert
2614     { ISD::BITREVERSE, MVT::v32i8,  12 }, // 2 x 128-bit Op + extract/insert
2615     { ISD::BSWAP,      MVT::v4i64,   4 },
2616     { ISD::BSWAP,      MVT::v8i32,   4 },
2617     { ISD::BSWAP,      MVT::v16i16,  4 },
2618     { ISD::CTLZ,       MVT::v4i64,  48 }, // 2 x 128-bit Op + extract/insert
2619     { ISD::CTLZ,       MVT::v8i32,  38 }, // 2 x 128-bit Op + extract/insert
2620     { ISD::CTLZ,       MVT::v16i16, 30 }, // 2 x 128-bit Op + extract/insert
2621     { ISD::CTLZ,       MVT::v32i8,  20 }, // 2 x 128-bit Op + extract/insert
2622     { ISD::CTPOP,      MVT::v4i64,  16 }, // 2 x 128-bit Op + extract/insert
2623     { ISD::CTPOP,      MVT::v8i32,  24 }, // 2 x 128-bit Op + extract/insert
2624     { ISD::CTPOP,      MVT::v16i16, 20 }, // 2 x 128-bit Op + extract/insert
2625     { ISD::CTPOP,      MVT::v32i8,  14 }, // 2 x 128-bit Op + extract/insert
2626     { ISD::CTTZ,       MVT::v4i64,  22 }, // 2 x 128-bit Op + extract/insert
2627     { ISD::CTTZ,       MVT::v8i32,  30 }, // 2 x 128-bit Op + extract/insert
2628     { ISD::CTTZ,       MVT::v16i16, 26 }, // 2 x 128-bit Op + extract/insert
2629     { ISD::CTTZ,       MVT::v32i8,  20 }, // 2 x 128-bit Op + extract/insert
2630     { ISD::SADDSAT,    MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
2631     { ISD::SADDSAT,    MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
2632     { ISD::SMAX,       MVT::v8i32,   4 }, // 2 x 128-bit Op + extract/insert
2633     { ISD::SMAX,       MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
2634     { ISD::SMAX,       MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
2635     { ISD::SMIN,       MVT::v8i32,   4 }, // 2 x 128-bit Op + extract/insert
2636     { ISD::SMIN,       MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
2637     { ISD::SMIN,       MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
2638     { ISD::SSUBSAT,    MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
2639     { ISD::SSUBSAT,    MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
2640     { ISD::UADDSAT,    MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
2641     { ISD::UADDSAT,    MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
2642     { ISD::UADDSAT,    MVT::v8i32,   8 }, // 2 x 128-bit Op + extract/insert
2643     { ISD::UMAX,       MVT::v8i32,   4 }, // 2 x 128-bit Op + extract/insert
2644     { ISD::UMAX,       MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
2645     { ISD::UMAX,       MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
2646     { ISD::UMIN,       MVT::v8i32,   4 }, // 2 x 128-bit Op + extract/insert
2647     { ISD::UMIN,       MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
2648     { ISD::UMIN,       MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
2649     { ISD::USUBSAT,    MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
2650     { ISD::USUBSAT,    MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
2651     { ISD::USUBSAT,    MVT::v8i32,   6 }, // 2 x 128-bit Op + extract/insert
2652     { ISD::FMAXNUM,    MVT::f32,     3 }, // MAXSS + CMPUNORDSS + BLENDVPS
2653     { ISD::FMAXNUM,    MVT::v4f32,   3 }, // MAXPS + CMPUNORDPS + BLENDVPS
2654     { ISD::FMAXNUM,    MVT::v8f32,   5 }, // MAXPS + CMPUNORDPS + BLENDVPS + ?
2655     { ISD::FMAXNUM,    MVT::f64,     3 }, // MAXSD + CMPUNORDSD + BLENDVPD
2656     { ISD::FMAXNUM,    MVT::v2f64,   3 }, // MAXPD + CMPUNORDPD + BLENDVPD
2657     { ISD::FMAXNUM,    MVT::v4f64,   5 }, // MAXPD + CMPUNORDPD + BLENDVPD + ?
2658     { ISD::FSQRT,      MVT::f32,    14 }, // SNB from http://www.agner.org/
2659     { ISD::FSQRT,      MVT::v4f32,  14 }, // SNB from http://www.agner.org/
2660     { ISD::FSQRT,      MVT::v8f32,  28 }, // SNB from http://www.agner.org/
2661     { ISD::FSQRT,      MVT::f64,    21 }, // SNB from http://www.agner.org/
2662     { ISD::FSQRT,      MVT::v2f64,  21 }, // SNB from http://www.agner.org/
2663     { ISD::FSQRT,      MVT::v4f64,  43 }, // SNB from http://www.agner.org/
2664   };
2665   static const CostTblEntry GLMCostTbl[] = {
2666     { ISD::FSQRT, MVT::f32,   19 }, // sqrtss
2667     { ISD::FSQRT, MVT::v4f32, 37 }, // sqrtps
2668     { ISD::FSQRT, MVT::f64,   34 }, // sqrtsd
2669     { ISD::FSQRT, MVT::v2f64, 67 }, // sqrtpd
2670   };
2671   static const CostTblEntry SLMCostTbl[] = {
2672     { ISD::FSQRT, MVT::f32,   20 }, // sqrtss
2673     { ISD::FSQRT, MVT::v4f32, 40 }, // sqrtps
2674     { ISD::FSQRT, MVT::f64,   35 }, // sqrtsd
2675     { ISD::FSQRT, MVT::v2f64, 70 }, // sqrtpd
2676   };
2677   static const CostTblEntry SSE42CostTbl[] = {
2678     { ISD::USUBSAT,    MVT::v4i32,   2 }, // pmaxud + psubd
2679     { ISD::UADDSAT,    MVT::v4i32,   3 }, // not + pminud + paddd
2680     { ISD::FSQRT,      MVT::f32,    18 }, // Nehalem from http://www.agner.org/
2681     { ISD::FSQRT,      MVT::v4f32,  18 }, // Nehalem from http://www.agner.org/
2682   };
2683   static const CostTblEntry SSE41CostTbl[] = {
2684     { ISD::ABS,        MVT::v2i64,   2 }, // BLENDVPD(X,PSUBQ(0,X),X)
2685     { ISD::SMAX,       MVT::v4i32,   1 },
2686     { ISD::SMAX,       MVT::v16i8,   1 },
2687     { ISD::SMIN,       MVT::v4i32,   1 },
2688     { ISD::SMIN,       MVT::v16i8,   1 },
2689     { ISD::UMAX,       MVT::v4i32,   1 },
2690     { ISD::UMAX,       MVT::v8i16,   1 },
2691     { ISD::UMIN,       MVT::v4i32,   1 },
2692     { ISD::UMIN,       MVT::v8i16,   1 },
2693   };
2694   static const CostTblEntry SSSE3CostTbl[] = {
2695     { ISD::ABS,        MVT::v4i32,   1 },
2696     { ISD::ABS,        MVT::v8i16,   1 },
2697     { ISD::ABS,        MVT::v16i8,   1 },
2698     { ISD::BITREVERSE, MVT::v2i64,   5 },
2699     { ISD::BITREVERSE, MVT::v4i32,   5 },
2700     { ISD::BITREVERSE, MVT::v8i16,   5 },
2701     { ISD::BITREVERSE, MVT::v16i8,   5 },
2702     { ISD::BSWAP,      MVT::v2i64,   1 },
2703     { ISD::BSWAP,      MVT::v4i32,   1 },
2704     { ISD::BSWAP,      MVT::v8i16,   1 },
2705     { ISD::CTLZ,       MVT::v2i64,  23 },
2706     { ISD::CTLZ,       MVT::v4i32,  18 },
2707     { ISD::CTLZ,       MVT::v8i16,  14 },
2708     { ISD::CTLZ,       MVT::v16i8,   9 },
2709     { ISD::CTPOP,      MVT::v2i64,   7 },
2710     { ISD::CTPOP,      MVT::v4i32,  11 },
2711     { ISD::CTPOP,      MVT::v8i16,   9 },
2712     { ISD::CTPOP,      MVT::v16i8,   6 },
2713     { ISD::CTTZ,       MVT::v2i64,  10 },
2714     { ISD::CTTZ,       MVT::v4i32,  14 },
2715     { ISD::CTTZ,       MVT::v8i16,  12 },
2716     { ISD::CTTZ,       MVT::v16i8,   9 }
2717   };
2718   static const CostTblEntry SSE2CostTbl[] = {
2719     { ISD::ABS,        MVT::v2i64,   4 },
2720     { ISD::ABS,        MVT::v4i32,   3 },
2721     { ISD::ABS,        MVT::v8i16,   2 },
2722     { ISD::ABS,        MVT::v16i8,   2 },
2723     { ISD::BITREVERSE, MVT::v2i64,  29 },
2724     { ISD::BITREVERSE, MVT::v4i32,  27 },
2725     { ISD::BITREVERSE, MVT::v8i16,  27 },
2726     { ISD::BITREVERSE, MVT::v16i8,  20 },
2727     { ISD::BSWAP,      MVT::v2i64,   7 },
2728     { ISD::BSWAP,      MVT::v4i32,   7 },
2729     { ISD::BSWAP,      MVT::v8i16,   7 },
2730     { ISD::CTLZ,       MVT::v2i64,  25 },
2731     { ISD::CTLZ,       MVT::v4i32,  26 },
2732     { ISD::CTLZ,       MVT::v8i16,  20 },
2733     { ISD::CTLZ,       MVT::v16i8,  17 },
2734     { ISD::CTPOP,      MVT::v2i64,  12 },
2735     { ISD::CTPOP,      MVT::v4i32,  15 },
2736     { ISD::CTPOP,      MVT::v8i16,  13 },
2737     { ISD::CTPOP,      MVT::v16i8,  10 },
2738     { ISD::CTTZ,       MVT::v2i64,  14 },
2739     { ISD::CTTZ,       MVT::v4i32,  18 },
2740     { ISD::CTTZ,       MVT::v8i16,  16 },
2741     { ISD::CTTZ,       MVT::v16i8,  13 },
2742     { ISD::SADDSAT,    MVT::v8i16,   1 },
2743     { ISD::SADDSAT,    MVT::v16i8,   1 },
2744     { ISD::SMAX,       MVT::v8i16,   1 },
2745     { ISD::SMIN,       MVT::v8i16,   1 },
2746     { ISD::SSUBSAT,    MVT::v8i16,   1 },
2747     { ISD::SSUBSAT,    MVT::v16i8,   1 },
2748     { ISD::UADDSAT,    MVT::v8i16,   1 },
2749     { ISD::UADDSAT,    MVT::v16i8,   1 },
2750     { ISD::UMAX,       MVT::v8i16,   2 },
2751     { ISD::UMAX,       MVT::v16i8,   1 },
2752     { ISD::UMIN,       MVT::v8i16,   2 },
2753     { ISD::UMIN,       MVT::v16i8,   1 },
2754     { ISD::USUBSAT,    MVT::v8i16,   1 },
2755     { ISD::USUBSAT,    MVT::v16i8,   1 },
2756     { ISD::FMAXNUM,    MVT::f64,     4 },
2757     { ISD::FMAXNUM,    MVT::v2f64,   4 },
2758     { ISD::FSQRT,      MVT::f64,    32 }, // Nehalem from http://www.agner.org/
2759     { ISD::FSQRT,      MVT::v2f64,  32 }, // Nehalem from http://www.agner.org/
2760   };
2761   static const CostTblEntry SSE1CostTbl[] = {
2762     { ISD::FMAXNUM,    MVT::f32,     4 },
2763     { ISD::FMAXNUM,    MVT::v4f32,   4 },
2764     { ISD::FSQRT,      MVT::f32,    28 }, // Pentium III from http://www.agner.org/
2765     { ISD::FSQRT,      MVT::v4f32,  56 }, // Pentium III from http://www.agner.org/
2766   };
2767   static const CostTblEntry BMI64CostTbl[] = { // 64-bit targets
2768     { ISD::CTTZ,       MVT::i64,     1 },
2769   };
2770   static const CostTblEntry BMI32CostTbl[] = { // 32 or 64-bit targets
2771     { ISD::CTTZ,       MVT::i32,     1 },
2772     { ISD::CTTZ,       MVT::i16,     1 },
2773     { ISD::CTTZ,       MVT::i8,      1 },
2774   };
2775   static const CostTblEntry LZCNT64CostTbl[] = { // 64-bit targets
2776     { ISD::CTLZ,       MVT::i64,     1 },
2777   };
2778   static const CostTblEntry LZCNT32CostTbl[] = { // 32 or 64-bit targets
2779     { ISD::CTLZ,       MVT::i32,     1 },
2780     { ISD::CTLZ,       MVT::i16,     1 },
2781     { ISD::CTLZ,       MVT::i8,      1 },
2782   };
2783   static const CostTblEntry POPCNT64CostTbl[] = { // 64-bit targets
2784     { ISD::CTPOP,      MVT::i64,     1 },
2785   };
2786   static const CostTblEntry POPCNT32CostTbl[] = { // 32 or 64-bit targets
2787     { ISD::CTPOP,      MVT::i32,     1 },
2788     { ISD::CTPOP,      MVT::i16,     1 },
2789     { ISD::CTPOP,      MVT::i8,      1 },
2790   };
2791   static const CostTblEntry X64CostTbl[] = { // 64-bit targets
2792     { ISD::ABS,        MVT::i64,     2 }, // SUB+CMOV
2793     { ISD::BITREVERSE, MVT::i64,    14 },
2794     { ISD::BSWAP,      MVT::i64,     1 },
2795     { ISD::CTLZ,       MVT::i64,     4 }, // BSR+XOR or BSR+XOR+CMOV
2796     { ISD::CTTZ,       MVT::i64,     3 }, // TEST+BSF+CMOV/BRANCH
2797     { ISD::CTPOP,      MVT::i64,    10 },
2798     { ISD::SADDO,      MVT::i64,     1 },
2799     { ISD::UADDO,      MVT::i64,     1 },
2800     { ISD::UMULO,      MVT::i64,     2 }, // mulq + seto
2801   };
2802   static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
2803     { ISD::ABS,        MVT::i32,     2 }, // SUB+CMOV
2804     { ISD::ABS,        MVT::i16,     2 }, // SUB+CMOV
2805     { ISD::BITREVERSE, MVT::i32,    14 },
2806     { ISD::BITREVERSE, MVT::i16,    14 },
2807     { ISD::BITREVERSE, MVT::i8,     11 },
2808     { ISD::BSWAP,      MVT::i32,     1 },
2809     { ISD::BSWAP,      MVT::i16,     1 }, // ROL
2810     { ISD::CTLZ,       MVT::i32,     4 }, // BSR+XOR or BSR+XOR+CMOV
2811     { ISD::CTLZ,       MVT::i16,     4 }, // BSR+XOR or BSR+XOR+CMOV
2812     { ISD::CTLZ,       MVT::i8,      4 }, // BSR+XOR or BSR+XOR+CMOV
2813     { ISD::CTTZ,       MVT::i32,     3 }, // TEST+BSF+CMOV/BRANCH
2814     { ISD::CTTZ,       MVT::i16,     3 }, // TEST+BSF+CMOV/BRANCH
2815     { ISD::CTTZ,       MVT::i8,      3 }, // TEST+BSF+CMOV/BRANCH
2816     { ISD::CTPOP,      MVT::i32,     8 },
2817     { ISD::CTPOP,      MVT::i16,     9 },
2818     { ISD::CTPOP,      MVT::i8,      7 },
2819     { ISD::SADDO,      MVT::i32,     1 },
2820     { ISD::SADDO,      MVT::i16,     1 },
2821     { ISD::SADDO,      MVT::i8,      1 },
2822     { ISD::UADDO,      MVT::i32,     1 },
2823     { ISD::UADDO,      MVT::i16,     1 },
2824     { ISD::UADDO,      MVT::i8,      1 },
2825     { ISD::UMULO,      MVT::i32,     2 }, // mul + seto
2826     { ISD::UMULO,      MVT::i16,     2 },
2827     { ISD::UMULO,      MVT::i8,      2 },
2828   };
2829 
2830   Type *RetTy = ICA.getReturnType();
2831   Type *OpTy = RetTy;
2832   Intrinsic::ID IID = ICA.getID();
2833   unsigned ISD = ISD::DELETED_NODE;
2834   switch (IID) {
2835   default:
2836     break;
2837   case Intrinsic::abs:
2838     ISD = ISD::ABS;
2839     break;
2840   case Intrinsic::bitreverse:
2841     ISD = ISD::BITREVERSE;
2842     break;
2843   case Intrinsic::bswap:
2844     ISD = ISD::BSWAP;
2845     break;
2846   case Intrinsic::ctlz:
2847     ISD = ISD::CTLZ;
2848     break;
2849   case Intrinsic::ctpop:
2850     ISD = ISD::CTPOP;
2851     break;
2852   case Intrinsic::cttz:
2853     ISD = ISD::CTTZ;
2854     break;
2855   case Intrinsic::maxnum:
2856   case Intrinsic::minnum:
2857     // FMINNUM has same costs so don't duplicate.
2858     ISD = ISD::FMAXNUM;
2859     break;
2860   case Intrinsic::sadd_sat:
2861     ISD = ISD::SADDSAT;
2862     break;
2863   case Intrinsic::smax:
2864     ISD = ISD::SMAX;
2865     break;
2866   case Intrinsic::smin:
2867     ISD = ISD::SMIN;
2868     break;
2869   case Intrinsic::ssub_sat:
2870     ISD = ISD::SSUBSAT;
2871     break;
2872   case Intrinsic::uadd_sat:
2873     ISD = ISD::UADDSAT;
2874     break;
2875   case Intrinsic::umax:
2876     ISD = ISD::UMAX;
2877     break;
2878   case Intrinsic::umin:
2879     ISD = ISD::UMIN;
2880     break;
2881   case Intrinsic::usub_sat:
2882     ISD = ISD::USUBSAT;
2883     break;
2884   case Intrinsic::sqrt:
2885     ISD = ISD::FSQRT;
2886     break;
2887   case Intrinsic::sadd_with_overflow:
2888   case Intrinsic::ssub_with_overflow:
2889     // SSUBO has same costs so don't duplicate.
2890     ISD = ISD::SADDO;
2891     OpTy = RetTy->getContainedType(0);
2892     break;
2893   case Intrinsic::uadd_with_overflow:
2894   case Intrinsic::usub_with_overflow:
2895     // USUBO has same costs so don't duplicate.
2896     ISD = ISD::UADDO;
2897     OpTy = RetTy->getContainedType(0);
2898     break;
2899   case Intrinsic::umul_with_overflow:
2900   case Intrinsic::smul_with_overflow:
2901     // SMULO has same costs so don't duplicate.
2902     ISD = ISD::UMULO;
2903     OpTy = RetTy->getContainedType(0);
2904     break;
2905   }
2906 
2907   if (ISD != ISD::DELETED_NODE) {
2908     // Legalize the type.
2909     std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, OpTy);
2910     MVT MTy = LT.second;
2911 
2912     // Attempt to lookup cost.
2913     if (ISD == ISD::BITREVERSE && ST->hasGFNI() && ST->hasSSSE3() &&
2914         MTy.isVector()) {
2915       // With PSHUFB the code is very similar for all types. If we have integer
2916       // byte operations, we just need a GF2P8AFFINEQB for vXi8. For other types
2917       // we also need a PSHUFB.
2918       unsigned Cost = MTy.getVectorElementType() == MVT::i8 ? 1 : 2;
2919 
2920       // Without byte operations, we need twice as many GF2P8AFFINEQB and PSHUFB
2921       // instructions. We also need an extract and an insert.
2922       if (!(MTy.is128BitVector() || (ST->hasAVX2() && MTy.is256BitVector()) ||
2923             (ST->hasBWI() && MTy.is512BitVector())))
2924         Cost = Cost * 2 + 2;
2925 
2926       return LT.first * Cost;
2927     }
2928 
2929     auto adjustTableCost = [](const CostTblEntry &Entry,
2930                               InstructionCost LegalizationCost,
2931                               FastMathFlags FMF) {
2932       // If there are no NANs to deal with, then these are reduced to a
2933       // single MIN** or MAX** instruction instead of the MIN/CMP/SELECT that we
2934       // assume is used in the non-fast case.
2935       if (Entry.ISD == ISD::FMAXNUM || Entry.ISD == ISD::FMINNUM) {
2936         if (FMF.noNaNs())
2937           return LegalizationCost * 1;
2938       }
2939       return LegalizationCost * (int)Entry.Cost;
2940     };
2941 
2942     if (ST->useGLMDivSqrtCosts())
2943       if (const auto *Entry = CostTableLookup(GLMCostTbl, ISD, MTy))
2944         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2945 
2946     if (ST->isSLM())
2947       if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
2948         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2949 
2950     if (ST->hasCDI())
2951       if (const auto *Entry = CostTableLookup(AVX512CDCostTbl, ISD, MTy))
2952         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2953 
2954     if (ST->hasBWI())
2955       if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
2956         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2957 
2958     if (ST->hasAVX512())
2959       if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
2960         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2961 
2962     if (ST->hasXOP())
2963       if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
2964         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2965 
2966     if (ST->hasAVX2())
2967       if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
2968         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2969 
2970     if (ST->hasAVX())
2971       if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
2972         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2973 
2974     if (ST->hasSSE42())
2975       if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
2976         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2977 
2978     if (ST->hasSSE41())
2979       if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
2980         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2981 
2982     if (ST->hasSSSE3())
2983       if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy))
2984         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2985 
2986     if (ST->hasSSE2())
2987       if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
2988         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2989 
2990     if (ST->hasSSE1())
2991       if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
2992         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2993 
2994     if (ST->hasBMI()) {
2995       if (ST->is64Bit())
2996         if (const auto *Entry = CostTableLookup(BMI64CostTbl, ISD, MTy))
2997           return adjustTableCost(*Entry, LT.first, ICA.getFlags());
2998 
2999       if (const auto *Entry = CostTableLookup(BMI32CostTbl, ISD, MTy))
3000         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3001     }
3002 
3003     if (ST->hasLZCNT()) {
3004       if (ST->is64Bit())
3005         if (const auto *Entry = CostTableLookup(LZCNT64CostTbl, ISD, MTy))
3006           return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3007 
3008       if (const auto *Entry = CostTableLookup(LZCNT32CostTbl, ISD, MTy))
3009         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3010     }
3011 
3012     if (ST->hasPOPCNT()) {
3013       if (ST->is64Bit())
3014         if (const auto *Entry = CostTableLookup(POPCNT64CostTbl, ISD, MTy))
3015           return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3016 
3017       if (const auto *Entry = CostTableLookup(POPCNT32CostTbl, ISD, MTy))
3018         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3019     }
3020 
3021     if (ISD == ISD::BSWAP && ST->hasMOVBE() && ST->hasFastMOVBE()) {
3022       if (const Instruction *II = ICA.getInst()) {
3023         if (II->hasOneUse() && isa<StoreInst>(II->user_back()))
3024           return TTI::TCC_Free;
3025         if (auto *LI = dyn_cast<LoadInst>(II->getOperand(0))) {
3026           if (LI->hasOneUse())
3027             return TTI::TCC_Free;
3028         }
3029       }
3030     }
3031 
3032     // TODO - add BMI (TZCNT) scalar handling
3033 
3034     if (ST->is64Bit())
3035       if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
3036         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3037 
3038     if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
3039       return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3040   }
3041 
3042   return BaseT::getIntrinsicInstrCost(ICA, CostKind);
3043 }
3044 
3045 InstructionCost
3046 X86TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
3047                                   TTI::TargetCostKind CostKind) {
3048   if (ICA.isTypeBasedOnly())
3049     return getTypeBasedIntrinsicInstrCost(ICA, CostKind);
3050 
3051   static const CostTblEntry AVX512CostTbl[] = {
3052     { ISD::ROTL,       MVT::v8i64,   1 },
3053     { ISD::ROTL,       MVT::v4i64,   1 },
3054     { ISD::ROTL,       MVT::v2i64,   1 },
3055     { ISD::ROTL,       MVT::v16i32,  1 },
3056     { ISD::ROTL,       MVT::v8i32,   1 },
3057     { ISD::ROTL,       MVT::v4i32,   1 },
3058     { ISD::ROTR,       MVT::v8i64,   1 },
3059     { ISD::ROTR,       MVT::v4i64,   1 },
3060     { ISD::ROTR,       MVT::v2i64,   1 },
3061     { ISD::ROTR,       MVT::v16i32,  1 },
3062     { ISD::ROTR,       MVT::v8i32,   1 },
3063     { ISD::ROTR,       MVT::v4i32,   1 }
3064   };
3065   // XOP: ROTL = VPROT(X,Y), ROTR = VPROT(X,SUB(0,Y))
3066   static const CostTblEntry XOPCostTbl[] = {
3067     { ISD::ROTL,       MVT::v4i64,   4 },
3068     { ISD::ROTL,       MVT::v8i32,   4 },
3069     { ISD::ROTL,       MVT::v16i16,  4 },
3070     { ISD::ROTL,       MVT::v32i8,   4 },
3071     { ISD::ROTL,       MVT::v2i64,   1 },
3072     { ISD::ROTL,       MVT::v4i32,   1 },
3073     { ISD::ROTL,       MVT::v8i16,   1 },
3074     { ISD::ROTL,       MVT::v16i8,   1 },
3075     { ISD::ROTR,       MVT::v4i64,   6 },
3076     { ISD::ROTR,       MVT::v8i32,   6 },
3077     { ISD::ROTR,       MVT::v16i16,  6 },
3078     { ISD::ROTR,       MVT::v32i8,   6 },
3079     { ISD::ROTR,       MVT::v2i64,   2 },
3080     { ISD::ROTR,       MVT::v4i32,   2 },
3081     { ISD::ROTR,       MVT::v8i16,   2 },
3082     { ISD::ROTR,       MVT::v16i8,   2 }
3083   };
3084   static const CostTblEntry X64CostTbl[] = { // 64-bit targets
3085     { ISD::ROTL,       MVT::i64,     1 },
3086     { ISD::ROTR,       MVT::i64,     1 },
3087     { ISD::FSHL,       MVT::i64,     4 }
3088   };
3089   static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
3090     { ISD::ROTL,       MVT::i32,     1 },
3091     { ISD::ROTL,       MVT::i16,     1 },
3092     { ISD::ROTL,       MVT::i8,      1 },
3093     { ISD::ROTR,       MVT::i32,     1 },
3094     { ISD::ROTR,       MVT::i16,     1 },
3095     { ISD::ROTR,       MVT::i8,      1 },
3096     { ISD::FSHL,       MVT::i32,     4 },
3097     { ISD::FSHL,       MVT::i16,     4 },
3098     { ISD::FSHL,       MVT::i8,      4 }
3099   };
3100 
3101   Intrinsic::ID IID = ICA.getID();
3102   Type *RetTy = ICA.getReturnType();
3103   const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
3104   unsigned ISD = ISD::DELETED_NODE;
3105   switch (IID) {
3106   default:
3107     break;
3108   case Intrinsic::fshl:
3109     ISD = ISD::FSHL;
3110     if (Args[0] == Args[1])
3111       ISD = ISD::ROTL;
3112     break;
3113   case Intrinsic::fshr:
3114     // FSHR has same costs so don't duplicate.
3115     ISD = ISD::FSHL;
3116     if (Args[0] == Args[1])
3117       ISD = ISD::ROTR;
3118     break;
3119   }
3120 
3121   if (ISD != ISD::DELETED_NODE) {
3122     // Legalize the type.
3123     std::pair<InstructionCost, MVT> LT =
3124         TLI->getTypeLegalizationCost(DL, RetTy);
3125     MVT MTy = LT.second;
3126 
3127     // Attempt to lookup cost.
3128     if (ST->hasAVX512())
3129       if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
3130         return LT.first * Entry->Cost;
3131 
3132     if (ST->hasXOP())
3133       if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
3134         return LT.first * Entry->Cost;
3135 
3136     if (ST->is64Bit())
3137       if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
3138         return LT.first * Entry->Cost;
3139 
3140     if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
3141       return LT.first * Entry->Cost;
3142   }
3143 
3144   return BaseT::getIntrinsicInstrCost(ICA, CostKind);
3145 }
3146 
3147 InstructionCost X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
3148                                                unsigned Index) {
3149   static const CostTblEntry SLMCostTbl[] = {
3150      { ISD::EXTRACT_VECTOR_ELT,       MVT::i8,      4 },
3151      { ISD::EXTRACT_VECTOR_ELT,       MVT::i16,     4 },
3152      { ISD::EXTRACT_VECTOR_ELT,       MVT::i32,     4 },
3153      { ISD::EXTRACT_VECTOR_ELT,       MVT::i64,     7 }
3154    };
3155 
3156   assert(Val->isVectorTy() && "This must be a vector type");
3157   Type *ScalarType = Val->getScalarType();
3158   int RegisterFileMoveCost = 0;
3159 
3160   if (Index != -1U && (Opcode == Instruction::ExtractElement ||
3161                        Opcode == Instruction::InsertElement)) {
3162     // Legalize the type.
3163     std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);
3164 
3165     // This type is legalized to a scalar type.
3166     if (!LT.second.isVector())
3167       return 0;
3168 
3169     // The type may be split. Normalize the index to the new type.
3170     unsigned NumElts = LT.second.getVectorNumElements();
3171     unsigned SubNumElts = NumElts;
3172     Index = Index % NumElts;
3173 
3174     // For >128-bit vectors, we need to extract higher 128-bit subvectors.
3175     // For inserts, we also need to insert the subvector back.
3176     if (LT.second.getSizeInBits() > 128) {
3177       assert((LT.second.getSizeInBits() % 128) == 0 && "Illegal vector");
3178       unsigned NumSubVecs = LT.second.getSizeInBits() / 128;
3179       SubNumElts = NumElts / NumSubVecs;
3180       if (SubNumElts <= Index) {
3181         RegisterFileMoveCost += (Opcode == Instruction::InsertElement ? 2 : 1);
3182         Index %= SubNumElts;
3183       }
3184     }
3185 
3186     if (Index == 0) {
3187       // Floating point scalars are already located in index #0.
3188       // Many insertions to #0 can fold away for scalar fp-ops, so let's assume
3189       // true for all.
3190       if (ScalarType->isFloatingPointTy())
3191         return RegisterFileMoveCost;
3192 
3193       // Assume movd/movq XMM -> GPR is relatively cheap on all targets.
3194       if (ScalarType->isIntegerTy() && Opcode == Instruction::ExtractElement)
3195         return 1 + RegisterFileMoveCost;
3196     }
3197 
3198     int ISD = TLI->InstructionOpcodeToISD(Opcode);
3199     assert(ISD && "Unexpected vector opcode");
3200     MVT MScalarTy = LT.second.getScalarType();
3201     if (ST->isSLM())
3202       if (auto *Entry = CostTableLookup(SLMCostTbl, ISD, MScalarTy))
3203         return Entry->Cost + RegisterFileMoveCost;
3204 
3205     // Assume pinsr/pextr XMM <-> GPR is relatively cheap on all targets.
3206     if ((MScalarTy == MVT::i16 && ST->hasSSE2()) ||
3207         (MScalarTy.isInteger() && ST->hasSSE41()))
3208       return 1 + RegisterFileMoveCost;
3209 
3210     // Assume insertps is relatively cheap on all targets.
3211     if (MScalarTy == MVT::f32 && ST->hasSSE41() &&
3212         Opcode == Instruction::InsertElement)
3213       return 1 + RegisterFileMoveCost;
3214 
3215     // For extractions we just need to shuffle the element to index 0, which
3216     // should be very cheap (assume cost = 1). For insertions we need to shuffle
3217     // the elements to its destination. In both cases we must handle the
3218     // subvector move(s).
3219     // If the vector type is already less than 128-bits then don't reduce it.
3220     // TODO: Under what circumstances should we shuffle using the full width?
3221     InstructionCost ShuffleCost = 1;
3222     if (Opcode == Instruction::InsertElement) {
3223       auto *SubTy = cast<VectorType>(Val);
3224       EVT VT = TLI->getValueType(DL, Val);
3225       if (VT.getScalarType() != MScalarTy || VT.getSizeInBits() >= 128)
3226         SubTy = FixedVectorType::get(ScalarType, SubNumElts);
3227       ShuffleCost =
3228           getShuffleCost(TTI::SK_PermuteTwoSrc, SubTy, None, 0, SubTy);
3229     }
3230     int IntOrFpCost = ScalarType->isFloatingPointTy() ? 0 : 1;
3231     return ShuffleCost + IntOrFpCost + RegisterFileMoveCost;
3232   }
3233 
3234   // Add to the base cost if we know that the extracted element of a vector is
3235   // destined to be moved to and used in the integer register file.
3236   if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy())
3237     RegisterFileMoveCost += 1;
3238 
3239   return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost;
3240 }
3241 
3242 InstructionCost X86TTIImpl::getScalarizationOverhead(VectorType *Ty,
3243                                                      const APInt &DemandedElts,
3244                                                      bool Insert,
3245                                                      bool Extract) {
3246   InstructionCost Cost = 0;
3247 
3248   // For insertions, a ISD::BUILD_VECTOR style vector initialization can be much
3249   // cheaper than an accumulation of ISD::INSERT_VECTOR_ELT.
3250   if (Insert) {
3251     std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
3252     MVT MScalarTy = LT.second.getScalarType();
3253 
3254     if ((MScalarTy == MVT::i16 && ST->hasSSE2()) ||
3255         (MScalarTy.isInteger() && ST->hasSSE41()) ||
3256         (MScalarTy == MVT::f32 && ST->hasSSE41())) {
3257       // For types we can insert directly, insertion into 128-bit sub vectors is
3258       // cheap, followed by a cheap chain of concatenations.
3259       if (LT.second.getSizeInBits() <= 128) {
3260         Cost +=
3261             BaseT::getScalarizationOverhead(Ty, DemandedElts, Insert, false);
3262       } else {
3263         // In each 128-lane, if at least one index is demanded but not all
3264         // indices are demanded and this 128-lane is not the first 128-lane of
3265         // the legalized-vector, then this 128-lane needs a extracti128; If in
3266         // each 128-lane, there is at least one demanded index, this 128-lane
3267         // needs a inserti128.
3268 
3269         // The following cases will help you build a better understanding:
3270         // Assume we insert several elements into a v8i32 vector in avx2,
3271         // Case#1: inserting into 1th index needs vpinsrd + inserti128.
3272         // Case#2: inserting into 5th index needs extracti128 + vpinsrd +
3273         // inserti128.
3274         // Case#3: inserting into 4,5,6,7 index needs 4*vpinsrd + inserti128.
3275         const int CostValue = *LT.first.getValue();
3276         assert(CostValue >= 0 && "Negative cost!");
3277         unsigned Num128Lanes = LT.second.getSizeInBits() / 128 * CostValue;
3278         unsigned NumElts = LT.second.getVectorNumElements() * CostValue;
3279         APInt WidenedDemandedElts = DemandedElts.zextOrSelf(NumElts);
3280         unsigned Scale = NumElts / Num128Lanes;
3281         // We iterate each 128-lane, and check if we need a
3282         // extracti128/inserti128 for this 128-lane.
3283         for (unsigned I = 0; I < NumElts; I += Scale) {
3284           APInt Mask = WidenedDemandedElts.getBitsSet(NumElts, I, I + Scale);
3285           APInt MaskedDE = Mask & WidenedDemandedElts;
3286           unsigned Population = MaskedDE.countPopulation();
3287           Cost += (Population > 0 && Population != Scale &&
3288                    I % LT.second.getVectorNumElements() != 0);
3289           Cost += Population > 0;
3290         }
3291         Cost += DemandedElts.countPopulation();
3292 
3293         // For vXf32 cases, insertion into the 0'th index in each v4f32
3294         // 128-bit vector is free.
3295         // NOTE: This assumes legalization widens vXf32 vectors.
3296         if (MScalarTy == MVT::f32)
3297           for (unsigned i = 0, e = cast<FixedVectorType>(Ty)->getNumElements();
3298                i < e; i += 4)
3299             if (DemandedElts[i])
3300               Cost--;
3301       }
3302     } else if (LT.second.isVector()) {
3303       // Without fast insertion, we need to use MOVD/MOVQ to pass each demanded
3304       // integer element as a SCALAR_TO_VECTOR, then we build the vector as a
3305       // series of UNPCK followed by CONCAT_VECTORS - all of these can be
3306       // considered cheap.
3307       if (Ty->isIntOrIntVectorTy())
3308         Cost += DemandedElts.countPopulation();
3309 
3310       // Get the smaller of the legalized or original pow2-extended number of
3311       // vector elements, which represents the number of unpacks we'll end up
3312       // performing.
3313       unsigned NumElts = LT.second.getVectorNumElements();
3314       unsigned Pow2Elts =
3315           PowerOf2Ceil(cast<FixedVectorType>(Ty)->getNumElements());
3316       Cost += (std::min<unsigned>(NumElts, Pow2Elts) - 1) * LT.first;
3317     }
3318   }
3319 
3320   // TODO: Use default extraction for now, but we should investigate extending this
3321   // to handle repeated subvector extraction.
3322   if (Extract)
3323     Cost += BaseT::getScalarizationOverhead(Ty, DemandedElts, false, Extract);
3324 
3325   return Cost;
3326 }
3327 
3328 InstructionCost X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
3329                                             MaybeAlign Alignment,
3330                                             unsigned AddressSpace,
3331                                             TTI::TargetCostKind CostKind,
3332                                             const Instruction *I) {
3333   // TODO: Handle other cost kinds.
3334   if (CostKind != TTI::TCK_RecipThroughput) {
3335     if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
3336       // Store instruction with index and scale costs 2 Uops.
3337       // Check the preceding GEP to identify non-const indices.
3338       if (auto *GEP = dyn_cast<GetElementPtrInst>(SI->getPointerOperand())) {
3339         if (!all_of(GEP->indices(), [](Value *V) { return isa<Constant>(V); }))
3340           return TTI::TCC_Basic * 2;
3341       }
3342     }
3343     return TTI::TCC_Basic;
3344   }
3345 
3346   assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
3347          "Invalid Opcode");
3348   // Type legalization can't handle structs
3349   if (TLI->getValueType(DL, Src, true) == MVT::Other)
3350     return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
3351                                   CostKind);
3352 
3353   // Legalize the type.
3354   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
3355 
3356   auto *VTy = dyn_cast<FixedVectorType>(Src);
3357 
3358   // Handle the simple case of non-vectors.
3359   // NOTE: this assumes that legalization never creates vector from scalars!
3360   if (!VTy || !LT.second.isVector())
3361     // Each load/store unit costs 1.
3362     return LT.first * 1;
3363 
3364   bool IsLoad = Opcode == Instruction::Load;
3365 
3366   Type *EltTy = VTy->getElementType();
3367 
3368   const int EltTyBits = DL.getTypeSizeInBits(EltTy);
3369 
3370   InstructionCost Cost = 0;
3371 
3372   // Source of truth: how many elements were there in the original IR vector?
3373   const unsigned SrcNumElt = VTy->getNumElements();
3374 
3375   // How far have we gotten?
3376   int NumEltRemaining = SrcNumElt;
3377   // Note that we intentionally capture by-reference, NumEltRemaining changes.
3378   auto NumEltDone = [&]() { return SrcNumElt - NumEltRemaining; };
3379 
3380   const int MaxLegalOpSizeBytes = divideCeil(LT.second.getSizeInBits(), 8);
3381 
3382   // Note that even if we can store 64 bits of an XMM, we still operate on XMM.
3383   const unsigned XMMBits = 128;
3384   if (XMMBits % EltTyBits != 0)
3385     // Vector size must be a multiple of the element size. I.e. no padding.
3386     return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
3387                                   CostKind);
3388   const int NumEltPerXMM = XMMBits / EltTyBits;
3389 
3390   auto *XMMVecTy = FixedVectorType::get(EltTy, NumEltPerXMM);
3391 
3392   for (int CurrOpSizeBytes = MaxLegalOpSizeBytes, SubVecEltsLeft = 0;
3393        NumEltRemaining > 0; CurrOpSizeBytes /= 2) {
3394     // How many elements would a single op deal with at once?
3395     if ((8 * CurrOpSizeBytes) % EltTyBits != 0)
3396       // Vector size must be a multiple of the element size. I.e. no padding.
3397       return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
3398                                     CostKind);
3399     int CurrNumEltPerOp = (8 * CurrOpSizeBytes) / EltTyBits;
3400 
3401     assert(CurrOpSizeBytes > 0 && CurrNumEltPerOp > 0 && "How'd we get here?");
3402     assert((((NumEltRemaining * EltTyBits) < (2 * 8 * CurrOpSizeBytes)) ||
3403             (CurrOpSizeBytes == MaxLegalOpSizeBytes)) &&
3404            "Unless we haven't halved the op size yet, "
3405            "we have less than two op's sized units of work left.");
3406 
3407     auto *CurrVecTy = CurrNumEltPerOp > NumEltPerXMM
3408                           ? FixedVectorType::get(EltTy, CurrNumEltPerOp)
3409                           : XMMVecTy;
3410 
3411     assert(CurrVecTy->getNumElements() % CurrNumEltPerOp == 0 &&
3412            "After halving sizes, the vector elt count is no longer a multiple "
3413            "of number of elements per operation?");
3414     auto *CoalescedVecTy =
3415         CurrNumEltPerOp == 1
3416             ? CurrVecTy
3417             : FixedVectorType::get(
3418                   IntegerType::get(Src->getContext(),
3419                                    EltTyBits * CurrNumEltPerOp),
3420                   CurrVecTy->getNumElements() / CurrNumEltPerOp);
3421     assert(DL.getTypeSizeInBits(CoalescedVecTy) ==
3422                DL.getTypeSizeInBits(CurrVecTy) &&
3423            "coalesciing elements doesn't change vector width.");
3424 
3425     while (NumEltRemaining > 0) {
3426       assert(SubVecEltsLeft >= 0 && "Subreg element count overconsumtion?");
3427 
3428       // Can we use this vector size, as per the remaining element count?
3429       // Iff the vector is naturally aligned, we can do a wide load regardless.
3430       if (NumEltRemaining < CurrNumEltPerOp &&
3431           (!IsLoad || Alignment.valueOrOne() < CurrOpSizeBytes) &&
3432           CurrOpSizeBytes != 1)
3433         break; // Try smalled vector size.
3434 
3435       bool Is0thSubVec = (NumEltDone() % LT.second.getVectorNumElements()) == 0;
3436 
3437       // If we have fully processed the previous reg, we need to replenish it.
3438       if (SubVecEltsLeft == 0) {
3439         SubVecEltsLeft += CurrVecTy->getNumElements();
3440         // And that's free only for the 0'th subvector of a legalized vector.
3441         if (!Is0thSubVec)
3442           Cost += getShuffleCost(IsLoad ? TTI::ShuffleKind::SK_InsertSubvector
3443                                         : TTI::ShuffleKind::SK_ExtractSubvector,
3444                                  VTy, None, NumEltDone(), CurrVecTy);
3445       }
3446 
3447       // While we can directly load/store ZMM, YMM, and 64-bit halves of XMM,
3448       // for smaller widths (32/16/8) we have to insert/extract them separately.
3449       // Again, it's free for the 0'th subreg (if op is 32/64 bit wide,
3450       // but let's pretend that it is also true for 16/8 bit wide ops...)
3451       if (CurrOpSizeBytes <= 32 / 8 && !Is0thSubVec) {
3452         int NumEltDoneInCurrXMM = NumEltDone() % NumEltPerXMM;
3453         assert(NumEltDoneInCurrXMM % CurrNumEltPerOp == 0 && "");
3454         int CoalescedVecEltIdx = NumEltDoneInCurrXMM / CurrNumEltPerOp;
3455         APInt DemandedElts =
3456             APInt::getBitsSet(CoalescedVecTy->getNumElements(),
3457                               CoalescedVecEltIdx, CoalescedVecEltIdx + 1);
3458         assert(DemandedElts.countPopulation() == 1 && "Inserting single value");
3459         Cost += getScalarizationOverhead(CoalescedVecTy, DemandedElts, IsLoad,
3460                                          !IsLoad);
3461       }
3462 
3463       // This isn't exactly right. We're using slow unaligned 32-byte accesses
3464       // as a proxy for a double-pumped AVX memory interface such as on
3465       // Sandybridge.
3466       if (CurrOpSizeBytes == 32 && ST->isUnalignedMem32Slow())
3467         Cost += 2;
3468       else
3469         Cost += 1;
3470 
3471       SubVecEltsLeft -= CurrNumEltPerOp;
3472       NumEltRemaining -= CurrNumEltPerOp;
3473       Alignment = commonAlignment(Alignment.valueOrOne(), CurrOpSizeBytes);
3474     }
3475   }
3476 
3477   assert(NumEltRemaining <= 0 && "Should have processed all the elements.");
3478 
3479   return Cost;
3480 }
3481 
3482 InstructionCost
3483 X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy, Align Alignment,
3484                                   unsigned AddressSpace,
3485                                   TTI::TargetCostKind CostKind) {
3486   bool IsLoad = (Instruction::Load == Opcode);
3487   bool IsStore = (Instruction::Store == Opcode);
3488 
3489   auto *SrcVTy = dyn_cast<FixedVectorType>(SrcTy);
3490   if (!SrcVTy)
3491     // To calculate scalar take the regular cost, without mask
3492     return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace, CostKind);
3493 
3494   unsigned NumElem = SrcVTy->getNumElements();
3495   auto *MaskTy =
3496       FixedVectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem);
3497   if ((IsLoad && !isLegalMaskedLoad(SrcVTy, Alignment)) ||
3498       (IsStore && !isLegalMaskedStore(SrcVTy, Alignment))) {
3499     // Scalarization
3500     APInt DemandedElts = APInt::getAllOnesValue(NumElem);
3501     InstructionCost MaskSplitCost =
3502         getScalarizationOverhead(MaskTy, DemandedElts, false, true);
3503     InstructionCost ScalarCompareCost = getCmpSelInstrCost(
3504         Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr,
3505         CmpInst::BAD_ICMP_PREDICATE, CostKind);
3506     InstructionCost BranchCost = getCFInstrCost(Instruction::Br, CostKind);
3507     InstructionCost MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);
3508     InstructionCost ValueSplitCost =
3509         getScalarizationOverhead(SrcVTy, DemandedElts, IsLoad, IsStore);
3510     InstructionCost MemopCost =
3511         NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
3512                                          Alignment, AddressSpace, CostKind);
3513     return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
3514   }
3515 
3516   // Legalize the type.
3517   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
3518   auto VT = TLI->getValueType(DL, SrcVTy);
3519   InstructionCost Cost = 0;
3520   if (VT.isSimple() && LT.second != VT.getSimpleVT() &&
3521       LT.second.getVectorNumElements() == NumElem)
3522     // Promotion requires extend/truncate for data and a shuffle for mask.
3523     Cost += getShuffleCost(TTI::SK_PermuteTwoSrc, SrcVTy, None, 0, nullptr) +
3524             getShuffleCost(TTI::SK_PermuteTwoSrc, MaskTy, None, 0, nullptr);
3525 
3526   else if (LT.first * LT.second.getVectorNumElements() > NumElem) {
3527     auto *NewMaskTy = FixedVectorType::get(MaskTy->getElementType(),
3528                                            LT.second.getVectorNumElements());
3529     // Expanding requires fill mask with zeroes
3530     Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, None, 0, MaskTy);
3531   }
3532 
3533   // Pre-AVX512 - each maskmov load costs 2 + store costs ~8.
3534   if (!ST->hasAVX512())
3535     return Cost + LT.first * (IsLoad ? 2 : 8);
3536 
3537   // AVX-512 masked load/store is cheapper
3538   return Cost + LT.first;
3539 }
3540 
3541 InstructionCost X86TTIImpl::getAddressComputationCost(Type *Ty,
3542                                                       ScalarEvolution *SE,
3543                                                       const SCEV *Ptr) {
3544   // Address computations in vectorized code with non-consecutive addresses will
3545   // likely result in more instructions compared to scalar code where the
3546   // computation can more often be merged into the index mode. The resulting
3547   // extra micro-ops can significantly decrease throughput.
3548   const unsigned NumVectorInstToHideOverhead = 10;
3549 
3550   // Cost modeling of Strided Access Computation is hidden by the indexing
3551   // modes of X86 regardless of the stride value. We dont believe that there
3552   // is a difference between constant strided access in gerenal and constant
3553   // strided value which is less than or equal to 64.
3554   // Even in the case of (loop invariant) stride whose value is not known at
3555   // compile time, the address computation will not incur more than one extra
3556   // ADD instruction.
3557   if (Ty->isVectorTy() && SE) {
3558     if (!BaseT::isStridedAccess(Ptr))
3559       return NumVectorInstToHideOverhead;
3560     if (!BaseT::getConstantStrideStep(SE, Ptr))
3561       return 1;
3562   }
3563 
3564   return BaseT::getAddressComputationCost(Ty, SE, Ptr);
3565 }
3566 
3567 InstructionCost
3568 X86TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
3569                                        bool IsPairwise,
3570                                        TTI::TargetCostKind CostKind) {
3571   // Just use the default implementation for pair reductions.
3572   if (IsPairwise)
3573     return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwise, CostKind);
3574 
3575   // We use the Intel Architecture Code Analyzer(IACA) to measure the throughput
3576   // and make it as the cost.
3577 
3578   static const CostTblEntry SLMCostTblNoPairWise[] = {
3579     { ISD::FADD,  MVT::v2f64,   3 },
3580     { ISD::ADD,   MVT::v2i64,   5 },
3581   };
3582 
3583   static const CostTblEntry SSE2CostTblNoPairWise[] = {
3584     { ISD::FADD,  MVT::v2f64,   2 },
3585     { ISD::FADD,  MVT::v2f32,   2 },
3586     { ISD::FADD,  MVT::v4f32,   4 },
3587     { ISD::ADD,   MVT::v2i64,   2 },      // The data reported by the IACA tool is "1.6".
3588     { ISD::ADD,   MVT::v2i32,   2 }, // FIXME: chosen to be less than v4i32
3589     { ISD::ADD,   MVT::v4i32,   3 },      // The data reported by the IACA tool is "3.3".
3590     { ISD::ADD,   MVT::v2i16,   2 },      // The data reported by the IACA tool is "4.3".
3591     { ISD::ADD,   MVT::v4i16,   3 },      // The data reported by the IACA tool is "4.3".
3592     { ISD::ADD,   MVT::v8i16,   4 },      // The data reported by the IACA tool is "4.3".
3593     { ISD::ADD,   MVT::v2i8,    2 },
3594     { ISD::ADD,   MVT::v4i8,    2 },
3595     { ISD::ADD,   MVT::v8i8,    2 },
3596     { ISD::ADD,   MVT::v16i8,   3 },
3597   };
3598 
3599   static const CostTblEntry AVX1CostTblNoPairWise[] = {
3600     { ISD::FADD,  MVT::v4f64,   3 },
3601     { ISD::FADD,  MVT::v4f32,   3 },
3602     { ISD::FADD,  MVT::v8f32,   4 },
3603     { ISD::ADD,   MVT::v2i64,   1 },      // The data reported by the IACA tool is "1.5".
3604     { ISD::ADD,   MVT::v4i64,   3 },
3605     { ISD::ADD,   MVT::v8i32,   5 },
3606     { ISD::ADD,   MVT::v16i16,  5 },
3607     { ISD::ADD,   MVT::v32i8,   4 },
3608   };
3609 
3610   int ISD = TLI->InstructionOpcodeToISD(Opcode);
3611   assert(ISD && "Invalid opcode");
3612 
3613   // Before legalizing the type, give a chance to look up illegal narrow types
3614   // in the table.
3615   // FIXME: Is there a better way to do this?
3616   EVT VT = TLI->getValueType(DL, ValTy);
3617   if (VT.isSimple()) {
3618     MVT MTy = VT.getSimpleVT();
3619     if (ST->isSLM())
3620       if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy))
3621         return Entry->Cost;
3622 
3623     if (ST->hasAVX())
3624       if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
3625         return Entry->Cost;
3626 
3627     if (ST->hasSSE2())
3628       if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
3629         return Entry->Cost;
3630   }
3631 
3632   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
3633 
3634   MVT MTy = LT.second;
3635 
3636   auto *ValVTy = cast<FixedVectorType>(ValTy);
3637 
3638   // Special case: vXi8 mul reductions are performed as vXi16.
3639   if (ISD == ISD::MUL && MTy.getScalarType() == MVT::i8) {
3640     auto *WideSclTy = IntegerType::get(ValVTy->getContext(), 16);
3641     auto *WideVecTy = FixedVectorType::get(WideSclTy, ValVTy->getNumElements());
3642     return getCastInstrCost(Instruction::ZExt, WideVecTy, ValTy,
3643                             TargetTransformInfo::CastContextHint::None,
3644                             CostKind) +
3645            getArithmeticReductionCost(Opcode, WideVecTy, IsPairwise, CostKind);
3646   }
3647 
3648   InstructionCost ArithmeticCost = 0;
3649   if (LT.first != 1 && MTy.isVector() &&
3650       MTy.getVectorNumElements() < ValVTy->getNumElements()) {
3651     // Type needs to be split. We need LT.first - 1 arithmetic ops.
3652     auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(),
3653                                             MTy.getVectorNumElements());
3654     ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind);
3655     ArithmeticCost *= LT.first - 1;
3656   }
3657 
3658   if (ST->isSLM())
3659     if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy))
3660       return ArithmeticCost + Entry->Cost;
3661 
3662   if (ST->hasAVX())
3663     if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
3664       return ArithmeticCost + Entry->Cost;
3665 
3666   if (ST->hasSSE2())
3667     if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
3668       return ArithmeticCost + Entry->Cost;
3669 
3670   // FIXME: These assume a naive kshift+binop lowering, which is probably
3671   // conservative in most cases.
3672   static const CostTblEntry AVX512BoolReduction[] = {
3673     { ISD::AND,  MVT::v2i1,   3 },
3674     { ISD::AND,  MVT::v4i1,   5 },
3675     { ISD::AND,  MVT::v8i1,   7 },
3676     { ISD::AND,  MVT::v16i1,  9 },
3677     { ISD::AND,  MVT::v32i1, 11 },
3678     { ISD::AND,  MVT::v64i1, 13 },
3679     { ISD::OR,   MVT::v2i1,   3 },
3680     { ISD::OR,   MVT::v4i1,   5 },
3681     { ISD::OR,   MVT::v8i1,   7 },
3682     { ISD::OR,   MVT::v16i1,  9 },
3683     { ISD::OR,   MVT::v32i1, 11 },
3684     { ISD::OR,   MVT::v64i1, 13 },
3685   };
3686 
3687   static const CostTblEntry AVX2BoolReduction[] = {
3688     { ISD::AND,  MVT::v16i16,  2 }, // vpmovmskb + cmp
3689     { ISD::AND,  MVT::v32i8,   2 }, // vpmovmskb + cmp
3690     { ISD::OR,   MVT::v16i16,  2 }, // vpmovmskb + cmp
3691     { ISD::OR,   MVT::v32i8,   2 }, // vpmovmskb + cmp
3692   };
3693 
3694   static const CostTblEntry AVX1BoolReduction[] = {
3695     { ISD::AND,  MVT::v4i64,   2 }, // vmovmskpd + cmp
3696     { ISD::AND,  MVT::v8i32,   2 }, // vmovmskps + cmp
3697     { ISD::AND,  MVT::v16i16,  4 }, // vextractf128 + vpand + vpmovmskb + cmp
3698     { ISD::AND,  MVT::v32i8,   4 }, // vextractf128 + vpand + vpmovmskb + cmp
3699     { ISD::OR,   MVT::v4i64,   2 }, // vmovmskpd + cmp
3700     { ISD::OR,   MVT::v8i32,   2 }, // vmovmskps + cmp
3701     { ISD::OR,   MVT::v16i16,  4 }, // vextractf128 + vpor + vpmovmskb + cmp
3702     { ISD::OR,   MVT::v32i8,   4 }, // vextractf128 + vpor + vpmovmskb + cmp
3703   };
3704 
3705   static const CostTblEntry SSE2BoolReduction[] = {
3706     { ISD::AND,  MVT::v2i64,   2 }, // movmskpd + cmp
3707     { ISD::AND,  MVT::v4i32,   2 }, // movmskps + cmp
3708     { ISD::AND,  MVT::v8i16,   2 }, // pmovmskb + cmp
3709     { ISD::AND,  MVT::v16i8,   2 }, // pmovmskb + cmp
3710     { ISD::OR,   MVT::v2i64,   2 }, // movmskpd + cmp
3711     { ISD::OR,   MVT::v4i32,   2 }, // movmskps + cmp
3712     { ISD::OR,   MVT::v8i16,   2 }, // pmovmskb + cmp
3713     { ISD::OR,   MVT::v16i8,   2 }, // pmovmskb + cmp
3714   };
3715 
3716   // Handle bool allof/anyof patterns.
3717   if (ValVTy->getElementType()->isIntegerTy(1)) {
3718     InstructionCost ArithmeticCost = 0;
3719     if (LT.first != 1 && MTy.isVector() &&
3720         MTy.getVectorNumElements() < ValVTy->getNumElements()) {
3721       // Type needs to be split. We need LT.first - 1 arithmetic ops.
3722       auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(),
3723                                               MTy.getVectorNumElements());
3724       ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind);
3725       ArithmeticCost *= LT.first - 1;
3726     }
3727 
3728     if (ST->hasAVX512())
3729       if (const auto *Entry = CostTableLookup(AVX512BoolReduction, ISD, MTy))
3730         return ArithmeticCost + Entry->Cost;
3731     if (ST->hasAVX2())
3732       if (const auto *Entry = CostTableLookup(AVX2BoolReduction, ISD, MTy))
3733         return ArithmeticCost + Entry->Cost;
3734     if (ST->hasAVX())
3735       if (const auto *Entry = CostTableLookup(AVX1BoolReduction, ISD, MTy))
3736         return ArithmeticCost + Entry->Cost;
3737     if (ST->hasSSE2())
3738       if (const auto *Entry = CostTableLookup(SSE2BoolReduction, ISD, MTy))
3739         return ArithmeticCost + Entry->Cost;
3740 
3741     return BaseT::getArithmeticReductionCost(Opcode, ValVTy, IsPairwise,
3742                                              CostKind);
3743   }
3744 
3745   unsigned NumVecElts = ValVTy->getNumElements();
3746   unsigned ScalarSize = ValVTy->getScalarSizeInBits();
3747 
3748   // Special case power of 2 reductions where the scalar type isn't changed
3749   // by type legalization.
3750   if (!isPowerOf2_32(NumVecElts) || ScalarSize != MTy.getScalarSizeInBits())
3751     return BaseT::getArithmeticReductionCost(Opcode, ValVTy, IsPairwise,
3752                                              CostKind);
3753 
3754   InstructionCost ReductionCost = 0;
3755 
3756   auto *Ty = ValVTy;
3757   if (LT.first != 1 && MTy.isVector() &&
3758       MTy.getVectorNumElements() < ValVTy->getNumElements()) {
3759     // Type needs to be split. We need LT.first - 1 arithmetic ops.
3760     Ty = FixedVectorType::get(ValVTy->getElementType(),
3761                               MTy.getVectorNumElements());
3762     ReductionCost = getArithmeticInstrCost(Opcode, Ty, CostKind);
3763     ReductionCost *= LT.first - 1;
3764     NumVecElts = MTy.getVectorNumElements();
3765   }
3766 
3767   // Now handle reduction with the legal type, taking into account size changes
3768   // at each level.
3769   while (NumVecElts > 1) {
3770     // Determine the size of the remaining vector we need to reduce.
3771     unsigned Size = NumVecElts * ScalarSize;
3772     NumVecElts /= 2;
3773     // If we're reducing from 256/512 bits, use an extract_subvector.
3774     if (Size > 128) {
3775       auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts);
3776       ReductionCost +=
3777           getShuffleCost(TTI::SK_ExtractSubvector, Ty, None, NumVecElts, SubTy);
3778       Ty = SubTy;
3779     } else if (Size == 128) {
3780       // Reducing from 128 bits is a permute of v2f64/v2i64.
3781       FixedVectorType *ShufTy;
3782       if (ValVTy->isFloatingPointTy())
3783         ShufTy =
3784             FixedVectorType::get(Type::getDoubleTy(ValVTy->getContext()), 2);
3785       else
3786         ShufTy =
3787             FixedVectorType::get(Type::getInt64Ty(ValVTy->getContext()), 2);
3788       ReductionCost +=
3789           getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
3790     } else if (Size == 64) {
3791       // Reducing from 64 bits is a shuffle of v4f32/v4i32.
3792       FixedVectorType *ShufTy;
3793       if (ValVTy->isFloatingPointTy())
3794         ShufTy =
3795             FixedVectorType::get(Type::getFloatTy(ValVTy->getContext()), 4);
3796       else
3797         ShufTy =
3798             FixedVectorType::get(Type::getInt32Ty(ValVTy->getContext()), 4);
3799       ReductionCost +=
3800           getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
3801     } else {
3802       // Reducing from smaller size is a shift by immediate.
3803       auto *ShiftTy = FixedVectorType::get(
3804           Type::getIntNTy(ValVTy->getContext(), Size), 128 / Size);
3805       ReductionCost += getArithmeticInstrCost(
3806           Instruction::LShr, ShiftTy, CostKind,
3807           TargetTransformInfo::OK_AnyValue,
3808           TargetTransformInfo::OK_UniformConstantValue,
3809           TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
3810     }
3811 
3812     // Add the arithmetic op for this level.
3813     ReductionCost += getArithmeticInstrCost(Opcode, Ty, CostKind);
3814   }
3815 
3816   // Add the final extract element to the cost.
3817   return ReductionCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
3818 }
3819 
3820 InstructionCost X86TTIImpl::getMinMaxCost(Type *Ty, Type *CondTy,
3821                                           bool IsUnsigned) {
3822   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
3823 
3824   MVT MTy = LT.second;
3825 
3826   int ISD;
3827   if (Ty->isIntOrIntVectorTy()) {
3828     ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
3829   } else {
3830     assert(Ty->isFPOrFPVectorTy() &&
3831            "Expected float point or integer vector type.");
3832     ISD = ISD::FMINNUM;
3833   }
3834 
3835   static const CostTblEntry SSE1CostTbl[] = {
3836     {ISD::FMINNUM, MVT::v4f32, 1},
3837   };
3838 
3839   static const CostTblEntry SSE2CostTbl[] = {
3840     {ISD::FMINNUM, MVT::v2f64, 1},
3841     {ISD::SMIN,    MVT::v8i16, 1},
3842     {ISD::UMIN,    MVT::v16i8, 1},
3843   };
3844 
3845   static const CostTblEntry SSE41CostTbl[] = {
3846     {ISD::SMIN,    MVT::v4i32, 1},
3847     {ISD::UMIN,    MVT::v4i32, 1},
3848     {ISD::UMIN,    MVT::v8i16, 1},
3849     {ISD::SMIN,    MVT::v16i8, 1},
3850   };
3851 
3852   static const CostTblEntry SSE42CostTbl[] = {
3853     {ISD::UMIN,    MVT::v2i64, 3}, // xor+pcmpgtq+blendvpd
3854   };
3855 
3856   static const CostTblEntry AVX1CostTbl[] = {
3857     {ISD::FMINNUM, MVT::v8f32,  1},
3858     {ISD::FMINNUM, MVT::v4f64,  1},
3859     {ISD::SMIN,    MVT::v8i32,  3},
3860     {ISD::UMIN,    MVT::v8i32,  3},
3861     {ISD::SMIN,    MVT::v16i16, 3},
3862     {ISD::UMIN,    MVT::v16i16, 3},
3863     {ISD::SMIN,    MVT::v32i8,  3},
3864     {ISD::UMIN,    MVT::v32i8,  3},
3865   };
3866 
3867   static const CostTblEntry AVX2CostTbl[] = {
3868     {ISD::SMIN,    MVT::v8i32,  1},
3869     {ISD::UMIN,    MVT::v8i32,  1},
3870     {ISD::SMIN,    MVT::v16i16, 1},
3871     {ISD::UMIN,    MVT::v16i16, 1},
3872     {ISD::SMIN,    MVT::v32i8,  1},
3873     {ISD::UMIN,    MVT::v32i8,  1},
3874   };
3875 
3876   static const CostTblEntry AVX512CostTbl[] = {
3877     {ISD::FMINNUM, MVT::v16f32, 1},
3878     {ISD::FMINNUM, MVT::v8f64,  1},
3879     {ISD::SMIN,    MVT::v2i64,  1},
3880     {ISD::UMIN,    MVT::v2i64,  1},
3881     {ISD::SMIN,    MVT::v4i64,  1},
3882     {ISD::UMIN,    MVT::v4i64,  1},
3883     {ISD::SMIN,    MVT::v8i64,  1},
3884     {ISD::UMIN,    MVT::v8i64,  1},
3885     {ISD::SMIN,    MVT::v16i32, 1},
3886     {ISD::UMIN,    MVT::v16i32, 1},
3887   };
3888 
3889   static const CostTblEntry AVX512BWCostTbl[] = {
3890     {ISD::SMIN,    MVT::v32i16, 1},
3891     {ISD::UMIN,    MVT::v32i16, 1},
3892     {ISD::SMIN,    MVT::v64i8,  1},
3893     {ISD::UMIN,    MVT::v64i8,  1},
3894   };
3895 
3896   // If we have a native MIN/MAX instruction for this type, use it.
3897   if (ST->hasBWI())
3898     if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
3899       return LT.first * Entry->Cost;
3900 
3901   if (ST->hasAVX512())
3902     if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
3903       return LT.first * Entry->Cost;
3904 
3905   if (ST->hasAVX2())
3906     if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
3907       return LT.first * Entry->Cost;
3908 
3909   if (ST->hasAVX())
3910     if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
3911       return LT.first * Entry->Cost;
3912 
3913   if (ST->hasSSE42())
3914     if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
3915       return LT.first * Entry->Cost;
3916 
3917   if (ST->hasSSE41())
3918     if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
3919       return LT.first * Entry->Cost;
3920 
3921   if (ST->hasSSE2())
3922     if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
3923       return LT.first * Entry->Cost;
3924 
3925   if (ST->hasSSE1())
3926     if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
3927       return LT.first * Entry->Cost;
3928 
3929   unsigned CmpOpcode;
3930   if (Ty->isFPOrFPVectorTy()) {
3931     CmpOpcode = Instruction::FCmp;
3932   } else {
3933     assert(Ty->isIntOrIntVectorTy() &&
3934            "expecting floating point or integer type for min/max reduction");
3935     CmpOpcode = Instruction::ICmp;
3936   }
3937 
3938   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
3939   // Otherwise fall back to cmp+select.
3940   InstructionCost Result =
3941       getCmpSelInstrCost(CmpOpcode, Ty, CondTy, CmpInst::BAD_ICMP_PREDICATE,
3942                          CostKind) +
3943       getCmpSelInstrCost(Instruction::Select, Ty, CondTy,
3944                          CmpInst::BAD_ICMP_PREDICATE, CostKind);
3945   return Result;
3946 }
3947 
3948 InstructionCost
3949 X86TTIImpl::getMinMaxReductionCost(VectorType *ValTy, VectorType *CondTy,
3950                                    bool IsPairwise, bool IsUnsigned,
3951                                    TTI::TargetCostKind CostKind) {
3952   // Just use the default implementation for pair reductions.
3953   if (IsPairwise)
3954     return BaseT::getMinMaxReductionCost(ValTy, CondTy, IsPairwise, IsUnsigned,
3955                                          CostKind);
3956 
3957   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
3958 
3959   MVT MTy = LT.second;
3960 
3961   int ISD;
3962   if (ValTy->isIntOrIntVectorTy()) {
3963     ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
3964   } else {
3965     assert(ValTy->isFPOrFPVectorTy() &&
3966            "Expected float point or integer vector type.");
3967     ISD = ISD::FMINNUM;
3968   }
3969 
3970   // We use the Intel Architecture Code Analyzer(IACA) to measure the throughput
3971   // and make it as the cost.
3972 
3973   static const CostTblEntry SSE2CostTblNoPairWise[] = {
3974       {ISD::UMIN, MVT::v2i16, 5}, // need pxors to use pminsw/pmaxsw
3975       {ISD::UMIN, MVT::v4i16, 7}, // need pxors to use pminsw/pmaxsw
3976       {ISD::UMIN, MVT::v8i16, 9}, // need pxors to use pminsw/pmaxsw
3977   };
3978 
3979   static const CostTblEntry SSE41CostTblNoPairWise[] = {
3980       {ISD::SMIN, MVT::v2i16, 3}, // same as sse2
3981       {ISD::SMIN, MVT::v4i16, 5}, // same as sse2
3982       {ISD::UMIN, MVT::v2i16, 5}, // same as sse2
3983       {ISD::UMIN, MVT::v4i16, 7}, // same as sse2
3984       {ISD::SMIN, MVT::v8i16, 4}, // phminposuw+xor
3985       {ISD::UMIN, MVT::v8i16, 4}, // FIXME: umin is cheaper than umax
3986       {ISD::SMIN, MVT::v2i8,  3}, // pminsb
3987       {ISD::SMIN, MVT::v4i8,  5}, // pminsb
3988       {ISD::SMIN, MVT::v8i8,  7}, // pminsb
3989       {ISD::SMIN, MVT::v16i8, 6},
3990       {ISD::UMIN, MVT::v2i8,  3}, // same as sse2
3991       {ISD::UMIN, MVT::v4i8,  5}, // same as sse2
3992       {ISD::UMIN, MVT::v8i8,  7}, // same as sse2
3993       {ISD::UMIN, MVT::v16i8, 6}, // FIXME: umin is cheaper than umax
3994   };
3995 
3996   static const CostTblEntry AVX1CostTblNoPairWise[] = {
3997       {ISD::SMIN, MVT::v16i16, 6},
3998       {ISD::UMIN, MVT::v16i16, 6}, // FIXME: umin is cheaper than umax
3999       {ISD::SMIN, MVT::v32i8, 8},
4000       {ISD::UMIN, MVT::v32i8, 8},
4001   };
4002 
4003   static const CostTblEntry AVX512BWCostTblNoPairWise[] = {
4004       {ISD::SMIN, MVT::v32i16, 8},
4005       {ISD::UMIN, MVT::v32i16, 8}, // FIXME: umin is cheaper than umax
4006       {ISD::SMIN, MVT::v64i8, 10},
4007       {ISD::UMIN, MVT::v64i8, 10},
4008   };
4009 
4010   // Before legalizing the type, give a chance to look up illegal narrow types
4011   // in the table.
4012   // FIXME: Is there a better way to do this?
4013   EVT VT = TLI->getValueType(DL, ValTy);
4014   if (VT.isSimple()) {
4015     MVT MTy = VT.getSimpleVT();
4016     if (ST->hasBWI())
4017       if (const auto *Entry = CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy))
4018         return Entry->Cost;
4019 
4020     if (ST->hasAVX())
4021       if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
4022         return Entry->Cost;
4023 
4024     if (ST->hasSSE41())
4025       if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy))
4026         return Entry->Cost;
4027 
4028     if (ST->hasSSE2())
4029       if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
4030         return Entry->Cost;
4031   }
4032 
4033   auto *ValVTy = cast<FixedVectorType>(ValTy);
4034   unsigned NumVecElts = ValVTy->getNumElements();
4035 
4036   auto *Ty = ValVTy;
4037   InstructionCost MinMaxCost = 0;
4038   if (LT.first != 1 && MTy.isVector() &&
4039       MTy.getVectorNumElements() < ValVTy->getNumElements()) {
4040     // Type needs to be split. We need LT.first - 1 operations ops.
4041     Ty = FixedVectorType::get(ValVTy->getElementType(),
4042                               MTy.getVectorNumElements());
4043     auto *SubCondTy = FixedVectorType::get(CondTy->getElementType(),
4044                                            MTy.getVectorNumElements());
4045     MinMaxCost = getMinMaxCost(Ty, SubCondTy, IsUnsigned);
4046     MinMaxCost *= LT.first - 1;
4047     NumVecElts = MTy.getVectorNumElements();
4048   }
4049 
4050   if (ST->hasBWI())
4051     if (const auto *Entry = CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy))
4052       return MinMaxCost + Entry->Cost;
4053 
4054   if (ST->hasAVX())
4055     if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
4056       return MinMaxCost + Entry->Cost;
4057 
4058   if (ST->hasSSE41())
4059     if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy))
4060       return MinMaxCost + Entry->Cost;
4061 
4062   if (ST->hasSSE2())
4063     if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
4064       return MinMaxCost + Entry->Cost;
4065 
4066   unsigned ScalarSize = ValTy->getScalarSizeInBits();
4067 
4068   // Special case power of 2 reductions where the scalar type isn't changed
4069   // by type legalization.
4070   if (!isPowerOf2_32(ValVTy->getNumElements()) ||
4071       ScalarSize != MTy.getScalarSizeInBits())
4072     return BaseT::getMinMaxReductionCost(ValTy, CondTy, IsPairwise, IsUnsigned,
4073                                          CostKind);
4074 
4075   // Now handle reduction with the legal type, taking into account size changes
4076   // at each level.
4077   while (NumVecElts > 1) {
4078     // Determine the size of the remaining vector we need to reduce.
4079     unsigned Size = NumVecElts * ScalarSize;
4080     NumVecElts /= 2;
4081     // If we're reducing from 256/512 bits, use an extract_subvector.
4082     if (Size > 128) {
4083       auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts);
4084       MinMaxCost +=
4085           getShuffleCost(TTI::SK_ExtractSubvector, Ty, None, NumVecElts, SubTy);
4086       Ty = SubTy;
4087     } else if (Size == 128) {
4088       // Reducing from 128 bits is a permute of v2f64/v2i64.
4089       VectorType *ShufTy;
4090       if (ValTy->isFloatingPointTy())
4091         ShufTy =
4092             FixedVectorType::get(Type::getDoubleTy(ValTy->getContext()), 2);
4093       else
4094         ShufTy = FixedVectorType::get(Type::getInt64Ty(ValTy->getContext()), 2);
4095       MinMaxCost +=
4096           getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
4097     } else if (Size == 64) {
4098       // Reducing from 64 bits is a shuffle of v4f32/v4i32.
4099       FixedVectorType *ShufTy;
4100       if (ValTy->isFloatingPointTy())
4101         ShufTy = FixedVectorType::get(Type::getFloatTy(ValTy->getContext()), 4);
4102       else
4103         ShufTy = FixedVectorType::get(Type::getInt32Ty(ValTy->getContext()), 4);
4104       MinMaxCost +=
4105           getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
4106     } else {
4107       // Reducing from smaller size is a shift by immediate.
4108       auto *ShiftTy = FixedVectorType::get(
4109           Type::getIntNTy(ValTy->getContext(), Size), 128 / Size);
4110       MinMaxCost += getArithmeticInstrCost(
4111           Instruction::LShr, ShiftTy, TTI::TCK_RecipThroughput,
4112           TargetTransformInfo::OK_AnyValue,
4113           TargetTransformInfo::OK_UniformConstantValue,
4114           TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
4115     }
4116 
4117     // Add the arithmetic op for this level.
4118     auto *SubCondTy =
4119         FixedVectorType::get(CondTy->getElementType(), Ty->getNumElements());
4120     MinMaxCost += getMinMaxCost(Ty, SubCondTy, IsUnsigned);
4121   }
4122 
4123   // Add the final extract element to the cost.
4124   return MinMaxCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
4125 }
4126 
4127 /// Calculate the cost of materializing a 64-bit value. This helper
4128 /// method might only calculate a fraction of a larger immediate. Therefore it
4129 /// is valid to return a cost of ZERO.
4130 InstructionCost X86TTIImpl::getIntImmCost(int64_t Val) {
4131   if (Val == 0)
4132     return TTI::TCC_Free;
4133 
4134   if (isInt<32>(Val))
4135     return TTI::TCC_Basic;
4136 
4137   return 2 * TTI::TCC_Basic;
4138 }
4139 
4140 InstructionCost X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
4141                                           TTI::TargetCostKind CostKind) {
4142   assert(Ty->isIntegerTy());
4143 
4144   unsigned BitSize = Ty->getPrimitiveSizeInBits();
4145   if (BitSize == 0)
4146     return ~0U;
4147 
4148   // Never hoist constants larger than 128bit, because this might lead to
4149   // incorrect code generation or assertions in codegen.
4150   // Fixme: Create a cost model for types larger than i128 once the codegen
4151   // issues have been fixed.
4152   if (BitSize > 128)
4153     return TTI::TCC_Free;
4154 
4155   if (Imm == 0)
4156     return TTI::TCC_Free;
4157 
4158   // Sign-extend all constants to a multiple of 64-bit.
4159   APInt ImmVal = Imm;
4160   if (BitSize % 64 != 0)
4161     ImmVal = Imm.sext(alignTo(BitSize, 64));
4162 
4163   // Split the constant into 64-bit chunks and calculate the cost for each
4164   // chunk.
4165   InstructionCost Cost = 0;
4166   for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
4167     APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
4168     int64_t Val = Tmp.getSExtValue();
4169     Cost += getIntImmCost(Val);
4170   }
4171   // We need at least one instruction to materialize the constant.
4172   return std::max<InstructionCost>(1, Cost);
4173 }
4174 
4175 InstructionCost X86TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
4176                                               const APInt &Imm, Type *Ty,
4177                                               TTI::TargetCostKind CostKind,
4178                                               Instruction *Inst) {
4179   assert(Ty->isIntegerTy());
4180 
4181   unsigned BitSize = Ty->getPrimitiveSizeInBits();
4182   // There is no cost model for constants with a bit size of 0. Return TCC_Free
4183   // here, so that constant hoisting will ignore this constant.
4184   if (BitSize == 0)
4185     return TTI::TCC_Free;
4186 
4187   unsigned ImmIdx = ~0U;
4188   switch (Opcode) {
4189   default:
4190     return TTI::TCC_Free;
4191   case Instruction::GetElementPtr:
4192     // Always hoist the base address of a GetElementPtr. This prevents the
4193     // creation of new constants for every base constant that gets constant
4194     // folded with the offset.
4195     if (Idx == 0)
4196       return 2 * TTI::TCC_Basic;
4197     return TTI::TCC_Free;
4198   case Instruction::Store:
4199     ImmIdx = 0;
4200     break;
4201   case Instruction::ICmp:
4202     // This is an imperfect hack to prevent constant hoisting of
4203     // compares that might be trying to check if a 64-bit value fits in
4204     // 32-bits. The backend can optimize these cases using a right shift by 32.
4205     // Ideally we would check the compare predicate here. There also other
4206     // similar immediates the backend can use shifts for.
4207     if (Idx == 1 && Imm.getBitWidth() == 64) {
4208       uint64_t ImmVal = Imm.getZExtValue();
4209       if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff)
4210         return TTI::TCC_Free;
4211     }
4212     ImmIdx = 1;
4213     break;
4214   case Instruction::And:
4215     // We support 64-bit ANDs with immediates with 32-bits of leading zeroes
4216     // by using a 32-bit operation with implicit zero extension. Detect such
4217     // immediates here as the normal path expects bit 31 to be sign extended.
4218     if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
4219       return TTI::TCC_Free;
4220     ImmIdx = 1;
4221     break;
4222   case Instruction::Add:
4223   case Instruction::Sub:
4224     // For add/sub, we can use the opposite instruction for INT32_MIN.
4225     if (Idx == 1 && Imm.getBitWidth() == 64 && Imm.getZExtValue() == 0x80000000)
4226       return TTI::TCC_Free;
4227     ImmIdx = 1;
4228     break;
4229   case Instruction::UDiv:
4230   case Instruction::SDiv:
4231   case Instruction::URem:
4232   case Instruction::SRem:
4233     // Division by constant is typically expanded later into a different
4234     // instruction sequence. This completely changes the constants.
4235     // Report them as "free" to stop ConstantHoist from marking them as opaque.
4236     return TTI::TCC_Free;
4237   case Instruction::Mul:
4238   case Instruction::Or:
4239   case Instruction::Xor:
4240     ImmIdx = 1;
4241     break;
4242   // Always return TCC_Free for the shift value of a shift instruction.
4243   case Instruction::Shl:
4244   case Instruction::LShr:
4245   case Instruction::AShr:
4246     if (Idx == 1)
4247       return TTI::TCC_Free;
4248     break;
4249   case Instruction::Trunc:
4250   case Instruction::ZExt:
4251   case Instruction::SExt:
4252   case Instruction::IntToPtr:
4253   case Instruction::PtrToInt:
4254   case Instruction::BitCast:
4255   case Instruction::PHI:
4256   case Instruction::Call:
4257   case Instruction::Select:
4258   case Instruction::Ret:
4259   case Instruction::Load:
4260     break;
4261   }
4262 
4263   if (Idx == ImmIdx) {
4264     int NumConstants = divideCeil(BitSize, 64);
4265     InstructionCost Cost = X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
4266     return (Cost <= NumConstants * TTI::TCC_Basic)
4267                ? static_cast<int>(TTI::TCC_Free)
4268                : Cost;
4269   }
4270 
4271   return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
4272 }
4273 
4274 InstructionCost X86TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
4275                                                 const APInt &Imm, Type *Ty,
4276                                                 TTI::TargetCostKind CostKind) {
4277   assert(Ty->isIntegerTy());
4278 
4279   unsigned BitSize = Ty->getPrimitiveSizeInBits();
4280   // There is no cost model for constants with a bit size of 0. Return TCC_Free
4281   // here, so that constant hoisting will ignore this constant.
4282   if (BitSize == 0)
4283     return TTI::TCC_Free;
4284 
4285   switch (IID) {
4286   default:
4287     return TTI::TCC_Free;
4288   case Intrinsic::sadd_with_overflow:
4289   case Intrinsic::uadd_with_overflow:
4290   case Intrinsic::ssub_with_overflow:
4291   case Intrinsic::usub_with_overflow:
4292   case Intrinsic::smul_with_overflow:
4293   case Intrinsic::umul_with_overflow:
4294     if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
4295       return TTI::TCC_Free;
4296     break;
4297   case Intrinsic::experimental_stackmap:
4298     if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
4299       return TTI::TCC_Free;
4300     break;
4301   case Intrinsic::experimental_patchpoint_void:
4302   case Intrinsic::experimental_patchpoint_i64:
4303     if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
4304       return TTI::TCC_Free;
4305     break;
4306   }
4307   return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
4308 }
4309 
4310 InstructionCost X86TTIImpl::getCFInstrCost(unsigned Opcode,
4311                                            TTI::TargetCostKind CostKind,
4312                                            const Instruction *I) {
4313   if (CostKind != TTI::TCK_RecipThroughput)
4314     return Opcode == Instruction::PHI ? 0 : 1;
4315   // Branches are assumed to be predicted.
4316   return 0;
4317 }
4318 
4319 int X86TTIImpl::getGatherOverhead() const {
4320   // Some CPUs have more overhead for gather. The specified overhead is relative
4321   // to the Load operation. "2" is the number provided by Intel architects. This
4322   // parameter is used for cost estimation of Gather Op and comparison with
4323   // other alternatives.
4324   // TODO: Remove the explicit hasAVX512()?, That would mean we would only
4325   // enable gather with a -march.
4326   if (ST->hasAVX512() || (ST->hasAVX2() && ST->hasFastGather()))
4327     return 2;
4328 
4329   return 1024;
4330 }
4331 
4332 int X86TTIImpl::getScatterOverhead() const {
4333   if (ST->hasAVX512())
4334     return 2;
4335 
4336   return 1024;
4337 }
4338 
4339 // Return an average cost of Gather / Scatter instruction, maybe improved later.
4340 // FIXME: Add TargetCostKind support.
4341 InstructionCost X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy,
4342                                             const Value *Ptr, Align Alignment,
4343                                             unsigned AddressSpace) {
4344 
4345   assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost");
4346   unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();
4347 
4348   // Try to reduce index size from 64 bit (default for GEP)
4349   // to 32. It is essential for VF 16. If the index can't be reduced to 32, the
4350   // operation will use 16 x 64 indices which do not fit in a zmm and needs
4351   // to split. Also check that the base pointer is the same for all lanes,
4352   // and that there's at most one variable index.
4353   auto getIndexSizeInBits = [](const Value *Ptr, const DataLayout &DL) {
4354     unsigned IndexSize = DL.getPointerSizeInBits();
4355     const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
4356     if (IndexSize < 64 || !GEP)
4357       return IndexSize;
4358 
4359     unsigned NumOfVarIndices = 0;
4360     const Value *Ptrs = GEP->getPointerOperand();
4361     if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs))
4362       return IndexSize;
4363     for (unsigned i = 1; i < GEP->getNumOperands(); ++i) {
4364       if (isa<Constant>(GEP->getOperand(i)))
4365         continue;
4366       Type *IndxTy = GEP->getOperand(i)->getType();
4367       if (auto *IndexVTy = dyn_cast<VectorType>(IndxTy))
4368         IndxTy = IndexVTy->getElementType();
4369       if ((IndxTy->getPrimitiveSizeInBits() == 64 &&
4370           !isa<SExtInst>(GEP->getOperand(i))) ||
4371          ++NumOfVarIndices > 1)
4372         return IndexSize; // 64
4373     }
4374     return (unsigned)32;
4375   };
4376 
4377   // Trying to reduce IndexSize to 32 bits for vector 16.
4378   // By default the IndexSize is equal to pointer size.
4379   unsigned IndexSize = (ST->hasAVX512() && VF >= 16)
4380                            ? getIndexSizeInBits(Ptr, DL)
4381                            : DL.getPointerSizeInBits();
4382 
4383   auto *IndexVTy = FixedVectorType::get(
4384       IntegerType::get(SrcVTy->getContext(), IndexSize), VF);
4385   std::pair<InstructionCost, MVT> IdxsLT =
4386       TLI->getTypeLegalizationCost(DL, IndexVTy);
4387   std::pair<InstructionCost, MVT> SrcLT =
4388       TLI->getTypeLegalizationCost(DL, SrcVTy);
4389   InstructionCost::CostType SplitFactor =
4390       *std::max(IdxsLT.first, SrcLT.first).getValue();
4391   if (SplitFactor > 1) {
4392     // Handle splitting of vector of pointers
4393     auto *SplitSrcTy =
4394         FixedVectorType::get(SrcVTy->getScalarType(), VF / SplitFactor);
4395     return SplitFactor * getGSVectorCost(Opcode, SplitSrcTy, Ptr, Alignment,
4396                                          AddressSpace);
4397   }
4398 
4399   // The gather / scatter cost is given by Intel architects. It is a rough
4400   // number since we are looking at one instruction in a time.
4401   const int GSOverhead = (Opcode == Instruction::Load)
4402                              ? getGatherOverhead()
4403                              : getScatterOverhead();
4404   return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
4405                                            MaybeAlign(Alignment), AddressSpace,
4406                                            TTI::TCK_RecipThroughput);
4407 }
4408 
4409 /// Return the cost of full scalarization of gather / scatter operation.
4410 ///
4411 /// Opcode - Load or Store instruction.
4412 /// SrcVTy - The type of the data vector that should be gathered or scattered.
4413 /// VariableMask - The mask is non-constant at compile time.
4414 /// Alignment - Alignment for one element.
4415 /// AddressSpace - pointer[s] address space.
4416 ///
4417 /// FIXME: Add TargetCostKind support.
4418 InstructionCost X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy,
4419                                             bool VariableMask, Align Alignment,
4420                                             unsigned AddressSpace) {
4421   unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();
4422   APInt DemandedElts = APInt::getAllOnesValue(VF);
4423   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
4424 
4425   InstructionCost MaskUnpackCost = 0;
4426   if (VariableMask) {
4427     auto *MaskTy =
4428         FixedVectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF);
4429     MaskUnpackCost =
4430         getScalarizationOverhead(MaskTy, DemandedElts, false, true);
4431     InstructionCost ScalarCompareCost = getCmpSelInstrCost(
4432         Instruction::ICmp, Type::getInt1Ty(SrcVTy->getContext()), nullptr,
4433         CmpInst::BAD_ICMP_PREDICATE, CostKind);
4434     InstructionCost BranchCost = getCFInstrCost(Instruction::Br, CostKind);
4435     MaskUnpackCost += VF * (BranchCost + ScalarCompareCost);
4436   }
4437 
4438   // The cost of the scalar loads/stores.
4439   InstructionCost MemoryOpCost =
4440       VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
4441                            MaybeAlign(Alignment), AddressSpace, CostKind);
4442 
4443   InstructionCost InsertExtractCost = 0;
4444   if (Opcode == Instruction::Load)
4445     for (unsigned i = 0; i < VF; ++i)
4446       // Add the cost of inserting each scalar load into the vector
4447       InsertExtractCost +=
4448         getVectorInstrCost(Instruction::InsertElement, SrcVTy, i);
4449   else
4450     for (unsigned i = 0; i < VF; ++i)
4451       // Add the cost of extracting each element out of the data vector
4452       InsertExtractCost +=
4453         getVectorInstrCost(Instruction::ExtractElement, SrcVTy, i);
4454 
4455   return MemoryOpCost + MaskUnpackCost + InsertExtractCost;
4456 }
4457 
4458 /// Calculate the cost of Gather / Scatter operation
4459 InstructionCost X86TTIImpl::getGatherScatterOpCost(
4460     unsigned Opcode, Type *SrcVTy, const Value *Ptr, bool VariableMask,
4461     Align Alignment, TTI::TargetCostKind CostKind,
4462     const Instruction *I = nullptr) {
4463   if (CostKind != TTI::TCK_RecipThroughput) {
4464     if ((Opcode == Instruction::Load &&
4465          isLegalMaskedGather(SrcVTy, Align(Alignment))) ||
4466         (Opcode == Instruction::Store &&
4467          isLegalMaskedScatter(SrcVTy, Align(Alignment))))
4468       return 1;
4469     return BaseT::getGatherScatterOpCost(Opcode, SrcVTy, Ptr, VariableMask,
4470                                          Alignment, CostKind, I);
4471   }
4472 
4473   assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter");
4474   unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();
4475   PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
4476   if (!PtrTy && Ptr->getType()->isVectorTy())
4477     PtrTy = dyn_cast<PointerType>(
4478         cast<VectorType>(Ptr->getType())->getElementType());
4479   assert(PtrTy && "Unexpected type for Ptr argument");
4480   unsigned AddressSpace = PtrTy->getAddressSpace();
4481 
4482   bool Scalarize = false;
4483   if ((Opcode == Instruction::Load &&
4484        !isLegalMaskedGather(SrcVTy, Align(Alignment))) ||
4485       (Opcode == Instruction::Store &&
4486        !isLegalMaskedScatter(SrcVTy, Align(Alignment))))
4487     Scalarize = true;
4488   // Gather / Scatter for vector 2 is not profitable on KNL / SKX
4489   // Vector-4 of gather/scatter instruction does not exist on KNL.
4490   // We can extend it to 8 elements, but zeroing upper bits of
4491   // the mask vector will add more instructions. Right now we give the scalar
4492   // cost of vector-4 for KNL. TODO: Check, maybe the gather/scatter instruction
4493   // is better in the VariableMask case.
4494   if (ST->hasAVX512() && (VF == 2 || (VF == 4 && !ST->hasVLX())))
4495     Scalarize = true;
4496 
4497   if (Scalarize)
4498     return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment,
4499                            AddressSpace);
4500 
4501   return getGSVectorCost(Opcode, SrcVTy, Ptr, Alignment, AddressSpace);
4502 }
4503 
4504 bool X86TTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1,
4505                                TargetTransformInfo::LSRCost &C2) {
4506     // X86 specific here are "instruction number 1st priority".
4507     return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost,
4508                     C1.NumIVMuls, C1.NumBaseAdds,
4509                     C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
4510            std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost,
4511                     C2.NumIVMuls, C2.NumBaseAdds,
4512                     C2.ScaleCost, C2.ImmCost, C2.SetupCost);
4513 }
4514 
4515 bool X86TTIImpl::canMacroFuseCmp() {
4516   return ST->hasMacroFusion() || ST->hasBranchFusion();
4517 }
4518 
4519 bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) {
4520   if (!ST->hasAVX())
4521     return false;
4522 
4523   // The backend can't handle a single element vector.
4524   if (isa<VectorType>(DataTy) &&
4525       cast<FixedVectorType>(DataTy)->getNumElements() == 1)
4526     return false;
4527   Type *ScalarTy = DataTy->getScalarType();
4528 
4529   if (ScalarTy->isPointerTy())
4530     return true;
4531 
4532   if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
4533     return true;
4534 
4535   if (!ScalarTy->isIntegerTy())
4536     return false;
4537 
4538   unsigned IntWidth = ScalarTy->getIntegerBitWidth();
4539   return IntWidth == 32 || IntWidth == 64 ||
4540          ((IntWidth == 8 || IntWidth == 16) && ST->hasBWI());
4541 }
4542 
4543 bool X86TTIImpl::isLegalMaskedStore(Type *DataType, Align Alignment) {
4544   return isLegalMaskedLoad(DataType, Alignment);
4545 }
4546 
4547 bool X86TTIImpl::isLegalNTLoad(Type *DataType, Align Alignment) {
4548   unsigned DataSize = DL.getTypeStoreSize(DataType);
4549   // The only supported nontemporal loads are for aligned vectors of 16 or 32
4550   // bytes.  Note that 32-byte nontemporal vector loads are supported by AVX2
4551   // (the equivalent stores only require AVX).
4552   if (Alignment >= DataSize && (DataSize == 16 || DataSize == 32))
4553     return DataSize == 16 ?  ST->hasSSE1() : ST->hasAVX2();
4554 
4555   return false;
4556 }
4557 
4558 bool X86TTIImpl::isLegalNTStore(Type *DataType, Align Alignment) {
4559   unsigned DataSize = DL.getTypeStoreSize(DataType);
4560 
4561   // SSE4A supports nontemporal stores of float and double at arbitrary
4562   // alignment.
4563   if (ST->hasSSE4A() && (DataType->isFloatTy() || DataType->isDoubleTy()))
4564     return true;
4565 
4566   // Besides the SSE4A subtarget exception above, only aligned stores are
4567   // available nontemporaly on any other subtarget.  And only stores with a size
4568   // of 4..32 bytes (powers of 2, only) are permitted.
4569   if (Alignment < DataSize || DataSize < 4 || DataSize > 32 ||
4570       !isPowerOf2_32(DataSize))
4571     return false;
4572 
4573   // 32-byte vector nontemporal stores are supported by AVX (the equivalent
4574   // loads require AVX2).
4575   if (DataSize == 32)
4576     return ST->hasAVX();
4577   else if (DataSize == 16)
4578     return ST->hasSSE1();
4579   return true;
4580 }
4581 
4582 bool X86TTIImpl::isLegalMaskedExpandLoad(Type *DataTy) {
4583   if (!isa<VectorType>(DataTy))
4584     return false;
4585 
4586   if (!ST->hasAVX512())
4587     return false;
4588 
4589   // The backend can't handle a single element vector.
4590   if (cast<FixedVectorType>(DataTy)->getNumElements() == 1)
4591     return false;
4592 
4593   Type *ScalarTy = cast<VectorType>(DataTy)->getElementType();
4594 
4595   if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
4596     return true;
4597 
4598   if (!ScalarTy->isIntegerTy())
4599     return false;
4600 
4601   unsigned IntWidth = ScalarTy->getIntegerBitWidth();
4602   return IntWidth == 32 || IntWidth == 64 ||
4603          ((IntWidth == 8 || IntWidth == 16) && ST->hasVBMI2());
4604 }
4605 
4606 bool X86TTIImpl::isLegalMaskedCompressStore(Type *DataTy) {
4607   return isLegalMaskedExpandLoad(DataTy);
4608 }
4609 
4610 bool X86TTIImpl::isLegalMaskedGather(Type *DataTy, Align Alignment) {
4611   // Some CPUs have better gather performance than others.
4612   // TODO: Remove the explicit ST->hasAVX512()?, That would mean we would only
4613   // enable gather with a -march.
4614   if (!(ST->hasAVX512() || (ST->hasFastGather() && ST->hasAVX2())))
4615     return false;
4616 
4617   // This function is called now in two cases: from the Loop Vectorizer
4618   // and from the Scalarizer.
4619   // When the Loop Vectorizer asks about legality of the feature,
4620   // the vectorization factor is not calculated yet. The Loop Vectorizer
4621   // sends a scalar type and the decision is based on the width of the
4622   // scalar element.
4623   // Later on, the cost model will estimate usage this intrinsic based on
4624   // the vector type.
4625   // The Scalarizer asks again about legality. It sends a vector type.
4626   // In this case we can reject non-power-of-2 vectors.
4627   // We also reject single element vectors as the type legalizer can't
4628   // scalarize it.
4629   if (auto *DataVTy = dyn_cast<FixedVectorType>(DataTy)) {
4630     unsigned NumElts = DataVTy->getNumElements();
4631     if (NumElts == 1)
4632       return false;
4633   }
4634   Type *ScalarTy = DataTy->getScalarType();
4635   if (ScalarTy->isPointerTy())
4636     return true;
4637 
4638   if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
4639     return true;
4640 
4641   if (!ScalarTy->isIntegerTy())
4642     return false;
4643 
4644   unsigned IntWidth = ScalarTy->getIntegerBitWidth();
4645   return IntWidth == 32 || IntWidth == 64;
4646 }
4647 
4648 bool X86TTIImpl::isLegalMaskedScatter(Type *DataType, Align Alignment) {
4649   // AVX2 doesn't support scatter
4650   if (!ST->hasAVX512())
4651     return false;
4652   return isLegalMaskedGather(DataType, Alignment);
4653 }
4654 
4655 bool X86TTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
4656   EVT VT = TLI->getValueType(DL, DataType);
4657   return TLI->isOperationLegal(IsSigned ? ISD::SDIVREM : ISD::UDIVREM, VT);
4658 }
4659 
4660 bool X86TTIImpl::isFCmpOrdCheaperThanFCmpZero(Type *Ty) {
4661   return false;
4662 }
4663 
4664 bool X86TTIImpl::areInlineCompatible(const Function *Caller,
4665                                      const Function *Callee) const {
4666   const TargetMachine &TM = getTLI()->getTargetMachine();
4667 
4668   // Work this as a subsetting of subtarget features.
4669   const FeatureBitset &CallerBits =
4670       TM.getSubtargetImpl(*Caller)->getFeatureBits();
4671   const FeatureBitset &CalleeBits =
4672       TM.getSubtargetImpl(*Callee)->getFeatureBits();
4673 
4674   FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
4675   FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
4676   return (RealCallerBits & RealCalleeBits) == RealCalleeBits;
4677 }
4678 
4679 bool X86TTIImpl::areFunctionArgsABICompatible(
4680     const Function *Caller, const Function *Callee,
4681     SmallPtrSetImpl<Argument *> &Args) const {
4682   if (!BaseT::areFunctionArgsABICompatible(Caller, Callee, Args))
4683     return false;
4684 
4685   // If we get here, we know the target features match. If one function
4686   // considers 512-bit vectors legal and the other does not, consider them
4687   // incompatible.
4688   const TargetMachine &TM = getTLI()->getTargetMachine();
4689 
4690   if (TM.getSubtarget<X86Subtarget>(*Caller).useAVX512Regs() ==
4691       TM.getSubtarget<X86Subtarget>(*Callee).useAVX512Regs())
4692     return true;
4693 
4694   // Consider the arguments compatible if they aren't vectors or aggregates.
4695   // FIXME: Look at the size of vectors.
4696   // FIXME: Look at the element types of aggregates to see if there are vectors.
4697   // FIXME: The API of this function seems intended to allow arguments
4698   // to be removed from the set, but the caller doesn't check if the set
4699   // becomes empty so that may not work in practice.
4700   return llvm::none_of(Args, [](Argument *A) {
4701     auto *EltTy = cast<PointerType>(A->getType())->getElementType();
4702     return EltTy->isVectorTy() || EltTy->isAggregateType();
4703   });
4704 }
4705 
4706 X86TTIImpl::TTI::MemCmpExpansionOptions
4707 X86TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
4708   TTI::MemCmpExpansionOptions Options;
4709   Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
4710   Options.NumLoadsPerBlock = 2;
4711   // All GPR and vector loads can be unaligned.
4712   Options.AllowOverlappingLoads = true;
4713   if (IsZeroCmp) {
4714     // Only enable vector loads for equality comparison. Right now the vector
4715     // version is not as fast for three way compare (see #33329).
4716     const unsigned PreferredWidth = ST->getPreferVectorWidth();
4717     if (PreferredWidth >= 512 && ST->hasAVX512()) Options.LoadSizes.push_back(64);
4718     if (PreferredWidth >= 256 && ST->hasAVX()) Options.LoadSizes.push_back(32);
4719     if (PreferredWidth >= 128 && ST->hasSSE2()) Options.LoadSizes.push_back(16);
4720   }
4721   if (ST->is64Bit()) {
4722     Options.LoadSizes.push_back(8);
4723   }
4724   Options.LoadSizes.push_back(4);
4725   Options.LoadSizes.push_back(2);
4726   Options.LoadSizes.push_back(1);
4727   return Options;
4728 }
4729 
4730 bool X86TTIImpl::enableInterleavedAccessVectorization() {
4731   // TODO: We expect this to be beneficial regardless of arch,
4732   // but there are currently some unexplained performance artifacts on Atom.
4733   // As a temporary solution, disable on Atom.
4734   return !(ST->isAtom());
4735 }
4736 
4737 // Get estimation for interleaved load/store operations for AVX2.
4738 // \p Factor is the interleaved-access factor (stride) - number of
4739 // (interleaved) elements in the group.
4740 // \p Indices contains the indices for a strided load: when the
4741 // interleaved load has gaps they indicate which elements are used.
4742 // If Indices is empty (or if the number of indices is equal to the size
4743 // of the interleaved-access as given in \p Factor) the access has no gaps.
4744 //
4745 // As opposed to AVX-512, AVX2 does not have generic shuffles that allow
4746 // computing the cost using a generic formula as a function of generic
4747 // shuffles. We therefore use a lookup table instead, filled according to
4748 // the instruction sequences that codegen currently generates.
4749 InstructionCost X86TTIImpl::getInterleavedMemoryOpCostAVX2(
4750     unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
4751     ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
4752     TTI::TargetCostKind CostKind, bool UseMaskForCond, bool UseMaskForGaps) {
4753 
4754   if (UseMaskForCond || UseMaskForGaps)
4755     return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
4756                                              Alignment, AddressSpace, CostKind,
4757                                              UseMaskForCond, UseMaskForGaps);
4758 
4759   // We currently Support only fully-interleaved groups, with no gaps.
4760   // TODO: Support also strided loads (interleaved-groups with gaps).
4761   if (Indices.size() && Indices.size() != Factor)
4762     return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
4763                                              Alignment, AddressSpace,
4764                                              CostKind);
4765 
4766   // VecTy for interleave memop is <VF*Factor x Elt>.
4767   // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
4768   // VecTy = <12 x i32>.
4769   MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
4770 
4771   // This function can be called with VecTy=<6xi128>, Factor=3, in which case
4772   // the VF=2, while v2i128 is an unsupported MVT vector type
4773   // (see MachineValueType.h::getVectorVT()).
4774   if (!LegalVT.isVector())
4775     return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
4776                                              Alignment, AddressSpace,
4777                                              CostKind);
4778 
4779   unsigned VF = VecTy->getNumElements() / Factor;
4780   Type *ScalarTy = VecTy->getElementType();
4781   // Deduplicate entries, model floats/pointers as appropriately-sized integers.
4782   if (!ScalarTy->isIntegerTy())
4783     ScalarTy =
4784         Type::getIntNTy(ScalarTy->getContext(), DL.getTypeSizeInBits(ScalarTy));
4785 
4786   // Get the cost of all the memory operations.
4787   InstructionCost MemOpCosts = getMemoryOpCost(
4788       Opcode, VecTy, MaybeAlign(Alignment), AddressSpace, CostKind);
4789 
4790   auto *VT = FixedVectorType::get(ScalarTy, VF);
4791   EVT ETy = TLI->getValueType(DL, VT);
4792   if (!ETy.isSimple())
4793     return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
4794                                              Alignment, AddressSpace,
4795                                              CostKind);
4796 
4797   // TODO: Complete for other data-types and strides.
4798   // Each combination of Stride, element bit width and VF results in a different
4799   // sequence; The cost tables are therefore accessed with:
4800   // Factor (stride) and VectorType=VFxiN.
4801   // The Cost accounts only for the shuffle sequence;
4802   // The cost of the loads/stores is accounted for separately.
4803   //
4804   static const CostTblEntry AVX2InterleavedLoadTbl[] = {
4805     { 2, MVT::v4i64, 6 }, //(load 8i64 and) deinterleave into 2 x 4i64
4806 
4807     { 3, MVT::v2i8,  10 }, //(load 6i8 and)  deinterleave into 3 x 2i8
4808     { 3, MVT::v4i8,  4 },  //(load 12i8 and) deinterleave into 3 x 4i8
4809     { 3, MVT::v8i8,  9 },  //(load 24i8 and) deinterleave into 3 x 8i8
4810     { 3, MVT::v16i8, 11},  //(load 48i8 and) deinterleave into 3 x 16i8
4811     { 3, MVT::v32i8, 13},  //(load 96i8 and) deinterleave into 3 x 32i8
4812 
4813     { 3, MVT::v8i32, 17 }, //(load 24i32 and)deinterleave into 3 x 8i32
4814 
4815     { 4, MVT::v2i8,  12 }, //(load 8i8 and)   deinterleave into 4 x 2i8
4816     { 4, MVT::v4i8,  4 },  //(load 16i8 and)  deinterleave into 4 x 4i8
4817     { 4, MVT::v8i8,  20 }, //(load 32i8 and)  deinterleave into 4 x 8i8
4818     { 4, MVT::v16i8, 39 }, //(load 64i8 and)  deinterleave into 4 x 16i8
4819     { 4, MVT::v32i8, 80 }, //(load 128i8 and) deinterleave into 4 x 32i8
4820 
4821     { 8, MVT::v8i32, 40 }  //(load 64i32 and)deinterleave into 8 x 8i32
4822   };
4823 
4824   static const CostTblEntry AVX2InterleavedStoreTbl[] = {
4825     { 2, MVT::v4i64, 6 }, //interleave into 2 x 4i64 into 8i64 (and store)
4826 
4827     { 3, MVT::v2i8,  7 },  //interleave 3 x 2i8  into 6i8 (and store)
4828     { 3, MVT::v4i8,  8 },  //interleave 3 x 4i8  into 12i8 (and store)
4829     { 3, MVT::v8i8,  11 }, //interleave 3 x 8i8  into 24i8 (and store)
4830     { 3, MVT::v16i8, 11 }, //interleave 3 x 16i8 into 48i8 (and store)
4831     { 3, MVT::v32i8, 13 }, //interleave 3 x 32i8 into 96i8 (and store)
4832 
4833     { 4, MVT::v2i8,  12 }, //interleave 4 x 2i8  into 8i8 (and store)
4834     { 4, MVT::v4i8,  9 },  //interleave 4 x 4i8  into 16i8 (and store)
4835     { 4, MVT::v8i8,  10 }, //interleave 4 x 8i8  into 32i8 (and store)
4836     { 4, MVT::v16i8, 10 }, //interleave 4 x 16i8 into 64i8 (and store)
4837     { 4, MVT::v32i8, 12 }  //interleave 4 x 32i8 into 128i8 (and store)
4838   };
4839 
4840   if (Opcode == Instruction::Load) {
4841     if (const auto *Entry =
4842             CostTableLookup(AVX2InterleavedLoadTbl, Factor, ETy.getSimpleVT()))
4843       return MemOpCosts + Entry->Cost;
4844   } else {
4845     assert(Opcode == Instruction::Store &&
4846            "Expected Store Instruction at this  point");
4847     if (const auto *Entry =
4848             CostTableLookup(AVX2InterleavedStoreTbl, Factor, ETy.getSimpleVT()))
4849       return MemOpCosts + Entry->Cost;
4850   }
4851 
4852   return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
4853                                            Alignment, AddressSpace, CostKind);
4854 }
4855 
4856 // Get estimation for interleaved load/store operations and strided load.
4857 // \p Indices contains indices for strided load.
4858 // \p Factor - the factor of interleaving.
4859 // AVX-512 provides 3-src shuffles that significantly reduces the cost.
4860 InstructionCost X86TTIImpl::getInterleavedMemoryOpCostAVX512(
4861     unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
4862     ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
4863     TTI::TargetCostKind CostKind, bool UseMaskForCond, bool UseMaskForGaps) {
4864 
4865   if (UseMaskForCond || UseMaskForGaps)
4866     return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
4867                                              Alignment, AddressSpace, CostKind,
4868                                              UseMaskForCond, UseMaskForGaps);
4869 
4870   // VecTy for interleave memop is <VF*Factor x Elt>.
4871   // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
4872   // VecTy = <12 x i32>.
4873 
4874   // Calculate the number of memory operations (NumOfMemOps), required
4875   // for load/store the VecTy.
4876   MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
4877   unsigned VecTySize = DL.getTypeStoreSize(VecTy);
4878   unsigned LegalVTSize = LegalVT.getStoreSize();
4879   unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;
4880 
4881   // Get the cost of one memory operation.
4882   auto *SingleMemOpTy = FixedVectorType::get(VecTy->getElementType(),
4883                                              LegalVT.getVectorNumElements());
4884   InstructionCost MemOpCost = getMemoryOpCost(
4885       Opcode, SingleMemOpTy, MaybeAlign(Alignment), AddressSpace, CostKind);
4886 
4887   unsigned VF = VecTy->getNumElements() / Factor;
4888   MVT VT = MVT::getVectorVT(MVT::getVT(VecTy->getScalarType()), VF);
4889 
4890   if (Opcode == Instruction::Load) {
4891     // The tables (AVX512InterleavedLoadTbl and AVX512InterleavedStoreTbl)
4892     // contain the cost of the optimized shuffle sequence that the
4893     // X86InterleavedAccess pass will generate.
4894     // The cost of loads and stores are computed separately from the table.
4895 
4896     // X86InterleavedAccess support only the following interleaved-access group.
4897     static const CostTblEntry AVX512InterleavedLoadTbl[] = {
4898         {3, MVT::v16i8, 12}, //(load 48i8 and) deinterleave into 3 x 16i8
4899         {3, MVT::v32i8, 14}, //(load 96i8 and) deinterleave into 3 x 32i8
4900         {3, MVT::v64i8, 22}, //(load 96i8 and) deinterleave into 3 x 32i8
4901     };
4902 
4903     if (const auto *Entry =
4904             CostTableLookup(AVX512InterleavedLoadTbl, Factor, VT))
4905       return NumOfMemOps * MemOpCost + Entry->Cost;
4906     //If an entry does not exist, fallback to the default implementation.
4907 
4908     // Kind of shuffle depends on number of loaded values.
4909     // If we load the entire data in one register, we can use a 1-src shuffle.
4910     // Otherwise, we'll merge 2 sources in each operation.
4911     TTI::ShuffleKind ShuffleKind =
4912         (NumOfMemOps > 1) ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc;
4913 
4914     InstructionCost ShuffleCost =
4915         getShuffleCost(ShuffleKind, SingleMemOpTy, None, 0, nullptr);
4916 
4917     unsigned NumOfLoadsInInterleaveGrp =
4918         Indices.size() ? Indices.size() : Factor;
4919     auto *ResultTy = FixedVectorType::get(VecTy->getElementType(),
4920                                           VecTy->getNumElements() / Factor);
4921     InstructionCost NumOfResults =
4922         getTLI()->getTypeLegalizationCost(DL, ResultTy).first *
4923         NumOfLoadsInInterleaveGrp;
4924 
4925     // About a half of the loads may be folded in shuffles when we have only
4926     // one result. If we have more than one result, we do not fold loads at all.
4927     unsigned NumOfUnfoldedLoads =
4928         NumOfResults > 1 ? NumOfMemOps : NumOfMemOps / 2;
4929 
4930     // Get a number of shuffle operations per result.
4931     unsigned NumOfShufflesPerResult =
4932         std::max((unsigned)1, (unsigned)(NumOfMemOps - 1));
4933 
4934     // The SK_MergeTwoSrc shuffle clobbers one of src operands.
4935     // When we have more than one destination, we need additional instructions
4936     // to keep sources.
4937     InstructionCost NumOfMoves = 0;
4938     if (NumOfResults > 1 && ShuffleKind == TTI::SK_PermuteTwoSrc)
4939       NumOfMoves = NumOfResults * NumOfShufflesPerResult / 2;
4940 
4941     InstructionCost Cost = NumOfResults * NumOfShufflesPerResult * ShuffleCost +
4942                            NumOfUnfoldedLoads * MemOpCost + NumOfMoves;
4943 
4944     return Cost;
4945   }
4946 
4947   // Store.
4948   assert(Opcode == Instruction::Store &&
4949          "Expected Store Instruction at this  point");
4950   // X86InterleavedAccess support only the following interleaved-access group.
4951   static const CostTblEntry AVX512InterleavedStoreTbl[] = {
4952       {3, MVT::v16i8, 12}, // interleave 3 x 16i8 into 48i8 (and store)
4953       {3, MVT::v32i8, 14}, // interleave 3 x 32i8 into 96i8 (and store)
4954       {3, MVT::v64i8, 26}, // interleave 3 x 64i8 into 96i8 (and store)
4955 
4956       {4, MVT::v8i8, 10},  // interleave 4 x 8i8  into 32i8  (and store)
4957       {4, MVT::v16i8, 11}, // interleave 4 x 16i8 into 64i8  (and store)
4958       {4, MVT::v32i8, 14}, // interleave 4 x 32i8 into 128i8 (and store)
4959       {4, MVT::v64i8, 24}  // interleave 4 x 32i8 into 256i8 (and store)
4960   };
4961 
4962   if (const auto *Entry =
4963           CostTableLookup(AVX512InterleavedStoreTbl, Factor, VT))
4964     return NumOfMemOps * MemOpCost + Entry->Cost;
4965   //If an entry does not exist, fallback to the default implementation.
4966 
4967   // There is no strided stores meanwhile. And store can't be folded in
4968   // shuffle.
4969   unsigned NumOfSources = Factor; // The number of values to be merged.
4970   InstructionCost ShuffleCost =
4971       getShuffleCost(TTI::SK_PermuteTwoSrc, SingleMemOpTy, None, 0, nullptr);
4972   unsigned NumOfShufflesPerStore = NumOfSources - 1;
4973 
4974   // The SK_MergeTwoSrc shuffle clobbers one of src operands.
4975   // We need additional instructions to keep sources.
4976   unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2;
4977   InstructionCost Cost =
4978       NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) +
4979       NumOfMoves;
4980   return Cost;
4981 }
4982 
4983 InstructionCost X86TTIImpl::getInterleavedMemoryOpCost(
4984     unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
4985     Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
4986     bool UseMaskForCond, bool UseMaskForGaps) {
4987   auto isSupportedOnAVX512 = [](Type *VecTy, bool HasBW) {
4988     Type *EltTy = cast<VectorType>(VecTy)->getElementType();
4989     if (EltTy->isFloatTy() || EltTy->isDoubleTy() || EltTy->isIntegerTy(64) ||
4990         EltTy->isIntegerTy(32) || EltTy->isPointerTy())
4991       return true;
4992     if (EltTy->isIntegerTy(16) || EltTy->isIntegerTy(8))
4993       return HasBW;
4994     return false;
4995   };
4996   if (ST->hasAVX512() && isSupportedOnAVX512(VecTy, ST->hasBWI()))
4997     return getInterleavedMemoryOpCostAVX512(
4998         Opcode, cast<FixedVectorType>(VecTy), Factor, Indices, Alignment,
4999         AddressSpace, CostKind, UseMaskForCond, UseMaskForGaps);
5000   if (ST->hasAVX2())
5001     return getInterleavedMemoryOpCostAVX2(
5002         Opcode, cast<FixedVectorType>(VecTy), Factor, Indices, Alignment,
5003         AddressSpace, CostKind, UseMaskForCond, UseMaskForGaps);
5004 
5005   return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
5006                                            Alignment, AddressSpace, CostKind,
5007                                            UseMaskForCond, UseMaskForGaps);
5008 }
5009