1 //===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements a TargetTransformInfo analysis pass specific to the
10 /// X86 target machine. It uses the target's detailed information to provide
11 /// more precise answers to certain TTI queries, while letting the target
12 /// independent and default TTI implementations handle the rest.
13 ///
14 //===----------------------------------------------------------------------===//
15 /// About Cost Model numbers used below it's necessary to say the following:
16 /// the numbers correspond to some "generic" X86 CPU instead of usage of
17 /// concrete CPU model. Usually the numbers correspond to CPU where the feature
18 /// apeared at the first time. For example, if we do Subtarget.hasSSE42() in
19 /// the lookups below the cost is based on Nehalem as that was the first CPU
20 /// to support that feature level and thus has most likely the worst case cost.
21 /// Some examples of other technologies/CPUs:
22 ///   SSE 3   - Pentium4 / Athlon64
23 ///   SSE 4.1 - Penryn
24 ///   SSE 4.2 - Nehalem
25 ///   AVX     - Sandy Bridge
26 ///   AVX2    - Haswell
27 ///   AVX-512 - Xeon Phi / Skylake
28 /// And some examples of instruction target dependent costs (latency)
29 ///                   divss     sqrtss          rsqrtss
30 ///   AMD K7            11-16     19              3
31 ///   Piledriver        9-24      13-15           5
32 ///   Jaguar            14        16              2
33 ///   Pentium II,III    18        30              2
34 ///   Nehalem           7-14      7-18            3
35 ///   Haswell           10-13     11              5
36 /// TODO: Develop and implement  the target dependent cost model and
37 /// specialize cost numbers for different Cost Model Targets such as throughput,
38 /// code size, latency and uop count.
39 //===----------------------------------------------------------------------===//
40 
41 #include "X86TargetTransformInfo.h"
42 #include "llvm/Analysis/TargetTransformInfo.h"
43 #include "llvm/CodeGen/BasicTTIImpl.h"
44 #include "llvm/CodeGen/CostTable.h"
45 #include "llvm/CodeGen/TargetLowering.h"
46 #include "llvm/IR/IntrinsicInst.h"
47 #include "llvm/Support/Debug.h"
48 
49 using namespace llvm;
50 
51 #define DEBUG_TYPE "x86tti"
52 
53 //===----------------------------------------------------------------------===//
54 //
55 // X86 cost model.
56 //
57 //===----------------------------------------------------------------------===//
58 
59 TargetTransformInfo::PopcntSupportKind
60 X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
61   assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
62   // TODO: Currently the __builtin_popcount() implementation using SSE3
63   //   instructions is inefficient. Once the problem is fixed, we should
64   //   call ST->hasSSE3() instead of ST->hasPOPCNT().
65   return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
66 }
67 
68 llvm::Optional<unsigned> X86TTIImpl::getCacheSize(
69   TargetTransformInfo::CacheLevel Level) const {
70   switch (Level) {
71   case TargetTransformInfo::CacheLevel::L1D:
72     //   - Penryn
73     //   - Nehalem
74     //   - Westmere
75     //   - Sandy Bridge
76     //   - Ivy Bridge
77     //   - Haswell
78     //   - Broadwell
79     //   - Skylake
80     //   - Kabylake
81     return 32 * 1024;  //  32 KByte
82   case TargetTransformInfo::CacheLevel::L2D:
83     //   - Penryn
84     //   - Nehalem
85     //   - Westmere
86     //   - Sandy Bridge
87     //   - Ivy Bridge
88     //   - Haswell
89     //   - Broadwell
90     //   - Skylake
91     //   - Kabylake
92     return 256 * 1024; // 256 KByte
93   }
94 
95   llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
96 }
97 
98 llvm::Optional<unsigned> X86TTIImpl::getCacheAssociativity(
99   TargetTransformInfo::CacheLevel Level) const {
100   //   - Penryn
101   //   - Nehalem
102   //   - Westmere
103   //   - Sandy Bridge
104   //   - Ivy Bridge
105   //   - Haswell
106   //   - Broadwell
107   //   - Skylake
108   //   - Kabylake
109   switch (Level) {
110   case TargetTransformInfo::CacheLevel::L1D:
111     LLVM_FALLTHROUGH;
112   case TargetTransformInfo::CacheLevel::L2D:
113     return 8;
114   }
115 
116   llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
117 }
118 
119 unsigned X86TTIImpl::getNumberOfRegisters(unsigned ClassID) const {
120   bool Vector = (ClassID == 1);
121   if (Vector && !ST->hasSSE1())
122     return 0;
123 
124   if (ST->is64Bit()) {
125     if (Vector && ST->hasAVX512())
126       return 32;
127     return 16;
128   }
129   return 8;
130 }
131 
132 TypeSize
133 X86TTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
134   unsigned PreferVectorWidth = ST->getPreferVectorWidth();
135   switch (K) {
136   case TargetTransformInfo::RGK_Scalar:
137     return TypeSize::getFixed(ST->is64Bit() ? 64 : 32);
138   case TargetTransformInfo::RGK_FixedWidthVector:
139     if (ST->hasAVX512() && PreferVectorWidth >= 512)
140       return TypeSize::getFixed(512);
141     if (ST->hasAVX() && PreferVectorWidth >= 256)
142       return TypeSize::getFixed(256);
143     if (ST->hasSSE1() && PreferVectorWidth >= 128)
144       return TypeSize::getFixed(128);
145     return TypeSize::getFixed(0);
146   case TargetTransformInfo::RGK_ScalableVector:
147     return TypeSize::getScalable(0);
148   }
149 
150   llvm_unreachable("Unsupported register kind");
151 }
152 
153 unsigned X86TTIImpl::getLoadStoreVecRegBitWidth(unsigned) const {
154   return getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
155       .getFixedSize();
156 }
157 
158 unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
159   // If the loop will not be vectorized, don't interleave the loop.
160   // Let regular unroll to unroll the loop, which saves the overflow
161   // check and memory check cost.
162   if (VF == 1)
163     return 1;
164 
165   if (ST->isAtom())
166     return 1;
167 
168   // Sandybridge and Haswell have multiple execution ports and pipelined
169   // vector units.
170   if (ST->hasAVX())
171     return 4;
172 
173   return 2;
174 }
175 
176 InstructionCost X86TTIImpl::getArithmeticInstrCost(
177     unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
178     TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
179     TTI::OperandValueProperties Opd1PropInfo,
180     TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
181     const Instruction *CxtI) {
182   // TODO: Handle more cost kinds.
183   if (CostKind != TTI::TCK_RecipThroughput)
184     return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
185                                          Op2Info, Opd1PropInfo,
186                                          Opd2PropInfo, Args, CxtI);
187 
188   // vXi8 multiplications are always promoted to vXi16.
189   if (Opcode == Instruction::Mul && Ty->isVectorTy() &&
190       Ty->getScalarSizeInBits() == 8) {
191     Type *WideVecTy =
192         VectorType::getExtendedElementVectorType(cast<VectorType>(Ty));
193     return getCastInstrCost(Instruction::ZExt, WideVecTy, Ty,
194                             TargetTransformInfo::CastContextHint::None,
195                             CostKind) +
196            getCastInstrCost(Instruction::Trunc, Ty, WideVecTy,
197                             TargetTransformInfo::CastContextHint::None,
198                             CostKind) +
199            getArithmeticInstrCost(Opcode, WideVecTy, CostKind, Op1Info, Op2Info,
200                                   Opd1PropInfo, Opd2PropInfo);
201   }
202 
203   // Legalize the type.
204   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
205 
206   int ISD = TLI->InstructionOpcodeToISD(Opcode);
207   assert(ISD && "Invalid opcode");
208 
209   if (ISD == ISD::MUL && Args.size() == 2 && LT.second.isVector() &&
210       LT.second.getScalarType() == MVT::i32) {
211     // Check if the operands can be represented as a smaller datatype.
212     bool Op1Signed = false, Op2Signed = false;
213     unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
214     unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);
215     unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);
216 
217     // If both are representable as i15 and at least one is constant,
218     // zero-extended, or sign-extended from vXi16 (or less pre-SSE41) then we
219     // can treat this as PMADDWD which has the same costs as a vXi16 multiply.
220     if (OpMinSize <= 15 && !ST->isPMADDWDSlow()) {
221       bool Op1Constant =
222           isa<ConstantDataVector>(Args[0]) || isa<ConstantVector>(Args[0]);
223       bool Op2Constant =
224           isa<ConstantDataVector>(Args[1]) || isa<ConstantVector>(Args[1]);
225       bool Op1Sext = isa<SExtInst>(Args[0]) &&
226                      (Op1MinSize == 15 || (Op1MinSize < 15 && !ST->hasSSE41()));
227       bool Op2Sext = isa<SExtInst>(Args[1]) &&
228                      (Op2MinSize == 15 || (Op2MinSize < 15 && !ST->hasSSE41()));
229 
230       bool IsZeroExtended = !Op1Signed || !Op2Signed;
231       bool IsConstant = Op1Constant || Op2Constant;
232       bool IsSext = Op1Sext || Op2Sext;
233       if (IsConstant || IsZeroExtended || IsSext)
234         LT.second =
235             MVT::getVectorVT(MVT::i16, 2 * LT.second.getVectorNumElements());
236     }
237   }
238 
239   if ((ISD == ISD::MUL || ISD == ISD::SDIV || ISD == ISD::SREM ||
240        ISD == ISD::UDIV || ISD == ISD::UREM) &&
241       (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
242        Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
243       Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
244     // Vector multiply by pow2 will be simplified to shifts.
245     if (ISD == ISD::MUL) {
246       InstructionCost Cost = getArithmeticInstrCost(
247           Instruction::Shl, Ty, CostKind, Op1Info, Op2Info,
248           TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
249       return Cost;
250     }
251 
252     if (ISD == ISD::SDIV || ISD == ISD::SREM) {
253       // On X86, vector signed division by constants power-of-two are
254       // normally expanded to the sequence SRA + SRL + ADD + SRA.
255       // The OperandValue properties may not be the same as that of the previous
256       // operation; conservatively assume OP_None.
257       InstructionCost Cost =
258           2 * getArithmeticInstrCost(Instruction::AShr, Ty, CostKind, Op1Info,
259                                      Op2Info, TargetTransformInfo::OP_None,
260                                      TargetTransformInfo::OP_None);
261       Cost += getArithmeticInstrCost(Instruction::LShr, Ty, CostKind, Op1Info,
262                                      Op2Info, TargetTransformInfo::OP_None,
263                                      TargetTransformInfo::OP_None);
264       Cost += getArithmeticInstrCost(Instruction::Add, Ty, CostKind, Op1Info,
265                                      Op2Info, TargetTransformInfo::OP_None,
266                                      TargetTransformInfo::OP_None);
267 
268       if (ISD == ISD::SREM) {
269         // For SREM: (X % C) is the equivalent of (X - (X/C)*C)
270         Cost += getArithmeticInstrCost(Instruction::Mul, Ty, CostKind, Op1Info,
271                                        Op2Info);
272         Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind, Op1Info,
273                                        Op2Info);
274       }
275 
276       return Cost;
277     }
278 
279     // Vector unsigned division/remainder will be simplified to shifts/masks.
280     if (ISD == ISD::UDIV)
281       return getArithmeticInstrCost(Instruction::LShr, Ty, CostKind, Op1Info,
282                                     Op2Info, TargetTransformInfo::OP_None,
283                                     TargetTransformInfo::OP_None);
284     // UREM
285     return getArithmeticInstrCost(Instruction::And, Ty, CostKind, Op1Info,
286                                   Op2Info, TargetTransformInfo::OP_None,
287                                   TargetTransformInfo::OP_None);
288   }
289 
290   static const CostTblEntry GLMCostTable[] = {
291     { ISD::FDIV,  MVT::f32,   18 }, // divss
292     { ISD::FDIV,  MVT::v4f32, 35 }, // divps
293     { ISD::FDIV,  MVT::f64,   33 }, // divsd
294     { ISD::FDIV,  MVT::v2f64, 65 }, // divpd
295   };
296 
297   if (ST->useGLMDivSqrtCosts())
298     if (const auto *Entry = CostTableLookup(GLMCostTable, ISD,
299                                             LT.second))
300       return LT.first * Entry->Cost;
301 
302   static const CostTblEntry SLMCostTable[] = {
303     { ISD::MUL,   MVT::v4i32, 11 }, // pmulld
304     { ISD::MUL,   MVT::v8i16, 2  }, // pmullw
305     { ISD::FMUL,  MVT::f64,   2  }, // mulsd
306     { ISD::FMUL,  MVT::v2f64, 4  }, // mulpd
307     { ISD::FMUL,  MVT::v4f32, 2  }, // mulps
308     { ISD::FDIV,  MVT::f32,   17 }, // divss
309     { ISD::FDIV,  MVT::v4f32, 39 }, // divps
310     { ISD::FDIV,  MVT::f64,   32 }, // divsd
311     { ISD::FDIV,  MVT::v2f64, 69 }, // divpd
312     { ISD::FADD,  MVT::v2f64, 2  }, // addpd
313     { ISD::FSUB,  MVT::v2f64, 2  }, // subpd
314     // v2i64/v4i64 mul is custom lowered as a series of long:
315     // multiplies(3), shifts(3) and adds(2)
316     // slm muldq version throughput is 2 and addq throughput 4
317     // thus: 3X2 (muldq throughput) + 3X1 (shift throughput) +
318     //       3X4 (addq throughput) = 17
319     { ISD::MUL,   MVT::v2i64, 17 },
320     // slm addq\subq throughput is 4
321     { ISD::ADD,   MVT::v2i64, 4  },
322     { ISD::SUB,   MVT::v2i64, 4  },
323   };
324 
325   if (ST->useSLMArithCosts()) {
326     if (Args.size() == 2 && ISD == ISD::MUL && LT.second == MVT::v4i32) {
327       // Check if the operands can be shrinked into a smaller datatype.
328       // TODO: Merge this into generiic vXi32 MUL patterns above.
329       bool Op1Signed = false;
330       unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
331       bool Op2Signed = false;
332       unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);
333 
334       bool SignedMode = Op1Signed || Op2Signed;
335       unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);
336 
337       if (OpMinSize <= 7)
338         return LT.first * 3; // pmullw/sext
339       if (!SignedMode && OpMinSize <= 8)
340         return LT.first * 3; // pmullw/zext
341       if (OpMinSize <= 15)
342         return LT.first * 5; // pmullw/pmulhw/pshuf
343       if (!SignedMode && OpMinSize <= 16)
344         return LT.first * 5; // pmullw/pmulhw/pshuf
345     }
346 
347     if (const auto *Entry = CostTableLookup(SLMCostTable, ISD,
348                                             LT.second)) {
349       return LT.first * Entry->Cost;
350     }
351   }
352 
353   static const CostTblEntry AVX512BWUniformConstCostTable[] = {
354     { ISD::SHL,  MVT::v64i8,   2 }, // psllw + pand.
355     { ISD::SRL,  MVT::v64i8,   2 }, // psrlw + pand.
356     { ISD::SRA,  MVT::v64i8,   4 }, // psrlw, pand, pxor, psubb.
357   };
358 
359   if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
360       ST->hasBWI()) {
361     if (const auto *Entry = CostTableLookup(AVX512BWUniformConstCostTable, ISD,
362                                             LT.second))
363       return LT.first * Entry->Cost;
364   }
365 
366   static const CostTblEntry AVX512UniformConstCostTable[] = {
367     { ISD::SRA,  MVT::v2i64,   1 },
368     { ISD::SRA,  MVT::v4i64,   1 },
369     { ISD::SRA,  MVT::v8i64,   1 },
370 
371     { ISD::SHL,  MVT::v64i8,   4 }, // psllw + pand.
372     { ISD::SRL,  MVT::v64i8,   4 }, // psrlw + pand.
373     { ISD::SRA,  MVT::v64i8,   8 }, // psrlw, pand, pxor, psubb.
374 
375     { ISD::SDIV, MVT::v16i32,  6 }, // pmuludq sequence
376     { ISD::SREM, MVT::v16i32,  8 }, // pmuludq+mul+sub sequence
377     { ISD::UDIV, MVT::v16i32,  5 }, // pmuludq sequence
378     { ISD::UREM, MVT::v16i32,  7 }, // pmuludq+mul+sub sequence
379   };
380 
381   if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
382       ST->hasAVX512()) {
383     if (const auto *Entry = CostTableLookup(AVX512UniformConstCostTable, ISD,
384                                             LT.second))
385       return LT.first * Entry->Cost;
386   }
387 
388   static const CostTblEntry AVX2UniformConstCostTable[] = {
389     { ISD::SHL,  MVT::v32i8,   2 }, // psllw + pand.
390     { ISD::SRL,  MVT::v32i8,   2 }, // psrlw + pand.
391     { ISD::SRA,  MVT::v32i8,   4 }, // psrlw, pand, pxor, psubb.
392 
393     { ISD::SRA,  MVT::v4i64,   4 }, // 2 x psrad + shuffle.
394 
395     { ISD::SDIV, MVT::v8i32,   6 }, // pmuludq sequence
396     { ISD::SREM, MVT::v8i32,   8 }, // pmuludq+mul+sub sequence
397     { ISD::UDIV, MVT::v8i32,   5 }, // pmuludq sequence
398     { ISD::UREM, MVT::v8i32,   7 }, // pmuludq+mul+sub sequence
399   };
400 
401   if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
402       ST->hasAVX2()) {
403     if (const auto *Entry = CostTableLookup(AVX2UniformConstCostTable, ISD,
404                                             LT.second))
405       return LT.first * Entry->Cost;
406   }
407 
408   static const CostTblEntry SSE2UniformConstCostTable[] = {
409     { ISD::SHL,  MVT::v16i8,     2 }, // psllw + pand.
410     { ISD::SRL,  MVT::v16i8,     2 }, // psrlw + pand.
411     { ISD::SRA,  MVT::v16i8,     4 }, // psrlw, pand, pxor, psubb.
412 
413     { ISD::SHL,  MVT::v32i8,   4+2 }, // 2*(psllw + pand) + split.
414     { ISD::SRL,  MVT::v32i8,   4+2 }, // 2*(psrlw + pand) + split.
415     { ISD::SRA,  MVT::v32i8,   8+2 }, // 2*(psrlw, pand, pxor, psubb) + split.
416 
417     { ISD::SDIV, MVT::v8i32,  12+2 }, // 2*pmuludq sequence + split.
418     { ISD::SREM, MVT::v8i32,  16+2 }, // 2*pmuludq+mul+sub sequence + split.
419     { ISD::SDIV, MVT::v4i32,     6 }, // pmuludq sequence
420     { ISD::SREM, MVT::v4i32,     8 }, // pmuludq+mul+sub sequence
421     { ISD::UDIV, MVT::v8i32,  10+2 }, // 2*pmuludq sequence + split.
422     { ISD::UREM, MVT::v8i32,  14+2 }, // 2*pmuludq+mul+sub sequence + split.
423     { ISD::UDIV, MVT::v4i32,     5 }, // pmuludq sequence
424     { ISD::UREM, MVT::v4i32,     7 }, // pmuludq+mul+sub sequence
425   };
426 
427   // XOP has faster vXi8 shifts.
428   if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
429       ST->hasSSE2() && !ST->hasXOP()) {
430     if (const auto *Entry =
431             CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second))
432       return LT.first * Entry->Cost;
433   }
434 
435   static const CostTblEntry AVX512BWConstCostTable[] = {
436     { ISD::SDIV, MVT::v64i8,  14 }, // 2*ext+2*pmulhw sequence
437     { ISD::SREM, MVT::v64i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
438     { ISD::UDIV, MVT::v64i8,  14 }, // 2*ext+2*pmulhw sequence
439     { ISD::UREM, MVT::v64i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
440     { ISD::SDIV, MVT::v32i16,  6 }, // vpmulhw sequence
441     { ISD::SREM, MVT::v32i16,  8 }, // vpmulhw+mul+sub sequence
442     { ISD::UDIV, MVT::v32i16,  6 }, // vpmulhuw sequence
443     { ISD::UREM, MVT::v32i16,  8 }, // vpmulhuw+mul+sub sequence
444   };
445 
446   if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
447        Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
448       ST->hasBWI()) {
449     if (const auto *Entry =
450             CostTableLookup(AVX512BWConstCostTable, ISD, LT.second))
451       return LT.first * Entry->Cost;
452   }
453 
454   static const CostTblEntry AVX512ConstCostTable[] = {
455     { ISD::SDIV, MVT::v16i32, 15 }, // vpmuldq sequence
456     { ISD::SREM, MVT::v16i32, 17 }, // vpmuldq+mul+sub sequence
457     { ISD::UDIV, MVT::v16i32, 15 }, // vpmuludq sequence
458     { ISD::UREM, MVT::v16i32, 17 }, // vpmuludq+mul+sub sequence
459     { ISD::SDIV, MVT::v64i8,  28 }, // 4*ext+4*pmulhw sequence
460     { ISD::SREM, MVT::v64i8,  32 }, // 4*ext+4*pmulhw+mul+sub sequence
461     { ISD::UDIV, MVT::v64i8,  28 }, // 4*ext+4*pmulhw sequence
462     { ISD::UREM, MVT::v64i8,  32 }, // 4*ext+4*pmulhw+mul+sub sequence
463     { ISD::SDIV, MVT::v32i16, 12 }, // 2*vpmulhw sequence
464     { ISD::SREM, MVT::v32i16, 16 }, // 2*vpmulhw+mul+sub sequence
465     { ISD::UDIV, MVT::v32i16, 12 }, // 2*vpmulhuw sequence
466     { ISD::UREM, MVT::v32i16, 16 }, // 2*vpmulhuw+mul+sub sequence
467   };
468 
469   if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
470        Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
471       ST->hasAVX512()) {
472     if (const auto *Entry =
473             CostTableLookup(AVX512ConstCostTable, ISD, LT.second))
474       return LT.first * Entry->Cost;
475   }
476 
477   static const CostTblEntry AVX2ConstCostTable[] = {
478     { ISD::SDIV, MVT::v32i8,  14 }, // 2*ext+2*pmulhw sequence
479     { ISD::SREM, MVT::v32i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
480     { ISD::UDIV, MVT::v32i8,  14 }, // 2*ext+2*pmulhw sequence
481     { ISD::UREM, MVT::v32i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
482     { ISD::SDIV, MVT::v16i16,  6 }, // vpmulhw sequence
483     { ISD::SREM, MVT::v16i16,  8 }, // vpmulhw+mul+sub sequence
484     { ISD::UDIV, MVT::v16i16,  6 }, // vpmulhuw sequence
485     { ISD::UREM, MVT::v16i16,  8 }, // vpmulhuw+mul+sub sequence
486     { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
487     { ISD::SREM, MVT::v8i32,  19 }, // vpmuldq+mul+sub sequence
488     { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
489     { ISD::UREM, MVT::v8i32,  19 }, // vpmuludq+mul+sub sequence
490   };
491 
492   if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
493        Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
494       ST->hasAVX2()) {
495     if (const auto *Entry = CostTableLookup(AVX2ConstCostTable, ISD, LT.second))
496       return LT.first * Entry->Cost;
497   }
498 
499   static const CostTblEntry SSE2ConstCostTable[] = {
500     { ISD::SDIV, MVT::v32i8,  28+2 }, // 4*ext+4*pmulhw sequence + split.
501     { ISD::SREM, MVT::v32i8,  32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
502     { ISD::SDIV, MVT::v16i8,    14 }, // 2*ext+2*pmulhw sequence
503     { ISD::SREM, MVT::v16i8,    16 }, // 2*ext+2*pmulhw+mul+sub sequence
504     { ISD::UDIV, MVT::v32i8,  28+2 }, // 4*ext+4*pmulhw sequence + split.
505     { ISD::UREM, MVT::v32i8,  32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
506     { ISD::UDIV, MVT::v16i8,    14 }, // 2*ext+2*pmulhw sequence
507     { ISD::UREM, MVT::v16i8,    16 }, // 2*ext+2*pmulhw+mul+sub sequence
508     { ISD::SDIV, MVT::v16i16, 12+2 }, // 2*pmulhw sequence + split.
509     { ISD::SREM, MVT::v16i16, 16+2 }, // 2*pmulhw+mul+sub sequence + split.
510     { ISD::SDIV, MVT::v8i16,     6 }, // pmulhw sequence
511     { ISD::SREM, MVT::v8i16,     8 }, // pmulhw+mul+sub sequence
512     { ISD::UDIV, MVT::v16i16, 12+2 }, // 2*pmulhuw sequence + split.
513     { ISD::UREM, MVT::v16i16, 16+2 }, // 2*pmulhuw+mul+sub sequence + split.
514     { ISD::UDIV, MVT::v8i16,     6 }, // pmulhuw sequence
515     { ISD::UREM, MVT::v8i16,     8 }, // pmulhuw+mul+sub sequence
516     { ISD::SDIV, MVT::v8i32,  38+2 }, // 2*pmuludq sequence + split.
517     { ISD::SREM, MVT::v8i32,  48+2 }, // 2*pmuludq+mul+sub sequence + split.
518     { ISD::SDIV, MVT::v4i32,    19 }, // pmuludq sequence
519     { ISD::SREM, MVT::v4i32,    24 }, // pmuludq+mul+sub sequence
520     { ISD::UDIV, MVT::v8i32,  30+2 }, // 2*pmuludq sequence + split.
521     { ISD::UREM, MVT::v8i32,  40+2 }, // 2*pmuludq+mul+sub sequence + split.
522     { ISD::UDIV, MVT::v4i32,    15 }, // pmuludq sequence
523     { ISD::UREM, MVT::v4i32,    20 }, // pmuludq+mul+sub sequence
524   };
525 
526   if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
527        Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
528       ST->hasSSE2()) {
529     // pmuldq sequence.
530     if (ISD == ISD::SDIV && LT.second == MVT::v8i32 && ST->hasAVX())
531       return LT.first * 32;
532     if (ISD == ISD::SREM && LT.second == MVT::v8i32 && ST->hasAVX())
533       return LT.first * 38;
534     if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
535       return LT.first * 15;
536     if (ISD == ISD::SREM && LT.second == MVT::v4i32 && ST->hasSSE41())
537       return LT.first * 20;
538 
539     if (const auto *Entry = CostTableLookup(SSE2ConstCostTable, ISD, LT.second))
540       return LT.first * Entry->Cost;
541   }
542 
543   static const CostTblEntry AVX512BWShiftCostTable[] = {
544     { ISD::SHL,   MVT::v16i8,      4 }, // extend/vpsllvw/pack sequence.
545     { ISD::SRL,   MVT::v16i8,      4 }, // extend/vpsrlvw/pack sequence.
546     { ISD::SRA,   MVT::v16i8,      4 }, // extend/vpsravw/pack sequence.
547     { ISD::SHL,   MVT::v32i8,      4 }, // extend/vpsllvw/pack sequence.
548     { ISD::SRL,   MVT::v32i8,      4 }, // extend/vpsrlvw/pack sequence.
549     { ISD::SRA,   MVT::v32i8,      6 }, // extend/vpsravw/pack sequence.
550     { ISD::SHL,   MVT::v64i8,      6 }, // extend/vpsllvw/pack sequence.
551     { ISD::SRL,   MVT::v64i8,      7 }, // extend/vpsrlvw/pack sequence.
552     { ISD::SRA,   MVT::v64i8,     15 }, // extend/vpsravw/pack sequence.
553 
554     { ISD::SHL,   MVT::v8i16,      1 }, // vpsllvw
555     { ISD::SRL,   MVT::v8i16,      1 }, // vpsrlvw
556     { ISD::SRA,   MVT::v8i16,      1 }, // vpsravw
557     { ISD::SHL,   MVT::v16i16,     1 }, // vpsllvw
558     { ISD::SRL,   MVT::v16i16,     1 }, // vpsrlvw
559     { ISD::SRA,   MVT::v16i16,     1 }, // vpsravw
560     { ISD::SHL,   MVT::v32i16,     1 }, // vpsllvw
561     { ISD::SRL,   MVT::v32i16,     1 }, // vpsrlvw
562     { ISD::SRA,   MVT::v32i16,     1 }, // vpsravw
563   };
564 
565   if (ST->hasBWI())
566     if (const auto *Entry = CostTableLookup(AVX512BWShiftCostTable, ISD, LT.second))
567       return LT.first * Entry->Cost;
568 
569   static const CostTblEntry AVX2UniformCostTable[] = {
570     // Uniform splats are cheaper for the following instructions.
571     { ISD::SHL,  MVT::v16i16, 1 }, // psllw.
572     { ISD::SRL,  MVT::v16i16, 1 }, // psrlw.
573     { ISD::SRA,  MVT::v16i16, 1 }, // psraw.
574     { ISD::SHL,  MVT::v32i16, 2 }, // 2*psllw.
575     { ISD::SRL,  MVT::v32i16, 2 }, // 2*psrlw.
576     { ISD::SRA,  MVT::v32i16, 2 }, // 2*psraw.
577 
578     { ISD::SHL,  MVT::v8i32,  1 }, // pslld
579     { ISD::SRL,  MVT::v8i32,  1 }, // psrld
580     { ISD::SRA,  MVT::v8i32,  1 }, // psrad
581     { ISD::SHL,  MVT::v4i64,  1 }, // psllq
582     { ISD::SRL,  MVT::v4i64,  1 }, // psrlq
583   };
584 
585   if (ST->hasAVX2() &&
586       ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
587        (Op2Info == TargetTransformInfo::OK_UniformValue))) {
588     if (const auto *Entry =
589             CostTableLookup(AVX2UniformCostTable, ISD, LT.second))
590       return LT.first * Entry->Cost;
591   }
592 
593   static const CostTblEntry SSE2UniformCostTable[] = {
594     // Uniform splats are cheaper for the following instructions.
595     { ISD::SHL,  MVT::v8i16,  1 }, // psllw.
596     { ISD::SHL,  MVT::v4i32,  1 }, // pslld
597     { ISD::SHL,  MVT::v2i64,  1 }, // psllq.
598 
599     { ISD::SRL,  MVT::v8i16,  1 }, // psrlw.
600     { ISD::SRL,  MVT::v4i32,  1 }, // psrld.
601     { ISD::SRL,  MVT::v2i64,  1 }, // psrlq.
602 
603     { ISD::SRA,  MVT::v8i16,  1 }, // psraw.
604     { ISD::SRA,  MVT::v4i32,  1 }, // psrad.
605   };
606 
607   if (ST->hasSSE2() &&
608       ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
609        (Op2Info == TargetTransformInfo::OK_UniformValue))) {
610     if (const auto *Entry =
611             CostTableLookup(SSE2UniformCostTable, ISD, LT.second))
612       return LT.first * Entry->Cost;
613   }
614 
615   static const CostTblEntry AVX512DQCostTable[] = {
616     { ISD::MUL,  MVT::v2i64, 2 }, // pmullq
617     { ISD::MUL,  MVT::v4i64, 2 }, // pmullq
618     { ISD::MUL,  MVT::v8i64, 2 }  // pmullq
619   };
620 
621   // Look for AVX512DQ lowering tricks for custom cases.
622   if (ST->hasDQI())
623     if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD, LT.second))
624       return LT.first * Entry->Cost;
625 
626   static const CostTblEntry AVX512BWCostTable[] = {
627     { ISD::SHL,   MVT::v64i8,     11 }, // vpblendvb sequence.
628     { ISD::SRL,   MVT::v64i8,     11 }, // vpblendvb sequence.
629     { ISD::SRA,   MVT::v64i8,     24 }, // vpblendvb sequence.
630   };
631 
632   // Look for AVX512BW lowering tricks for custom cases.
633   if (ST->hasBWI())
634     if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD, LT.second))
635       return LT.first * Entry->Cost;
636 
637   static const CostTblEntry AVX512CostTable[] = {
638     { ISD::SHL,     MVT::v4i32,      1 },
639     { ISD::SRL,     MVT::v4i32,      1 },
640     { ISD::SRA,     MVT::v4i32,      1 },
641     { ISD::SHL,     MVT::v8i32,      1 },
642     { ISD::SRL,     MVT::v8i32,      1 },
643     { ISD::SRA,     MVT::v8i32,      1 },
644     { ISD::SHL,     MVT::v16i32,     1 },
645     { ISD::SRL,     MVT::v16i32,     1 },
646     { ISD::SRA,     MVT::v16i32,     1 },
647 
648     { ISD::SHL,     MVT::v2i64,      1 },
649     { ISD::SRL,     MVT::v2i64,      1 },
650     { ISD::SHL,     MVT::v4i64,      1 },
651     { ISD::SRL,     MVT::v4i64,      1 },
652     { ISD::SHL,     MVT::v8i64,      1 },
653     { ISD::SRL,     MVT::v8i64,      1 },
654 
655     { ISD::SRA,     MVT::v2i64,      1 },
656     { ISD::SRA,     MVT::v4i64,      1 },
657     { ISD::SRA,     MVT::v8i64,      1 },
658 
659     { ISD::MUL,     MVT::v16i32,     1 }, // pmulld (Skylake from agner.org)
660     { ISD::MUL,     MVT::v8i32,      1 }, // pmulld (Skylake from agner.org)
661     { ISD::MUL,     MVT::v4i32,      1 }, // pmulld (Skylake from agner.org)
662     { ISD::MUL,     MVT::v8i64,      6 }, // 3*pmuludq/3*shift/2*add
663 
664     { ISD::FNEG,    MVT::v8f64,      1 }, // Skylake from http://www.agner.org/
665     { ISD::FADD,    MVT::v8f64,      1 }, // Skylake from http://www.agner.org/
666     { ISD::FSUB,    MVT::v8f64,      1 }, // Skylake from http://www.agner.org/
667     { ISD::FMUL,    MVT::v8f64,      1 }, // Skylake from http://www.agner.org/
668     { ISD::FDIV,    MVT::f64,        4 }, // Skylake from http://www.agner.org/
669     { ISD::FDIV,    MVT::v2f64,      4 }, // Skylake from http://www.agner.org/
670     { ISD::FDIV,    MVT::v4f64,      8 }, // Skylake from http://www.agner.org/
671     { ISD::FDIV,    MVT::v8f64,     16 }, // Skylake from http://www.agner.org/
672 
673     { ISD::FNEG,    MVT::v16f32,     1 }, // Skylake from http://www.agner.org/
674     { ISD::FADD,    MVT::v16f32,     1 }, // Skylake from http://www.agner.org/
675     { ISD::FSUB,    MVT::v16f32,     1 }, // Skylake from http://www.agner.org/
676     { ISD::FMUL,    MVT::v16f32,     1 }, // Skylake from http://www.agner.org/
677     { ISD::FDIV,    MVT::f32,        3 }, // Skylake from http://www.agner.org/
678     { ISD::FDIV,    MVT::v4f32,      3 }, // Skylake from http://www.agner.org/
679     { ISD::FDIV,    MVT::v8f32,      5 }, // Skylake from http://www.agner.org/
680     { ISD::FDIV,    MVT::v16f32,    10 }, // Skylake from http://www.agner.org/
681   };
682 
683   if (ST->hasAVX512())
684     if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
685       return LT.first * Entry->Cost;
686 
687   static const CostTblEntry AVX2ShiftCostTable[] = {
688     // Shifts on vXi64/vXi32 on AVX2 is legal even though we declare to
689     // customize them to detect the cases where shift amount is a scalar one.
690     { ISD::SHL,     MVT::v4i32,    2 }, // vpsllvd (Haswell from agner.org)
691     { ISD::SRL,     MVT::v4i32,    2 }, // vpsrlvd (Haswell from agner.org)
692     { ISD::SRA,     MVT::v4i32,    2 }, // vpsravd (Haswell from agner.org)
693     { ISD::SHL,     MVT::v8i32,    2 }, // vpsllvd (Haswell from agner.org)
694     { ISD::SRL,     MVT::v8i32,    2 }, // vpsrlvd (Haswell from agner.org)
695     { ISD::SRA,     MVT::v8i32,    2 }, // vpsravd (Haswell from agner.org)
696     { ISD::SHL,     MVT::v2i64,    1 }, // vpsllvq (Haswell from agner.org)
697     { ISD::SRL,     MVT::v2i64,    1 }, // vpsrlvq (Haswell from agner.org)
698     { ISD::SHL,     MVT::v4i64,    1 }, // vpsllvq (Haswell from agner.org)
699     { ISD::SRL,     MVT::v4i64,    1 }, // vpsrlvq (Haswell from agner.org)
700   };
701 
702   if (ST->hasAVX512()) {
703     if (ISD == ISD::SHL && LT.second == MVT::v32i16 &&
704         (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
705          Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
706       // On AVX512, a packed v32i16 shift left by a constant build_vector
707       // is lowered into a vector multiply (vpmullw).
708       return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
709                                     Op1Info, Op2Info,
710                                     TargetTransformInfo::OP_None,
711                                     TargetTransformInfo::OP_None);
712   }
713 
714   // Look for AVX2 lowering tricks (XOP is always better at v4i32 shifts).
715   if (ST->hasAVX2() && !(ST->hasXOP() && LT.second == MVT::v4i32)) {
716     if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
717         (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
718          Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
719       // On AVX2, a packed v16i16 shift left by a constant build_vector
720       // is lowered into a vector multiply (vpmullw).
721       return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
722                                     Op1Info, Op2Info,
723                                     TargetTransformInfo::OP_None,
724                                     TargetTransformInfo::OP_None);
725 
726     if (const auto *Entry = CostTableLookup(AVX2ShiftCostTable, ISD, LT.second))
727       return LT.first * Entry->Cost;
728   }
729 
730   static const CostTblEntry XOPShiftCostTable[] = {
731     // 128bit shifts take 1cy, but right shifts require negation beforehand.
732     { ISD::SHL,     MVT::v16i8,    1 },
733     { ISD::SRL,     MVT::v16i8,    2 },
734     { ISD::SRA,     MVT::v16i8,    2 },
735     { ISD::SHL,     MVT::v8i16,    1 },
736     { ISD::SRL,     MVT::v8i16,    2 },
737     { ISD::SRA,     MVT::v8i16,    2 },
738     { ISD::SHL,     MVT::v4i32,    1 },
739     { ISD::SRL,     MVT::v4i32,    2 },
740     { ISD::SRA,     MVT::v4i32,    2 },
741     { ISD::SHL,     MVT::v2i64,    1 },
742     { ISD::SRL,     MVT::v2i64,    2 },
743     { ISD::SRA,     MVT::v2i64,    2 },
744     // 256bit shifts require splitting if AVX2 didn't catch them above.
745     { ISD::SHL,     MVT::v32i8,  2+2 },
746     { ISD::SRL,     MVT::v32i8,  4+2 },
747     { ISD::SRA,     MVT::v32i8,  4+2 },
748     { ISD::SHL,     MVT::v16i16, 2+2 },
749     { ISD::SRL,     MVT::v16i16, 4+2 },
750     { ISD::SRA,     MVT::v16i16, 4+2 },
751     { ISD::SHL,     MVT::v8i32,  2+2 },
752     { ISD::SRL,     MVT::v8i32,  4+2 },
753     { ISD::SRA,     MVT::v8i32,  4+2 },
754     { ISD::SHL,     MVT::v4i64,  2+2 },
755     { ISD::SRL,     MVT::v4i64,  4+2 },
756     { ISD::SRA,     MVT::v4i64,  4+2 },
757   };
758 
759   // Look for XOP lowering tricks.
760   if (ST->hasXOP()) {
761     // If the right shift is constant then we'll fold the negation so
762     // it's as cheap as a left shift.
763     int ShiftISD = ISD;
764     if ((ShiftISD == ISD::SRL || ShiftISD == ISD::SRA) &&
765         (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
766          Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
767       ShiftISD = ISD::SHL;
768     if (const auto *Entry =
769             CostTableLookup(XOPShiftCostTable, ShiftISD, LT.second))
770       return LT.first * Entry->Cost;
771   }
772 
773   static const CostTblEntry SSE2UniformShiftCostTable[] = {
774     // Uniform splats are cheaper for the following instructions.
775     { ISD::SHL,  MVT::v16i16, 2+2 }, // 2*psllw + split.
776     { ISD::SHL,  MVT::v8i32,  2+2 }, // 2*pslld + split.
777     { ISD::SHL,  MVT::v4i64,  2+2 }, // 2*psllq + split.
778 
779     { ISD::SRL,  MVT::v16i16, 2+2 }, // 2*psrlw + split.
780     { ISD::SRL,  MVT::v8i32,  2+2 }, // 2*psrld + split.
781     { ISD::SRL,  MVT::v4i64,  2+2 }, // 2*psrlq + split.
782 
783     { ISD::SRA,  MVT::v16i16, 2+2 }, // 2*psraw + split.
784     { ISD::SRA,  MVT::v8i32,  2+2 }, // 2*psrad + split.
785     { ISD::SRA,  MVT::v2i64,    4 }, // 2*psrad + shuffle.
786     { ISD::SRA,  MVT::v4i64,  8+2 }, // 2*(2*psrad + shuffle) + split.
787   };
788 
789   if (ST->hasSSE2() &&
790       ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
791        (Op2Info == TargetTransformInfo::OK_UniformValue))) {
792 
793     // Handle AVX2 uniform v4i64 ISD::SRA, it's not worth a table.
794     if (ISD == ISD::SRA && LT.second == MVT::v4i64 && ST->hasAVX2())
795       return LT.first * 4; // 2*psrad + shuffle.
796 
797     if (const auto *Entry =
798             CostTableLookup(SSE2UniformShiftCostTable, ISD, LT.second))
799       return LT.first * Entry->Cost;
800   }
801 
802   if (ISD == ISD::SHL &&
803       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
804     MVT VT = LT.second;
805     // Vector shift left by non uniform constant can be lowered
806     // into vector multiply.
807     if (((VT == MVT::v8i16 || VT == MVT::v4i32) && ST->hasSSE2()) ||
808         ((VT == MVT::v16i16 || VT == MVT::v8i32) && ST->hasAVX()))
809       ISD = ISD::MUL;
810   }
811 
812   static const CostTblEntry AVX2CostTable[] = {
813     { ISD::SHL,  MVT::v16i8,      6 }, // vpblendvb sequence.
814     { ISD::SHL,  MVT::v32i8,      6 }, // vpblendvb sequence.
815     { ISD::SHL,  MVT::v64i8,     12 }, // 2*vpblendvb sequence.
816     { ISD::SHL,  MVT::v8i16,      5 }, // extend/vpsrlvd/pack sequence.
817     { ISD::SHL,  MVT::v16i16,     7 }, // extend/vpsrlvd/pack sequence.
818     { ISD::SHL,  MVT::v32i16,    14 }, // 2*extend/vpsrlvd/pack sequence.
819 
820     { ISD::SRL,  MVT::v16i8,      6 }, // vpblendvb sequence.
821     { ISD::SRL,  MVT::v32i8,      6 }, // vpblendvb sequence.
822     { ISD::SRL,  MVT::v64i8,     12 }, // 2*vpblendvb sequence.
823     { ISD::SRL,  MVT::v8i16,      5 }, // extend/vpsrlvd/pack sequence.
824     { ISD::SRL,  MVT::v16i16,     7 }, // extend/vpsrlvd/pack sequence.
825     { ISD::SRL,  MVT::v32i16,    14 }, // 2*extend/vpsrlvd/pack sequence.
826 
827     { ISD::SRA,  MVT::v16i8,     17 }, // vpblendvb sequence.
828     { ISD::SRA,  MVT::v32i8,     17 }, // vpblendvb sequence.
829     { ISD::SRA,  MVT::v64i8,     34 }, // 2*vpblendvb sequence.
830     { ISD::SRA,  MVT::v8i16,      5 }, // extend/vpsravd/pack sequence.
831     { ISD::SRA,  MVT::v16i16,     7 }, // extend/vpsravd/pack sequence.
832     { ISD::SRA,  MVT::v32i16,    14 }, // 2*extend/vpsravd/pack sequence.
833     { ISD::SRA,  MVT::v2i64,      2 }, // srl/xor/sub sequence.
834     { ISD::SRA,  MVT::v4i64,      2 }, // srl/xor/sub sequence.
835 
836     { ISD::SUB,  MVT::v32i8,      1 }, // psubb
837     { ISD::ADD,  MVT::v32i8,      1 }, // paddb
838     { ISD::SUB,  MVT::v16i16,     1 }, // psubw
839     { ISD::ADD,  MVT::v16i16,     1 }, // paddw
840     { ISD::SUB,  MVT::v8i32,      1 }, // psubd
841     { ISD::ADD,  MVT::v8i32,      1 }, // paddd
842     { ISD::SUB,  MVT::v4i64,      1 }, // psubq
843     { ISD::ADD,  MVT::v4i64,      1 }, // paddq
844 
845     { ISD::MUL,  MVT::v16i16,     1 }, // pmullw
846     { ISD::MUL,  MVT::v8i32,      2 }, // pmulld (Haswell from agner.org)
847     { ISD::MUL,  MVT::v4i64,      6 }, // 3*pmuludq/3*shift/2*add
848 
849     { ISD::FNEG, MVT::v4f64,      1 }, // Haswell from http://www.agner.org/
850     { ISD::FNEG, MVT::v8f32,      1 }, // Haswell from http://www.agner.org/
851     { ISD::FADD, MVT::v4f64,      1 }, // Haswell from http://www.agner.org/
852     { ISD::FADD, MVT::v8f32,      1 }, // Haswell from http://www.agner.org/
853     { ISD::FSUB, MVT::v4f64,      1 }, // Haswell from http://www.agner.org/
854     { ISD::FSUB, MVT::v8f32,      1 }, // Haswell from http://www.agner.org/
855     { ISD::FMUL, MVT::f64,        1 }, // Haswell from http://www.agner.org/
856     { ISD::FMUL, MVT::v2f64,      1 }, // Haswell from http://www.agner.org/
857     { ISD::FMUL, MVT::v4f64,      1 }, // Haswell from http://www.agner.org/
858     { ISD::FMUL, MVT::v8f32,      1 }, // Haswell from http://www.agner.org/
859 
860     { ISD::FDIV, MVT::f32,        7 }, // Haswell from http://www.agner.org/
861     { ISD::FDIV, MVT::v4f32,      7 }, // Haswell from http://www.agner.org/
862     { ISD::FDIV, MVT::v8f32,     14 }, // Haswell from http://www.agner.org/
863     { ISD::FDIV, MVT::f64,       14 }, // Haswell from http://www.agner.org/
864     { ISD::FDIV, MVT::v2f64,     14 }, // Haswell from http://www.agner.org/
865     { ISD::FDIV, MVT::v4f64,     28 }, // Haswell from http://www.agner.org/
866   };
867 
868   // Look for AVX2 lowering tricks for custom cases.
869   if (ST->hasAVX2())
870     if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second))
871       return LT.first * Entry->Cost;
872 
873   static const CostTblEntry AVX1CostTable[] = {
874     // We don't have to scalarize unsupported ops. We can issue two half-sized
875     // operations and we only need to extract the upper YMM half.
876     // Two ops + 1 extract + 1 insert = 4.
877     { ISD::MUL,     MVT::v16i16,     4 },
878     { ISD::MUL,     MVT::v8i32,      5 }, // BTVER2 from http://www.agner.org/
879     { ISD::MUL,     MVT::v4i64,     12 },
880 
881     { ISD::SUB,     MVT::v32i8,      4 },
882     { ISD::ADD,     MVT::v32i8,      4 },
883     { ISD::SUB,     MVT::v16i16,     4 },
884     { ISD::ADD,     MVT::v16i16,     4 },
885     { ISD::SUB,     MVT::v8i32,      4 },
886     { ISD::ADD,     MVT::v8i32,      4 },
887     { ISD::SUB,     MVT::v4i64,      4 },
888     { ISD::ADD,     MVT::v4i64,      4 },
889 
890     { ISD::SHL,     MVT::v32i8,     22 }, // pblendvb sequence + split.
891     { ISD::SHL,     MVT::v8i16,      6 }, // pblendvb sequence.
892     { ISD::SHL,     MVT::v16i16,    13 }, // pblendvb sequence + split.
893     { ISD::SHL,     MVT::v4i32,      3 }, // pslld/paddd/cvttps2dq/pmulld
894     { ISD::SHL,     MVT::v8i32,      9 }, // pslld/paddd/cvttps2dq/pmulld + split
895     { ISD::SHL,     MVT::v2i64,      2 }, // Shift each lane + blend.
896     { ISD::SHL,     MVT::v4i64,      6 }, // Shift each lane + blend + split.
897 
898     { ISD::SRL,     MVT::v32i8,     23 }, // pblendvb sequence + split.
899     { ISD::SRL,     MVT::v16i16,    28 }, // pblendvb sequence + split.
900     { ISD::SRL,     MVT::v4i32,      6 }, // Shift each lane + blend.
901     { ISD::SRL,     MVT::v8i32,     14 }, // Shift each lane + blend + split.
902     { ISD::SRL,     MVT::v2i64,      2 }, // Shift each lane + blend.
903     { ISD::SRL,     MVT::v4i64,      6 }, // Shift each lane + blend + split.
904 
905     { ISD::SRA,     MVT::v32i8,     44 }, // pblendvb sequence + split.
906     { ISD::SRA,     MVT::v16i16,    28 }, // pblendvb sequence + split.
907     { ISD::SRA,     MVT::v4i32,      6 }, // Shift each lane + blend.
908     { ISD::SRA,     MVT::v8i32,     14 }, // Shift each lane + blend + split.
909     { ISD::SRA,     MVT::v2i64,      5 }, // Shift each lane + blend.
910     { ISD::SRA,     MVT::v4i64,     12 }, // Shift each lane + blend + split.
911 
912     { ISD::FNEG,    MVT::v4f64,      2 }, // BTVER2 from http://www.agner.org/
913     { ISD::FNEG,    MVT::v8f32,      2 }, // BTVER2 from http://www.agner.org/
914 
915     { ISD::FMUL,    MVT::f64,        2 }, // BTVER2 from http://www.agner.org/
916     { ISD::FMUL,    MVT::v2f64,      2 }, // BTVER2 from http://www.agner.org/
917     { ISD::FMUL,    MVT::v4f64,      4 }, // BTVER2 from http://www.agner.org/
918 
919     { ISD::FDIV,    MVT::f32,       14 }, // SNB from http://www.agner.org/
920     { ISD::FDIV,    MVT::v4f32,     14 }, // SNB from http://www.agner.org/
921     { ISD::FDIV,    MVT::v8f32,     28 }, // SNB from http://www.agner.org/
922     { ISD::FDIV,    MVT::f64,       22 }, // SNB from http://www.agner.org/
923     { ISD::FDIV,    MVT::v2f64,     22 }, // SNB from http://www.agner.org/
924     { ISD::FDIV,    MVT::v4f64,     44 }, // SNB from http://www.agner.org/
925   };
926 
927   if (ST->hasAVX())
928     if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, LT.second))
929       return LT.first * Entry->Cost;
930 
931   static const CostTblEntry SSE42CostTable[] = {
932     { ISD::FADD, MVT::f64,     1 }, // Nehalem from http://www.agner.org/
933     { ISD::FADD, MVT::f32,     1 }, // Nehalem from http://www.agner.org/
934     { ISD::FADD, MVT::v2f64,   1 }, // Nehalem from http://www.agner.org/
935     { ISD::FADD, MVT::v4f32,   1 }, // Nehalem from http://www.agner.org/
936 
937     { ISD::FSUB, MVT::f64,     1 }, // Nehalem from http://www.agner.org/
938     { ISD::FSUB, MVT::f32 ,    1 }, // Nehalem from http://www.agner.org/
939     { ISD::FSUB, MVT::v2f64,   1 }, // Nehalem from http://www.agner.org/
940     { ISD::FSUB, MVT::v4f32,   1 }, // Nehalem from http://www.agner.org/
941 
942     { ISD::FMUL, MVT::f64,     1 }, // Nehalem from http://www.agner.org/
943     { ISD::FMUL, MVT::f32,     1 }, // Nehalem from http://www.agner.org/
944     { ISD::FMUL, MVT::v2f64,   1 }, // Nehalem from http://www.agner.org/
945     { ISD::FMUL, MVT::v4f32,   1 }, // Nehalem from http://www.agner.org/
946 
947     { ISD::FDIV,  MVT::f32,   14 }, // Nehalem from http://www.agner.org/
948     { ISD::FDIV,  MVT::v4f32, 14 }, // Nehalem from http://www.agner.org/
949     { ISD::FDIV,  MVT::f64,   22 }, // Nehalem from http://www.agner.org/
950     { ISD::FDIV,  MVT::v2f64, 22 }, // Nehalem from http://www.agner.org/
951 
952     { ISD::MUL,   MVT::v2i64,  6 }  // 3*pmuludq/3*shift/2*add
953   };
954 
955   if (ST->hasSSE42())
956     if (const auto *Entry = CostTableLookup(SSE42CostTable, ISD, LT.second))
957       return LT.first * Entry->Cost;
958 
959   static const CostTblEntry SSE41CostTable[] = {
960     { ISD::SHL,  MVT::v16i8,      10 }, // pblendvb sequence.
961     { ISD::SHL,  MVT::v8i16,      11 }, // pblendvb sequence.
962     { ISD::SHL,  MVT::v4i32,       4 }, // pslld/paddd/cvttps2dq/pmulld
963 
964     { ISD::SRL,  MVT::v16i8,      11 }, // pblendvb sequence.
965     { ISD::SRL,  MVT::v8i16,      13 }, // pblendvb sequence.
966     { ISD::SRL,  MVT::v4i32,      16 }, // Shift each lane + blend.
967 
968     { ISD::SRA,  MVT::v16i8,      21 }, // pblendvb sequence.
969     { ISD::SRA,  MVT::v8i16,      13 }, // pblendvb sequence.
970 
971     { ISD::MUL,  MVT::v4i32,       2 }  // pmulld (Nehalem from agner.org)
972   };
973 
974   if (ST->hasSSE41())
975     if (const auto *Entry = CostTableLookup(SSE41CostTable, ISD, LT.second))
976       return LT.first * Entry->Cost;
977 
978   static const CostTblEntry SSE2CostTable[] = {
979     // We don't correctly identify costs of casts because they are marked as
980     // custom.
981     { ISD::SHL,  MVT::v16i8,      13 }, // cmpgtb sequence.
982     { ISD::SHL,  MVT::v8i16,      25 }, // cmpgtw sequence.
983     { ISD::SHL,  MVT::v4i32,      16 }, // pslld/paddd/cvttps2dq/pmuludq.
984     { ISD::SHL,  MVT::v2i64,       4 }, // splat+shuffle sequence.
985 
986     { ISD::SRL,  MVT::v16i8,      14 }, // cmpgtb sequence.
987     { ISD::SRL,  MVT::v8i16,      16 }, // cmpgtw sequence.
988     { ISD::SRL,  MVT::v4i32,      12 }, // Shift each lane + blend.
989     { ISD::SRL,  MVT::v2i64,       4 }, // splat+shuffle sequence.
990 
991     { ISD::SRA,  MVT::v16i8,      27 }, // unpacked cmpgtb sequence.
992     { ISD::SRA,  MVT::v8i16,      16 }, // cmpgtw sequence.
993     { ISD::SRA,  MVT::v4i32,      12 }, // Shift each lane + blend.
994     { ISD::SRA,  MVT::v2i64,       8 }, // srl/xor/sub splat+shuffle sequence.
995 
996     { ISD::MUL,  MVT::v8i16,       1 }, // pmullw
997     { ISD::MUL,  MVT::v4i32,       6 }, // 3*pmuludq/4*shuffle
998     { ISD::MUL,  MVT::v2i64,       8 }, // 3*pmuludq/3*shift/2*add
999 
1000     { ISD::FDIV, MVT::f32,        23 }, // Pentium IV from http://www.agner.org/
1001     { ISD::FDIV, MVT::v4f32,      39 }, // Pentium IV from http://www.agner.org/
1002     { ISD::FDIV, MVT::f64,        38 }, // Pentium IV from http://www.agner.org/
1003     { ISD::FDIV, MVT::v2f64,      69 }, // Pentium IV from http://www.agner.org/
1004 
1005     { ISD::FNEG, MVT::f32,         1 }, // Pentium IV from http://www.agner.org/
1006     { ISD::FNEG, MVT::f64,         1 }, // Pentium IV from http://www.agner.org/
1007     { ISD::FNEG, MVT::v4f32,       1 }, // Pentium IV from http://www.agner.org/
1008     { ISD::FNEG, MVT::v2f64,       1 }, // Pentium IV from http://www.agner.org/
1009 
1010     { ISD::FADD, MVT::f32,         2 }, // Pentium IV from http://www.agner.org/
1011     { ISD::FADD, MVT::f64,         2 }, // Pentium IV from http://www.agner.org/
1012 
1013     { ISD::FSUB, MVT::f32,         2 }, // Pentium IV from http://www.agner.org/
1014     { ISD::FSUB, MVT::f64,         2 }, // Pentium IV from http://www.agner.org/
1015   };
1016 
1017   if (ST->hasSSE2())
1018     if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
1019       return LT.first * Entry->Cost;
1020 
1021   static const CostTblEntry SSE1CostTable[] = {
1022     { ISD::FDIV, MVT::f32,   17 }, // Pentium III from http://www.agner.org/
1023     { ISD::FDIV, MVT::v4f32, 34 }, // Pentium III from http://www.agner.org/
1024 
1025     { ISD::FNEG, MVT::f32,    2 }, // Pentium III from http://www.agner.org/
1026     { ISD::FNEG, MVT::v4f32,  2 }, // Pentium III from http://www.agner.org/
1027 
1028     { ISD::FADD, MVT::f32,    1 }, // Pentium III from http://www.agner.org/
1029     { ISD::FADD, MVT::v4f32,  2 }, // Pentium III from http://www.agner.org/
1030 
1031     { ISD::FSUB, MVT::f32,    1 }, // Pentium III from http://www.agner.org/
1032     { ISD::FSUB, MVT::v4f32,  2 }, // Pentium III from http://www.agner.org/
1033   };
1034 
1035   if (ST->hasSSE1())
1036     if (const auto *Entry = CostTableLookup(SSE1CostTable, ISD, LT.second))
1037       return LT.first * Entry->Cost;
1038 
1039   static const CostTblEntry X64CostTbl[] = { // 64-bit targets
1040     { ISD::ADD,  MVT::i64,    1 }, // Core (Merom) from http://www.agner.org/
1041     { ISD::SUB,  MVT::i64,    1 }, // Core (Merom) from http://www.agner.org/
1042     { ISD::MUL,  MVT::i64,    2 }, // Nehalem from http://www.agner.org/
1043   };
1044 
1045   if (ST->is64Bit())
1046     if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, LT.second))
1047       return LT.first * Entry->Cost;
1048 
1049   static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
1050     { ISD::ADD,  MVT::i8,    1 }, // Pentium III from http://www.agner.org/
1051     { ISD::ADD,  MVT::i16,   1 }, // Pentium III from http://www.agner.org/
1052     { ISD::ADD,  MVT::i32,   1 }, // Pentium III from http://www.agner.org/
1053 
1054     { ISD::SUB,  MVT::i8,    1 }, // Pentium III from http://www.agner.org/
1055     { ISD::SUB,  MVT::i16,   1 }, // Pentium III from http://www.agner.org/
1056     { ISD::SUB,  MVT::i32,   1 }, // Pentium III from http://www.agner.org/
1057   };
1058 
1059   if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, LT.second))
1060     return LT.first * Entry->Cost;
1061 
1062   // It is not a good idea to vectorize division. We have to scalarize it and
1063   // in the process we will often end up having to spilling regular
1064   // registers. The overhead of division is going to dominate most kernels
1065   // anyways so try hard to prevent vectorization of division - it is
1066   // generally a bad idea. Assume somewhat arbitrarily that we have to be able
1067   // to hide "20 cycles" for each lane.
1068   if (LT.second.isVector() && (ISD == ISD::SDIV || ISD == ISD::SREM ||
1069                                ISD == ISD::UDIV || ISD == ISD::UREM)) {
1070     InstructionCost ScalarCost = getArithmeticInstrCost(
1071         Opcode, Ty->getScalarType(), CostKind, Op1Info, Op2Info,
1072         TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
1073     return 20 * LT.first * LT.second.getVectorNumElements() * ScalarCost;
1074   }
1075 
1076   // Fallback to the default implementation.
1077   return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info);
1078 }
1079 
1080 InstructionCost X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
1081                                            VectorType *BaseTp,
1082                                            ArrayRef<int> Mask, int Index,
1083                                            VectorType *SubTp) {
1084   // 64-bit packed float vectors (v2f32) are widened to type v4f32.
1085   // 64-bit packed integer vectors (v2i32) are widened to type v4i32.
1086   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, BaseTp);
1087 
1088   Kind = improveShuffleKindFromMask(Kind, Mask);
1089   // Treat Transpose as 2-op shuffles - there's no difference in lowering.
1090   if (Kind == TTI::SK_Transpose)
1091     Kind = TTI::SK_PermuteTwoSrc;
1092 
1093   // For Broadcasts we are splatting the first element from the first input
1094   // register, so only need to reference that input and all the output
1095   // registers are the same.
1096   if (Kind == TTI::SK_Broadcast)
1097     LT.first = 1;
1098 
1099   // Subvector extractions are free if they start at the beginning of a
1100   // vector and cheap if the subvectors are aligned.
1101   if (Kind == TTI::SK_ExtractSubvector && LT.second.isVector()) {
1102     int NumElts = LT.second.getVectorNumElements();
1103     if ((Index % NumElts) == 0)
1104       return 0;
1105     std::pair<InstructionCost, MVT> SubLT =
1106         TLI->getTypeLegalizationCost(DL, SubTp);
1107     if (SubLT.second.isVector()) {
1108       int NumSubElts = SubLT.second.getVectorNumElements();
1109       if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
1110         return SubLT.first;
1111       // Handle some cases for widening legalization. For now we only handle
1112       // cases where the original subvector was naturally aligned and evenly
1113       // fit in its legalized subvector type.
1114       // FIXME: Remove some of the alignment restrictions.
1115       // FIXME: We can use permq for 64-bit or larger extracts from 256-bit
1116       // vectors.
1117       int OrigSubElts = cast<FixedVectorType>(SubTp)->getNumElements();
1118       if (NumSubElts > OrigSubElts && (Index % OrigSubElts) == 0 &&
1119           (NumSubElts % OrigSubElts) == 0 &&
1120           LT.second.getVectorElementType() ==
1121               SubLT.second.getVectorElementType() &&
1122           LT.second.getVectorElementType().getSizeInBits() ==
1123               BaseTp->getElementType()->getPrimitiveSizeInBits()) {
1124         assert(NumElts >= NumSubElts && NumElts > OrigSubElts &&
1125                "Unexpected number of elements!");
1126         auto *VecTy = FixedVectorType::get(BaseTp->getElementType(),
1127                                            LT.second.getVectorNumElements());
1128         auto *SubTy = FixedVectorType::get(BaseTp->getElementType(),
1129                                            SubLT.second.getVectorNumElements());
1130         int ExtractIndex = alignDown((Index % NumElts), NumSubElts);
1131         InstructionCost ExtractCost = getShuffleCost(
1132             TTI::SK_ExtractSubvector, VecTy, None, ExtractIndex, SubTy);
1133 
1134         // If the original size is 32-bits or more, we can use pshufd. Otherwise
1135         // if we have SSSE3 we can use pshufb.
1136         if (SubTp->getPrimitiveSizeInBits() >= 32 || ST->hasSSSE3())
1137           return ExtractCost + 1; // pshufd or pshufb
1138 
1139         assert(SubTp->getPrimitiveSizeInBits() == 16 &&
1140                "Unexpected vector size");
1141 
1142         return ExtractCost + 2; // worst case pshufhw + pshufd
1143       }
1144     }
1145   }
1146 
1147   // Subvector insertions are cheap if the subvectors are aligned.
1148   // Note that in general, the insertion starting at the beginning of a vector
1149   // isn't free, because we need to preserve the rest of the wide vector.
1150   if (Kind == TTI::SK_InsertSubvector && LT.second.isVector()) {
1151     int NumElts = LT.second.getVectorNumElements();
1152     std::pair<InstructionCost, MVT> SubLT =
1153         TLI->getTypeLegalizationCost(DL, SubTp);
1154     if (SubLT.second.isVector()) {
1155       int NumSubElts = SubLT.second.getVectorNumElements();
1156       if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
1157         return SubLT.first;
1158     }
1159 
1160     // If the insertion isn't aligned, treat it like a 2-op shuffle.
1161     Kind = TTI::SK_PermuteTwoSrc;
1162   }
1163 
1164   // Handle some common (illegal) sub-vector types as they are often very cheap
1165   // to shuffle even on targets without PSHUFB.
1166   EVT VT = TLI->getValueType(DL, BaseTp);
1167   if (VT.isSimple() && VT.isVector() && VT.getSizeInBits() < 128 &&
1168       !ST->hasSSSE3()) {
1169      static const CostTblEntry SSE2SubVectorShuffleTbl[] = {
1170       {TTI::SK_Broadcast,        MVT::v4i16, 1}, // pshuflw
1171       {TTI::SK_Broadcast,        MVT::v2i16, 1}, // pshuflw
1172       {TTI::SK_Broadcast,        MVT::v8i8,  2}, // punpck/pshuflw
1173       {TTI::SK_Broadcast,        MVT::v4i8,  2}, // punpck/pshuflw
1174       {TTI::SK_Broadcast,        MVT::v2i8,  1}, // punpck
1175 
1176       {TTI::SK_Reverse,          MVT::v4i16, 1}, // pshuflw
1177       {TTI::SK_Reverse,          MVT::v2i16, 1}, // pshuflw
1178       {TTI::SK_Reverse,          MVT::v4i8,  3}, // punpck/pshuflw/packus
1179       {TTI::SK_Reverse,          MVT::v2i8,  1}, // punpck
1180 
1181       {TTI::SK_PermuteTwoSrc,    MVT::v4i16, 2}, // punpck/pshuflw
1182       {TTI::SK_PermuteTwoSrc,    MVT::v2i16, 2}, // punpck/pshuflw
1183       {TTI::SK_PermuteTwoSrc,    MVT::v8i8,  7}, // punpck/pshuflw
1184       {TTI::SK_PermuteTwoSrc,    MVT::v4i8,  4}, // punpck/pshuflw
1185       {TTI::SK_PermuteTwoSrc,    MVT::v2i8,  2}, // punpck
1186 
1187       {TTI::SK_PermuteSingleSrc, MVT::v4i16, 1}, // pshuflw
1188       {TTI::SK_PermuteSingleSrc, MVT::v2i16, 1}, // pshuflw
1189       {TTI::SK_PermuteSingleSrc, MVT::v8i8,  5}, // punpck/pshuflw
1190       {TTI::SK_PermuteSingleSrc, MVT::v4i8,  3}, // punpck/pshuflw
1191       {TTI::SK_PermuteSingleSrc, MVT::v2i8,  1}, // punpck
1192     };
1193 
1194     if (ST->hasSSE2())
1195       if (const auto *Entry =
1196               CostTableLookup(SSE2SubVectorShuffleTbl, Kind, VT.getSimpleVT()))
1197         return Entry->Cost;
1198   }
1199 
1200   // We are going to permute multiple sources and the result will be in multiple
1201   // destinations. Providing an accurate cost only for splits where the element
1202   // type remains the same.
1203   if (Kind == TTI::SK_PermuteSingleSrc && LT.first != 1) {
1204     MVT LegalVT = LT.second;
1205     if (LegalVT.isVector() &&
1206         LegalVT.getVectorElementType().getSizeInBits() ==
1207             BaseTp->getElementType()->getPrimitiveSizeInBits() &&
1208         LegalVT.getVectorNumElements() <
1209             cast<FixedVectorType>(BaseTp)->getNumElements()) {
1210 
1211       unsigned VecTySize = DL.getTypeStoreSize(BaseTp);
1212       unsigned LegalVTSize = LegalVT.getStoreSize();
1213       // Number of source vectors after legalization:
1214       unsigned NumOfSrcs = (VecTySize + LegalVTSize - 1) / LegalVTSize;
1215       // Number of destination vectors after legalization:
1216       InstructionCost NumOfDests = LT.first;
1217 
1218       auto *SingleOpTy = FixedVectorType::get(BaseTp->getElementType(),
1219                                               LegalVT.getVectorNumElements());
1220 
1221       InstructionCost NumOfShuffles = (NumOfSrcs - 1) * NumOfDests;
1222       return NumOfShuffles * getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy,
1223                                             None, 0, nullptr);
1224     }
1225 
1226     return BaseT::getShuffleCost(Kind, BaseTp, Mask, Index, SubTp);
1227   }
1228 
1229   // For 2-input shuffles, we must account for splitting the 2 inputs into many.
1230   if (Kind == TTI::SK_PermuteTwoSrc && LT.first != 1) {
1231     // We assume that source and destination have the same vector type.
1232     InstructionCost NumOfDests = LT.first;
1233     InstructionCost NumOfShufflesPerDest = LT.first * 2 - 1;
1234     LT.first = NumOfDests * NumOfShufflesPerDest;
1235   }
1236 
1237   static const CostTblEntry AVX512FP16ShuffleTbl[] = {
1238       {TTI::SK_Broadcast, MVT::v32f16, 1}, // vpbroadcastw
1239       {TTI::SK_Broadcast, MVT::v16f16, 1}, // vpbroadcastw
1240       {TTI::SK_Broadcast, MVT::v8f16, 1},  // vpbroadcastw
1241 
1242       {TTI::SK_Reverse, MVT::v32f16, 2}, // vpermw
1243       {TTI::SK_Reverse, MVT::v16f16, 2}, // vpermw
1244       {TTI::SK_Reverse, MVT::v8f16, 1},  // vpshufb
1245 
1246       {TTI::SK_PermuteSingleSrc, MVT::v32f16, 2}, // vpermw
1247       {TTI::SK_PermuteSingleSrc, MVT::v16f16, 2}, // vpermw
1248       {TTI::SK_PermuteSingleSrc, MVT::v8f16, 1},  // vpshufb
1249 
1250       {TTI::SK_PermuteTwoSrc, MVT::v32f16, 2}, // vpermt2w
1251       {TTI::SK_PermuteTwoSrc, MVT::v16f16, 2}, // vpermt2w
1252       {TTI::SK_PermuteTwoSrc, MVT::v8f16, 2}   // vpermt2w
1253   };
1254 
1255   if (!ST->useSoftFloat() && ST->hasFP16())
1256     if (const auto *Entry =
1257             CostTableLookup(AVX512FP16ShuffleTbl, Kind, LT.second))
1258       return LT.first * Entry->Cost;
1259 
1260   static const CostTblEntry AVX512VBMIShuffleTbl[] = {
1261       {TTI::SK_Reverse, MVT::v64i8, 1}, // vpermb
1262       {TTI::SK_Reverse, MVT::v32i8, 1}, // vpermb
1263 
1264       {TTI::SK_PermuteSingleSrc, MVT::v64i8, 1}, // vpermb
1265       {TTI::SK_PermuteSingleSrc, MVT::v32i8, 1}, // vpermb
1266 
1267       {TTI::SK_PermuteTwoSrc, MVT::v64i8, 2}, // vpermt2b
1268       {TTI::SK_PermuteTwoSrc, MVT::v32i8, 2}, // vpermt2b
1269       {TTI::SK_PermuteTwoSrc, MVT::v16i8, 2}  // vpermt2b
1270   };
1271 
1272   if (ST->hasVBMI())
1273     if (const auto *Entry =
1274             CostTableLookup(AVX512VBMIShuffleTbl, Kind, LT.second))
1275       return LT.first * Entry->Cost;
1276 
1277   static const CostTblEntry AVX512BWShuffleTbl[] = {
1278       {TTI::SK_Broadcast, MVT::v32i16, 1}, // vpbroadcastw
1279       {TTI::SK_Broadcast, MVT::v64i8, 1},  // vpbroadcastb
1280 
1281       {TTI::SK_Reverse, MVT::v32i16, 2}, // vpermw
1282       {TTI::SK_Reverse, MVT::v16i16, 2}, // vpermw
1283       {TTI::SK_Reverse, MVT::v64i8, 2},  // pshufb + vshufi64x2
1284 
1285       {TTI::SK_PermuteSingleSrc, MVT::v32i16, 2}, // vpermw
1286       {TTI::SK_PermuteSingleSrc, MVT::v16i16, 2}, // vpermw
1287       {TTI::SK_PermuteSingleSrc, MVT::v64i8, 8},  // extend to v32i16
1288 
1289       {TTI::SK_PermuteTwoSrc, MVT::v32i16, 2}, // vpermt2w
1290       {TTI::SK_PermuteTwoSrc, MVT::v16i16, 2}, // vpermt2w
1291       {TTI::SK_PermuteTwoSrc, MVT::v8i16, 2},  // vpermt2w
1292       {TTI::SK_PermuteTwoSrc, MVT::v64i8, 19}, // 6 * v32i8 + 1
1293 
1294       {TTI::SK_Select, MVT::v32i16, 1}, // vblendmw
1295       {TTI::SK_Select, MVT::v64i8,  1}, // vblendmb
1296   };
1297 
1298   if (ST->hasBWI())
1299     if (const auto *Entry =
1300             CostTableLookup(AVX512BWShuffleTbl, Kind, LT.second))
1301       return LT.first * Entry->Cost;
1302 
1303   static const CostTblEntry AVX512ShuffleTbl[] = {
1304       {TTI::SK_Broadcast, MVT::v8f64, 1},  // vbroadcastpd
1305       {TTI::SK_Broadcast, MVT::v16f32, 1}, // vbroadcastps
1306       {TTI::SK_Broadcast, MVT::v8i64, 1},  // vpbroadcastq
1307       {TTI::SK_Broadcast, MVT::v16i32, 1}, // vpbroadcastd
1308       {TTI::SK_Broadcast, MVT::v32i16, 1}, // vpbroadcastw
1309       {TTI::SK_Broadcast, MVT::v64i8, 1},  // vpbroadcastb
1310 
1311       {TTI::SK_Reverse, MVT::v8f64, 1},  // vpermpd
1312       {TTI::SK_Reverse, MVT::v16f32, 1}, // vpermps
1313       {TTI::SK_Reverse, MVT::v8i64, 1},  // vpermq
1314       {TTI::SK_Reverse, MVT::v16i32, 1}, // vpermd
1315       {TTI::SK_Reverse, MVT::v32i16, 7}, // per mca
1316       {TTI::SK_Reverse, MVT::v64i8,  7}, // per mca
1317 
1318       {TTI::SK_PermuteSingleSrc, MVT::v8f64, 1},  // vpermpd
1319       {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1},  // vpermpd
1320       {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1},  // vpermpd
1321       {TTI::SK_PermuteSingleSrc, MVT::v16f32, 1}, // vpermps
1322       {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1},  // vpermps
1323       {TTI::SK_PermuteSingleSrc, MVT::v4f32, 1},  // vpermps
1324       {TTI::SK_PermuteSingleSrc, MVT::v8i64, 1},  // vpermq
1325       {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1},  // vpermq
1326       {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1},  // vpermq
1327       {TTI::SK_PermuteSingleSrc, MVT::v16i32, 1}, // vpermd
1328       {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1},  // vpermd
1329       {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1},  // vpermd
1330       {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1},  // pshufb
1331 
1332       {TTI::SK_PermuteTwoSrc, MVT::v8f64, 1},  // vpermt2pd
1333       {TTI::SK_PermuteTwoSrc, MVT::v16f32, 1}, // vpermt2ps
1334       {TTI::SK_PermuteTwoSrc, MVT::v8i64, 1},  // vpermt2q
1335       {TTI::SK_PermuteTwoSrc, MVT::v16i32, 1}, // vpermt2d
1336       {TTI::SK_PermuteTwoSrc, MVT::v4f64, 1},  // vpermt2pd
1337       {TTI::SK_PermuteTwoSrc, MVT::v8f32, 1},  // vpermt2ps
1338       {TTI::SK_PermuteTwoSrc, MVT::v4i64, 1},  // vpermt2q
1339       {TTI::SK_PermuteTwoSrc, MVT::v8i32, 1},  // vpermt2d
1340       {TTI::SK_PermuteTwoSrc, MVT::v2f64, 1},  // vpermt2pd
1341       {TTI::SK_PermuteTwoSrc, MVT::v4f32, 1},  // vpermt2ps
1342       {TTI::SK_PermuteTwoSrc, MVT::v2i64, 1},  // vpermt2q
1343       {TTI::SK_PermuteTwoSrc, MVT::v4i32, 1},  // vpermt2d
1344 
1345       // FIXME: This just applies the type legalization cost rules above
1346       // assuming these completely split.
1347       {TTI::SK_PermuteSingleSrc, MVT::v32i16, 14},
1348       {TTI::SK_PermuteSingleSrc, MVT::v64i8,  14},
1349       {TTI::SK_PermuteTwoSrc,    MVT::v32i16, 42},
1350       {TTI::SK_PermuteTwoSrc,    MVT::v64i8,  42},
1351 
1352       {TTI::SK_Select, MVT::v32i16, 1}, // vpternlogq
1353       {TTI::SK_Select, MVT::v64i8,  1}, // vpternlogq
1354       {TTI::SK_Select, MVT::v8f64,  1}, // vblendmpd
1355       {TTI::SK_Select, MVT::v16f32, 1}, // vblendmps
1356       {TTI::SK_Select, MVT::v8i64,  1}, // vblendmq
1357       {TTI::SK_Select, MVT::v16i32, 1}, // vblendmd
1358   };
1359 
1360   if (ST->hasAVX512())
1361     if (const auto *Entry = CostTableLookup(AVX512ShuffleTbl, Kind, LT.second))
1362       return LT.first * Entry->Cost;
1363 
1364   static const CostTblEntry AVX2ShuffleTbl[] = {
1365       {TTI::SK_Broadcast, MVT::v4f64, 1},  // vbroadcastpd
1366       {TTI::SK_Broadcast, MVT::v8f32, 1},  // vbroadcastps
1367       {TTI::SK_Broadcast, MVT::v4i64, 1},  // vpbroadcastq
1368       {TTI::SK_Broadcast, MVT::v8i32, 1},  // vpbroadcastd
1369       {TTI::SK_Broadcast, MVT::v16i16, 1}, // vpbroadcastw
1370       {TTI::SK_Broadcast, MVT::v32i8, 1},  // vpbroadcastb
1371 
1372       {TTI::SK_Reverse, MVT::v4f64, 1},  // vpermpd
1373       {TTI::SK_Reverse, MVT::v8f32, 1},  // vpermps
1374       {TTI::SK_Reverse, MVT::v4i64, 1},  // vpermq
1375       {TTI::SK_Reverse, MVT::v8i32, 1},  // vpermd
1376       {TTI::SK_Reverse, MVT::v16i16, 2}, // vperm2i128 + pshufb
1377       {TTI::SK_Reverse, MVT::v32i8, 2},  // vperm2i128 + pshufb
1378 
1379       {TTI::SK_Select, MVT::v16i16, 1}, // vpblendvb
1380       {TTI::SK_Select, MVT::v32i8, 1},  // vpblendvb
1381 
1382       {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1},  // vpermpd
1383       {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1},  // vpermps
1384       {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1},  // vpermq
1385       {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1},  // vpermd
1386       {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vperm2i128 + 2*vpshufb
1387                                                   // + vpblendvb
1388       {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4},  // vperm2i128 + 2*vpshufb
1389                                                   // + vpblendvb
1390 
1391       {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3},  // 2*vpermpd + vblendpd
1392       {TTI::SK_PermuteTwoSrc, MVT::v8f32, 3},  // 2*vpermps + vblendps
1393       {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3},  // 2*vpermq + vpblendd
1394       {TTI::SK_PermuteTwoSrc, MVT::v8i32, 3},  // 2*vpermd + vpblendd
1395       {TTI::SK_PermuteTwoSrc, MVT::v16i16, 7}, // 2*vperm2i128 + 4*vpshufb
1396                                                // + vpblendvb
1397       {TTI::SK_PermuteTwoSrc, MVT::v32i8, 7},  // 2*vperm2i128 + 4*vpshufb
1398                                                // + vpblendvb
1399   };
1400 
1401   if (ST->hasAVX2())
1402     if (const auto *Entry = CostTableLookup(AVX2ShuffleTbl, Kind, LT.second))
1403       return LT.first * Entry->Cost;
1404 
1405   static const CostTblEntry XOPShuffleTbl[] = {
1406       {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2},  // vperm2f128 + vpermil2pd
1407       {TTI::SK_PermuteSingleSrc, MVT::v8f32, 2},  // vperm2f128 + vpermil2ps
1408       {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2},  // vperm2f128 + vpermil2pd
1409       {TTI::SK_PermuteSingleSrc, MVT::v8i32, 2},  // vperm2f128 + vpermil2ps
1410       {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vextractf128 + 2*vpperm
1411                                                   // + vinsertf128
1412       {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4},  // vextractf128 + 2*vpperm
1413                                                   // + vinsertf128
1414 
1415       {TTI::SK_PermuteTwoSrc, MVT::v16i16, 9}, // 2*vextractf128 + 6*vpperm
1416                                                // + vinsertf128
1417       {TTI::SK_PermuteTwoSrc, MVT::v8i16, 1},  // vpperm
1418       {TTI::SK_PermuteTwoSrc, MVT::v32i8, 9},  // 2*vextractf128 + 6*vpperm
1419                                                // + vinsertf128
1420       {TTI::SK_PermuteTwoSrc, MVT::v16i8, 1},  // vpperm
1421   };
1422 
1423   if (ST->hasXOP())
1424     if (const auto *Entry = CostTableLookup(XOPShuffleTbl, Kind, LT.second))
1425       return LT.first * Entry->Cost;
1426 
1427   static const CostTblEntry AVX1ShuffleTbl[] = {
1428       {TTI::SK_Broadcast, MVT::v4f64, 2},  // vperm2f128 + vpermilpd
1429       {TTI::SK_Broadcast, MVT::v8f32, 2},  // vperm2f128 + vpermilps
1430       {TTI::SK_Broadcast, MVT::v4i64, 2},  // vperm2f128 + vpermilpd
1431       {TTI::SK_Broadcast, MVT::v8i32, 2},  // vperm2f128 + vpermilps
1432       {TTI::SK_Broadcast, MVT::v16i16, 3}, // vpshuflw + vpshufd + vinsertf128
1433       {TTI::SK_Broadcast, MVT::v32i8, 2},  // vpshufb + vinsertf128
1434 
1435       {TTI::SK_Reverse, MVT::v4f64, 2},  // vperm2f128 + vpermilpd
1436       {TTI::SK_Reverse, MVT::v8f32, 2},  // vperm2f128 + vpermilps
1437       {TTI::SK_Reverse, MVT::v4i64, 2},  // vperm2f128 + vpermilpd
1438       {TTI::SK_Reverse, MVT::v8i32, 2},  // vperm2f128 + vpermilps
1439       {TTI::SK_Reverse, MVT::v16i16, 4}, // vextractf128 + 2*pshufb
1440                                          // + vinsertf128
1441       {TTI::SK_Reverse, MVT::v32i8, 4},  // vextractf128 + 2*pshufb
1442                                          // + vinsertf128
1443 
1444       {TTI::SK_Select, MVT::v4i64, 1},  // vblendpd
1445       {TTI::SK_Select, MVT::v4f64, 1},  // vblendpd
1446       {TTI::SK_Select, MVT::v8i32, 1},  // vblendps
1447       {TTI::SK_Select, MVT::v8f32, 1},  // vblendps
1448       {TTI::SK_Select, MVT::v16i16, 3}, // vpand + vpandn + vpor
1449       {TTI::SK_Select, MVT::v32i8, 3},  // vpand + vpandn + vpor
1450 
1451       {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2},  // vperm2f128 + vshufpd
1452       {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2},  // vperm2f128 + vshufpd
1453       {TTI::SK_PermuteSingleSrc, MVT::v8f32, 4},  // 2*vperm2f128 + 2*vshufps
1454       {TTI::SK_PermuteSingleSrc, MVT::v8i32, 4},  // 2*vperm2f128 + 2*vshufps
1455       {TTI::SK_PermuteSingleSrc, MVT::v16i16, 8}, // vextractf128 + 4*pshufb
1456                                                   // + 2*por + vinsertf128
1457       {TTI::SK_PermuteSingleSrc, MVT::v32i8, 8},  // vextractf128 + 4*pshufb
1458                                                   // + 2*por + vinsertf128
1459 
1460       {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3},   // 2*vperm2f128 + vshufpd
1461       {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3},   // 2*vperm2f128 + vshufpd
1462       {TTI::SK_PermuteTwoSrc, MVT::v8f32, 4},   // 2*vperm2f128 + 2*vshufps
1463       {TTI::SK_PermuteTwoSrc, MVT::v8i32, 4},   // 2*vperm2f128 + 2*vshufps
1464       {TTI::SK_PermuteTwoSrc, MVT::v16i16, 15}, // 2*vextractf128 + 8*pshufb
1465                                                 // + 4*por + vinsertf128
1466       {TTI::SK_PermuteTwoSrc, MVT::v32i8, 15},  // 2*vextractf128 + 8*pshufb
1467                                                 // + 4*por + vinsertf128
1468   };
1469 
1470   if (ST->hasAVX())
1471     if (const auto *Entry = CostTableLookup(AVX1ShuffleTbl, Kind, LT.second))
1472       return LT.first * Entry->Cost;
1473 
1474   static const CostTblEntry SSE41ShuffleTbl[] = {
1475       {TTI::SK_Select, MVT::v2i64, 1}, // pblendw
1476       {TTI::SK_Select, MVT::v2f64, 1}, // movsd
1477       {TTI::SK_Select, MVT::v4i32, 1}, // pblendw
1478       {TTI::SK_Select, MVT::v4f32, 1}, // blendps
1479       {TTI::SK_Select, MVT::v8i16, 1}, // pblendw
1480       {TTI::SK_Select, MVT::v16i8, 1}  // pblendvb
1481   };
1482 
1483   if (ST->hasSSE41())
1484     if (const auto *Entry = CostTableLookup(SSE41ShuffleTbl, Kind, LT.second))
1485       return LT.first * Entry->Cost;
1486 
1487   static const CostTblEntry SSSE3ShuffleTbl[] = {
1488       {TTI::SK_Broadcast, MVT::v8i16, 1}, // pshufb
1489       {TTI::SK_Broadcast, MVT::v16i8, 1}, // pshufb
1490 
1491       {TTI::SK_Reverse, MVT::v8i16, 1}, // pshufb
1492       {TTI::SK_Reverse, MVT::v16i8, 1}, // pshufb
1493 
1494       {TTI::SK_Select, MVT::v8i16, 3}, // 2*pshufb + por
1495       {TTI::SK_Select, MVT::v16i8, 3}, // 2*pshufb + por
1496 
1497       {TTI::SK_PermuteSingleSrc, MVT::v8i16, 1}, // pshufb
1498       {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1}, // pshufb
1499 
1500       {TTI::SK_PermuteTwoSrc, MVT::v8i16, 3}, // 2*pshufb + por
1501       {TTI::SK_PermuteTwoSrc, MVT::v16i8, 3}, // 2*pshufb + por
1502   };
1503 
1504   if (ST->hasSSSE3())
1505     if (const auto *Entry = CostTableLookup(SSSE3ShuffleTbl, Kind, LT.second))
1506       return LT.first * Entry->Cost;
1507 
1508   static const CostTblEntry SSE2ShuffleTbl[] = {
1509       {TTI::SK_Broadcast, MVT::v2f64, 1}, // shufpd
1510       {TTI::SK_Broadcast, MVT::v2i64, 1}, // pshufd
1511       {TTI::SK_Broadcast, MVT::v4i32, 1}, // pshufd
1512       {TTI::SK_Broadcast, MVT::v8i16, 2}, // pshuflw + pshufd
1513       {TTI::SK_Broadcast, MVT::v16i8, 3}, // unpck + pshuflw + pshufd
1514 
1515       {TTI::SK_Reverse, MVT::v2f64, 1}, // shufpd
1516       {TTI::SK_Reverse, MVT::v2i64, 1}, // pshufd
1517       {TTI::SK_Reverse, MVT::v4i32, 1}, // pshufd
1518       {TTI::SK_Reverse, MVT::v8i16, 3}, // pshuflw + pshufhw + pshufd
1519       {TTI::SK_Reverse, MVT::v16i8, 9}, // 2*pshuflw + 2*pshufhw
1520                                         // + 2*pshufd + 2*unpck + packus
1521 
1522       {TTI::SK_Select, MVT::v2i64, 1}, // movsd
1523       {TTI::SK_Select, MVT::v2f64, 1}, // movsd
1524       {TTI::SK_Select, MVT::v4i32, 2}, // 2*shufps
1525       {TTI::SK_Select, MVT::v8i16, 3}, // pand + pandn + por
1526       {TTI::SK_Select, MVT::v16i8, 3}, // pand + pandn + por
1527 
1528       {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1}, // shufpd
1529       {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1}, // pshufd
1530       {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1}, // pshufd
1531       {TTI::SK_PermuteSingleSrc, MVT::v8i16, 5}, // 2*pshuflw + 2*pshufhw
1532                                                   // + pshufd/unpck
1533     { TTI::SK_PermuteSingleSrc, MVT::v16i8, 10 }, // 2*pshuflw + 2*pshufhw
1534                                                   // + 2*pshufd + 2*unpck + 2*packus
1535 
1536     { TTI::SK_PermuteTwoSrc,    MVT::v2f64,  1 }, // shufpd
1537     { TTI::SK_PermuteTwoSrc,    MVT::v2i64,  1 }, // shufpd
1538     { TTI::SK_PermuteTwoSrc,    MVT::v4i32,  2 }, // 2*{unpck,movsd,pshufd}
1539     { TTI::SK_PermuteTwoSrc,    MVT::v8i16,  8 }, // blend+permute
1540     { TTI::SK_PermuteTwoSrc,    MVT::v16i8, 13 }, // blend+permute
1541   };
1542 
1543   if (ST->hasSSE2())
1544     if (const auto *Entry = CostTableLookup(SSE2ShuffleTbl, Kind, LT.second))
1545       return LT.first * Entry->Cost;
1546 
1547   static const CostTblEntry SSE1ShuffleTbl[] = {
1548     { TTI::SK_Broadcast,        MVT::v4f32, 1 }, // shufps
1549     { TTI::SK_Reverse,          MVT::v4f32, 1 }, // shufps
1550     { TTI::SK_Select,           MVT::v4f32, 2 }, // 2*shufps
1551     { TTI::SK_PermuteSingleSrc, MVT::v4f32, 1 }, // shufps
1552     { TTI::SK_PermuteTwoSrc,    MVT::v4f32, 2 }, // 2*shufps
1553   };
1554 
1555   if (ST->hasSSE1())
1556     if (const auto *Entry = CostTableLookup(SSE1ShuffleTbl, Kind, LT.second))
1557       return LT.first * Entry->Cost;
1558 
1559   return BaseT::getShuffleCost(Kind, BaseTp, Mask, Index, SubTp);
1560 }
1561 
1562 InstructionCost X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
1563                                              Type *Src,
1564                                              TTI::CastContextHint CCH,
1565                                              TTI::TargetCostKind CostKind,
1566                                              const Instruction *I) {
1567   int ISD = TLI->InstructionOpcodeToISD(Opcode);
1568   assert(ISD && "Invalid opcode");
1569 
1570   // TODO: Allow non-throughput costs that aren't binary.
1571   auto AdjustCost = [&CostKind](InstructionCost Cost) -> InstructionCost {
1572     if (CostKind != TTI::TCK_RecipThroughput)
1573       return Cost == 0 ? 0 : 1;
1574     return Cost;
1575   };
1576 
1577   // The cost tables include both specific, custom (non-legal) src/dst type
1578   // conversions and generic, legalized types. We test for customs first, before
1579   // falling back to legalization.
1580   // FIXME: Need a better design of the cost table to handle non-simple types of
1581   // potential massive combinations (elem_num x src_type x dst_type).
1582   static const TypeConversionCostTblEntry AVX512BWConversionTbl[] {
1583     { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 1 },
1584     { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 1 },
1585 
1586     // Mask sign extend has an instruction.
1587     { ISD::SIGN_EXTEND, MVT::v2i8,   MVT::v2i1,  1 },
1588     { ISD::SIGN_EXTEND, MVT::v2i16,  MVT::v2i1,  1 },
1589     { ISD::SIGN_EXTEND, MVT::v4i8,   MVT::v4i1,  1 },
1590     { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i1,  1 },
1591     { ISD::SIGN_EXTEND, MVT::v8i8,   MVT::v8i1,  1 },
1592     { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i1,  1 },
1593     { ISD::SIGN_EXTEND, MVT::v16i8,  MVT::v16i1, 1 },
1594     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
1595     { ISD::SIGN_EXTEND, MVT::v32i8,  MVT::v32i1, 1 },
1596     { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i1, 1 },
1597     { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v64i1, 1 },
1598     { ISD::SIGN_EXTEND, MVT::v64i8,  MVT::v64i1, 1 },
1599 
1600     // Mask zero extend is a sext + shift.
1601     { ISD::ZERO_EXTEND, MVT::v2i8,   MVT::v2i1,  2 },
1602     { ISD::ZERO_EXTEND, MVT::v2i16,  MVT::v2i1,  2 },
1603     { ISD::ZERO_EXTEND, MVT::v4i8,   MVT::v4i1,  2 },
1604     { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i1,  2 },
1605     { ISD::ZERO_EXTEND, MVT::v8i8,   MVT::v8i1,  2 },
1606     { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i1,  2 },
1607     { ISD::ZERO_EXTEND, MVT::v16i8,  MVT::v16i1, 2 },
1608     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 },
1609     { ISD::ZERO_EXTEND, MVT::v32i8,  MVT::v32i1, 2 },
1610     { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i1, 2 },
1611     { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v64i1, 2 },
1612     { ISD::ZERO_EXTEND, MVT::v64i8,  MVT::v64i1, 2 },
1613 
1614     { ISD::TRUNCATE,    MVT::v32i8,  MVT::v32i16, 2 },
1615     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 2 }, // widen to zmm
1616     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i8,   2 }, // widen to zmm
1617     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i16,  2 }, // widen to zmm
1618     { ISD::TRUNCATE,    MVT::v2i8,   MVT::v2i16,  2 }, // vpmovwb
1619     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i8,   2 }, // widen to zmm
1620     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i16,  2 }, // widen to zmm
1621     { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i16,  2 }, // vpmovwb
1622     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i8,   2 }, // widen to zmm
1623     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i16,  2 }, // widen to zmm
1624     { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i16,  2 }, // vpmovwb
1625     { ISD::TRUNCATE,    MVT::v16i1,  MVT::v16i8,  2 }, // widen to zmm
1626     { ISD::TRUNCATE,    MVT::v16i1,  MVT::v16i16, 2 }, // widen to zmm
1627     { ISD::TRUNCATE,    MVT::v32i1,  MVT::v32i8,  2 }, // widen to zmm
1628     { ISD::TRUNCATE,    MVT::v32i1,  MVT::v32i16, 2 },
1629     { ISD::TRUNCATE,    MVT::v64i1,  MVT::v32i16, 2 },
1630     { ISD::TRUNCATE,    MVT::v64i1,  MVT::v64i8,  2 },
1631   };
1632 
1633   static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = {
1634     { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i64,  1 },
1635     { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i64,  1 },
1636 
1637     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i64,  1 },
1638     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i64,  1 },
1639 
1640     { ISD::FP_TO_SINT,  MVT::v8i64,  MVT::v8f32,  1 },
1641     { ISD::FP_TO_SINT,  MVT::v8i64,  MVT::v8f64,  1 },
1642 
1643     { ISD::FP_TO_UINT,  MVT::v8i64,  MVT::v8f32,  1 },
1644     { ISD::FP_TO_UINT,  MVT::v8i64,  MVT::v8f64,  1 },
1645   };
1646 
1647   // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and
1648   // 256-bit wide vectors.
1649 
1650   static const TypeConversionCostTblEntry AVX512FConversionTbl[] = {
1651     { ISD::FP_EXTEND, MVT::v8f64,   MVT::v8f32,  1 },
1652     { ISD::FP_EXTEND, MVT::v8f64,   MVT::v16f32, 3 },
1653     { ISD::FP_ROUND,  MVT::v8f32,   MVT::v8f64,  1 },
1654 
1655     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i8,   3 }, // sext+vpslld+vptestmd
1656     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i8,   3 }, // sext+vpslld+vptestmd
1657     { ISD::TRUNCATE,  MVT::v8i1,    MVT::v8i8,   3 }, // sext+vpslld+vptestmd
1658     { ISD::TRUNCATE,  MVT::v16i1,   MVT::v16i8,  3 }, // sext+vpslld+vptestmd
1659     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i16,  3 }, // sext+vpsllq+vptestmq
1660     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i16,  3 }, // sext+vpsllq+vptestmq
1661     { ISD::TRUNCATE,  MVT::v8i1,    MVT::v8i16,  3 }, // sext+vpsllq+vptestmq
1662     { ISD::TRUNCATE,  MVT::v16i1,   MVT::v16i16, 3 }, // sext+vpslld+vptestmd
1663     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i32,  2 }, // zmm vpslld+vptestmd
1664     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i32,  2 }, // zmm vpslld+vptestmd
1665     { ISD::TRUNCATE,  MVT::v8i1,    MVT::v8i32,  2 }, // zmm vpslld+vptestmd
1666     { ISD::TRUNCATE,  MVT::v16i1,   MVT::v16i32, 2 }, // vpslld+vptestmd
1667     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i64,  2 }, // zmm vpsllq+vptestmq
1668     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i64,  2 }, // zmm vpsllq+vptestmq
1669     { ISD::TRUNCATE,  MVT::v8i1,    MVT::v8i64,  2 }, // vpsllq+vptestmq
1670     { ISD::TRUNCATE,  MVT::v2i8,    MVT::v2i32,  2 }, // vpmovdb
1671     { ISD::TRUNCATE,  MVT::v4i8,    MVT::v4i32,  2 }, // vpmovdb
1672     { ISD::TRUNCATE,  MVT::v16i8,   MVT::v16i32, 2 }, // vpmovdb
1673     { ISD::TRUNCATE,  MVT::v32i8,   MVT::v16i32, 2 }, // vpmovdb
1674     { ISD::TRUNCATE,  MVT::v64i8,   MVT::v16i32, 2 }, // vpmovdb
1675     { ISD::TRUNCATE,  MVT::v16i16,  MVT::v16i32, 2 }, // vpmovdw
1676     { ISD::TRUNCATE,  MVT::v32i16,  MVT::v16i32, 2 }, // vpmovdw
1677     { ISD::TRUNCATE,  MVT::v2i8,    MVT::v2i64,  2 }, // vpmovqb
1678     { ISD::TRUNCATE,  MVT::v2i16,   MVT::v2i64,  1 }, // vpshufb
1679     { ISD::TRUNCATE,  MVT::v8i8,    MVT::v8i64,  2 }, // vpmovqb
1680     { ISD::TRUNCATE,  MVT::v16i8,   MVT::v8i64,  2 }, // vpmovqb
1681     { ISD::TRUNCATE,  MVT::v32i8,   MVT::v8i64,  2 }, // vpmovqb
1682     { ISD::TRUNCATE,  MVT::v64i8,   MVT::v8i64,  2 }, // vpmovqb
1683     { ISD::TRUNCATE,  MVT::v8i16,   MVT::v8i64,  2 }, // vpmovqw
1684     { ISD::TRUNCATE,  MVT::v16i16,  MVT::v8i64,  2 }, // vpmovqw
1685     { ISD::TRUNCATE,  MVT::v32i16,  MVT::v8i64,  2 }, // vpmovqw
1686     { ISD::TRUNCATE,  MVT::v8i32,   MVT::v8i64,  1 }, // vpmovqd
1687     { ISD::TRUNCATE,  MVT::v4i32,   MVT::v4i64,  1 }, // zmm vpmovqd
1688     { ISD::TRUNCATE,  MVT::v16i8,   MVT::v16i64, 5 },// 2*vpmovqd+concat+vpmovdb
1689 
1690     { ISD::TRUNCATE,  MVT::v16i8,  MVT::v16i16,  3 }, // extend to v16i32
1691     { ISD::TRUNCATE,  MVT::v32i8,  MVT::v32i16,  8 },
1692     { ISD::TRUNCATE,  MVT::v64i8,  MVT::v32i16,  8 },
1693 
1694     // Sign extend is zmm vpternlogd+vptruncdb.
1695     // Zero extend is zmm broadcast load+vptruncdw.
1696     { ISD::SIGN_EXTEND, MVT::v2i8,   MVT::v2i1,   3 },
1697     { ISD::ZERO_EXTEND, MVT::v2i8,   MVT::v2i1,   4 },
1698     { ISD::SIGN_EXTEND, MVT::v4i8,   MVT::v4i1,   3 },
1699     { ISD::ZERO_EXTEND, MVT::v4i8,   MVT::v4i1,   4 },
1700     { ISD::SIGN_EXTEND, MVT::v8i8,   MVT::v8i1,   3 },
1701     { ISD::ZERO_EXTEND, MVT::v8i8,   MVT::v8i1,   4 },
1702     { ISD::SIGN_EXTEND, MVT::v16i8,  MVT::v16i1,  3 },
1703     { ISD::ZERO_EXTEND, MVT::v16i8,  MVT::v16i1,  4 },
1704 
1705     // Sign extend is zmm vpternlogd+vptruncdw.
1706     // Zero extend is zmm vpternlogd+vptruncdw+vpsrlw.
1707     { ISD::SIGN_EXTEND, MVT::v2i16,  MVT::v2i1,   3 },
1708     { ISD::ZERO_EXTEND, MVT::v2i16,  MVT::v2i1,   4 },
1709     { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i1,   3 },
1710     { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i1,   4 },
1711     { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i1,   3 },
1712     { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i1,   4 },
1713     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1,  3 },
1714     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1,  4 },
1715 
1716     { ISD::SIGN_EXTEND, MVT::v2i32,  MVT::v2i1,   1 }, // zmm vpternlogd
1717     { ISD::ZERO_EXTEND, MVT::v2i32,  MVT::v2i1,   2 }, // zmm vpternlogd+psrld
1718     { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i1,   1 }, // zmm vpternlogd
1719     { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i1,   2 }, // zmm vpternlogd+psrld
1720     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   1 }, // zmm vpternlogd
1721     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   2 }, // zmm vpternlogd+psrld
1722     { ISD::SIGN_EXTEND, MVT::v2i64,  MVT::v2i1,   1 }, // zmm vpternlogq
1723     { ISD::ZERO_EXTEND, MVT::v2i64,  MVT::v2i1,   2 }, // zmm vpternlogq+psrlq
1724     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   1 }, // zmm vpternlogq
1725     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   2 }, // zmm vpternlogq+psrlq
1726 
1727     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1,  1 }, // vpternlogd
1728     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1,  2 }, // vpternlogd+psrld
1729     { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i1,   1 }, // vpternlogq
1730     { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i1,   2 }, // vpternlogq+psrlq
1731 
1732     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
1733     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
1734     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
1735     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
1736     { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i8,   1 },
1737     { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i8,   1 },
1738     { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i16,  1 },
1739     { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i16,  1 },
1740     { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i32,  1 },
1741     { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i32,  1 },
1742 
1743     { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8,  3 }, // FIXME: May not be right
1744     { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8,  3 }, // FIXME: May not be right
1745 
1746     { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i1,   4 },
1747     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i1,  3 },
1748     { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v16i8,  2 },
1749     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i8,  1 },
1750     { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i16,  2 },
1751     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 1 },
1752     { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  1 },
1753     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 1 },
1754 
1755     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i1,   4 },
1756     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i1,  3 },
1757     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v16i8,  2 },
1758     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i8,  1 },
1759     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i16,  2 },
1760     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i16, 1 },
1761     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  1 },
1762     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i32, 1 },
1763     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i64, 26 },
1764     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i64,  5 },
1765 
1766     { ISD::FP_TO_SINT,  MVT::v16i8,  MVT::v16f32, 2 },
1767     { ISD::FP_TO_SINT,  MVT::v16i8,  MVT::v16f64, 7 },
1768     { ISD::FP_TO_SINT,  MVT::v32i8,  MVT::v32f64,15 },
1769     { ISD::FP_TO_SINT,  MVT::v64i8,  MVT::v64f32,11 },
1770     { ISD::FP_TO_SINT,  MVT::v64i8,  MVT::v64f64,31 },
1771     { ISD::FP_TO_SINT,  MVT::v8i16,  MVT::v8f64,  3 },
1772     { ISD::FP_TO_SINT,  MVT::v16i16, MVT::v16f64, 7 },
1773     { ISD::FP_TO_SINT,  MVT::v32i16, MVT::v32f32, 5 },
1774     { ISD::FP_TO_SINT,  MVT::v32i16, MVT::v32f64,15 },
1775     { ISD::FP_TO_SINT,  MVT::v8i32,  MVT::v8f64,  1 },
1776     { ISD::FP_TO_SINT,  MVT::v16i32, MVT::v16f64, 3 },
1777 
1778     { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f64,  1 },
1779     { ISD::FP_TO_UINT,  MVT::v8i16,  MVT::v8f64,  3 },
1780     { ISD::FP_TO_UINT,  MVT::v8i8,   MVT::v8f64,  3 },
1781     { ISD::FP_TO_UINT,  MVT::v16i32, MVT::v16f32, 1 },
1782     { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v16f32, 3 },
1783     { ISD::FP_TO_UINT,  MVT::v16i8,  MVT::v16f32, 3 },
1784   };
1785 
1786   static const TypeConversionCostTblEntry AVX512BWVLConversionTbl[] {
1787     // Mask sign extend has an instruction.
1788     { ISD::SIGN_EXTEND, MVT::v2i8,   MVT::v2i1,  1 },
1789     { ISD::SIGN_EXTEND, MVT::v2i16,  MVT::v2i1,  1 },
1790     { ISD::SIGN_EXTEND, MVT::v4i8,   MVT::v4i1,  1 },
1791     { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i1,  1 },
1792     { ISD::SIGN_EXTEND, MVT::v8i8,   MVT::v8i1,  1 },
1793     { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i1,  1 },
1794     { ISD::SIGN_EXTEND, MVT::v16i8,  MVT::v16i1, 1 },
1795     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
1796     { ISD::SIGN_EXTEND, MVT::v32i8,  MVT::v32i1, 1 },
1797 
1798     // Mask zero extend is a sext + shift.
1799     { ISD::ZERO_EXTEND, MVT::v2i8,   MVT::v2i1,  2 },
1800     { ISD::ZERO_EXTEND, MVT::v2i16,  MVT::v2i1,  2 },
1801     { ISD::ZERO_EXTEND, MVT::v4i8,   MVT::v4i1,  2 },
1802     { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i1,  2 },
1803     { ISD::ZERO_EXTEND, MVT::v8i8,   MVT::v8i1,  2 },
1804     { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i1,  2 },
1805     { ISD::ZERO_EXTEND, MVT::v16i8,  MVT::v16i1, 2 },
1806     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 },
1807     { ISD::ZERO_EXTEND, MVT::v32i8,  MVT::v32i1, 2 },
1808 
1809     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 2 },
1810     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i8,   2 }, // vpsllw+vptestmb
1811     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i16,  2 }, // vpsllw+vptestmw
1812     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i8,   2 }, // vpsllw+vptestmb
1813     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i16,  2 }, // vpsllw+vptestmw
1814     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i8,   2 }, // vpsllw+vptestmb
1815     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i16,  2 }, // vpsllw+vptestmw
1816     { ISD::TRUNCATE,    MVT::v16i1,  MVT::v16i8,  2 }, // vpsllw+vptestmb
1817     { ISD::TRUNCATE,    MVT::v16i1,  MVT::v16i16, 2 }, // vpsllw+vptestmw
1818     { ISD::TRUNCATE,    MVT::v32i1,  MVT::v32i8,  2 }, // vpsllw+vptestmb
1819   };
1820 
1821   static const TypeConversionCostTblEntry AVX512DQVLConversionTbl[] = {
1822     { ISD::SINT_TO_FP,  MVT::v2f32,  MVT::v2i64,  1 },
1823     { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  1 },
1824     { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i64,  1 },
1825     { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  1 },
1826 
1827     { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i64,  1 },
1828     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  1 },
1829     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i64,  1 },
1830     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  1 },
1831 
1832     { ISD::FP_TO_SINT,  MVT::v2i64,  MVT::v4f32,  1 },
1833     { ISD::FP_TO_SINT,  MVT::v4i64,  MVT::v4f32,  1 },
1834     { ISD::FP_TO_SINT,  MVT::v2i64,  MVT::v2f64,  1 },
1835     { ISD::FP_TO_SINT,  MVT::v4i64,  MVT::v4f64,  1 },
1836 
1837     { ISD::FP_TO_UINT,  MVT::v2i64,  MVT::v4f32,  1 },
1838     { ISD::FP_TO_UINT,  MVT::v4i64,  MVT::v4f32,  1 },
1839     { ISD::FP_TO_UINT,  MVT::v2i64,  MVT::v2f64,  1 },
1840     { ISD::FP_TO_UINT,  MVT::v4i64,  MVT::v4f64,  1 },
1841   };
1842 
1843   static const TypeConversionCostTblEntry AVX512VLConversionTbl[] = {
1844     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i8,   3 }, // sext+vpslld+vptestmd
1845     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i8,   3 }, // sext+vpslld+vptestmd
1846     { ISD::TRUNCATE,  MVT::v8i1,    MVT::v8i8,   3 }, // sext+vpslld+vptestmd
1847     { ISD::TRUNCATE,  MVT::v16i1,   MVT::v16i8,  8 }, // split+2*v8i8
1848     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i16,  3 }, // sext+vpsllq+vptestmq
1849     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i16,  3 }, // sext+vpsllq+vptestmq
1850     { ISD::TRUNCATE,  MVT::v8i1,    MVT::v8i16,  3 }, // sext+vpsllq+vptestmq
1851     { ISD::TRUNCATE,  MVT::v16i1,   MVT::v16i16, 8 }, // split+2*v8i16
1852     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i32,  2 }, // vpslld+vptestmd
1853     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i32,  2 }, // vpslld+vptestmd
1854     { ISD::TRUNCATE,  MVT::v8i1,    MVT::v8i32,  2 }, // vpslld+vptestmd
1855     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i64,  2 }, // vpsllq+vptestmq
1856     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i64,  2 }, // vpsllq+vptestmq
1857     { ISD::TRUNCATE,  MVT::v4i32,   MVT::v4i64,  1 }, // vpmovqd
1858     { ISD::TRUNCATE,  MVT::v4i8,    MVT::v4i64,  2 }, // vpmovqb
1859     { ISD::TRUNCATE,  MVT::v4i16,   MVT::v4i64,  2 }, // vpmovqw
1860     { ISD::TRUNCATE,  MVT::v8i8,    MVT::v8i32,  2 }, // vpmovwb
1861 
1862     // sign extend is vpcmpeq+maskedmove+vpmovdw+vpacksswb
1863     // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw+vpackuswb
1864     { ISD::SIGN_EXTEND, MVT::v2i8,   MVT::v2i1,   5 },
1865     { ISD::ZERO_EXTEND, MVT::v2i8,   MVT::v2i1,   6 },
1866     { ISD::SIGN_EXTEND, MVT::v4i8,   MVT::v4i1,   5 },
1867     { ISD::ZERO_EXTEND, MVT::v4i8,   MVT::v4i1,   6 },
1868     { ISD::SIGN_EXTEND, MVT::v8i8,   MVT::v8i1,   5 },
1869     { ISD::ZERO_EXTEND, MVT::v8i8,   MVT::v8i1,   6 },
1870     { ISD::SIGN_EXTEND, MVT::v16i8,  MVT::v16i1, 10 },
1871     { ISD::ZERO_EXTEND, MVT::v16i8,  MVT::v16i1, 12 },
1872 
1873     // sign extend is vpcmpeq+maskedmove+vpmovdw
1874     // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw
1875     { ISD::SIGN_EXTEND, MVT::v2i16,  MVT::v2i1,   4 },
1876     { ISD::ZERO_EXTEND, MVT::v2i16,  MVT::v2i1,   5 },
1877     { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i1,   4 },
1878     { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i1,   5 },
1879     { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i1,   4 },
1880     { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i1,   5 },
1881     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 10 },
1882     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 12 },
1883 
1884     { ISD::SIGN_EXTEND, MVT::v2i32,  MVT::v2i1,   1 }, // vpternlogd
1885     { ISD::ZERO_EXTEND, MVT::v2i32,  MVT::v2i1,   2 }, // vpternlogd+psrld
1886     { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i1,   1 }, // vpternlogd
1887     { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i1,   2 }, // vpternlogd+psrld
1888     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   1 }, // vpternlogd
1889     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   2 }, // vpternlogd+psrld
1890     { ISD::SIGN_EXTEND, MVT::v2i64,  MVT::v2i1,   1 }, // vpternlogq
1891     { ISD::ZERO_EXTEND, MVT::v2i64,  MVT::v2i1,   2 }, // vpternlogq+psrlq
1892     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   1 }, // vpternlogq
1893     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   2 }, // vpternlogq+psrlq
1894 
1895     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v16i8,  1 },
1896     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v16i8,  1 },
1897     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v16i8,  1 },
1898     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v16i8,  1 },
1899     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
1900     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
1901     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v8i16,  1 },
1902     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v8i16,  1 },
1903     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
1904     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
1905     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },
1906     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },
1907 
1908     { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v16i8,  1 },
1909     { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v16i8,  1 },
1910     { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v8i16,  1 },
1911     { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  1 },
1912 
1913     { ISD::UINT_TO_FP,  MVT::f32,    MVT::i64,    1 },
1914     { ISD::UINT_TO_FP,  MVT::f64,    MVT::i64,    1 },
1915     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v16i8,  1 },
1916     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v16i8,  1 },
1917     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v8i16,  1 },
1918     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  1 },
1919     { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i32,  1 },
1920     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  1 },
1921     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  1 },
1922     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  1 },
1923     { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i64,  5 },
1924     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  5 },
1925     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  5 },
1926 
1927     { ISD::FP_TO_SINT,  MVT::v16i8,  MVT::v8f32,  2 },
1928     { ISD::FP_TO_SINT,  MVT::v16i8,  MVT::v16f32, 2 },
1929     { ISD::FP_TO_SINT,  MVT::v32i8,  MVT::v32f32, 5 },
1930 
1931     { ISD::FP_TO_UINT,  MVT::i64,    MVT::f32,    1 },
1932     { ISD::FP_TO_UINT,  MVT::i64,    MVT::f64,    1 },
1933     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f32,  1 },
1934     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v2f64,  1 },
1935     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f64,  1 },
1936     { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f32,  1 },
1937     { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f64,  1 },
1938   };
1939 
1940   static const TypeConversionCostTblEntry AVX2ConversionTbl[] = {
1941     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
1942     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
1943     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
1944     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
1945     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1,  1 },
1946     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1,  1 },
1947 
1948     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v16i8,  2 },
1949     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v16i8,  2 },
1950     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v16i8,  2 },
1951     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v16i8,  2 },
1952     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  2 },
1953     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  2 },
1954     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v8i16,  2 },
1955     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v8i16,  2 },
1956     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  2 },
1957     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  2 },
1958     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 3 },
1959     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 3 },
1960     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  2 },
1961     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  2 },
1962 
1963     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i32,  2 },
1964 
1965     { ISD::TRUNCATE,    MVT::v16i16, MVT::v16i32, 4 },
1966     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i32, 4 },
1967     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v8i16,  1 },
1968     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v4i32,  1 },
1969     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v2i64,  1 },
1970     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v8i32,  4 },
1971     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v4i64,  4 },
1972     { ISD::TRUNCATE,    MVT::v8i16,  MVT::v4i32,  1 },
1973     { ISD::TRUNCATE,    MVT::v8i16,  MVT::v2i64,  1 },
1974     { ISD::TRUNCATE,    MVT::v8i16,  MVT::v4i64,  5 },
1975     { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  1 },
1976     { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  2 },
1977 
1978     { ISD::FP_EXTEND,   MVT::v8f64,  MVT::v8f32,  3 },
1979     { ISD::FP_ROUND,    MVT::v8f32,  MVT::v8f64,  3 },
1980 
1981     { ISD::FP_TO_SINT,  MVT::v16i16, MVT::v8f32,  1 },
1982     { ISD::FP_TO_SINT,  MVT::v4i32,  MVT::v4f64,  1 },
1983     { ISD::FP_TO_SINT,  MVT::v8i32,  MVT::v8f32,  1 },
1984     { ISD::FP_TO_SINT,  MVT::v8i32,  MVT::v8f64,  3 },
1985 
1986     { ISD::FP_TO_UINT,  MVT::i64,    MVT::f32,    3 },
1987     { ISD::FP_TO_UINT,  MVT::i64,    MVT::f64,    3 },
1988     { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v8f32,  1 },
1989     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f32,  3 },
1990     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v2f64,  4 },
1991     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f64,  4 },
1992     { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f32,  3 },
1993     { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v4f64,  4 },
1994 
1995     { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v16i8,  2 },
1996     { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v16i8,  2 },
1997     { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v8i16,  2 },
1998     { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  2 },
1999     { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  1 },
2000     { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  1 },
2001     { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  3 },
2002 
2003     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v16i8,  2 },
2004     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v16i8,  2 },
2005     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v8i16,  2 },
2006     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  2 },
2007     { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i32,  2 },
2008     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i32,  1 },
2009     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  2 },
2010     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  2 },
2011     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  2 },
2012     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  4 },
2013   };
2014 
2015   static const TypeConversionCostTblEntry AVXConversionTbl[] = {
2016     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   6 },
2017     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   4 },
2018     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   7 },
2019     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   4 },
2020     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1,  4 },
2021     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1,  4 },
2022 
2023     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v16i8,  3 },
2024     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v16i8,  3 },
2025     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v16i8,  3 },
2026     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v16i8,  3 },
2027     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  3 },
2028     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  3 },
2029     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v8i16,  3 },
2030     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v8i16,  3 },
2031     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  3 },
2032     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  3 },
2033     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  3 },
2034     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  3 },
2035 
2036     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i64,  4 },
2037     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i32,  5 },
2038     { ISD::TRUNCATE,    MVT::v16i1,  MVT::v16i16, 4 },
2039     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i64,  9 },
2040     { ISD::TRUNCATE,    MVT::v16i1,  MVT::v16i64, 11 },
2041 
2042     { ISD::TRUNCATE,    MVT::v16i16, MVT::v16i32, 6 },
2043     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i32, 6 },
2044     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 2 }, // and+extract+packuswb
2045     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v8i32,  5 },
2046     { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  5 },
2047     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v4i64,  5 },
2048     { ISD::TRUNCATE,    MVT::v8i16,  MVT::v4i64,  3 }, // and+extract+2*packusdw
2049     { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  2 },
2050 
2051     { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   3 },
2052     { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   3 },
2053     { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   8 },
2054     { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v16i8,  4 },
2055     { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v16i8,  2 },
2056     { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  4 },
2057     { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v8i16,  2 },
2058     { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  2 },
2059     { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  2 },
2060     { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  4 },
2061     { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v2i64,  5 },
2062     { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i64,  8 },
2063 
2064     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   7 },
2065     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   7 },
2066     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   6 },
2067     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v16i8,  4 },
2068     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v16i8,  2 },
2069     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  4 },
2070     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v8i16,  2 },
2071     { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i32,  4 },
2072     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i32,  4 },
2073     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  5 },
2074     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  6 },
2075     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  8 },
2076     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i32, 10 },
2077     { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i64, 10 },
2078     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i64, 18 },
2079     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  5 },
2080     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64, 10 },
2081 
2082     { ISD::FP_TO_SINT,  MVT::v16i8,  MVT::v8f32,  2 },
2083     { ISD::FP_TO_SINT,  MVT::v16i8,  MVT::v4f64,  2 },
2084     { ISD::FP_TO_SINT,  MVT::v32i8,  MVT::v8f32,  2 },
2085     { ISD::FP_TO_SINT,  MVT::v32i8,  MVT::v4f64,  2 },
2086     { ISD::FP_TO_SINT,  MVT::v8i16,  MVT::v8f32,  2 },
2087     { ISD::FP_TO_SINT,  MVT::v8i16,  MVT::v4f64,  2 },
2088     { ISD::FP_TO_SINT,  MVT::v16i16, MVT::v8f32,  2 },
2089     { ISD::FP_TO_SINT,  MVT::v16i16, MVT::v4f64,  2 },
2090     { ISD::FP_TO_SINT,  MVT::v4i32,  MVT::v4f64,  2 },
2091     { ISD::FP_TO_SINT,  MVT::v8i32,  MVT::v8f32,  2 },
2092     { ISD::FP_TO_SINT,  MVT::v8i32,  MVT::v8f64,  5 },
2093 
2094     { ISD::FP_TO_UINT,  MVT::v16i8,  MVT::v8f32,  2 },
2095     { ISD::FP_TO_UINT,  MVT::v16i8,  MVT::v4f64,  2 },
2096     { ISD::FP_TO_UINT,  MVT::v32i8,  MVT::v8f32,  2 },
2097     { ISD::FP_TO_UINT,  MVT::v32i8,  MVT::v4f64,  2 },
2098     { ISD::FP_TO_UINT,  MVT::v8i16,  MVT::v8f32,  2 },
2099     { ISD::FP_TO_UINT,  MVT::v8i16,  MVT::v4f64,  2 },
2100     { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v8f32,  2 },
2101     { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v4f64,  2 },
2102     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f32,  3 },
2103     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v2f64,  4 },
2104     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f64,  6 },
2105     { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f32,  7 },
2106     { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v4f64,  7 },
2107 
2108     { ISD::FP_EXTEND,   MVT::v4f64,  MVT::v4f32,  1 },
2109     { ISD::FP_ROUND,    MVT::v4f32,  MVT::v4f64,  1 },
2110   };
2111 
2112   static const TypeConversionCostTblEntry SSE41ConversionTbl[] = {
2113     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v16i8,   1 },
2114     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v16i8,   1 },
2115     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v16i8,   1 },
2116     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v16i8,   1 },
2117     { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v16i8,   1 },
2118     { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v16i8,   1 },
2119     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v8i16,   1 },
2120     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v8i16,   1 },
2121     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v8i16,   1 },
2122     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v8i16,   1 },
2123     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v4i32,   1 },
2124     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v4i32,   1 },
2125 
2126     // These truncates end up widening elements.
2127     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i8,   1 }, // PMOVXZBQ
2128     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i16,  1 }, // PMOVXZWQ
2129     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i8,   1 }, // PMOVXZBD
2130 
2131     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v4i32,  2 },
2132     { ISD::TRUNCATE,    MVT::v8i16,  MVT::v4i32,  2 },
2133     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v2i64,  2 },
2134 
2135     { ISD::SINT_TO_FP,  MVT::f32,    MVT::i32,    1 },
2136     { ISD::SINT_TO_FP,  MVT::f64,    MVT::i32,    1 },
2137     { ISD::SINT_TO_FP,  MVT::f32,    MVT::i64,    1 },
2138     { ISD::SINT_TO_FP,  MVT::f64,    MVT::i64,    1 },
2139     { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v16i8,  1 },
2140     { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v16i8,  1 },
2141     { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v8i16,  1 },
2142     { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v8i16,  1 },
2143     { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  1 },
2144     { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v4i32,  1 },
2145     { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  2 },
2146 
2147     { ISD::UINT_TO_FP,  MVT::f32,    MVT::i32,    1 },
2148     { ISD::UINT_TO_FP,  MVT::f64,    MVT::i32,    1 },
2149     { ISD::UINT_TO_FP,  MVT::f32,    MVT::i64,    4 },
2150     { ISD::UINT_TO_FP,  MVT::f64,    MVT::i64,    4 },
2151     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v16i8,  1 },
2152     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v16i8,  1 },
2153     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v8i16,  1 },
2154     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v8i16,  1 },
2155     { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i32,  3 },
2156     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  3 },
2157     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v4i32,  2 },
2158     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v2i64, 12 },
2159     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i64, 22 },
2160     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  4 },
2161 
2162     { ISD::FP_TO_SINT,  MVT::i32,    MVT::f32,    1 },
2163     { ISD::FP_TO_SINT,  MVT::i64,    MVT::f32,    1 },
2164     { ISD::FP_TO_SINT,  MVT::i32,    MVT::f64,    1 },
2165     { ISD::FP_TO_SINT,  MVT::i64,    MVT::f64,    1 },
2166     { ISD::FP_TO_SINT,  MVT::v16i8,  MVT::v4f32,  2 },
2167     { ISD::FP_TO_SINT,  MVT::v16i8,  MVT::v2f64,  2 },
2168     { ISD::FP_TO_SINT,  MVT::v8i16,  MVT::v4f32,  1 },
2169     { ISD::FP_TO_SINT,  MVT::v8i16,  MVT::v2f64,  1 },
2170     { ISD::FP_TO_SINT,  MVT::v4i32,  MVT::v4f32,  1 },
2171     { ISD::FP_TO_SINT,  MVT::v4i32,  MVT::v2f64,  1 },
2172 
2173     { ISD::FP_TO_UINT,  MVT::i32,    MVT::f32,    1 },
2174     { ISD::FP_TO_UINT,  MVT::i64,    MVT::f32,    4 },
2175     { ISD::FP_TO_UINT,  MVT::i32,    MVT::f64,    1 },
2176     { ISD::FP_TO_UINT,  MVT::i64,    MVT::f64,    4 },
2177     { ISD::FP_TO_UINT,  MVT::v16i8,  MVT::v4f32,  2 },
2178     { ISD::FP_TO_UINT,  MVT::v16i8,  MVT::v2f64,  2 },
2179     { ISD::FP_TO_UINT,  MVT::v8i16,  MVT::v4f32,  1 },
2180     { ISD::FP_TO_UINT,  MVT::v8i16,  MVT::v2f64,  1 },
2181     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f32,  4 },
2182     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v2f64,  4 },
2183   };
2184 
2185   static const TypeConversionCostTblEntry SSE2ConversionTbl[] = {
2186     // These are somewhat magic numbers justified by comparing the
2187     // output of llvm-mca for our various supported scheduler models
2188     // and basing it off the worst case scenario.
2189     { ISD::SINT_TO_FP,  MVT::f32,    MVT::i32,    3 },
2190     { ISD::SINT_TO_FP,  MVT::f64,    MVT::i32,    3 },
2191     { ISD::SINT_TO_FP,  MVT::f32,    MVT::i64,    3 },
2192     { ISD::SINT_TO_FP,  MVT::f64,    MVT::i64,    3 },
2193     { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v16i8,  3 },
2194     { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v16i8,  4 },
2195     { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v8i16,  3 },
2196     { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v8i16,  4 },
2197     { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  3 },
2198     { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v4i32,  4 },
2199     { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v2i64,  8 },
2200     { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  8 },
2201 
2202     { ISD::UINT_TO_FP,  MVT::f32,    MVT::i32,    3 },
2203     { ISD::UINT_TO_FP,  MVT::f64,    MVT::i32,    3 },
2204     { ISD::UINT_TO_FP,  MVT::f32,    MVT::i64,    8 },
2205     { ISD::UINT_TO_FP,  MVT::f64,    MVT::i64,    9 },
2206     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v16i8,  4 },
2207     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v16i8,  4 },
2208     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v8i16,  4 },
2209     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v8i16,  4 },
2210     { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i32,  7 },
2211     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v4i32,  7 },
2212     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  5 },
2213     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64, 15 },
2214     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v2i64, 18 },
2215 
2216     { ISD::FP_TO_SINT,  MVT::i32,    MVT::f32,    4 },
2217     { ISD::FP_TO_SINT,  MVT::i64,    MVT::f32,    4 },
2218     { ISD::FP_TO_SINT,  MVT::i32,    MVT::f64,    4 },
2219     { ISD::FP_TO_SINT,  MVT::i64,    MVT::f64,    4 },
2220     { ISD::FP_TO_SINT,  MVT::v16i8,  MVT::v4f32,  6 },
2221     { ISD::FP_TO_SINT,  MVT::v16i8,  MVT::v2f64,  6 },
2222     { ISD::FP_TO_SINT,  MVT::v8i16,  MVT::v4f32,  5 },
2223     { ISD::FP_TO_SINT,  MVT::v8i16,  MVT::v2f64,  5 },
2224     { ISD::FP_TO_SINT,  MVT::v4i32,  MVT::v4f32,  4 },
2225     { ISD::FP_TO_SINT,  MVT::v4i32,  MVT::v2f64,  4 },
2226 
2227     { ISD::FP_TO_UINT,  MVT::i32,    MVT::f32,    4 },
2228     { ISD::FP_TO_UINT,  MVT::i64,    MVT::f32,    4 },
2229     { ISD::FP_TO_UINT,  MVT::i32,    MVT::f64,    4 },
2230     { ISD::FP_TO_UINT,  MVT::i64,    MVT::f64,   15 },
2231     { ISD::FP_TO_UINT,  MVT::v16i8,  MVT::v4f32,  6 },
2232     { ISD::FP_TO_UINT,  MVT::v16i8,  MVT::v2f64,  6 },
2233     { ISD::FP_TO_UINT,  MVT::v8i16,  MVT::v4f32,  5 },
2234     { ISD::FP_TO_UINT,  MVT::v8i16,  MVT::v2f64,  5 },
2235     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f32,  8 },
2236     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v2f64,  8 },
2237 
2238     { ISD::ZERO_EXTEND, MVT::v2i64,  MVT::v16i8,  4 },
2239     { ISD::SIGN_EXTEND, MVT::v2i64,  MVT::v16i8,  4 },
2240     { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v16i8,  2 },
2241     { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v16i8,  3 },
2242     { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v16i8,  1 },
2243     { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v16i8,  2 },
2244     { ISD::ZERO_EXTEND, MVT::v2i64,  MVT::v8i16,  2 },
2245     { ISD::SIGN_EXTEND, MVT::v2i64,  MVT::v8i16,  3 },
2246     { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v8i16,  1 },
2247     { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v8i16,  2 },
2248     { ISD::ZERO_EXTEND, MVT::v2i64,  MVT::v4i32,  1 },
2249     { ISD::SIGN_EXTEND, MVT::v2i64,  MVT::v4i32,  2 },
2250 
2251     // These truncates are really widening elements.
2252     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i32,  1 }, // PSHUFD
2253     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i16,  2 }, // PUNPCKLWD+DQ
2254     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i8,   3 }, // PUNPCKLBW+WD+PSHUFD
2255     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i16,  1 }, // PUNPCKLWD
2256     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i8,   2 }, // PUNPCKLBW+WD
2257     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i8,   1 }, // PUNPCKLBW
2258 
2259     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v8i16,  2 }, // PAND+PACKUSWB
2260     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 3 },
2261     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v4i32,  3 }, // PAND+2*PACKUSWB
2262     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i32, 7 },
2263     { ISD::TRUNCATE,    MVT::v2i16,  MVT::v2i32,  1 },
2264     { ISD::TRUNCATE,    MVT::v8i16,  MVT::v4i32,  3 },
2265     { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  5 },
2266     { ISD::TRUNCATE,    MVT::v16i16, MVT::v16i32,10 },
2267     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v2i64,  4 }, // PAND+3*PACKUSWB
2268     { ISD::TRUNCATE,    MVT::v8i16,  MVT::v2i64,  2 }, // PSHUFD+PSHUFLW
2269     { ISD::TRUNCATE,    MVT::v4i32,  MVT::v2i64,  1 }, // PSHUFD
2270   };
2271 
2272   // Attempt to map directly to (simple) MVT types to let us match custom entries.
2273   EVT SrcTy = TLI->getValueType(DL, Src);
2274   EVT DstTy = TLI->getValueType(DL, Dst);
2275 
2276   // The function getSimpleVT only handles simple value types.
2277   if (SrcTy.isSimple() && DstTy.isSimple()) {
2278     MVT SimpleSrcTy = SrcTy.getSimpleVT();
2279     MVT SimpleDstTy = DstTy.getSimpleVT();
2280 
2281     if (ST->useAVX512Regs()) {
2282       if (ST->hasBWI())
2283         if (const auto *Entry = ConvertCostTableLookup(
2284                 AVX512BWConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
2285           return AdjustCost(Entry->Cost);
2286 
2287       if (ST->hasDQI())
2288         if (const auto *Entry = ConvertCostTableLookup(
2289                 AVX512DQConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
2290           return AdjustCost(Entry->Cost);
2291 
2292       if (ST->hasAVX512())
2293         if (const auto *Entry = ConvertCostTableLookup(
2294                 AVX512FConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
2295           return AdjustCost(Entry->Cost);
2296     }
2297 
2298     if (ST->hasBWI())
2299       if (const auto *Entry = ConvertCostTableLookup(
2300               AVX512BWVLConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
2301         return AdjustCost(Entry->Cost);
2302 
2303     if (ST->hasDQI())
2304       if (const auto *Entry = ConvertCostTableLookup(
2305               AVX512DQVLConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
2306         return AdjustCost(Entry->Cost);
2307 
2308     if (ST->hasAVX512())
2309       if (const auto *Entry = ConvertCostTableLookup(AVX512VLConversionTbl, ISD,
2310                                                      SimpleDstTy, SimpleSrcTy))
2311         return AdjustCost(Entry->Cost);
2312 
2313     if (ST->hasAVX2()) {
2314       if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
2315                                                      SimpleDstTy, SimpleSrcTy))
2316         return AdjustCost(Entry->Cost);
2317     }
2318 
2319     if (ST->hasAVX()) {
2320       if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
2321                                                      SimpleDstTy, SimpleSrcTy))
2322         return AdjustCost(Entry->Cost);
2323     }
2324 
2325     if (ST->hasSSE41()) {
2326       if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
2327                                                      SimpleDstTy, SimpleSrcTy))
2328         return AdjustCost(Entry->Cost);
2329     }
2330 
2331     if (ST->hasSSE2()) {
2332       if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
2333                                                      SimpleDstTy, SimpleSrcTy))
2334         return AdjustCost(Entry->Cost);
2335     }
2336   }
2337 
2338   // Fall back to legalized types.
2339   std::pair<InstructionCost, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
2340   std::pair<InstructionCost, MVT> LTDest =
2341       TLI->getTypeLegalizationCost(DL, Dst);
2342 
2343   if (ST->useAVX512Regs()) {
2344     if (ST->hasBWI())
2345       if (const auto *Entry = ConvertCostTableLookup(
2346               AVX512BWConversionTbl, ISD, LTDest.second, LTSrc.second))
2347         return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2348 
2349     if (ST->hasDQI())
2350       if (const auto *Entry = ConvertCostTableLookup(
2351               AVX512DQConversionTbl, ISD, LTDest.second, LTSrc.second))
2352         return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2353 
2354     if (ST->hasAVX512())
2355       if (const auto *Entry = ConvertCostTableLookup(
2356               AVX512FConversionTbl, ISD, LTDest.second, LTSrc.second))
2357         return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2358   }
2359 
2360   if (ST->hasBWI())
2361     if (const auto *Entry = ConvertCostTableLookup(AVX512BWVLConversionTbl, ISD,
2362                                                    LTDest.second, LTSrc.second))
2363       return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2364 
2365   if (ST->hasDQI())
2366     if (const auto *Entry = ConvertCostTableLookup(AVX512DQVLConversionTbl, ISD,
2367                                                    LTDest.second, LTSrc.second))
2368       return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2369 
2370   if (ST->hasAVX512())
2371     if (const auto *Entry = ConvertCostTableLookup(AVX512VLConversionTbl, ISD,
2372                                                    LTDest.second, LTSrc.second))
2373       return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2374 
2375   if (ST->hasAVX2())
2376     if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
2377                                                    LTDest.second, LTSrc.second))
2378       return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2379 
2380   if (ST->hasAVX())
2381     if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
2382                                                    LTDest.second, LTSrc.second))
2383       return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2384 
2385   if (ST->hasSSE41())
2386     if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
2387                                                    LTDest.second, LTSrc.second))
2388       return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2389 
2390   if (ST->hasSSE2())
2391     if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
2392                                                    LTDest.second, LTSrc.second))
2393       return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2394 
2395   // Fallback, for i8/i16 sitofp/uitofp cases we need to extend to i32 for
2396   // sitofp.
2397   if ((ISD == ISD::SINT_TO_FP || ISD == ISD::UINT_TO_FP) &&
2398       1 < Src->getScalarSizeInBits() && Src->getScalarSizeInBits() < 32) {
2399     Type *ExtSrc = Src->getWithNewBitWidth(32);
2400     unsigned ExtOpc =
2401         (ISD == ISD::SINT_TO_FP) ? Instruction::SExt : Instruction::ZExt;
2402 
2403     // For scalar loads the extend would be free.
2404     InstructionCost ExtCost = 0;
2405     if (!(Src->isIntegerTy() && I && isa<LoadInst>(I->getOperand(0))))
2406       ExtCost = getCastInstrCost(ExtOpc, ExtSrc, Src, CCH, CostKind);
2407 
2408     return ExtCost + getCastInstrCost(Instruction::SIToFP, Dst, ExtSrc,
2409                                       TTI::CastContextHint::None, CostKind);
2410   }
2411 
2412   // Fallback for fptosi/fptoui i8/i16 cases we need to truncate from fptosi
2413   // i32.
2414   if ((ISD == ISD::FP_TO_SINT || ISD == ISD::FP_TO_UINT) &&
2415       1 < Dst->getScalarSizeInBits() && Dst->getScalarSizeInBits() < 32) {
2416     Type *TruncDst = Dst->getWithNewBitWidth(32);
2417     return getCastInstrCost(Instruction::FPToSI, TruncDst, Src, CCH, CostKind) +
2418            getCastInstrCost(Instruction::Trunc, Dst, TruncDst,
2419                             TTI::CastContextHint::None, CostKind);
2420   }
2421 
2422   return AdjustCost(
2423       BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
2424 }
2425 
2426 InstructionCost X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
2427                                                Type *CondTy,
2428                                                CmpInst::Predicate VecPred,
2429                                                TTI::TargetCostKind CostKind,
2430                                                const Instruction *I) {
2431   // TODO: Handle other cost kinds.
2432   if (CostKind != TTI::TCK_RecipThroughput)
2433     return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
2434                                      I);
2435 
2436   // Legalize the type.
2437   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
2438 
2439   MVT MTy = LT.second;
2440 
2441   int ISD = TLI->InstructionOpcodeToISD(Opcode);
2442   assert(ISD && "Invalid opcode");
2443 
2444   unsigned ExtraCost = 0;
2445   if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) {
2446     // Some vector comparison predicates cost extra instructions.
2447     // TODO: Should we invert this and assume worst case cmp costs
2448     // and reduce for particular predicates?
2449     if (MTy.isVector() &&
2450         !((ST->hasXOP() && (!ST->hasAVX2() || MTy.is128BitVector())) ||
2451           (ST->hasAVX512() && 32 <= MTy.getScalarSizeInBits()) ||
2452           ST->hasBWI())) {
2453       // Fallback to I if a specific predicate wasn't specified.
2454       CmpInst::Predicate Pred = VecPred;
2455       if (I && (Pred == CmpInst::BAD_ICMP_PREDICATE ||
2456                 Pred == CmpInst::BAD_FCMP_PREDICATE))
2457         Pred = cast<CmpInst>(I)->getPredicate();
2458 
2459       switch (Pred) {
2460       case CmpInst::Predicate::ICMP_NE:
2461         // xor(cmpeq(x,y),-1)
2462         ExtraCost = 1;
2463         break;
2464       case CmpInst::Predicate::ICMP_SGE:
2465       case CmpInst::Predicate::ICMP_SLE:
2466         // xor(cmpgt(x,y),-1)
2467         ExtraCost = 1;
2468         break;
2469       case CmpInst::Predicate::ICMP_ULT:
2470       case CmpInst::Predicate::ICMP_UGT:
2471         // cmpgt(xor(x,signbit),xor(y,signbit))
2472         // xor(cmpeq(pmaxu(x,y),x),-1)
2473         ExtraCost = 2;
2474         break;
2475       case CmpInst::Predicate::ICMP_ULE:
2476       case CmpInst::Predicate::ICMP_UGE:
2477         if ((ST->hasSSE41() && MTy.getScalarSizeInBits() == 32) ||
2478             (ST->hasSSE2() && MTy.getScalarSizeInBits() < 32)) {
2479           // cmpeq(psubus(x,y),0)
2480           // cmpeq(pminu(x,y),x)
2481           ExtraCost = 1;
2482         } else {
2483           // xor(cmpgt(xor(x,signbit),xor(y,signbit)),-1)
2484           ExtraCost = 3;
2485         }
2486         break;
2487       case CmpInst::Predicate::BAD_ICMP_PREDICATE:
2488       case CmpInst::Predicate::BAD_FCMP_PREDICATE:
2489         // Assume worst case scenario and add the maximum extra cost.
2490         ExtraCost = 3;
2491         break;
2492       default:
2493         break;
2494       }
2495     }
2496   }
2497 
2498   static const CostTblEntry SLMCostTbl[] = {
2499     // slm pcmpeq/pcmpgt throughput is 2
2500     { ISD::SETCC,   MVT::v2i64,   2 },
2501   };
2502 
2503   static const CostTblEntry AVX512BWCostTbl[] = {
2504     { ISD::SETCC,   MVT::v32i16,  1 },
2505     { ISD::SETCC,   MVT::v64i8,   1 },
2506 
2507     { ISD::SELECT,  MVT::v32i16,  1 },
2508     { ISD::SELECT,  MVT::v64i8,   1 },
2509   };
2510 
2511   static const CostTblEntry AVX512CostTbl[] = {
2512     { ISD::SETCC,   MVT::v8i64,   1 },
2513     { ISD::SETCC,   MVT::v16i32,  1 },
2514     { ISD::SETCC,   MVT::v8f64,   1 },
2515     { ISD::SETCC,   MVT::v16f32,  1 },
2516 
2517     { ISD::SELECT,  MVT::v8i64,   1 },
2518     { ISD::SELECT,  MVT::v16i32,  1 },
2519     { ISD::SELECT,  MVT::v8f64,   1 },
2520     { ISD::SELECT,  MVT::v16f32,  1 },
2521 
2522     { ISD::SETCC,   MVT::v32i16,  2 }, // FIXME: should probably be 4
2523     { ISD::SETCC,   MVT::v64i8,   2 }, // FIXME: should probably be 4
2524 
2525     { ISD::SELECT,  MVT::v32i16,  2 }, // FIXME: should be 3
2526     { ISD::SELECT,  MVT::v64i8,   2 }, // FIXME: should be 3
2527   };
2528 
2529   static const CostTblEntry AVX2CostTbl[] = {
2530     { ISD::SETCC,   MVT::v4i64,   1 },
2531     { ISD::SETCC,   MVT::v8i32,   1 },
2532     { ISD::SETCC,   MVT::v16i16,  1 },
2533     { ISD::SETCC,   MVT::v32i8,   1 },
2534 
2535     { ISD::SELECT,  MVT::v4i64,   1 }, // pblendvb
2536     { ISD::SELECT,  MVT::v8i32,   1 }, // pblendvb
2537     { ISD::SELECT,  MVT::v16i16,  1 }, // pblendvb
2538     { ISD::SELECT,  MVT::v32i8,   1 }, // pblendvb
2539   };
2540 
2541   static const CostTblEntry AVX1CostTbl[] = {
2542     { ISD::SETCC,   MVT::v4f64,   1 },
2543     { ISD::SETCC,   MVT::v8f32,   1 },
2544     // AVX1 does not support 8-wide integer compare.
2545     { ISD::SETCC,   MVT::v4i64,   4 },
2546     { ISD::SETCC,   MVT::v8i32,   4 },
2547     { ISD::SETCC,   MVT::v16i16,  4 },
2548     { ISD::SETCC,   MVT::v32i8,   4 },
2549 
2550     { ISD::SELECT,  MVT::v4f64,   1 }, // vblendvpd
2551     { ISD::SELECT,  MVT::v8f32,   1 }, // vblendvps
2552     { ISD::SELECT,  MVT::v4i64,   1 }, // vblendvpd
2553     { ISD::SELECT,  MVT::v8i32,   1 }, // vblendvps
2554     { ISD::SELECT,  MVT::v16i16,  3 }, // vandps + vandnps + vorps
2555     { ISD::SELECT,  MVT::v32i8,   3 }, // vandps + vandnps + vorps
2556   };
2557 
2558   static const CostTblEntry SSE42CostTbl[] = {
2559     { ISD::SETCC,   MVT::v2f64,   1 },
2560     { ISD::SETCC,   MVT::v4f32,   1 },
2561     { ISD::SETCC,   MVT::v2i64,   1 },
2562   };
2563 
2564   static const CostTblEntry SSE41CostTbl[] = {
2565     { ISD::SELECT,  MVT::v2f64,   1 }, // blendvpd
2566     { ISD::SELECT,  MVT::v4f32,   1 }, // blendvps
2567     { ISD::SELECT,  MVT::v2i64,   1 }, // pblendvb
2568     { ISD::SELECT,  MVT::v4i32,   1 }, // pblendvb
2569     { ISD::SELECT,  MVT::v8i16,   1 }, // pblendvb
2570     { ISD::SELECT,  MVT::v16i8,   1 }, // pblendvb
2571   };
2572 
2573   static const CostTblEntry SSE2CostTbl[] = {
2574     { ISD::SETCC,   MVT::v2f64,   2 },
2575     { ISD::SETCC,   MVT::f64,     1 },
2576     { ISD::SETCC,   MVT::v2i64,   8 },
2577     { ISD::SETCC,   MVT::v4i32,   1 },
2578     { ISD::SETCC,   MVT::v8i16,   1 },
2579     { ISD::SETCC,   MVT::v16i8,   1 },
2580 
2581     { ISD::SELECT,  MVT::v2f64,   3 }, // andpd + andnpd + orpd
2582     { ISD::SELECT,  MVT::v2i64,   3 }, // pand + pandn + por
2583     { ISD::SELECT,  MVT::v4i32,   3 }, // pand + pandn + por
2584     { ISD::SELECT,  MVT::v8i16,   3 }, // pand + pandn + por
2585     { ISD::SELECT,  MVT::v16i8,   3 }, // pand + pandn + por
2586   };
2587 
2588   static const CostTblEntry SSE1CostTbl[] = {
2589     { ISD::SETCC,   MVT::v4f32,   2 },
2590     { ISD::SETCC,   MVT::f32,     1 },
2591 
2592     { ISD::SELECT,  MVT::v4f32,   3 }, // andps + andnps + orps
2593   };
2594 
2595   if (ST->useSLMArithCosts())
2596     if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
2597       return LT.first * (ExtraCost + Entry->Cost);
2598 
2599   if (ST->hasBWI())
2600     if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
2601       return LT.first * (ExtraCost + Entry->Cost);
2602 
2603   if (ST->hasAVX512())
2604     if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
2605       return LT.first * (ExtraCost + Entry->Cost);
2606 
2607   if (ST->hasAVX2())
2608     if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
2609       return LT.first * (ExtraCost + Entry->Cost);
2610 
2611   if (ST->hasAVX())
2612     if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
2613       return LT.first * (ExtraCost + Entry->Cost);
2614 
2615   if (ST->hasSSE42())
2616     if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
2617       return LT.first * (ExtraCost + Entry->Cost);
2618 
2619   if (ST->hasSSE41())
2620     if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
2621       return LT.first * (ExtraCost + Entry->Cost);
2622 
2623   if (ST->hasSSE2())
2624     if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
2625       return LT.first * (ExtraCost + Entry->Cost);
2626 
2627   if (ST->hasSSE1())
2628     if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
2629       return LT.first * (ExtraCost + Entry->Cost);
2630 
2631   return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
2632 }
2633 
2634 unsigned X86TTIImpl::getAtomicMemIntrinsicMaxElementSize() const { return 16; }
2635 
2636 InstructionCost
2637 X86TTIImpl::getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
2638                                            TTI::TargetCostKind CostKind) {
2639 
2640   // Costs should match the codegen from:
2641   // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll
2642   // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll
2643   // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll
2644   // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll
2645   // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll
2646 
2647   // TODO: Overflow intrinsics (*ADDO, *SUBO, *MULO) with vector types are not
2648   //       specialized in these tables yet.
2649   static const CostTblEntry AVX512BITALGCostTbl[] = {
2650     { ISD::CTPOP,      MVT::v32i16,  1 },
2651     { ISD::CTPOP,      MVT::v64i8,   1 },
2652     { ISD::CTPOP,      MVT::v16i16,  1 },
2653     { ISD::CTPOP,      MVT::v32i8,   1 },
2654     { ISD::CTPOP,      MVT::v8i16,   1 },
2655     { ISD::CTPOP,      MVT::v16i8,   1 },
2656   };
2657   static const CostTblEntry AVX512VPOPCNTDQCostTbl[] = {
2658     { ISD::CTPOP,      MVT::v8i64,   1 },
2659     { ISD::CTPOP,      MVT::v16i32,  1 },
2660     { ISD::CTPOP,      MVT::v4i64,   1 },
2661     { ISD::CTPOP,      MVT::v8i32,   1 },
2662     { ISD::CTPOP,      MVT::v2i64,   1 },
2663     { ISD::CTPOP,      MVT::v4i32,   1 },
2664   };
2665   static const CostTblEntry AVX512CDCostTbl[] = {
2666     { ISD::CTLZ,       MVT::v8i64,   1 },
2667     { ISD::CTLZ,       MVT::v16i32,  1 },
2668     { ISD::CTLZ,       MVT::v32i16,  8 },
2669     { ISD::CTLZ,       MVT::v64i8,  20 },
2670     { ISD::CTLZ,       MVT::v4i64,   1 },
2671     { ISD::CTLZ,       MVT::v8i32,   1 },
2672     { ISD::CTLZ,       MVT::v16i16,  4 },
2673     { ISD::CTLZ,       MVT::v32i8,  10 },
2674     { ISD::CTLZ,       MVT::v2i64,   1 },
2675     { ISD::CTLZ,       MVT::v4i32,   1 },
2676     { ISD::CTLZ,       MVT::v8i16,   4 },
2677     { ISD::CTLZ,       MVT::v16i8,   4 },
2678   };
2679   static const CostTblEntry AVX512BWCostTbl[] = {
2680     { ISD::ABS,        MVT::v32i16,  1 },
2681     { ISD::ABS,        MVT::v64i8,   1 },
2682     { ISD::BITREVERSE, MVT::v8i64,   3 },
2683     { ISD::BITREVERSE, MVT::v16i32,  3 },
2684     { ISD::BITREVERSE, MVT::v32i16,  3 },
2685     { ISD::BITREVERSE, MVT::v64i8,   2 },
2686     { ISD::BSWAP,      MVT::v8i64,   1 },
2687     { ISD::BSWAP,      MVT::v16i32,  1 },
2688     { ISD::BSWAP,      MVT::v32i16,  1 },
2689     { ISD::CTLZ,       MVT::v8i64,  23 },
2690     { ISD::CTLZ,       MVT::v16i32, 22 },
2691     { ISD::CTLZ,       MVT::v32i16, 18 },
2692     { ISD::CTLZ,       MVT::v64i8,  17 },
2693     { ISD::CTPOP,      MVT::v8i64,   7 },
2694     { ISD::CTPOP,      MVT::v16i32, 11 },
2695     { ISD::CTPOP,      MVT::v32i16,  9 },
2696     { ISD::CTPOP,      MVT::v64i8,   6 },
2697     { ISD::CTTZ,       MVT::v8i64,  10 },
2698     { ISD::CTTZ,       MVT::v16i32, 14 },
2699     { ISD::CTTZ,       MVT::v32i16, 12 },
2700     { ISD::CTTZ,       MVT::v64i8,   9 },
2701     { ISD::SADDSAT,    MVT::v32i16,  1 },
2702     { ISD::SADDSAT,    MVT::v64i8,   1 },
2703     { ISD::SMAX,       MVT::v32i16,  1 },
2704     { ISD::SMAX,       MVT::v64i8,   1 },
2705     { ISD::SMIN,       MVT::v32i16,  1 },
2706     { ISD::SMIN,       MVT::v64i8,   1 },
2707     { ISD::SSUBSAT,    MVT::v32i16,  1 },
2708     { ISD::SSUBSAT,    MVT::v64i8,   1 },
2709     { ISD::UADDSAT,    MVT::v32i16,  1 },
2710     { ISD::UADDSAT,    MVT::v64i8,   1 },
2711     { ISD::UMAX,       MVT::v32i16,  1 },
2712     { ISD::UMAX,       MVT::v64i8,   1 },
2713     { ISD::UMIN,       MVT::v32i16,  1 },
2714     { ISD::UMIN,       MVT::v64i8,   1 },
2715     { ISD::USUBSAT,    MVT::v32i16,  1 },
2716     { ISD::USUBSAT,    MVT::v64i8,   1 },
2717   };
2718   static const CostTblEntry AVX512CostTbl[] = {
2719     { ISD::ABS,        MVT::v8i64,   1 },
2720     { ISD::ABS,        MVT::v16i32,  1 },
2721     { ISD::ABS,        MVT::v32i16,  2 },
2722     { ISD::ABS,        MVT::v64i8,   2 },
2723     { ISD::ABS,        MVT::v4i64,   1 },
2724     { ISD::ABS,        MVT::v2i64,   1 },
2725     { ISD::BITREVERSE, MVT::v8i64,  36 },
2726     { ISD::BITREVERSE, MVT::v16i32, 24 },
2727     { ISD::BITREVERSE, MVT::v32i16, 10 },
2728     { ISD::BITREVERSE, MVT::v64i8,  10 },
2729     { ISD::BSWAP,      MVT::v8i64,   4 },
2730     { ISD::BSWAP,      MVT::v16i32,  4 },
2731     { ISD::BSWAP,      MVT::v32i16,  4 },
2732     { ISD::CTLZ,       MVT::v8i64,  29 },
2733     { ISD::CTLZ,       MVT::v16i32, 35 },
2734     { ISD::CTLZ,       MVT::v32i16, 28 },
2735     { ISD::CTLZ,       MVT::v64i8,  18 },
2736     { ISD::CTPOP,      MVT::v8i64,  16 },
2737     { ISD::CTPOP,      MVT::v16i32, 24 },
2738     { ISD::CTPOP,      MVT::v32i16, 18 },
2739     { ISD::CTPOP,      MVT::v64i8,  12 },
2740     { ISD::CTTZ,       MVT::v8i64,  20 },
2741     { ISD::CTTZ,       MVT::v16i32, 28 },
2742     { ISD::CTTZ,       MVT::v32i16, 24 },
2743     { ISD::CTTZ,       MVT::v64i8,  18 },
2744     { ISD::SMAX,       MVT::v8i64,   1 },
2745     { ISD::SMAX,       MVT::v16i32,  1 },
2746     { ISD::SMAX,       MVT::v32i16,  2 },
2747     { ISD::SMAX,       MVT::v64i8,   2 },
2748     { ISD::SMAX,       MVT::v4i64,   1 },
2749     { ISD::SMAX,       MVT::v2i64,   1 },
2750     { ISD::SMIN,       MVT::v8i64,   1 },
2751     { ISD::SMIN,       MVT::v16i32,  1 },
2752     { ISD::SMIN,       MVT::v32i16,  2 },
2753     { ISD::SMIN,       MVT::v64i8,   2 },
2754     { ISD::SMIN,       MVT::v4i64,   1 },
2755     { ISD::SMIN,       MVT::v2i64,   1 },
2756     { ISD::UMAX,       MVT::v8i64,   1 },
2757     { ISD::UMAX,       MVT::v16i32,  1 },
2758     { ISD::UMAX,       MVT::v32i16,  2 },
2759     { ISD::UMAX,       MVT::v64i8,   2 },
2760     { ISD::UMAX,       MVT::v4i64,   1 },
2761     { ISD::UMAX,       MVT::v2i64,   1 },
2762     { ISD::UMIN,       MVT::v8i64,   1 },
2763     { ISD::UMIN,       MVT::v16i32,  1 },
2764     { ISD::UMIN,       MVT::v32i16,  2 },
2765     { ISD::UMIN,       MVT::v64i8,   2 },
2766     { ISD::UMIN,       MVT::v4i64,   1 },
2767     { ISD::UMIN,       MVT::v2i64,   1 },
2768     { ISD::USUBSAT,    MVT::v16i32,  2 }, // pmaxud + psubd
2769     { ISD::USUBSAT,    MVT::v2i64,   2 }, // pmaxuq + psubq
2770     { ISD::USUBSAT,    MVT::v4i64,   2 }, // pmaxuq + psubq
2771     { ISD::USUBSAT,    MVT::v8i64,   2 }, // pmaxuq + psubq
2772     { ISD::UADDSAT,    MVT::v16i32,  3 }, // not + pminud + paddd
2773     { ISD::UADDSAT,    MVT::v2i64,   3 }, // not + pminuq + paddq
2774     { ISD::UADDSAT,    MVT::v4i64,   3 }, // not + pminuq + paddq
2775     { ISD::UADDSAT,    MVT::v8i64,   3 }, // not + pminuq + paddq
2776     { ISD::SADDSAT,    MVT::v32i16,  2 },
2777     { ISD::SADDSAT,    MVT::v64i8,   2 },
2778     { ISD::SSUBSAT,    MVT::v32i16,  2 },
2779     { ISD::SSUBSAT,    MVT::v64i8,   2 },
2780     { ISD::UADDSAT,    MVT::v32i16,  2 },
2781     { ISD::UADDSAT,    MVT::v64i8,   2 },
2782     { ISD::USUBSAT,    MVT::v32i16,  2 },
2783     { ISD::USUBSAT,    MVT::v64i8,   2 },
2784     { ISD::FMAXNUM,    MVT::f32,     2 },
2785     { ISD::FMAXNUM,    MVT::v4f32,   2 },
2786     { ISD::FMAXNUM,    MVT::v8f32,   2 },
2787     { ISD::FMAXNUM,    MVT::v16f32,  2 },
2788     { ISD::FMAXNUM,    MVT::f64,     2 },
2789     { ISD::FMAXNUM,    MVT::v2f64,   2 },
2790     { ISD::FMAXNUM,    MVT::v4f64,   2 },
2791     { ISD::FMAXNUM,    MVT::v8f64,   2 },
2792   };
2793   static const CostTblEntry XOPCostTbl[] = {
2794     { ISD::BITREVERSE, MVT::v4i64,   4 },
2795     { ISD::BITREVERSE, MVT::v8i32,   4 },
2796     { ISD::BITREVERSE, MVT::v16i16,  4 },
2797     { ISD::BITREVERSE, MVT::v32i8,   4 },
2798     { ISD::BITREVERSE, MVT::v2i64,   1 },
2799     { ISD::BITREVERSE, MVT::v4i32,   1 },
2800     { ISD::BITREVERSE, MVT::v8i16,   1 },
2801     { ISD::BITREVERSE, MVT::v16i8,   1 },
2802     { ISD::BITREVERSE, MVT::i64,     3 },
2803     { ISD::BITREVERSE, MVT::i32,     3 },
2804     { ISD::BITREVERSE, MVT::i16,     3 },
2805     { ISD::BITREVERSE, MVT::i8,      3 }
2806   };
2807   static const CostTblEntry AVX2CostTbl[] = {
2808     { ISD::ABS,        MVT::v4i64,   2 }, // VBLENDVPD(X,VPSUBQ(0,X),X)
2809     { ISD::ABS,        MVT::v8i32,   1 },
2810     { ISD::ABS,        MVT::v16i16,  1 },
2811     { ISD::ABS,        MVT::v32i8,   1 },
2812     { ISD::BITREVERSE, MVT::v2i64,   3 },
2813     { ISD::BITREVERSE, MVT::v4i64,   3 },
2814     { ISD::BITREVERSE, MVT::v4i32,   3 },
2815     { ISD::BITREVERSE, MVT::v8i32,   3 },
2816     { ISD::BITREVERSE, MVT::v8i16,   3 },
2817     { ISD::BITREVERSE, MVT::v16i16,  3 },
2818     { ISD::BITREVERSE, MVT::v16i8,   3 },
2819     { ISD::BITREVERSE, MVT::v32i8,   3 },
2820     { ISD::BSWAP,      MVT::v4i64,   1 },
2821     { ISD::BSWAP,      MVT::v8i32,   1 },
2822     { ISD::BSWAP,      MVT::v16i16,  1 },
2823     { ISD::CTLZ,       MVT::v2i64,   7 },
2824     { ISD::CTLZ,       MVT::v4i64,   7 },
2825     { ISD::CTLZ,       MVT::v4i32,   5 },
2826     { ISD::CTLZ,       MVT::v8i32,   5 },
2827     { ISD::CTLZ,       MVT::v8i16,   4 },
2828     { ISD::CTLZ,       MVT::v16i16,  4 },
2829     { ISD::CTLZ,       MVT::v16i8,   3 },
2830     { ISD::CTLZ,       MVT::v32i8,   3 },
2831     { ISD::CTPOP,      MVT::v2i64,   3 },
2832     { ISD::CTPOP,      MVT::v4i64,   3 },
2833     { ISD::CTPOP,      MVT::v4i32,   7 },
2834     { ISD::CTPOP,      MVT::v8i32,   7 },
2835     { ISD::CTPOP,      MVT::v8i16,   3 },
2836     { ISD::CTPOP,      MVT::v16i16,  3 },
2837     { ISD::CTPOP,      MVT::v16i8,   2 },
2838     { ISD::CTPOP,      MVT::v32i8,   2 },
2839     { ISD::CTTZ,       MVT::v2i64,   4 },
2840     { ISD::CTTZ,       MVT::v4i64,   4 },
2841     { ISD::CTTZ,       MVT::v4i32,   7 },
2842     { ISD::CTTZ,       MVT::v8i32,   7 },
2843     { ISD::CTTZ,       MVT::v8i16,   4 },
2844     { ISD::CTTZ,       MVT::v16i16,  4 },
2845     { ISD::CTTZ,       MVT::v16i8,   3 },
2846     { ISD::CTTZ,       MVT::v32i8,   3 },
2847     { ISD::SADDSAT,    MVT::v16i16,  1 },
2848     { ISD::SADDSAT,    MVT::v32i8,   1 },
2849     { ISD::SMAX,       MVT::v8i32,   1 },
2850     { ISD::SMAX,       MVT::v16i16,  1 },
2851     { ISD::SMAX,       MVT::v32i8,   1 },
2852     { ISD::SMIN,       MVT::v8i32,   1 },
2853     { ISD::SMIN,       MVT::v16i16,  1 },
2854     { ISD::SMIN,       MVT::v32i8,   1 },
2855     { ISD::SSUBSAT,    MVT::v16i16,  1 },
2856     { ISD::SSUBSAT,    MVT::v32i8,   1 },
2857     { ISD::UADDSAT,    MVT::v16i16,  1 },
2858     { ISD::UADDSAT,    MVT::v32i8,   1 },
2859     { ISD::UADDSAT,    MVT::v8i32,   3 }, // not + pminud + paddd
2860     { ISD::UMAX,       MVT::v8i32,   1 },
2861     { ISD::UMAX,       MVT::v16i16,  1 },
2862     { ISD::UMAX,       MVT::v32i8,   1 },
2863     { ISD::UMIN,       MVT::v8i32,   1 },
2864     { ISD::UMIN,       MVT::v16i16,  1 },
2865     { ISD::UMIN,       MVT::v32i8,   1 },
2866     { ISD::USUBSAT,    MVT::v16i16,  1 },
2867     { ISD::USUBSAT,    MVT::v32i8,   1 },
2868     { ISD::USUBSAT,    MVT::v8i32,   2 }, // pmaxud + psubd
2869     { ISD::FMAXNUM,    MVT::v8f32,   3 }, // MAXPS + CMPUNORDPS + BLENDVPS
2870     { ISD::FMAXNUM,    MVT::v4f64,   3 }, // MAXPD + CMPUNORDPD + BLENDVPD
2871     { ISD::FSQRT,      MVT::f32,     7 }, // Haswell from http://www.agner.org/
2872     { ISD::FSQRT,      MVT::v4f32,   7 }, // Haswell from http://www.agner.org/
2873     { ISD::FSQRT,      MVT::v8f32,  14 }, // Haswell from http://www.agner.org/
2874     { ISD::FSQRT,      MVT::f64,    14 }, // Haswell from http://www.agner.org/
2875     { ISD::FSQRT,      MVT::v2f64,  14 }, // Haswell from http://www.agner.org/
2876     { ISD::FSQRT,      MVT::v4f64,  28 }, // Haswell from http://www.agner.org/
2877   };
2878   static const CostTblEntry AVX1CostTbl[] = {
2879     { ISD::ABS,        MVT::v4i64,   5 }, // VBLENDVPD(X,VPSUBQ(0,X),X)
2880     { ISD::ABS,        MVT::v8i32,   3 },
2881     { ISD::ABS,        MVT::v16i16,  3 },
2882     { ISD::ABS,        MVT::v32i8,   3 },
2883     { ISD::BITREVERSE, MVT::v4i64,  12 }, // 2 x 128-bit Op + extract/insert
2884     { ISD::BITREVERSE, MVT::v8i32,  12 }, // 2 x 128-bit Op + extract/insert
2885     { ISD::BITREVERSE, MVT::v16i16, 12 }, // 2 x 128-bit Op + extract/insert
2886     { ISD::BITREVERSE, MVT::v32i8,  12 }, // 2 x 128-bit Op + extract/insert
2887     { ISD::BSWAP,      MVT::v4i64,   4 },
2888     { ISD::BSWAP,      MVT::v8i32,   4 },
2889     { ISD::BSWAP,      MVT::v16i16,  4 },
2890     { ISD::CTLZ,       MVT::v4i64,  48 }, // 2 x 128-bit Op + extract/insert
2891     { ISD::CTLZ,       MVT::v8i32,  38 }, // 2 x 128-bit Op + extract/insert
2892     { ISD::CTLZ,       MVT::v16i16, 30 }, // 2 x 128-bit Op + extract/insert
2893     { ISD::CTLZ,       MVT::v32i8,  20 }, // 2 x 128-bit Op + extract/insert
2894     { ISD::CTPOP,      MVT::v4i64,  16 }, // 2 x 128-bit Op + extract/insert
2895     { ISD::CTPOP,      MVT::v8i32,  24 }, // 2 x 128-bit Op + extract/insert
2896     { ISD::CTPOP,      MVT::v16i16, 20 }, // 2 x 128-bit Op + extract/insert
2897     { ISD::CTPOP,      MVT::v32i8,  14 }, // 2 x 128-bit Op + extract/insert
2898     { ISD::CTTZ,       MVT::v4i64,  22 }, // 2 x 128-bit Op + extract/insert
2899     { ISD::CTTZ,       MVT::v8i32,  30 }, // 2 x 128-bit Op + extract/insert
2900     { ISD::CTTZ,       MVT::v16i16, 26 }, // 2 x 128-bit Op + extract/insert
2901     { ISD::CTTZ,       MVT::v32i8,  20 }, // 2 x 128-bit Op + extract/insert
2902     { ISD::SADDSAT,    MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
2903     { ISD::SADDSAT,    MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
2904     { ISD::SMAX,       MVT::v8i32,   4 }, // 2 x 128-bit Op + extract/insert
2905     { ISD::SMAX,       MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
2906     { ISD::SMAX,       MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
2907     { ISD::SMIN,       MVT::v8i32,   4 }, // 2 x 128-bit Op + extract/insert
2908     { ISD::SMIN,       MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
2909     { ISD::SMIN,       MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
2910     { ISD::SSUBSAT,    MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
2911     { ISD::SSUBSAT,    MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
2912     { ISD::UADDSAT,    MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
2913     { ISD::UADDSAT,    MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
2914     { ISD::UADDSAT,    MVT::v8i32,   8 }, // 2 x 128-bit Op + extract/insert
2915     { ISD::UMAX,       MVT::v8i32,   4 }, // 2 x 128-bit Op + extract/insert
2916     { ISD::UMAX,       MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
2917     { ISD::UMAX,       MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
2918     { ISD::UMIN,       MVT::v8i32,   4 }, // 2 x 128-bit Op + extract/insert
2919     { ISD::UMIN,       MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
2920     { ISD::UMIN,       MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
2921     { ISD::USUBSAT,    MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
2922     { ISD::USUBSAT,    MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
2923     { ISD::USUBSAT,    MVT::v8i32,   6 }, // 2 x 128-bit Op + extract/insert
2924     { ISD::FMAXNUM,    MVT::f32,     3 }, // MAXSS + CMPUNORDSS + BLENDVPS
2925     { ISD::FMAXNUM,    MVT::v4f32,   3 }, // MAXPS + CMPUNORDPS + BLENDVPS
2926     { ISD::FMAXNUM,    MVT::v8f32,   5 }, // MAXPS + CMPUNORDPS + BLENDVPS + ?
2927     { ISD::FMAXNUM,    MVT::f64,     3 }, // MAXSD + CMPUNORDSD + BLENDVPD
2928     { ISD::FMAXNUM,    MVT::v2f64,   3 }, // MAXPD + CMPUNORDPD + BLENDVPD
2929     { ISD::FMAXNUM,    MVT::v4f64,   5 }, // MAXPD + CMPUNORDPD + BLENDVPD + ?
2930     { ISD::FSQRT,      MVT::f32,    14 }, // SNB from http://www.agner.org/
2931     { ISD::FSQRT,      MVT::v4f32,  14 }, // SNB from http://www.agner.org/
2932     { ISD::FSQRT,      MVT::v8f32,  28 }, // SNB from http://www.agner.org/
2933     { ISD::FSQRT,      MVT::f64,    21 }, // SNB from http://www.agner.org/
2934     { ISD::FSQRT,      MVT::v2f64,  21 }, // SNB from http://www.agner.org/
2935     { ISD::FSQRT,      MVT::v4f64,  43 }, // SNB from http://www.agner.org/
2936   };
2937   static const CostTblEntry GLMCostTbl[] = {
2938     { ISD::FSQRT, MVT::f32,   19 }, // sqrtss
2939     { ISD::FSQRT, MVT::v4f32, 37 }, // sqrtps
2940     { ISD::FSQRT, MVT::f64,   34 }, // sqrtsd
2941     { ISD::FSQRT, MVT::v2f64, 67 }, // sqrtpd
2942   };
2943   static const CostTblEntry SLMCostTbl[] = {
2944     { ISD::FSQRT, MVT::f32,   20 }, // sqrtss
2945     { ISD::FSQRT, MVT::v4f32, 40 }, // sqrtps
2946     { ISD::FSQRT, MVT::f64,   35 }, // sqrtsd
2947     { ISD::FSQRT, MVT::v2f64, 70 }, // sqrtpd
2948   };
2949   static const CostTblEntry SSE42CostTbl[] = {
2950     { ISD::USUBSAT,    MVT::v4i32,   2 }, // pmaxud + psubd
2951     { ISD::UADDSAT,    MVT::v4i32,   3 }, // not + pminud + paddd
2952     { ISD::FSQRT,      MVT::f32,    18 }, // Nehalem from http://www.agner.org/
2953     { ISD::FSQRT,      MVT::v4f32,  18 }, // Nehalem from http://www.agner.org/
2954   };
2955   static const CostTblEntry SSE41CostTbl[] = {
2956     { ISD::ABS,        MVT::v2i64,   2 }, // BLENDVPD(X,PSUBQ(0,X),X)
2957     { ISD::SMAX,       MVT::v4i32,   1 },
2958     { ISD::SMAX,       MVT::v16i8,   1 },
2959     { ISD::SMIN,       MVT::v4i32,   1 },
2960     { ISD::SMIN,       MVT::v16i8,   1 },
2961     { ISD::UMAX,       MVT::v4i32,   1 },
2962     { ISD::UMAX,       MVT::v8i16,   1 },
2963     { ISD::UMIN,       MVT::v4i32,   1 },
2964     { ISD::UMIN,       MVT::v8i16,   1 },
2965   };
2966   static const CostTblEntry SSSE3CostTbl[] = {
2967     { ISD::ABS,        MVT::v4i32,   1 },
2968     { ISD::ABS,        MVT::v8i16,   1 },
2969     { ISD::ABS,        MVT::v16i8,   1 },
2970     { ISD::BITREVERSE, MVT::v2i64,   5 },
2971     { ISD::BITREVERSE, MVT::v4i32,   5 },
2972     { ISD::BITREVERSE, MVT::v8i16,   5 },
2973     { ISD::BITREVERSE, MVT::v16i8,   5 },
2974     { ISD::BSWAP,      MVT::v2i64,   1 },
2975     { ISD::BSWAP,      MVT::v4i32,   1 },
2976     { ISD::BSWAP,      MVT::v8i16,   1 },
2977     { ISD::CTLZ,       MVT::v2i64,  23 },
2978     { ISD::CTLZ,       MVT::v4i32,  18 },
2979     { ISD::CTLZ,       MVT::v8i16,  14 },
2980     { ISD::CTLZ,       MVT::v16i8,   9 },
2981     { ISD::CTPOP,      MVT::v2i64,   7 },
2982     { ISD::CTPOP,      MVT::v4i32,  11 },
2983     { ISD::CTPOP,      MVT::v8i16,   9 },
2984     { ISD::CTPOP,      MVT::v16i8,   6 },
2985     { ISD::CTTZ,       MVT::v2i64,  10 },
2986     { ISD::CTTZ,       MVT::v4i32,  14 },
2987     { ISD::CTTZ,       MVT::v8i16,  12 },
2988     { ISD::CTTZ,       MVT::v16i8,   9 }
2989   };
2990   static const CostTblEntry SSE2CostTbl[] = {
2991     { ISD::ABS,        MVT::v2i64,   4 },
2992     { ISD::ABS,        MVT::v4i32,   3 },
2993     { ISD::ABS,        MVT::v8i16,   2 },
2994     { ISD::ABS,        MVT::v16i8,   2 },
2995     { ISD::BITREVERSE, MVT::v2i64,  29 },
2996     { ISD::BITREVERSE, MVT::v4i32,  27 },
2997     { ISD::BITREVERSE, MVT::v8i16,  27 },
2998     { ISD::BITREVERSE, MVT::v16i8,  20 },
2999     { ISD::BSWAP,      MVT::v2i64,   7 },
3000     { ISD::BSWAP,      MVT::v4i32,   7 },
3001     { ISD::BSWAP,      MVT::v8i16,   7 },
3002     { ISD::CTLZ,       MVT::v2i64,  25 },
3003     { ISD::CTLZ,       MVT::v4i32,  26 },
3004     { ISD::CTLZ,       MVT::v8i16,  20 },
3005     { ISD::CTLZ,       MVT::v16i8,  17 },
3006     { ISD::CTPOP,      MVT::v2i64,  12 },
3007     { ISD::CTPOP,      MVT::v4i32,  15 },
3008     { ISD::CTPOP,      MVT::v8i16,  13 },
3009     { ISD::CTPOP,      MVT::v16i8,  10 },
3010     { ISD::CTTZ,       MVT::v2i64,  14 },
3011     { ISD::CTTZ,       MVT::v4i32,  18 },
3012     { ISD::CTTZ,       MVT::v8i16,  16 },
3013     { ISD::CTTZ,       MVT::v16i8,  13 },
3014     { ISD::SADDSAT,    MVT::v8i16,   1 },
3015     { ISD::SADDSAT,    MVT::v16i8,   1 },
3016     { ISD::SMAX,       MVT::v8i16,   1 },
3017     { ISD::SMIN,       MVT::v8i16,   1 },
3018     { ISD::SSUBSAT,    MVT::v8i16,   1 },
3019     { ISD::SSUBSAT,    MVT::v16i8,   1 },
3020     { ISD::UADDSAT,    MVT::v8i16,   1 },
3021     { ISD::UADDSAT,    MVT::v16i8,   1 },
3022     { ISD::UMAX,       MVT::v8i16,   2 },
3023     { ISD::UMAX,       MVT::v16i8,   1 },
3024     { ISD::UMIN,       MVT::v8i16,   2 },
3025     { ISD::UMIN,       MVT::v16i8,   1 },
3026     { ISD::USUBSAT,    MVT::v8i16,   1 },
3027     { ISD::USUBSAT,    MVT::v16i8,   1 },
3028     { ISD::FMAXNUM,    MVT::f64,     4 },
3029     { ISD::FMAXNUM,    MVT::v2f64,   4 },
3030     { ISD::FSQRT,      MVT::f64,    32 }, // Nehalem from http://www.agner.org/
3031     { ISD::FSQRT,      MVT::v2f64,  32 }, // Nehalem from http://www.agner.org/
3032   };
3033   static const CostTblEntry SSE1CostTbl[] = {
3034     { ISD::FMAXNUM,    MVT::f32,     4 },
3035     { ISD::FMAXNUM,    MVT::v4f32,   4 },
3036     { ISD::FSQRT,      MVT::f32,    28 }, // Pentium III from http://www.agner.org/
3037     { ISD::FSQRT,      MVT::v4f32,  56 }, // Pentium III from http://www.agner.org/
3038   };
3039   static const CostTblEntry BMI64CostTbl[] = { // 64-bit targets
3040     { ISD::CTTZ,       MVT::i64,     1 },
3041   };
3042   static const CostTblEntry BMI32CostTbl[] = { // 32 or 64-bit targets
3043     { ISD::CTTZ,       MVT::i32,     1 },
3044     { ISD::CTTZ,       MVT::i16,     1 },
3045     { ISD::CTTZ,       MVT::i8,      1 },
3046   };
3047   static const CostTblEntry LZCNT64CostTbl[] = { // 64-bit targets
3048     { ISD::CTLZ,       MVT::i64,     1 },
3049   };
3050   static const CostTblEntry LZCNT32CostTbl[] = { // 32 or 64-bit targets
3051     { ISD::CTLZ,       MVT::i32,     1 },
3052     { ISD::CTLZ,       MVT::i16,     1 },
3053     { ISD::CTLZ,       MVT::i8,      1 },
3054   };
3055   static const CostTblEntry POPCNT64CostTbl[] = { // 64-bit targets
3056     { ISD::CTPOP,      MVT::i64,     1 },
3057   };
3058   static const CostTblEntry POPCNT32CostTbl[] = { // 32 or 64-bit targets
3059     { ISD::CTPOP,      MVT::i32,     1 },
3060     { ISD::CTPOP,      MVT::i16,     1 },
3061     { ISD::CTPOP,      MVT::i8,      1 },
3062   };
3063   static const CostTblEntry X64CostTbl[] = { // 64-bit targets
3064     { ISD::ABS,        MVT::i64,     2 }, // SUB+CMOV
3065     { ISD::BITREVERSE, MVT::i64,    14 },
3066     { ISD::BSWAP,      MVT::i64,     1 },
3067     { ISD::CTLZ,       MVT::i64,     4 }, // BSR+XOR or BSR+XOR+CMOV
3068     { ISD::CTTZ,       MVT::i64,     3 }, // TEST+BSF+CMOV/BRANCH
3069     { ISD::CTPOP,      MVT::i64,    10 },
3070     { ISD::SADDO,      MVT::i64,     1 },
3071     { ISD::UADDO,      MVT::i64,     1 },
3072     { ISD::UMULO,      MVT::i64,     2 }, // mulq + seto
3073   };
3074   static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
3075     { ISD::ABS,        MVT::i32,     2 }, // SUB+CMOV
3076     { ISD::ABS,        MVT::i16,     2 }, // SUB+CMOV
3077     { ISD::BITREVERSE, MVT::i32,    14 },
3078     { ISD::BITREVERSE, MVT::i16,    14 },
3079     { ISD::BITREVERSE, MVT::i8,     11 },
3080     { ISD::BSWAP,      MVT::i32,     1 },
3081     { ISD::BSWAP,      MVT::i16,     1 }, // ROL
3082     { ISD::CTLZ,       MVT::i32,     4 }, // BSR+XOR or BSR+XOR+CMOV
3083     { ISD::CTLZ,       MVT::i16,     4 }, // BSR+XOR or BSR+XOR+CMOV
3084     { ISD::CTLZ,       MVT::i8,      4 }, // BSR+XOR or BSR+XOR+CMOV
3085     { ISD::CTTZ,       MVT::i32,     3 }, // TEST+BSF+CMOV/BRANCH
3086     { ISD::CTTZ,       MVT::i16,     3 }, // TEST+BSF+CMOV/BRANCH
3087     { ISD::CTTZ,       MVT::i8,      3 }, // TEST+BSF+CMOV/BRANCH
3088     { ISD::CTPOP,      MVT::i32,     8 },
3089     { ISD::CTPOP,      MVT::i16,     9 },
3090     { ISD::CTPOP,      MVT::i8,      7 },
3091     { ISD::SADDO,      MVT::i32,     1 },
3092     { ISD::SADDO,      MVT::i16,     1 },
3093     { ISD::SADDO,      MVT::i8,      1 },
3094     { ISD::UADDO,      MVT::i32,     1 },
3095     { ISD::UADDO,      MVT::i16,     1 },
3096     { ISD::UADDO,      MVT::i8,      1 },
3097     { ISD::UMULO,      MVT::i32,     2 }, // mul + seto
3098     { ISD::UMULO,      MVT::i16,     2 },
3099     { ISD::UMULO,      MVT::i8,      2 },
3100   };
3101 
3102   Type *RetTy = ICA.getReturnType();
3103   Type *OpTy = RetTy;
3104   Intrinsic::ID IID = ICA.getID();
3105   unsigned ISD = ISD::DELETED_NODE;
3106   switch (IID) {
3107   default:
3108     break;
3109   case Intrinsic::abs:
3110     ISD = ISD::ABS;
3111     break;
3112   case Intrinsic::bitreverse:
3113     ISD = ISD::BITREVERSE;
3114     break;
3115   case Intrinsic::bswap:
3116     ISD = ISD::BSWAP;
3117     break;
3118   case Intrinsic::ctlz:
3119     ISD = ISD::CTLZ;
3120     break;
3121   case Intrinsic::ctpop:
3122     ISD = ISD::CTPOP;
3123     break;
3124   case Intrinsic::cttz:
3125     ISD = ISD::CTTZ;
3126     break;
3127   case Intrinsic::maxnum:
3128   case Intrinsic::minnum:
3129     // FMINNUM has same costs so don't duplicate.
3130     ISD = ISD::FMAXNUM;
3131     break;
3132   case Intrinsic::sadd_sat:
3133     ISD = ISD::SADDSAT;
3134     break;
3135   case Intrinsic::smax:
3136     ISD = ISD::SMAX;
3137     break;
3138   case Intrinsic::smin:
3139     ISD = ISD::SMIN;
3140     break;
3141   case Intrinsic::ssub_sat:
3142     ISD = ISD::SSUBSAT;
3143     break;
3144   case Intrinsic::uadd_sat:
3145     ISD = ISD::UADDSAT;
3146     break;
3147   case Intrinsic::umax:
3148     ISD = ISD::UMAX;
3149     break;
3150   case Intrinsic::umin:
3151     ISD = ISD::UMIN;
3152     break;
3153   case Intrinsic::usub_sat:
3154     ISD = ISD::USUBSAT;
3155     break;
3156   case Intrinsic::sqrt:
3157     ISD = ISD::FSQRT;
3158     break;
3159   case Intrinsic::sadd_with_overflow:
3160   case Intrinsic::ssub_with_overflow:
3161     // SSUBO has same costs so don't duplicate.
3162     ISD = ISD::SADDO;
3163     OpTy = RetTy->getContainedType(0);
3164     break;
3165   case Intrinsic::uadd_with_overflow:
3166   case Intrinsic::usub_with_overflow:
3167     // USUBO has same costs so don't duplicate.
3168     ISD = ISD::UADDO;
3169     OpTy = RetTy->getContainedType(0);
3170     break;
3171   case Intrinsic::umul_with_overflow:
3172   case Intrinsic::smul_with_overflow:
3173     // SMULO has same costs so don't duplicate.
3174     ISD = ISD::UMULO;
3175     OpTy = RetTy->getContainedType(0);
3176     break;
3177   }
3178 
3179   if (ISD != ISD::DELETED_NODE) {
3180     // Legalize the type.
3181     std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, OpTy);
3182     MVT MTy = LT.second;
3183 
3184     // Attempt to lookup cost.
3185     if (ISD == ISD::BITREVERSE && ST->hasGFNI() && ST->hasSSSE3() &&
3186         MTy.isVector()) {
3187       // With PSHUFB the code is very similar for all types. If we have integer
3188       // byte operations, we just need a GF2P8AFFINEQB for vXi8. For other types
3189       // we also need a PSHUFB.
3190       unsigned Cost = MTy.getVectorElementType() == MVT::i8 ? 1 : 2;
3191 
3192       // Without byte operations, we need twice as many GF2P8AFFINEQB and PSHUFB
3193       // instructions. We also need an extract and an insert.
3194       if (!(MTy.is128BitVector() || (ST->hasAVX2() && MTy.is256BitVector()) ||
3195             (ST->hasBWI() && MTy.is512BitVector())))
3196         Cost = Cost * 2 + 2;
3197 
3198       return LT.first * Cost;
3199     }
3200 
3201     auto adjustTableCost = [](const CostTblEntry &Entry,
3202                               InstructionCost LegalizationCost,
3203                               FastMathFlags FMF) {
3204       // If there are no NANs to deal with, then these are reduced to a
3205       // single MIN** or MAX** instruction instead of the MIN/CMP/SELECT that we
3206       // assume is used in the non-fast case.
3207       if (Entry.ISD == ISD::FMAXNUM || Entry.ISD == ISD::FMINNUM) {
3208         if (FMF.noNaNs())
3209           return LegalizationCost * 1;
3210       }
3211       return LegalizationCost * (int)Entry.Cost;
3212     };
3213 
3214     if (ST->useGLMDivSqrtCosts())
3215       if (const auto *Entry = CostTableLookup(GLMCostTbl, ISD, MTy))
3216         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3217 
3218     if (ST->useSLMArithCosts())
3219       if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
3220         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3221 
3222     if (ST->hasBITALG())
3223       if (const auto *Entry = CostTableLookup(AVX512BITALGCostTbl, ISD, MTy))
3224         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3225 
3226     if (ST->hasVPOPCNTDQ())
3227       if (const auto *Entry = CostTableLookup(AVX512VPOPCNTDQCostTbl, ISD, MTy))
3228         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3229 
3230     if (ST->hasCDI())
3231       if (const auto *Entry = CostTableLookup(AVX512CDCostTbl, ISD, MTy))
3232         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3233 
3234     if (ST->hasBWI())
3235       if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
3236         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3237 
3238     if (ST->hasAVX512())
3239       if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
3240         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3241 
3242     if (ST->hasXOP())
3243       if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
3244         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3245 
3246     if (ST->hasAVX2())
3247       if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
3248         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3249 
3250     if (ST->hasAVX())
3251       if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
3252         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3253 
3254     if (ST->hasSSE42())
3255       if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
3256         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3257 
3258     if (ST->hasSSE41())
3259       if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
3260         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3261 
3262     if (ST->hasSSSE3())
3263       if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy))
3264         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3265 
3266     if (ST->hasSSE2())
3267       if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
3268         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3269 
3270     if (ST->hasSSE1())
3271       if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
3272         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3273 
3274     if (ST->hasBMI()) {
3275       if (ST->is64Bit())
3276         if (const auto *Entry = CostTableLookup(BMI64CostTbl, ISD, MTy))
3277           return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3278 
3279       if (const auto *Entry = CostTableLookup(BMI32CostTbl, ISD, MTy))
3280         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3281     }
3282 
3283     if (ST->hasLZCNT()) {
3284       if (ST->is64Bit())
3285         if (const auto *Entry = CostTableLookup(LZCNT64CostTbl, ISD, MTy))
3286           return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3287 
3288       if (const auto *Entry = CostTableLookup(LZCNT32CostTbl, ISD, MTy))
3289         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3290     }
3291 
3292     if (ST->hasPOPCNT()) {
3293       if (ST->is64Bit())
3294         if (const auto *Entry = CostTableLookup(POPCNT64CostTbl, ISD, MTy))
3295           return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3296 
3297       if (const auto *Entry = CostTableLookup(POPCNT32CostTbl, ISD, MTy))
3298         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3299     }
3300 
3301     if (ISD == ISD::BSWAP && ST->hasMOVBE() && ST->hasFastMOVBE()) {
3302       if (const Instruction *II = ICA.getInst()) {
3303         if (II->hasOneUse() && isa<StoreInst>(II->user_back()))
3304           return TTI::TCC_Free;
3305         if (auto *LI = dyn_cast<LoadInst>(II->getOperand(0))) {
3306           if (LI->hasOneUse())
3307             return TTI::TCC_Free;
3308         }
3309       }
3310     }
3311 
3312     if (ST->is64Bit())
3313       if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
3314         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3315 
3316     if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
3317       return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3318   }
3319 
3320   return BaseT::getIntrinsicInstrCost(ICA, CostKind);
3321 }
3322 
3323 InstructionCost
3324 X86TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
3325                                   TTI::TargetCostKind CostKind) {
3326   if (ICA.isTypeBasedOnly())
3327     return getTypeBasedIntrinsicInstrCost(ICA, CostKind);
3328 
3329   static const CostTblEntry AVX512CostTbl[] = {
3330     { ISD::ROTL,       MVT::v8i64,   1 },
3331     { ISD::ROTL,       MVT::v4i64,   1 },
3332     { ISD::ROTL,       MVT::v2i64,   1 },
3333     { ISD::ROTL,       MVT::v16i32,  1 },
3334     { ISD::ROTL,       MVT::v8i32,   1 },
3335     { ISD::ROTL,       MVT::v4i32,   1 },
3336     { ISD::ROTR,       MVT::v8i64,   1 },
3337     { ISD::ROTR,       MVT::v4i64,   1 },
3338     { ISD::ROTR,       MVT::v2i64,   1 },
3339     { ISD::ROTR,       MVT::v16i32,  1 },
3340     { ISD::ROTR,       MVT::v8i32,   1 },
3341     { ISD::ROTR,       MVT::v4i32,   1 }
3342   };
3343   // XOP: ROTL = VPROT(X,Y), ROTR = VPROT(X,SUB(0,Y))
3344   static const CostTblEntry XOPCostTbl[] = {
3345     { ISD::ROTL,       MVT::v4i64,   4 },
3346     { ISD::ROTL,       MVT::v8i32,   4 },
3347     { ISD::ROTL,       MVT::v16i16,  4 },
3348     { ISD::ROTL,       MVT::v32i8,   4 },
3349     { ISD::ROTL,       MVT::v2i64,   1 },
3350     { ISD::ROTL,       MVT::v4i32,   1 },
3351     { ISD::ROTL,       MVT::v8i16,   1 },
3352     { ISD::ROTL,       MVT::v16i8,   1 },
3353     { ISD::ROTR,       MVT::v4i64,   6 },
3354     { ISD::ROTR,       MVT::v8i32,   6 },
3355     { ISD::ROTR,       MVT::v16i16,  6 },
3356     { ISD::ROTR,       MVT::v32i8,   6 },
3357     { ISD::ROTR,       MVT::v2i64,   2 },
3358     { ISD::ROTR,       MVT::v4i32,   2 },
3359     { ISD::ROTR,       MVT::v8i16,   2 },
3360     { ISD::ROTR,       MVT::v16i8,   2 }
3361   };
3362   static const CostTblEntry X64CostTbl[] = { // 64-bit targets
3363     { ISD::ROTL,       MVT::i64,     1 },
3364     { ISD::ROTR,       MVT::i64,     1 },
3365     { ISD::FSHL,       MVT::i64,     4 }
3366   };
3367   static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
3368     { ISD::ROTL,       MVT::i32,     1 },
3369     { ISD::ROTL,       MVT::i16,     1 },
3370     { ISD::ROTL,       MVT::i8,      1 },
3371     { ISD::ROTR,       MVT::i32,     1 },
3372     { ISD::ROTR,       MVT::i16,     1 },
3373     { ISD::ROTR,       MVT::i8,      1 },
3374     { ISD::FSHL,       MVT::i32,     4 },
3375     { ISD::FSHL,       MVT::i16,     4 },
3376     { ISD::FSHL,       MVT::i8,      4 }
3377   };
3378 
3379   Intrinsic::ID IID = ICA.getID();
3380   Type *RetTy = ICA.getReturnType();
3381   const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
3382   unsigned ISD = ISD::DELETED_NODE;
3383   switch (IID) {
3384   default:
3385     break;
3386   case Intrinsic::fshl:
3387     ISD = ISD::FSHL;
3388     if (Args[0] == Args[1])
3389       ISD = ISD::ROTL;
3390     break;
3391   case Intrinsic::fshr:
3392     // FSHR has same costs so don't duplicate.
3393     ISD = ISD::FSHL;
3394     if (Args[0] == Args[1])
3395       ISD = ISD::ROTR;
3396     break;
3397   }
3398 
3399   if (ISD != ISD::DELETED_NODE) {
3400     // Legalize the type.
3401     std::pair<InstructionCost, MVT> LT =
3402         TLI->getTypeLegalizationCost(DL, RetTy);
3403     MVT MTy = LT.second;
3404 
3405     // Attempt to lookup cost.
3406     if (ST->hasAVX512())
3407       if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
3408         return LT.first * Entry->Cost;
3409 
3410     if (ST->hasXOP())
3411       if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
3412         return LT.first * Entry->Cost;
3413 
3414     if (ST->is64Bit())
3415       if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
3416         return LT.first * Entry->Cost;
3417 
3418     if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
3419       return LT.first * Entry->Cost;
3420   }
3421 
3422   return BaseT::getIntrinsicInstrCost(ICA, CostKind);
3423 }
3424 
3425 InstructionCost X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
3426                                                unsigned Index) {
3427   static const CostTblEntry SLMCostTbl[] = {
3428      { ISD::EXTRACT_VECTOR_ELT,       MVT::i8,      4 },
3429      { ISD::EXTRACT_VECTOR_ELT,       MVT::i16,     4 },
3430      { ISD::EXTRACT_VECTOR_ELT,       MVT::i32,     4 },
3431      { ISD::EXTRACT_VECTOR_ELT,       MVT::i64,     7 }
3432    };
3433 
3434   assert(Val->isVectorTy() && "This must be a vector type");
3435   Type *ScalarType = Val->getScalarType();
3436   int RegisterFileMoveCost = 0;
3437 
3438   // Non-immediate extraction/insertion can be handled as a sequence of
3439   // aliased loads+stores via the stack.
3440   if (Index == -1U && (Opcode == Instruction::ExtractElement ||
3441                        Opcode == Instruction::InsertElement)) {
3442     // TODO: On some SSE41+ targets, we expand to cmp+splat+select patterns:
3443     // inselt N0, N1, N2 --> select (SplatN2 == {0,1,2...}) ? SplatN1 : N0.
3444 
3445     // TODO: Move this to BasicTTIImpl.h? We'd need better gep + index handling.
3446     assert(isa<FixedVectorType>(Val) && "Fixed vector type expected");
3447     Align VecAlign = DL.getPrefTypeAlign(Val);
3448     Align SclAlign = DL.getPrefTypeAlign(ScalarType);
3449 
3450     // Extract - store vector to stack, load scalar.
3451     if (Opcode == Instruction::ExtractElement) {
3452       return getMemoryOpCost(Instruction::Store, Val, VecAlign, 0,
3453                              TTI::TargetCostKind::TCK_RecipThroughput) +
3454              getMemoryOpCost(Instruction::Load, ScalarType, SclAlign, 0,
3455                              TTI::TargetCostKind::TCK_RecipThroughput);
3456     }
3457     // Insert - store vector to stack, store scalar, load vector.
3458     if (Opcode == Instruction::InsertElement) {
3459       return getMemoryOpCost(Instruction::Store, Val, VecAlign, 0,
3460                              TTI::TargetCostKind::TCK_RecipThroughput) +
3461              getMemoryOpCost(Instruction::Store, ScalarType, SclAlign, 0,
3462                              TTI::TargetCostKind::TCK_RecipThroughput) +
3463              getMemoryOpCost(Instruction::Load, Val, VecAlign, 0,
3464                              TTI::TargetCostKind::TCK_RecipThroughput);
3465     }
3466   }
3467 
3468   if (Index != -1U && (Opcode == Instruction::ExtractElement ||
3469                        Opcode == Instruction::InsertElement)) {
3470     // Legalize the type.
3471     std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);
3472 
3473     // This type is legalized to a scalar type.
3474     if (!LT.second.isVector())
3475       return 0;
3476 
3477     // The type may be split. Normalize the index to the new type.
3478     unsigned NumElts = LT.second.getVectorNumElements();
3479     unsigned SubNumElts = NumElts;
3480     Index = Index % NumElts;
3481 
3482     // For >128-bit vectors, we need to extract higher 128-bit subvectors.
3483     // For inserts, we also need to insert the subvector back.
3484     if (LT.second.getSizeInBits() > 128) {
3485       assert((LT.second.getSizeInBits() % 128) == 0 && "Illegal vector");
3486       unsigned NumSubVecs = LT.second.getSizeInBits() / 128;
3487       SubNumElts = NumElts / NumSubVecs;
3488       if (SubNumElts <= Index) {
3489         RegisterFileMoveCost += (Opcode == Instruction::InsertElement ? 2 : 1);
3490         Index %= SubNumElts;
3491       }
3492     }
3493 
3494     if (Index == 0) {
3495       // Floating point scalars are already located in index #0.
3496       // Many insertions to #0 can fold away for scalar fp-ops, so let's assume
3497       // true for all.
3498       if (ScalarType->isFloatingPointTy())
3499         return RegisterFileMoveCost;
3500 
3501       // Assume movd/movq XMM -> GPR is relatively cheap on all targets.
3502       if (ScalarType->isIntegerTy() && Opcode == Instruction::ExtractElement)
3503         return 1 + RegisterFileMoveCost;
3504     }
3505 
3506     int ISD = TLI->InstructionOpcodeToISD(Opcode);
3507     assert(ISD && "Unexpected vector opcode");
3508     MVT MScalarTy = LT.second.getScalarType();
3509     if (ST->useSLMArithCosts())
3510       if (auto *Entry = CostTableLookup(SLMCostTbl, ISD, MScalarTy))
3511         return Entry->Cost + RegisterFileMoveCost;
3512 
3513     // Assume pinsr/pextr XMM <-> GPR is relatively cheap on all targets.
3514     if ((MScalarTy == MVT::i16 && ST->hasSSE2()) ||
3515         (MScalarTy.isInteger() && ST->hasSSE41()))
3516       return 1 + RegisterFileMoveCost;
3517 
3518     // Assume insertps is relatively cheap on all targets.
3519     if (MScalarTy == MVT::f32 && ST->hasSSE41() &&
3520         Opcode == Instruction::InsertElement)
3521       return 1 + RegisterFileMoveCost;
3522 
3523     // For extractions we just need to shuffle the element to index 0, which
3524     // should be very cheap (assume cost = 1). For insertions we need to shuffle
3525     // the elements to its destination. In both cases we must handle the
3526     // subvector move(s).
3527     // If the vector type is already less than 128-bits then don't reduce it.
3528     // TODO: Under what circumstances should we shuffle using the full width?
3529     InstructionCost ShuffleCost = 1;
3530     if (Opcode == Instruction::InsertElement) {
3531       auto *SubTy = cast<VectorType>(Val);
3532       EVT VT = TLI->getValueType(DL, Val);
3533       if (VT.getScalarType() != MScalarTy || VT.getSizeInBits() >= 128)
3534         SubTy = FixedVectorType::get(ScalarType, SubNumElts);
3535       ShuffleCost =
3536           getShuffleCost(TTI::SK_PermuteTwoSrc, SubTy, None, 0, SubTy);
3537     }
3538     int IntOrFpCost = ScalarType->isFloatingPointTy() ? 0 : 1;
3539     return ShuffleCost + IntOrFpCost + RegisterFileMoveCost;
3540   }
3541 
3542   // Add to the base cost if we know that the extracted element of a vector is
3543   // destined to be moved to and used in the integer register file.
3544   if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy())
3545     RegisterFileMoveCost += 1;
3546 
3547   return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost;
3548 }
3549 
3550 InstructionCost X86TTIImpl::getScalarizationOverhead(VectorType *Ty,
3551                                                      const APInt &DemandedElts,
3552                                                      bool Insert,
3553                                                      bool Extract) {
3554   InstructionCost Cost = 0;
3555 
3556   // For insertions, a ISD::BUILD_VECTOR style vector initialization can be much
3557   // cheaper than an accumulation of ISD::INSERT_VECTOR_ELT.
3558   if (Insert) {
3559     std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
3560     MVT MScalarTy = LT.second.getScalarType();
3561 
3562     if ((MScalarTy == MVT::i16 && ST->hasSSE2()) ||
3563         (MScalarTy.isInteger() && ST->hasSSE41()) ||
3564         (MScalarTy == MVT::f32 && ST->hasSSE41())) {
3565       // For types we can insert directly, insertion into 128-bit sub vectors is
3566       // cheap, followed by a cheap chain of concatenations.
3567       if (LT.second.getSizeInBits() <= 128) {
3568         Cost +=
3569             BaseT::getScalarizationOverhead(Ty, DemandedElts, Insert, false);
3570       } else {
3571         // In each 128-lane, if at least one index is demanded but not all
3572         // indices are demanded and this 128-lane is not the first 128-lane of
3573         // the legalized-vector, then this 128-lane needs a extracti128; If in
3574         // each 128-lane, there is at least one demanded index, this 128-lane
3575         // needs a inserti128.
3576 
3577         // The following cases will help you build a better understanding:
3578         // Assume we insert several elements into a v8i32 vector in avx2,
3579         // Case#1: inserting into 1th index needs vpinsrd + inserti128.
3580         // Case#2: inserting into 5th index needs extracti128 + vpinsrd +
3581         // inserti128.
3582         // Case#3: inserting into 4,5,6,7 index needs 4*vpinsrd + inserti128.
3583         const int CostValue = *LT.first.getValue();
3584         assert(CostValue >= 0 && "Negative cost!");
3585         unsigned Num128Lanes = LT.second.getSizeInBits() / 128 * CostValue;
3586         unsigned NumElts = LT.second.getVectorNumElements() * CostValue;
3587         APInt WidenedDemandedElts = DemandedElts.zextOrSelf(NumElts);
3588         unsigned Scale = NumElts / Num128Lanes;
3589         // We iterate each 128-lane, and check if we need a
3590         // extracti128/inserti128 for this 128-lane.
3591         for (unsigned I = 0; I < NumElts; I += Scale) {
3592           APInt Mask = WidenedDemandedElts.getBitsSet(NumElts, I, I + Scale);
3593           APInt MaskedDE = Mask & WidenedDemandedElts;
3594           unsigned Population = MaskedDE.countPopulation();
3595           Cost += (Population > 0 && Population != Scale &&
3596                    I % LT.second.getVectorNumElements() != 0);
3597           Cost += Population > 0;
3598         }
3599         Cost += DemandedElts.countPopulation();
3600 
3601         // For vXf32 cases, insertion into the 0'th index in each v4f32
3602         // 128-bit vector is free.
3603         // NOTE: This assumes legalization widens vXf32 vectors.
3604         if (MScalarTy == MVT::f32)
3605           for (unsigned i = 0, e = cast<FixedVectorType>(Ty)->getNumElements();
3606                i < e; i += 4)
3607             if (DemandedElts[i])
3608               Cost--;
3609       }
3610     } else if (LT.second.isVector()) {
3611       // Without fast insertion, we need to use MOVD/MOVQ to pass each demanded
3612       // integer element as a SCALAR_TO_VECTOR, then we build the vector as a
3613       // series of UNPCK followed by CONCAT_VECTORS - all of these can be
3614       // considered cheap.
3615       if (Ty->isIntOrIntVectorTy())
3616         Cost += DemandedElts.countPopulation();
3617 
3618       // Get the smaller of the legalized or original pow2-extended number of
3619       // vector elements, which represents the number of unpacks we'll end up
3620       // performing.
3621       unsigned NumElts = LT.second.getVectorNumElements();
3622       unsigned Pow2Elts =
3623           PowerOf2Ceil(cast<FixedVectorType>(Ty)->getNumElements());
3624       Cost += (std::min<unsigned>(NumElts, Pow2Elts) - 1) * LT.first;
3625     }
3626   }
3627 
3628   // TODO: Use default extraction for now, but we should investigate extending this
3629   // to handle repeated subvector extraction.
3630   if (Extract)
3631     Cost += BaseT::getScalarizationOverhead(Ty, DemandedElts, false, Extract);
3632 
3633   return Cost;
3634 }
3635 
3636 InstructionCost
3637 X86TTIImpl::getReplicationShuffleCost(Type *EltTy, int ReplicationFactor,
3638                                       int VF, const APInt &DemandedDstElts,
3639                                       TTI::TargetCostKind CostKind) {
3640   const unsigned EltTyBits = DL.getTypeSizeInBits(EltTy);
3641   // We don't differentiate element types here, only element bit width.
3642   EltTy = IntegerType::getIntNTy(EltTy->getContext(), EltTyBits);
3643 
3644   auto bailout = [&]() {
3645     return BaseT::getReplicationShuffleCost(EltTy, ReplicationFactor, VF,
3646                                             DemandedDstElts, CostKind);
3647   };
3648 
3649   // For now, only deal with AVX512 cases.
3650   if (!ST->hasAVX512())
3651     return bailout();
3652 
3653   // Do we have a native shuffle for this element type, or should we promote?
3654   unsigned PromEltTyBits = EltTyBits;
3655   switch (EltTyBits) {
3656   case 32:
3657   case 64:
3658     break; // AVX512F.
3659   case 16:
3660     if (!ST->hasBWI())
3661       PromEltTyBits = 32; // promote to i32, AVX512F.
3662     break;                // AVX512BW
3663   case 8:
3664     if (!ST->hasVBMI())
3665       PromEltTyBits = 32; // promote to i32, AVX512F.
3666     break;                // AVX512VBMI
3667   default:
3668     return bailout();
3669   }
3670   auto *PromEltTy = IntegerType::getIntNTy(EltTy->getContext(), PromEltTyBits);
3671 
3672   auto *SrcVecTy = FixedVectorType::get(EltTy, VF);
3673   auto *PromSrcVecTy = FixedVectorType::get(PromEltTy, VF);
3674 
3675   int NumDstElements = VF * ReplicationFactor;
3676   auto *PromDstVecTy = FixedVectorType::get(PromEltTy, NumDstElements);
3677   auto *DstVecTy = FixedVectorType::get(EltTy, NumDstElements);
3678 
3679   // Legalize the types.
3680   MVT LegalSrcVecTy = TLI->getTypeLegalizationCost(DL, SrcVecTy).second;
3681   MVT LegalPromSrcVecTy = TLI->getTypeLegalizationCost(DL, PromSrcVecTy).second;
3682   MVT LegalPromDstVecTy = TLI->getTypeLegalizationCost(DL, PromDstVecTy).second;
3683   MVT LegalDstVecTy = TLI->getTypeLegalizationCost(DL, DstVecTy).second;
3684   // They should have legalized into vector types.
3685   if (!LegalSrcVecTy.isVector() || !LegalPromSrcVecTy.isVector() ||
3686       !LegalPromDstVecTy.isVector() || !LegalDstVecTy.isVector())
3687     return bailout();
3688 
3689   if (PromEltTyBits != EltTyBits) {
3690     // If we have to perform the shuffle with wider elt type than our data type,
3691     // then we will first need to anyext (we don't care about the new bits)
3692     // the source elements, and then truncate Dst elements.
3693     InstructionCost PromotionCost;
3694     PromotionCost += getCastInstrCost(
3695         Instruction::SExt, /*Dst=*/PromSrcVecTy, /*Src=*/SrcVecTy,
3696         TargetTransformInfo::CastContextHint::None, CostKind);
3697     PromotionCost +=
3698         getCastInstrCost(Instruction::Trunc, /*Dst=*/DstVecTy,
3699                          /*Src=*/PromDstVecTy,
3700                          TargetTransformInfo::CastContextHint::None, CostKind);
3701     return PromotionCost + getReplicationShuffleCost(PromEltTy,
3702                                                      ReplicationFactor, VF,
3703                                                      DemandedDstElts, CostKind);
3704   }
3705 
3706   assert(LegalSrcVecTy.getScalarSizeInBits() == EltTyBits &&
3707          LegalSrcVecTy.getScalarType() == LegalDstVecTy.getScalarType() &&
3708          "We expect that the legalization doesn't affect the element width, "
3709          "doesn't coalesce/split elements.");
3710 
3711   unsigned NumEltsPerDstVec = LegalDstVecTy.getVectorNumElements();
3712   unsigned NumDstVectors =
3713       divideCeil(DstVecTy->getNumElements(), NumEltsPerDstVec);
3714 
3715   auto *SingleDstVecTy = FixedVectorType::get(EltTy, NumEltsPerDstVec);
3716 
3717   // Not all the produced Dst elements may be demanded. In our case,
3718   // given that a single Dst vector is formed by a single shuffle,
3719   // if all elements that will form a single Dst vector aren't demanded,
3720   // then we won't need to do that shuffle, so adjust the cost accordingly.
3721   APInt DemandedDstVectors = APIntOps::ScaleBitMask(
3722       DemandedDstElts.zextOrSelf(NumDstVectors * NumEltsPerDstVec),
3723       NumDstVectors);
3724   unsigned NumDstVectorsDemanded = DemandedDstVectors.countPopulation();
3725 
3726   InstructionCost SingleShuffleCost =
3727       getShuffleCost(TTI::SK_PermuteSingleSrc, SingleDstVecTy,
3728                      /*Mask=*/None, /*Index=*/0, /*SubTp=*/nullptr);
3729   return NumDstVectorsDemanded * SingleShuffleCost;
3730 }
3731 
3732 InstructionCost X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
3733                                             MaybeAlign Alignment,
3734                                             unsigned AddressSpace,
3735                                             TTI::TargetCostKind CostKind,
3736                                             const Instruction *I) {
3737   // TODO: Handle other cost kinds.
3738   if (CostKind != TTI::TCK_RecipThroughput) {
3739     if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
3740       // Store instruction with index and scale costs 2 Uops.
3741       // Check the preceding GEP to identify non-const indices.
3742       if (auto *GEP = dyn_cast<GetElementPtrInst>(SI->getPointerOperand())) {
3743         if (!all_of(GEP->indices(), [](Value *V) { return isa<Constant>(V); }))
3744           return TTI::TCC_Basic * 2;
3745       }
3746     }
3747     return TTI::TCC_Basic;
3748   }
3749 
3750   assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
3751          "Invalid Opcode");
3752   // Type legalization can't handle structs
3753   if (TLI->getValueType(DL, Src, true) == MVT::Other)
3754     return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
3755                                   CostKind);
3756 
3757   // Legalize the type.
3758   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
3759 
3760   auto *VTy = dyn_cast<FixedVectorType>(Src);
3761 
3762   // Handle the simple case of non-vectors.
3763   // NOTE: this assumes that legalization never creates vector from scalars!
3764   if (!VTy || !LT.second.isVector())
3765     // Each load/store unit costs 1.
3766     return LT.first * 1;
3767 
3768   bool IsLoad = Opcode == Instruction::Load;
3769 
3770   Type *EltTy = VTy->getElementType();
3771 
3772   const int EltTyBits = DL.getTypeSizeInBits(EltTy);
3773 
3774   InstructionCost Cost = 0;
3775 
3776   // Source of truth: how many elements were there in the original IR vector?
3777   const unsigned SrcNumElt = VTy->getNumElements();
3778 
3779   // How far have we gotten?
3780   int NumEltRemaining = SrcNumElt;
3781   // Note that we intentionally capture by-reference, NumEltRemaining changes.
3782   auto NumEltDone = [&]() { return SrcNumElt - NumEltRemaining; };
3783 
3784   const int MaxLegalOpSizeBytes = divideCeil(LT.second.getSizeInBits(), 8);
3785 
3786   // Note that even if we can store 64 bits of an XMM, we still operate on XMM.
3787   const unsigned XMMBits = 128;
3788   if (XMMBits % EltTyBits != 0)
3789     // Vector size must be a multiple of the element size. I.e. no padding.
3790     return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
3791                                   CostKind);
3792   const int NumEltPerXMM = XMMBits / EltTyBits;
3793 
3794   auto *XMMVecTy = FixedVectorType::get(EltTy, NumEltPerXMM);
3795 
3796   for (int CurrOpSizeBytes = MaxLegalOpSizeBytes, SubVecEltsLeft = 0;
3797        NumEltRemaining > 0; CurrOpSizeBytes /= 2) {
3798     // How many elements would a single op deal with at once?
3799     if ((8 * CurrOpSizeBytes) % EltTyBits != 0)
3800       // Vector size must be a multiple of the element size. I.e. no padding.
3801       return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
3802                                     CostKind);
3803     int CurrNumEltPerOp = (8 * CurrOpSizeBytes) / EltTyBits;
3804 
3805     assert(CurrOpSizeBytes > 0 && CurrNumEltPerOp > 0 && "How'd we get here?");
3806     assert((((NumEltRemaining * EltTyBits) < (2 * 8 * CurrOpSizeBytes)) ||
3807             (CurrOpSizeBytes == MaxLegalOpSizeBytes)) &&
3808            "Unless we haven't halved the op size yet, "
3809            "we have less than two op's sized units of work left.");
3810 
3811     auto *CurrVecTy = CurrNumEltPerOp > NumEltPerXMM
3812                           ? FixedVectorType::get(EltTy, CurrNumEltPerOp)
3813                           : XMMVecTy;
3814 
3815     assert(CurrVecTy->getNumElements() % CurrNumEltPerOp == 0 &&
3816            "After halving sizes, the vector elt count is no longer a multiple "
3817            "of number of elements per operation?");
3818     auto *CoalescedVecTy =
3819         CurrNumEltPerOp == 1
3820             ? CurrVecTy
3821             : FixedVectorType::get(
3822                   IntegerType::get(Src->getContext(),
3823                                    EltTyBits * CurrNumEltPerOp),
3824                   CurrVecTy->getNumElements() / CurrNumEltPerOp);
3825     assert(DL.getTypeSizeInBits(CoalescedVecTy) ==
3826                DL.getTypeSizeInBits(CurrVecTy) &&
3827            "coalesciing elements doesn't change vector width.");
3828 
3829     while (NumEltRemaining > 0) {
3830       assert(SubVecEltsLeft >= 0 && "Subreg element count overconsumtion?");
3831 
3832       // Can we use this vector size, as per the remaining element count?
3833       // Iff the vector is naturally aligned, we can do a wide load regardless.
3834       if (NumEltRemaining < CurrNumEltPerOp &&
3835           (!IsLoad || Alignment.valueOrOne() < CurrOpSizeBytes) &&
3836           CurrOpSizeBytes != 1)
3837         break; // Try smalled vector size.
3838 
3839       bool Is0thSubVec = (NumEltDone() % LT.second.getVectorNumElements()) == 0;
3840 
3841       // If we have fully processed the previous reg, we need to replenish it.
3842       if (SubVecEltsLeft == 0) {
3843         SubVecEltsLeft += CurrVecTy->getNumElements();
3844         // And that's free only for the 0'th subvector of a legalized vector.
3845         if (!Is0thSubVec)
3846           Cost += getShuffleCost(IsLoad ? TTI::ShuffleKind::SK_InsertSubvector
3847                                         : TTI::ShuffleKind::SK_ExtractSubvector,
3848                                  VTy, None, NumEltDone(), CurrVecTy);
3849       }
3850 
3851       // While we can directly load/store ZMM, YMM, and 64-bit halves of XMM,
3852       // for smaller widths (32/16/8) we have to insert/extract them separately.
3853       // Again, it's free for the 0'th subreg (if op is 32/64 bit wide,
3854       // but let's pretend that it is also true for 16/8 bit wide ops...)
3855       if (CurrOpSizeBytes <= 32 / 8 && !Is0thSubVec) {
3856         int NumEltDoneInCurrXMM = NumEltDone() % NumEltPerXMM;
3857         assert(NumEltDoneInCurrXMM % CurrNumEltPerOp == 0 && "");
3858         int CoalescedVecEltIdx = NumEltDoneInCurrXMM / CurrNumEltPerOp;
3859         APInt DemandedElts =
3860             APInt::getBitsSet(CoalescedVecTy->getNumElements(),
3861                               CoalescedVecEltIdx, CoalescedVecEltIdx + 1);
3862         assert(DemandedElts.countPopulation() == 1 && "Inserting single value");
3863         Cost += getScalarizationOverhead(CoalescedVecTy, DemandedElts, IsLoad,
3864                                          !IsLoad);
3865       }
3866 
3867       // This isn't exactly right. We're using slow unaligned 32-byte accesses
3868       // as a proxy for a double-pumped AVX memory interface such as on
3869       // Sandybridge.
3870       if (CurrOpSizeBytes == 32 && ST->isUnalignedMem32Slow())
3871         Cost += 2;
3872       else
3873         Cost += 1;
3874 
3875       SubVecEltsLeft -= CurrNumEltPerOp;
3876       NumEltRemaining -= CurrNumEltPerOp;
3877       Alignment = commonAlignment(Alignment.valueOrOne(), CurrOpSizeBytes);
3878     }
3879   }
3880 
3881   assert(NumEltRemaining <= 0 && "Should have processed all the elements.");
3882 
3883   return Cost;
3884 }
3885 
3886 InstructionCost
3887 X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy, Align Alignment,
3888                                   unsigned AddressSpace,
3889                                   TTI::TargetCostKind CostKind) {
3890   bool IsLoad = (Instruction::Load == Opcode);
3891   bool IsStore = (Instruction::Store == Opcode);
3892 
3893   auto *SrcVTy = dyn_cast<FixedVectorType>(SrcTy);
3894   if (!SrcVTy)
3895     // To calculate scalar take the regular cost, without mask
3896     return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace, CostKind);
3897 
3898   unsigned NumElem = SrcVTy->getNumElements();
3899   auto *MaskTy =
3900       FixedVectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem);
3901   if ((IsLoad && !isLegalMaskedLoad(SrcVTy, Alignment)) ||
3902       (IsStore && !isLegalMaskedStore(SrcVTy, Alignment))) {
3903     // Scalarization
3904     APInt DemandedElts = APInt::getAllOnes(NumElem);
3905     InstructionCost MaskSplitCost =
3906         getScalarizationOverhead(MaskTy, DemandedElts, false, true);
3907     InstructionCost ScalarCompareCost = getCmpSelInstrCost(
3908         Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr,
3909         CmpInst::BAD_ICMP_PREDICATE, CostKind);
3910     InstructionCost BranchCost = getCFInstrCost(Instruction::Br, CostKind);
3911     InstructionCost MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);
3912     InstructionCost ValueSplitCost =
3913         getScalarizationOverhead(SrcVTy, DemandedElts, IsLoad, IsStore);
3914     InstructionCost MemopCost =
3915         NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
3916                                          Alignment, AddressSpace, CostKind);
3917     return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
3918   }
3919 
3920   // Legalize the type.
3921   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
3922   auto VT = TLI->getValueType(DL, SrcVTy);
3923   InstructionCost Cost = 0;
3924   if (VT.isSimple() && LT.second != VT.getSimpleVT() &&
3925       LT.second.getVectorNumElements() == NumElem)
3926     // Promotion requires extend/truncate for data and a shuffle for mask.
3927     Cost += getShuffleCost(TTI::SK_PermuteTwoSrc, SrcVTy, None, 0, nullptr) +
3928             getShuffleCost(TTI::SK_PermuteTwoSrc, MaskTy, None, 0, nullptr);
3929 
3930   else if (LT.first * LT.second.getVectorNumElements() > NumElem) {
3931     auto *NewMaskTy = FixedVectorType::get(MaskTy->getElementType(),
3932                                            LT.second.getVectorNumElements());
3933     // Expanding requires fill mask with zeroes
3934     Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, None, 0, MaskTy);
3935   }
3936 
3937   // Pre-AVX512 - each maskmov load costs 2 + store costs ~8.
3938   if (!ST->hasAVX512())
3939     return Cost + LT.first * (IsLoad ? 2 : 8);
3940 
3941   // AVX-512 masked load/store is cheapper
3942   return Cost + LT.first;
3943 }
3944 
3945 InstructionCost X86TTIImpl::getAddressComputationCost(Type *Ty,
3946                                                       ScalarEvolution *SE,
3947                                                       const SCEV *Ptr) {
3948   // Address computations in vectorized code with non-consecutive addresses will
3949   // likely result in more instructions compared to scalar code where the
3950   // computation can more often be merged into the index mode. The resulting
3951   // extra micro-ops can significantly decrease throughput.
3952   const unsigned NumVectorInstToHideOverhead = 10;
3953 
3954   // Cost modeling of Strided Access Computation is hidden by the indexing
3955   // modes of X86 regardless of the stride value. We dont believe that there
3956   // is a difference between constant strided access in gerenal and constant
3957   // strided value which is less than or equal to 64.
3958   // Even in the case of (loop invariant) stride whose value is not known at
3959   // compile time, the address computation will not incur more than one extra
3960   // ADD instruction.
3961   if (Ty->isVectorTy() && SE) {
3962     if (!BaseT::isStridedAccess(Ptr))
3963       return NumVectorInstToHideOverhead;
3964     if (!BaseT::getConstantStrideStep(SE, Ptr))
3965       return 1;
3966   }
3967 
3968   return BaseT::getAddressComputationCost(Ty, SE, Ptr);
3969 }
3970 
3971 InstructionCost
3972 X86TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
3973                                        Optional<FastMathFlags> FMF,
3974                                        TTI::TargetCostKind CostKind) {
3975   if (TTI::requiresOrderedReduction(FMF))
3976     return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
3977 
3978   // We use the Intel Architecture Code Analyzer(IACA) to measure the throughput
3979   // and make it as the cost.
3980 
3981   static const CostTblEntry SLMCostTblNoPairWise[] = {
3982     { ISD::FADD,  MVT::v2f64,   3 },
3983     { ISD::ADD,   MVT::v2i64,   5 },
3984   };
3985 
3986   static const CostTblEntry SSE2CostTblNoPairWise[] = {
3987     { ISD::FADD,  MVT::v2f64,   2 },
3988     { ISD::FADD,  MVT::v2f32,   2 },
3989     { ISD::FADD,  MVT::v4f32,   4 },
3990     { ISD::ADD,   MVT::v2i64,   2 },      // The data reported by the IACA tool is "1.6".
3991     { ISD::ADD,   MVT::v2i32,   2 }, // FIXME: chosen to be less than v4i32
3992     { ISD::ADD,   MVT::v4i32,   3 },      // The data reported by the IACA tool is "3.3".
3993     { ISD::ADD,   MVT::v2i16,   2 },      // The data reported by the IACA tool is "4.3".
3994     { ISD::ADD,   MVT::v4i16,   3 },      // The data reported by the IACA tool is "4.3".
3995     { ISD::ADD,   MVT::v8i16,   4 },      // The data reported by the IACA tool is "4.3".
3996     { ISD::ADD,   MVT::v2i8,    2 },
3997     { ISD::ADD,   MVT::v4i8,    2 },
3998     { ISD::ADD,   MVT::v8i8,    2 },
3999     { ISD::ADD,   MVT::v16i8,   3 },
4000   };
4001 
4002   static const CostTblEntry AVX1CostTblNoPairWise[] = {
4003     { ISD::FADD,  MVT::v4f64,   3 },
4004     { ISD::FADD,  MVT::v4f32,   3 },
4005     { ISD::FADD,  MVT::v8f32,   4 },
4006     { ISD::ADD,   MVT::v2i64,   1 },      // The data reported by the IACA tool is "1.5".
4007     { ISD::ADD,   MVT::v4i64,   3 },
4008     { ISD::ADD,   MVT::v8i32,   5 },
4009     { ISD::ADD,   MVT::v16i16,  5 },
4010     { ISD::ADD,   MVT::v32i8,   4 },
4011   };
4012 
4013   int ISD = TLI->InstructionOpcodeToISD(Opcode);
4014   assert(ISD && "Invalid opcode");
4015 
4016   // Before legalizing the type, give a chance to look up illegal narrow types
4017   // in the table.
4018   // FIXME: Is there a better way to do this?
4019   EVT VT = TLI->getValueType(DL, ValTy);
4020   if (VT.isSimple()) {
4021     MVT MTy = VT.getSimpleVT();
4022     if (ST->useSLMArithCosts())
4023       if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy))
4024         return Entry->Cost;
4025 
4026     if (ST->hasAVX())
4027       if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
4028         return Entry->Cost;
4029 
4030     if (ST->hasSSE2())
4031       if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
4032         return Entry->Cost;
4033   }
4034 
4035   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
4036 
4037   MVT MTy = LT.second;
4038 
4039   auto *ValVTy = cast<FixedVectorType>(ValTy);
4040 
4041   // Special case: vXi8 mul reductions are performed as vXi16.
4042   if (ISD == ISD::MUL && MTy.getScalarType() == MVT::i8) {
4043     auto *WideSclTy = IntegerType::get(ValVTy->getContext(), 16);
4044     auto *WideVecTy = FixedVectorType::get(WideSclTy, ValVTy->getNumElements());
4045     return getCastInstrCost(Instruction::ZExt, WideVecTy, ValTy,
4046                             TargetTransformInfo::CastContextHint::None,
4047                             CostKind) +
4048            getArithmeticReductionCost(Opcode, WideVecTy, FMF, CostKind);
4049   }
4050 
4051   InstructionCost ArithmeticCost = 0;
4052   if (LT.first != 1 && MTy.isVector() &&
4053       MTy.getVectorNumElements() < ValVTy->getNumElements()) {
4054     // Type needs to be split. We need LT.first - 1 arithmetic ops.
4055     auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(),
4056                                             MTy.getVectorNumElements());
4057     ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind);
4058     ArithmeticCost *= LT.first - 1;
4059   }
4060 
4061   if (ST->useSLMArithCosts())
4062     if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy))
4063       return ArithmeticCost + Entry->Cost;
4064 
4065   if (ST->hasAVX())
4066     if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
4067       return ArithmeticCost + Entry->Cost;
4068 
4069   if (ST->hasSSE2())
4070     if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
4071       return ArithmeticCost + Entry->Cost;
4072 
4073   // FIXME: These assume a naive kshift+binop lowering, which is probably
4074   // conservative in most cases.
4075   static const CostTblEntry AVX512BoolReduction[] = {
4076     { ISD::AND,  MVT::v2i1,   3 },
4077     { ISD::AND,  MVT::v4i1,   5 },
4078     { ISD::AND,  MVT::v8i1,   7 },
4079     { ISD::AND,  MVT::v16i1,  9 },
4080     { ISD::AND,  MVT::v32i1, 11 },
4081     { ISD::AND,  MVT::v64i1, 13 },
4082     { ISD::OR,   MVT::v2i1,   3 },
4083     { ISD::OR,   MVT::v4i1,   5 },
4084     { ISD::OR,   MVT::v8i1,   7 },
4085     { ISD::OR,   MVT::v16i1,  9 },
4086     { ISD::OR,   MVT::v32i1, 11 },
4087     { ISD::OR,   MVT::v64i1, 13 },
4088   };
4089 
4090   static const CostTblEntry AVX2BoolReduction[] = {
4091     { ISD::AND,  MVT::v16i16,  2 }, // vpmovmskb + cmp
4092     { ISD::AND,  MVT::v32i8,   2 }, // vpmovmskb + cmp
4093     { ISD::OR,   MVT::v16i16,  2 }, // vpmovmskb + cmp
4094     { ISD::OR,   MVT::v32i8,   2 }, // vpmovmskb + cmp
4095   };
4096 
4097   static const CostTblEntry AVX1BoolReduction[] = {
4098     { ISD::AND,  MVT::v4i64,   2 }, // vmovmskpd + cmp
4099     { ISD::AND,  MVT::v8i32,   2 }, // vmovmskps + cmp
4100     { ISD::AND,  MVT::v16i16,  4 }, // vextractf128 + vpand + vpmovmskb + cmp
4101     { ISD::AND,  MVT::v32i8,   4 }, // vextractf128 + vpand + vpmovmskb + cmp
4102     { ISD::OR,   MVT::v4i64,   2 }, // vmovmskpd + cmp
4103     { ISD::OR,   MVT::v8i32,   2 }, // vmovmskps + cmp
4104     { ISD::OR,   MVT::v16i16,  4 }, // vextractf128 + vpor + vpmovmskb + cmp
4105     { ISD::OR,   MVT::v32i8,   4 }, // vextractf128 + vpor + vpmovmskb + cmp
4106   };
4107 
4108   static const CostTblEntry SSE2BoolReduction[] = {
4109     { ISD::AND,  MVT::v2i64,   2 }, // movmskpd + cmp
4110     { ISD::AND,  MVT::v4i32,   2 }, // movmskps + cmp
4111     { ISD::AND,  MVT::v8i16,   2 }, // pmovmskb + cmp
4112     { ISD::AND,  MVT::v16i8,   2 }, // pmovmskb + cmp
4113     { ISD::OR,   MVT::v2i64,   2 }, // movmskpd + cmp
4114     { ISD::OR,   MVT::v4i32,   2 }, // movmskps + cmp
4115     { ISD::OR,   MVT::v8i16,   2 }, // pmovmskb + cmp
4116     { ISD::OR,   MVT::v16i8,   2 }, // pmovmskb + cmp
4117   };
4118 
4119   // Handle bool allof/anyof patterns.
4120   if (ValVTy->getElementType()->isIntegerTy(1)) {
4121     InstructionCost ArithmeticCost = 0;
4122     if (LT.first != 1 && MTy.isVector() &&
4123         MTy.getVectorNumElements() < ValVTy->getNumElements()) {
4124       // Type needs to be split. We need LT.first - 1 arithmetic ops.
4125       auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(),
4126                                               MTy.getVectorNumElements());
4127       ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind);
4128       ArithmeticCost *= LT.first - 1;
4129     }
4130 
4131     if (ST->hasAVX512())
4132       if (const auto *Entry = CostTableLookup(AVX512BoolReduction, ISD, MTy))
4133         return ArithmeticCost + Entry->Cost;
4134     if (ST->hasAVX2())
4135       if (const auto *Entry = CostTableLookup(AVX2BoolReduction, ISD, MTy))
4136         return ArithmeticCost + Entry->Cost;
4137     if (ST->hasAVX())
4138       if (const auto *Entry = CostTableLookup(AVX1BoolReduction, ISD, MTy))
4139         return ArithmeticCost + Entry->Cost;
4140     if (ST->hasSSE2())
4141       if (const auto *Entry = CostTableLookup(SSE2BoolReduction, ISD, MTy))
4142         return ArithmeticCost + Entry->Cost;
4143 
4144     return BaseT::getArithmeticReductionCost(Opcode, ValVTy, FMF, CostKind);
4145   }
4146 
4147   unsigned NumVecElts = ValVTy->getNumElements();
4148   unsigned ScalarSize = ValVTy->getScalarSizeInBits();
4149 
4150   // Special case power of 2 reductions where the scalar type isn't changed
4151   // by type legalization.
4152   if (!isPowerOf2_32(NumVecElts) || ScalarSize != MTy.getScalarSizeInBits())
4153     return BaseT::getArithmeticReductionCost(Opcode, ValVTy, FMF, CostKind);
4154 
4155   InstructionCost ReductionCost = 0;
4156 
4157   auto *Ty = ValVTy;
4158   if (LT.first != 1 && MTy.isVector() &&
4159       MTy.getVectorNumElements() < ValVTy->getNumElements()) {
4160     // Type needs to be split. We need LT.first - 1 arithmetic ops.
4161     Ty = FixedVectorType::get(ValVTy->getElementType(),
4162                               MTy.getVectorNumElements());
4163     ReductionCost = getArithmeticInstrCost(Opcode, Ty, CostKind);
4164     ReductionCost *= LT.first - 1;
4165     NumVecElts = MTy.getVectorNumElements();
4166   }
4167 
4168   // Now handle reduction with the legal type, taking into account size changes
4169   // at each level.
4170   while (NumVecElts > 1) {
4171     // Determine the size of the remaining vector we need to reduce.
4172     unsigned Size = NumVecElts * ScalarSize;
4173     NumVecElts /= 2;
4174     // If we're reducing from 256/512 bits, use an extract_subvector.
4175     if (Size > 128) {
4176       auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts);
4177       ReductionCost +=
4178           getShuffleCost(TTI::SK_ExtractSubvector, Ty, None, NumVecElts, SubTy);
4179       Ty = SubTy;
4180     } else if (Size == 128) {
4181       // Reducing from 128 bits is a permute of v2f64/v2i64.
4182       FixedVectorType *ShufTy;
4183       if (ValVTy->isFloatingPointTy())
4184         ShufTy =
4185             FixedVectorType::get(Type::getDoubleTy(ValVTy->getContext()), 2);
4186       else
4187         ShufTy =
4188             FixedVectorType::get(Type::getInt64Ty(ValVTy->getContext()), 2);
4189       ReductionCost +=
4190           getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
4191     } else if (Size == 64) {
4192       // Reducing from 64 bits is a shuffle of v4f32/v4i32.
4193       FixedVectorType *ShufTy;
4194       if (ValVTy->isFloatingPointTy())
4195         ShufTy =
4196             FixedVectorType::get(Type::getFloatTy(ValVTy->getContext()), 4);
4197       else
4198         ShufTy =
4199             FixedVectorType::get(Type::getInt32Ty(ValVTy->getContext()), 4);
4200       ReductionCost +=
4201           getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
4202     } else {
4203       // Reducing from smaller size is a shift by immediate.
4204       auto *ShiftTy = FixedVectorType::get(
4205           Type::getIntNTy(ValVTy->getContext(), Size), 128 / Size);
4206       ReductionCost += getArithmeticInstrCost(
4207           Instruction::LShr, ShiftTy, CostKind,
4208           TargetTransformInfo::OK_AnyValue,
4209           TargetTransformInfo::OK_UniformConstantValue,
4210           TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
4211     }
4212 
4213     // Add the arithmetic op for this level.
4214     ReductionCost += getArithmeticInstrCost(Opcode, Ty, CostKind);
4215   }
4216 
4217   // Add the final extract element to the cost.
4218   return ReductionCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
4219 }
4220 
4221 InstructionCost X86TTIImpl::getMinMaxCost(Type *Ty, Type *CondTy,
4222                                           bool IsUnsigned) {
4223   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
4224 
4225   MVT MTy = LT.second;
4226 
4227   int ISD;
4228   if (Ty->isIntOrIntVectorTy()) {
4229     ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
4230   } else {
4231     assert(Ty->isFPOrFPVectorTy() &&
4232            "Expected float point or integer vector type.");
4233     ISD = ISD::FMINNUM;
4234   }
4235 
4236   static const CostTblEntry SSE1CostTbl[] = {
4237     {ISD::FMINNUM, MVT::v4f32, 1},
4238   };
4239 
4240   static const CostTblEntry SSE2CostTbl[] = {
4241     {ISD::FMINNUM, MVT::v2f64, 1},
4242     {ISD::SMIN,    MVT::v8i16, 1},
4243     {ISD::UMIN,    MVT::v16i8, 1},
4244   };
4245 
4246   static const CostTblEntry SSE41CostTbl[] = {
4247     {ISD::SMIN,    MVT::v4i32, 1},
4248     {ISD::UMIN,    MVT::v4i32, 1},
4249     {ISD::UMIN,    MVT::v8i16, 1},
4250     {ISD::SMIN,    MVT::v16i8, 1},
4251   };
4252 
4253   static const CostTblEntry SSE42CostTbl[] = {
4254     {ISD::UMIN,    MVT::v2i64, 3}, // xor+pcmpgtq+blendvpd
4255   };
4256 
4257   static const CostTblEntry AVX1CostTbl[] = {
4258     {ISD::FMINNUM, MVT::v8f32,  1},
4259     {ISD::FMINNUM, MVT::v4f64,  1},
4260     {ISD::SMIN,    MVT::v8i32,  3},
4261     {ISD::UMIN,    MVT::v8i32,  3},
4262     {ISD::SMIN,    MVT::v16i16, 3},
4263     {ISD::UMIN,    MVT::v16i16, 3},
4264     {ISD::SMIN,    MVT::v32i8,  3},
4265     {ISD::UMIN,    MVT::v32i8,  3},
4266   };
4267 
4268   static const CostTblEntry AVX2CostTbl[] = {
4269     {ISD::SMIN,    MVT::v8i32,  1},
4270     {ISD::UMIN,    MVT::v8i32,  1},
4271     {ISD::SMIN,    MVT::v16i16, 1},
4272     {ISD::UMIN,    MVT::v16i16, 1},
4273     {ISD::SMIN,    MVT::v32i8,  1},
4274     {ISD::UMIN,    MVT::v32i8,  1},
4275   };
4276 
4277   static const CostTblEntry AVX512CostTbl[] = {
4278     {ISD::FMINNUM, MVT::v16f32, 1},
4279     {ISD::FMINNUM, MVT::v8f64,  1},
4280     {ISD::SMIN,    MVT::v2i64,  1},
4281     {ISD::UMIN,    MVT::v2i64,  1},
4282     {ISD::SMIN,    MVT::v4i64,  1},
4283     {ISD::UMIN,    MVT::v4i64,  1},
4284     {ISD::SMIN,    MVT::v8i64,  1},
4285     {ISD::UMIN,    MVT::v8i64,  1},
4286     {ISD::SMIN,    MVT::v16i32, 1},
4287     {ISD::UMIN,    MVT::v16i32, 1},
4288   };
4289 
4290   static const CostTblEntry AVX512BWCostTbl[] = {
4291     {ISD::SMIN,    MVT::v32i16, 1},
4292     {ISD::UMIN,    MVT::v32i16, 1},
4293     {ISD::SMIN,    MVT::v64i8,  1},
4294     {ISD::UMIN,    MVT::v64i8,  1},
4295   };
4296 
4297   // If we have a native MIN/MAX instruction for this type, use it.
4298   if (ST->hasBWI())
4299     if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
4300       return LT.first * Entry->Cost;
4301 
4302   if (ST->hasAVX512())
4303     if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
4304       return LT.first * Entry->Cost;
4305 
4306   if (ST->hasAVX2())
4307     if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
4308       return LT.first * Entry->Cost;
4309 
4310   if (ST->hasAVX())
4311     if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
4312       return LT.first * Entry->Cost;
4313 
4314   if (ST->hasSSE42())
4315     if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
4316       return LT.first * Entry->Cost;
4317 
4318   if (ST->hasSSE41())
4319     if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
4320       return LT.first * Entry->Cost;
4321 
4322   if (ST->hasSSE2())
4323     if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
4324       return LT.first * Entry->Cost;
4325 
4326   if (ST->hasSSE1())
4327     if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
4328       return LT.first * Entry->Cost;
4329 
4330   unsigned CmpOpcode;
4331   if (Ty->isFPOrFPVectorTy()) {
4332     CmpOpcode = Instruction::FCmp;
4333   } else {
4334     assert(Ty->isIntOrIntVectorTy() &&
4335            "expecting floating point or integer type for min/max reduction");
4336     CmpOpcode = Instruction::ICmp;
4337   }
4338 
4339   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
4340   // Otherwise fall back to cmp+select.
4341   InstructionCost Result =
4342       getCmpSelInstrCost(CmpOpcode, Ty, CondTy, CmpInst::BAD_ICMP_PREDICATE,
4343                          CostKind) +
4344       getCmpSelInstrCost(Instruction::Select, Ty, CondTy,
4345                          CmpInst::BAD_ICMP_PREDICATE, CostKind);
4346   return Result;
4347 }
4348 
4349 InstructionCost
4350 X86TTIImpl::getMinMaxReductionCost(VectorType *ValTy, VectorType *CondTy,
4351                                    bool IsUnsigned,
4352                                    TTI::TargetCostKind CostKind) {
4353   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
4354 
4355   MVT MTy = LT.second;
4356 
4357   int ISD;
4358   if (ValTy->isIntOrIntVectorTy()) {
4359     ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
4360   } else {
4361     assert(ValTy->isFPOrFPVectorTy() &&
4362            "Expected float point or integer vector type.");
4363     ISD = ISD::FMINNUM;
4364   }
4365 
4366   // We use the Intel Architecture Code Analyzer(IACA) to measure the throughput
4367   // and make it as the cost.
4368 
4369   static const CostTblEntry SSE2CostTblNoPairWise[] = {
4370       {ISD::UMIN, MVT::v2i16, 5}, // need pxors to use pminsw/pmaxsw
4371       {ISD::UMIN, MVT::v4i16, 7}, // need pxors to use pminsw/pmaxsw
4372       {ISD::UMIN, MVT::v8i16, 9}, // need pxors to use pminsw/pmaxsw
4373   };
4374 
4375   static const CostTblEntry SSE41CostTblNoPairWise[] = {
4376       {ISD::SMIN, MVT::v2i16, 3}, // same as sse2
4377       {ISD::SMIN, MVT::v4i16, 5}, // same as sse2
4378       {ISD::UMIN, MVT::v2i16, 5}, // same as sse2
4379       {ISD::UMIN, MVT::v4i16, 7}, // same as sse2
4380       {ISD::SMIN, MVT::v8i16, 4}, // phminposuw+xor
4381       {ISD::UMIN, MVT::v8i16, 4}, // FIXME: umin is cheaper than umax
4382       {ISD::SMIN, MVT::v2i8,  3}, // pminsb
4383       {ISD::SMIN, MVT::v4i8,  5}, // pminsb
4384       {ISD::SMIN, MVT::v8i8,  7}, // pminsb
4385       {ISD::SMIN, MVT::v16i8, 6},
4386       {ISD::UMIN, MVT::v2i8,  3}, // same as sse2
4387       {ISD::UMIN, MVT::v4i8,  5}, // same as sse2
4388       {ISD::UMIN, MVT::v8i8,  7}, // same as sse2
4389       {ISD::UMIN, MVT::v16i8, 6}, // FIXME: umin is cheaper than umax
4390   };
4391 
4392   static const CostTblEntry AVX1CostTblNoPairWise[] = {
4393       {ISD::SMIN, MVT::v16i16, 6},
4394       {ISD::UMIN, MVT::v16i16, 6}, // FIXME: umin is cheaper than umax
4395       {ISD::SMIN, MVT::v32i8, 8},
4396       {ISD::UMIN, MVT::v32i8, 8},
4397   };
4398 
4399   static const CostTblEntry AVX512BWCostTblNoPairWise[] = {
4400       {ISD::SMIN, MVT::v32i16, 8},
4401       {ISD::UMIN, MVT::v32i16, 8}, // FIXME: umin is cheaper than umax
4402       {ISD::SMIN, MVT::v64i8, 10},
4403       {ISD::UMIN, MVT::v64i8, 10},
4404   };
4405 
4406   // Before legalizing the type, give a chance to look up illegal narrow types
4407   // in the table.
4408   // FIXME: Is there a better way to do this?
4409   EVT VT = TLI->getValueType(DL, ValTy);
4410   if (VT.isSimple()) {
4411     MVT MTy = VT.getSimpleVT();
4412     if (ST->hasBWI())
4413       if (const auto *Entry = CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy))
4414         return Entry->Cost;
4415 
4416     if (ST->hasAVX())
4417       if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
4418         return Entry->Cost;
4419 
4420     if (ST->hasSSE41())
4421       if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy))
4422         return Entry->Cost;
4423 
4424     if (ST->hasSSE2())
4425       if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
4426         return Entry->Cost;
4427   }
4428 
4429   auto *ValVTy = cast<FixedVectorType>(ValTy);
4430   unsigned NumVecElts = ValVTy->getNumElements();
4431 
4432   auto *Ty = ValVTy;
4433   InstructionCost MinMaxCost = 0;
4434   if (LT.first != 1 && MTy.isVector() &&
4435       MTy.getVectorNumElements() < ValVTy->getNumElements()) {
4436     // Type needs to be split. We need LT.first - 1 operations ops.
4437     Ty = FixedVectorType::get(ValVTy->getElementType(),
4438                               MTy.getVectorNumElements());
4439     auto *SubCondTy = FixedVectorType::get(CondTy->getElementType(),
4440                                            MTy.getVectorNumElements());
4441     MinMaxCost = getMinMaxCost(Ty, SubCondTy, IsUnsigned);
4442     MinMaxCost *= LT.first - 1;
4443     NumVecElts = MTy.getVectorNumElements();
4444   }
4445 
4446   if (ST->hasBWI())
4447     if (const auto *Entry = CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy))
4448       return MinMaxCost + Entry->Cost;
4449 
4450   if (ST->hasAVX())
4451     if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
4452       return MinMaxCost + Entry->Cost;
4453 
4454   if (ST->hasSSE41())
4455     if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy))
4456       return MinMaxCost + Entry->Cost;
4457 
4458   if (ST->hasSSE2())
4459     if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
4460       return MinMaxCost + Entry->Cost;
4461 
4462   unsigned ScalarSize = ValTy->getScalarSizeInBits();
4463 
4464   // Special case power of 2 reductions where the scalar type isn't changed
4465   // by type legalization.
4466   if (!isPowerOf2_32(ValVTy->getNumElements()) ||
4467       ScalarSize != MTy.getScalarSizeInBits())
4468     return BaseT::getMinMaxReductionCost(ValTy, CondTy, IsUnsigned, CostKind);
4469 
4470   // Now handle reduction with the legal type, taking into account size changes
4471   // at each level.
4472   while (NumVecElts > 1) {
4473     // Determine the size of the remaining vector we need to reduce.
4474     unsigned Size = NumVecElts * ScalarSize;
4475     NumVecElts /= 2;
4476     // If we're reducing from 256/512 bits, use an extract_subvector.
4477     if (Size > 128) {
4478       auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts);
4479       MinMaxCost +=
4480           getShuffleCost(TTI::SK_ExtractSubvector, Ty, None, NumVecElts, SubTy);
4481       Ty = SubTy;
4482     } else if (Size == 128) {
4483       // Reducing from 128 bits is a permute of v2f64/v2i64.
4484       VectorType *ShufTy;
4485       if (ValTy->isFloatingPointTy())
4486         ShufTy =
4487             FixedVectorType::get(Type::getDoubleTy(ValTy->getContext()), 2);
4488       else
4489         ShufTy = FixedVectorType::get(Type::getInt64Ty(ValTy->getContext()), 2);
4490       MinMaxCost +=
4491           getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
4492     } else if (Size == 64) {
4493       // Reducing from 64 bits is a shuffle of v4f32/v4i32.
4494       FixedVectorType *ShufTy;
4495       if (ValTy->isFloatingPointTy())
4496         ShufTy = FixedVectorType::get(Type::getFloatTy(ValTy->getContext()), 4);
4497       else
4498         ShufTy = FixedVectorType::get(Type::getInt32Ty(ValTy->getContext()), 4);
4499       MinMaxCost +=
4500           getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
4501     } else {
4502       // Reducing from smaller size is a shift by immediate.
4503       auto *ShiftTy = FixedVectorType::get(
4504           Type::getIntNTy(ValTy->getContext(), Size), 128 / Size);
4505       MinMaxCost += getArithmeticInstrCost(
4506           Instruction::LShr, ShiftTy, TTI::TCK_RecipThroughput,
4507           TargetTransformInfo::OK_AnyValue,
4508           TargetTransformInfo::OK_UniformConstantValue,
4509           TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
4510     }
4511 
4512     // Add the arithmetic op for this level.
4513     auto *SubCondTy =
4514         FixedVectorType::get(CondTy->getElementType(), Ty->getNumElements());
4515     MinMaxCost += getMinMaxCost(Ty, SubCondTy, IsUnsigned);
4516   }
4517 
4518   // Add the final extract element to the cost.
4519   return MinMaxCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
4520 }
4521 
4522 /// Calculate the cost of materializing a 64-bit value. This helper
4523 /// method might only calculate a fraction of a larger immediate. Therefore it
4524 /// is valid to return a cost of ZERO.
4525 InstructionCost X86TTIImpl::getIntImmCost(int64_t Val) {
4526   if (Val == 0)
4527     return TTI::TCC_Free;
4528 
4529   if (isInt<32>(Val))
4530     return TTI::TCC_Basic;
4531 
4532   return 2 * TTI::TCC_Basic;
4533 }
4534 
4535 InstructionCost X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
4536                                           TTI::TargetCostKind CostKind) {
4537   assert(Ty->isIntegerTy());
4538 
4539   unsigned BitSize = Ty->getPrimitiveSizeInBits();
4540   if (BitSize == 0)
4541     return ~0U;
4542 
4543   // Never hoist constants larger than 128bit, because this might lead to
4544   // incorrect code generation or assertions in codegen.
4545   // Fixme: Create a cost model for types larger than i128 once the codegen
4546   // issues have been fixed.
4547   if (BitSize > 128)
4548     return TTI::TCC_Free;
4549 
4550   if (Imm == 0)
4551     return TTI::TCC_Free;
4552 
4553   // Sign-extend all constants to a multiple of 64-bit.
4554   APInt ImmVal = Imm;
4555   if (BitSize % 64 != 0)
4556     ImmVal = Imm.sext(alignTo(BitSize, 64));
4557 
4558   // Split the constant into 64-bit chunks and calculate the cost for each
4559   // chunk.
4560   InstructionCost Cost = 0;
4561   for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
4562     APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
4563     int64_t Val = Tmp.getSExtValue();
4564     Cost += getIntImmCost(Val);
4565   }
4566   // We need at least one instruction to materialize the constant.
4567   return std::max<InstructionCost>(1, Cost);
4568 }
4569 
4570 InstructionCost X86TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
4571                                               const APInt &Imm, Type *Ty,
4572                                               TTI::TargetCostKind CostKind,
4573                                               Instruction *Inst) {
4574   assert(Ty->isIntegerTy());
4575 
4576   unsigned BitSize = Ty->getPrimitiveSizeInBits();
4577   // There is no cost model for constants with a bit size of 0. Return TCC_Free
4578   // here, so that constant hoisting will ignore this constant.
4579   if (BitSize == 0)
4580     return TTI::TCC_Free;
4581 
4582   unsigned ImmIdx = ~0U;
4583   switch (Opcode) {
4584   default:
4585     return TTI::TCC_Free;
4586   case Instruction::GetElementPtr:
4587     // Always hoist the base address of a GetElementPtr. This prevents the
4588     // creation of new constants for every base constant that gets constant
4589     // folded with the offset.
4590     if (Idx == 0)
4591       return 2 * TTI::TCC_Basic;
4592     return TTI::TCC_Free;
4593   case Instruction::Store:
4594     ImmIdx = 0;
4595     break;
4596   case Instruction::ICmp:
4597     // This is an imperfect hack to prevent constant hoisting of
4598     // compares that might be trying to check if a 64-bit value fits in
4599     // 32-bits. The backend can optimize these cases using a right shift by 32.
4600     // Ideally we would check the compare predicate here. There also other
4601     // similar immediates the backend can use shifts for.
4602     if (Idx == 1 && Imm.getBitWidth() == 64) {
4603       uint64_t ImmVal = Imm.getZExtValue();
4604       if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff)
4605         return TTI::TCC_Free;
4606     }
4607     ImmIdx = 1;
4608     break;
4609   case Instruction::And:
4610     // We support 64-bit ANDs with immediates with 32-bits of leading zeroes
4611     // by using a 32-bit operation with implicit zero extension. Detect such
4612     // immediates here as the normal path expects bit 31 to be sign extended.
4613     if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
4614       return TTI::TCC_Free;
4615     ImmIdx = 1;
4616     break;
4617   case Instruction::Add:
4618   case Instruction::Sub:
4619     // For add/sub, we can use the opposite instruction for INT32_MIN.
4620     if (Idx == 1 && Imm.getBitWidth() == 64 && Imm.getZExtValue() == 0x80000000)
4621       return TTI::TCC_Free;
4622     ImmIdx = 1;
4623     break;
4624   case Instruction::UDiv:
4625   case Instruction::SDiv:
4626   case Instruction::URem:
4627   case Instruction::SRem:
4628     // Division by constant is typically expanded later into a different
4629     // instruction sequence. This completely changes the constants.
4630     // Report them as "free" to stop ConstantHoist from marking them as opaque.
4631     return TTI::TCC_Free;
4632   case Instruction::Mul:
4633   case Instruction::Or:
4634   case Instruction::Xor:
4635     ImmIdx = 1;
4636     break;
4637   // Always return TCC_Free for the shift value of a shift instruction.
4638   case Instruction::Shl:
4639   case Instruction::LShr:
4640   case Instruction::AShr:
4641     if (Idx == 1)
4642       return TTI::TCC_Free;
4643     break;
4644   case Instruction::Trunc:
4645   case Instruction::ZExt:
4646   case Instruction::SExt:
4647   case Instruction::IntToPtr:
4648   case Instruction::PtrToInt:
4649   case Instruction::BitCast:
4650   case Instruction::PHI:
4651   case Instruction::Call:
4652   case Instruction::Select:
4653   case Instruction::Ret:
4654   case Instruction::Load:
4655     break;
4656   }
4657 
4658   if (Idx == ImmIdx) {
4659     int NumConstants = divideCeil(BitSize, 64);
4660     InstructionCost Cost = X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
4661     return (Cost <= NumConstants * TTI::TCC_Basic)
4662                ? static_cast<int>(TTI::TCC_Free)
4663                : Cost;
4664   }
4665 
4666   return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
4667 }
4668 
4669 InstructionCost X86TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
4670                                                 const APInt &Imm, Type *Ty,
4671                                                 TTI::TargetCostKind CostKind) {
4672   assert(Ty->isIntegerTy());
4673 
4674   unsigned BitSize = Ty->getPrimitiveSizeInBits();
4675   // There is no cost model for constants with a bit size of 0. Return TCC_Free
4676   // here, so that constant hoisting will ignore this constant.
4677   if (BitSize == 0)
4678     return TTI::TCC_Free;
4679 
4680   switch (IID) {
4681   default:
4682     return TTI::TCC_Free;
4683   case Intrinsic::sadd_with_overflow:
4684   case Intrinsic::uadd_with_overflow:
4685   case Intrinsic::ssub_with_overflow:
4686   case Intrinsic::usub_with_overflow:
4687   case Intrinsic::smul_with_overflow:
4688   case Intrinsic::umul_with_overflow:
4689     if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
4690       return TTI::TCC_Free;
4691     break;
4692   case Intrinsic::experimental_stackmap:
4693     if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
4694       return TTI::TCC_Free;
4695     break;
4696   case Intrinsic::experimental_patchpoint_void:
4697   case Intrinsic::experimental_patchpoint_i64:
4698     if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
4699       return TTI::TCC_Free;
4700     break;
4701   }
4702   return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
4703 }
4704 
4705 InstructionCost X86TTIImpl::getCFInstrCost(unsigned Opcode,
4706                                            TTI::TargetCostKind CostKind,
4707                                            const Instruction *I) {
4708   if (CostKind != TTI::TCK_RecipThroughput)
4709     return Opcode == Instruction::PHI ? 0 : 1;
4710   // Branches are assumed to be predicted.
4711   return 0;
4712 }
4713 
4714 int X86TTIImpl::getGatherOverhead() const {
4715   // Some CPUs have more overhead for gather. The specified overhead is relative
4716   // to the Load operation. "2" is the number provided by Intel architects. This
4717   // parameter is used for cost estimation of Gather Op and comparison with
4718   // other alternatives.
4719   // TODO: Remove the explicit hasAVX512()?, That would mean we would only
4720   // enable gather with a -march.
4721   if (ST->hasAVX512() || (ST->hasAVX2() && ST->hasFastGather()))
4722     return 2;
4723 
4724   return 1024;
4725 }
4726 
4727 int X86TTIImpl::getScatterOverhead() const {
4728   if (ST->hasAVX512())
4729     return 2;
4730 
4731   return 1024;
4732 }
4733 
4734 // Return an average cost of Gather / Scatter instruction, maybe improved later.
4735 // FIXME: Add TargetCostKind support.
4736 InstructionCost X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy,
4737                                             const Value *Ptr, Align Alignment,
4738                                             unsigned AddressSpace) {
4739 
4740   assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost");
4741   unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();
4742 
4743   // Try to reduce index size from 64 bit (default for GEP)
4744   // to 32. It is essential for VF 16. If the index can't be reduced to 32, the
4745   // operation will use 16 x 64 indices which do not fit in a zmm and needs
4746   // to split. Also check that the base pointer is the same for all lanes,
4747   // and that there's at most one variable index.
4748   auto getIndexSizeInBits = [](const Value *Ptr, const DataLayout &DL) {
4749     unsigned IndexSize = DL.getPointerSizeInBits();
4750     const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
4751     if (IndexSize < 64 || !GEP)
4752       return IndexSize;
4753 
4754     unsigned NumOfVarIndices = 0;
4755     const Value *Ptrs = GEP->getPointerOperand();
4756     if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs))
4757       return IndexSize;
4758     for (unsigned i = 1; i < GEP->getNumOperands(); ++i) {
4759       if (isa<Constant>(GEP->getOperand(i)))
4760         continue;
4761       Type *IndxTy = GEP->getOperand(i)->getType();
4762       if (auto *IndexVTy = dyn_cast<VectorType>(IndxTy))
4763         IndxTy = IndexVTy->getElementType();
4764       if ((IndxTy->getPrimitiveSizeInBits() == 64 &&
4765           !isa<SExtInst>(GEP->getOperand(i))) ||
4766          ++NumOfVarIndices > 1)
4767         return IndexSize; // 64
4768     }
4769     return (unsigned)32;
4770   };
4771 
4772   // Trying to reduce IndexSize to 32 bits for vector 16.
4773   // By default the IndexSize is equal to pointer size.
4774   unsigned IndexSize = (ST->hasAVX512() && VF >= 16)
4775                            ? getIndexSizeInBits(Ptr, DL)
4776                            : DL.getPointerSizeInBits();
4777 
4778   auto *IndexVTy = FixedVectorType::get(
4779       IntegerType::get(SrcVTy->getContext(), IndexSize), VF);
4780   std::pair<InstructionCost, MVT> IdxsLT =
4781       TLI->getTypeLegalizationCost(DL, IndexVTy);
4782   std::pair<InstructionCost, MVT> SrcLT =
4783       TLI->getTypeLegalizationCost(DL, SrcVTy);
4784   InstructionCost::CostType SplitFactor =
4785       *std::max(IdxsLT.first, SrcLT.first).getValue();
4786   if (SplitFactor > 1) {
4787     // Handle splitting of vector of pointers
4788     auto *SplitSrcTy =
4789         FixedVectorType::get(SrcVTy->getScalarType(), VF / SplitFactor);
4790     return SplitFactor * getGSVectorCost(Opcode, SplitSrcTy, Ptr, Alignment,
4791                                          AddressSpace);
4792   }
4793 
4794   // The gather / scatter cost is given by Intel architects. It is a rough
4795   // number since we are looking at one instruction in a time.
4796   const int GSOverhead = (Opcode == Instruction::Load)
4797                              ? getGatherOverhead()
4798                              : getScatterOverhead();
4799   return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
4800                                            MaybeAlign(Alignment), AddressSpace,
4801                                            TTI::TCK_RecipThroughput);
4802 }
4803 
4804 /// Return the cost of full scalarization of gather / scatter operation.
4805 ///
4806 /// Opcode - Load or Store instruction.
4807 /// SrcVTy - The type of the data vector that should be gathered or scattered.
4808 /// VariableMask - The mask is non-constant at compile time.
4809 /// Alignment - Alignment for one element.
4810 /// AddressSpace - pointer[s] address space.
4811 ///
4812 /// FIXME: Add TargetCostKind support.
4813 InstructionCost X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy,
4814                                             bool VariableMask, Align Alignment,
4815                                             unsigned AddressSpace) {
4816   Type *ScalarTy = SrcVTy->getScalarType();
4817   unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();
4818   APInt DemandedElts = APInt::getAllOnes(VF);
4819   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
4820 
4821   InstructionCost MaskUnpackCost = 0;
4822   if (VariableMask) {
4823     auto *MaskTy =
4824         FixedVectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF);
4825     MaskUnpackCost = getScalarizationOverhead(
4826         MaskTy, DemandedElts, /*Insert=*/false, /*Extract=*/true);
4827     InstructionCost ScalarCompareCost = getCmpSelInstrCost(
4828         Instruction::ICmp, Type::getInt1Ty(SrcVTy->getContext()), nullptr,
4829         CmpInst::BAD_ICMP_PREDICATE, CostKind);
4830     InstructionCost BranchCost = getCFInstrCost(Instruction::Br, CostKind);
4831     MaskUnpackCost += VF * (BranchCost + ScalarCompareCost);
4832   }
4833 
4834   InstructionCost AddressUnpackCost = getScalarizationOverhead(
4835       FixedVectorType::get(ScalarTy->getPointerTo(), VF), DemandedElts,
4836       /*Insert=*/false, /*Extract=*/true);
4837 
4838   // The cost of the scalar loads/stores.
4839   InstructionCost MemoryOpCost =
4840       VF * getMemoryOpCost(Opcode, ScalarTy, MaybeAlign(Alignment),
4841                            AddressSpace, CostKind);
4842 
4843   // The cost of forming the vector from loaded scalars/
4844   // scalarizing the vector to perform scalar stores.
4845   InstructionCost InsertExtractCost =
4846       getScalarizationOverhead(cast<FixedVectorType>(SrcVTy), DemandedElts,
4847                                /*Insert=*/Opcode == Instruction::Load,
4848                                /*Extract=*/Opcode == Instruction::Store);
4849 
4850   return AddressUnpackCost + MemoryOpCost + MaskUnpackCost + InsertExtractCost;
4851 }
4852 
4853 /// Calculate the cost of Gather / Scatter operation
4854 InstructionCost X86TTIImpl::getGatherScatterOpCost(
4855     unsigned Opcode, Type *SrcVTy, const Value *Ptr, bool VariableMask,
4856     Align Alignment, TTI::TargetCostKind CostKind,
4857     const Instruction *I = nullptr) {
4858   if (CostKind != TTI::TCK_RecipThroughput) {
4859     if ((Opcode == Instruction::Load &&
4860          isLegalMaskedGather(SrcVTy, Align(Alignment))) ||
4861         (Opcode == Instruction::Store &&
4862          isLegalMaskedScatter(SrcVTy, Align(Alignment))))
4863       return 1;
4864     return BaseT::getGatherScatterOpCost(Opcode, SrcVTy, Ptr, VariableMask,
4865                                          Alignment, CostKind, I);
4866   }
4867 
4868   assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter");
4869   PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
4870   if (!PtrTy && Ptr->getType()->isVectorTy())
4871     PtrTy = dyn_cast<PointerType>(
4872         cast<VectorType>(Ptr->getType())->getElementType());
4873   assert(PtrTy && "Unexpected type for Ptr argument");
4874   unsigned AddressSpace = PtrTy->getAddressSpace();
4875 
4876   if ((Opcode == Instruction::Load &&
4877        !isLegalMaskedGather(SrcVTy, Align(Alignment))) ||
4878       (Opcode == Instruction::Store &&
4879        !isLegalMaskedScatter(SrcVTy, Align(Alignment))))
4880     return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment,
4881                            AddressSpace);
4882 
4883   return getGSVectorCost(Opcode, SrcVTy, Ptr, Alignment, AddressSpace);
4884 }
4885 
4886 bool X86TTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1,
4887                                TargetTransformInfo::LSRCost &C2) {
4888     // X86 specific here are "instruction number 1st priority".
4889     return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost,
4890                     C1.NumIVMuls, C1.NumBaseAdds,
4891                     C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
4892            std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost,
4893                     C2.NumIVMuls, C2.NumBaseAdds,
4894                     C2.ScaleCost, C2.ImmCost, C2.SetupCost);
4895 }
4896 
4897 bool X86TTIImpl::canMacroFuseCmp() {
4898   return ST->hasMacroFusion() || ST->hasBranchFusion();
4899 }
4900 
4901 bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) {
4902   if (!ST->hasAVX())
4903     return false;
4904 
4905   // The backend can't handle a single element vector.
4906   if (isa<VectorType>(DataTy) &&
4907       cast<FixedVectorType>(DataTy)->getNumElements() == 1)
4908     return false;
4909   Type *ScalarTy = DataTy->getScalarType();
4910 
4911   if (ScalarTy->isPointerTy())
4912     return true;
4913 
4914   if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
4915     return true;
4916 
4917   if (ScalarTy->isHalfTy() && ST->hasBWI() && ST->hasFP16())
4918     return true;
4919 
4920   if (!ScalarTy->isIntegerTy())
4921     return false;
4922 
4923   unsigned IntWidth = ScalarTy->getIntegerBitWidth();
4924   return IntWidth == 32 || IntWidth == 64 ||
4925          ((IntWidth == 8 || IntWidth == 16) && ST->hasBWI());
4926 }
4927 
4928 bool X86TTIImpl::isLegalMaskedStore(Type *DataType, Align Alignment) {
4929   return isLegalMaskedLoad(DataType, Alignment);
4930 }
4931 
4932 bool X86TTIImpl::isLegalNTLoad(Type *DataType, Align Alignment) {
4933   unsigned DataSize = DL.getTypeStoreSize(DataType);
4934   // The only supported nontemporal loads are for aligned vectors of 16 or 32
4935   // bytes.  Note that 32-byte nontemporal vector loads are supported by AVX2
4936   // (the equivalent stores only require AVX).
4937   if (Alignment >= DataSize && (DataSize == 16 || DataSize == 32))
4938     return DataSize == 16 ?  ST->hasSSE1() : ST->hasAVX2();
4939 
4940   return false;
4941 }
4942 
4943 bool X86TTIImpl::isLegalNTStore(Type *DataType, Align Alignment) {
4944   unsigned DataSize = DL.getTypeStoreSize(DataType);
4945 
4946   // SSE4A supports nontemporal stores of float and double at arbitrary
4947   // alignment.
4948   if (ST->hasSSE4A() && (DataType->isFloatTy() || DataType->isDoubleTy()))
4949     return true;
4950 
4951   // Besides the SSE4A subtarget exception above, only aligned stores are
4952   // available nontemporaly on any other subtarget.  And only stores with a size
4953   // of 4..32 bytes (powers of 2, only) are permitted.
4954   if (Alignment < DataSize || DataSize < 4 || DataSize > 32 ||
4955       !isPowerOf2_32(DataSize))
4956     return false;
4957 
4958   // 32-byte vector nontemporal stores are supported by AVX (the equivalent
4959   // loads require AVX2).
4960   if (DataSize == 32)
4961     return ST->hasAVX();
4962   if (DataSize == 16)
4963     return ST->hasSSE1();
4964   return true;
4965 }
4966 
4967 bool X86TTIImpl::isLegalMaskedExpandLoad(Type *DataTy) {
4968   if (!isa<VectorType>(DataTy))
4969     return false;
4970 
4971   if (!ST->hasAVX512())
4972     return false;
4973 
4974   // The backend can't handle a single element vector.
4975   if (cast<FixedVectorType>(DataTy)->getNumElements() == 1)
4976     return false;
4977 
4978   Type *ScalarTy = cast<VectorType>(DataTy)->getElementType();
4979 
4980   if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
4981     return true;
4982 
4983   if (!ScalarTy->isIntegerTy())
4984     return false;
4985 
4986   unsigned IntWidth = ScalarTy->getIntegerBitWidth();
4987   return IntWidth == 32 || IntWidth == 64 ||
4988          ((IntWidth == 8 || IntWidth == 16) && ST->hasVBMI2());
4989 }
4990 
4991 bool X86TTIImpl::isLegalMaskedCompressStore(Type *DataTy) {
4992   return isLegalMaskedExpandLoad(DataTy);
4993 }
4994 
4995 bool X86TTIImpl::supportsGather() const {
4996   // Some CPUs have better gather performance than others.
4997   // TODO: Remove the explicit ST->hasAVX512()?, That would mean we would only
4998   // enable gather with a -march.
4999   return ST->hasAVX512() || (ST->hasFastGather() && ST->hasAVX2());
5000 }
5001 
5002 bool X86TTIImpl::isLegalMaskedGather(Type *DataTy, Align Alignment) {
5003   if (!supportsGather())
5004     return false;
5005 
5006   // This function is called now in two cases: from the Loop Vectorizer
5007   // and from the Scalarizer.
5008   // When the Loop Vectorizer asks about legality of the feature,
5009   // the vectorization factor is not calculated yet. The Loop Vectorizer
5010   // sends a scalar type and the decision is based on the width of the
5011   // scalar element.
5012   // Later on, the cost model will estimate usage this intrinsic based on
5013   // the vector type.
5014   // The Scalarizer asks again about legality. It sends a vector type.
5015   // In this case we can reject non-power-of-2 vectors.
5016   // We also reject single element vectors as the type legalizer can't
5017   // scalarize it.
5018   if (auto *DataVTy = dyn_cast<FixedVectorType>(DataTy)) {
5019     unsigned NumElts = DataVTy->getNumElements();
5020     if (NumElts == 1)
5021       return false;
5022     // Gather / Scatter for vector 2 is not profitable on KNL / SKX
5023     // Vector-4 of gather/scatter instruction does not exist on KNL.
5024     // We can extend it to 8 elements, but zeroing upper bits of
5025     // the mask vector will add more instructions. Right now we give the scalar
5026     // cost of vector-4 for KNL. TODO: Check, maybe the gather/scatter
5027     // instruction is better in the VariableMask case.
5028     if (ST->hasAVX512() && (NumElts == 2 || (NumElts == 4 && !ST->hasVLX())))
5029       return false;
5030   }
5031   Type *ScalarTy = DataTy->getScalarType();
5032   if (ScalarTy->isPointerTy())
5033     return true;
5034 
5035   if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
5036     return true;
5037 
5038   if (!ScalarTy->isIntegerTy())
5039     return false;
5040 
5041   unsigned IntWidth = ScalarTy->getIntegerBitWidth();
5042   return IntWidth == 32 || IntWidth == 64;
5043 }
5044 
5045 bool X86TTIImpl::isLegalMaskedScatter(Type *DataType, Align Alignment) {
5046   // AVX2 doesn't support scatter
5047   if (!ST->hasAVX512())
5048     return false;
5049   return isLegalMaskedGather(DataType, Alignment);
5050 }
5051 
5052 bool X86TTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
5053   EVT VT = TLI->getValueType(DL, DataType);
5054   return TLI->isOperationLegal(IsSigned ? ISD::SDIVREM : ISD::UDIVREM, VT);
5055 }
5056 
5057 bool X86TTIImpl::isFCmpOrdCheaperThanFCmpZero(Type *Ty) {
5058   return false;
5059 }
5060 
5061 bool X86TTIImpl::areInlineCompatible(const Function *Caller,
5062                                      const Function *Callee) const {
5063   const TargetMachine &TM = getTLI()->getTargetMachine();
5064 
5065   // Work this as a subsetting of subtarget features.
5066   const FeatureBitset &CallerBits =
5067       TM.getSubtargetImpl(*Caller)->getFeatureBits();
5068   const FeatureBitset &CalleeBits =
5069       TM.getSubtargetImpl(*Callee)->getFeatureBits();
5070 
5071   FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
5072   FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
5073   return (RealCallerBits & RealCalleeBits) == RealCalleeBits;
5074 }
5075 
5076 bool X86TTIImpl::areFunctionArgsABICompatible(
5077     const Function *Caller, const Function *Callee,
5078     SmallPtrSetImpl<Argument *> &Args) const {
5079   if (!BaseT::areFunctionArgsABICompatible(Caller, Callee, Args))
5080     return false;
5081 
5082   // If we get here, we know the target features match. If one function
5083   // considers 512-bit vectors legal and the other does not, consider them
5084   // incompatible.
5085   const TargetMachine &TM = getTLI()->getTargetMachine();
5086 
5087   if (TM.getSubtarget<X86Subtarget>(*Caller).useAVX512Regs() ==
5088       TM.getSubtarget<X86Subtarget>(*Callee).useAVX512Regs())
5089     return true;
5090 
5091   // Consider the arguments compatible if they aren't vectors or aggregates.
5092   // FIXME: Look at the size of vectors.
5093   // FIXME: Look at the element types of aggregates to see if there are vectors.
5094   // FIXME: The API of this function seems intended to allow arguments
5095   // to be removed from the set, but the caller doesn't check if the set
5096   // becomes empty so that may not work in practice.
5097   return llvm::none_of(Args, [](Argument *A) {
5098     auto *EltTy = cast<PointerType>(A->getType())->getElementType();
5099     return EltTy->isVectorTy() || EltTy->isAggregateType();
5100   });
5101 }
5102 
5103 X86TTIImpl::TTI::MemCmpExpansionOptions
5104 X86TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
5105   TTI::MemCmpExpansionOptions Options;
5106   Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
5107   Options.NumLoadsPerBlock = 2;
5108   // All GPR and vector loads can be unaligned.
5109   Options.AllowOverlappingLoads = true;
5110   if (IsZeroCmp) {
5111     // Only enable vector loads for equality comparison. Right now the vector
5112     // version is not as fast for three way compare (see #33329).
5113     const unsigned PreferredWidth = ST->getPreferVectorWidth();
5114     if (PreferredWidth >= 512 && ST->hasAVX512()) Options.LoadSizes.push_back(64);
5115     if (PreferredWidth >= 256 && ST->hasAVX()) Options.LoadSizes.push_back(32);
5116     if (PreferredWidth >= 128 && ST->hasSSE2()) Options.LoadSizes.push_back(16);
5117   }
5118   if (ST->is64Bit()) {
5119     Options.LoadSizes.push_back(8);
5120   }
5121   Options.LoadSizes.push_back(4);
5122   Options.LoadSizes.push_back(2);
5123   Options.LoadSizes.push_back(1);
5124   return Options;
5125 }
5126 
5127 bool X86TTIImpl::prefersVectorizedAddressing() const {
5128   return supportsGather();
5129 }
5130 
5131 bool X86TTIImpl::supportsEfficientVectorElementLoadStore() const {
5132   return false;
5133 }
5134 
5135 bool X86TTIImpl::enableInterleavedAccessVectorization() {
5136   // TODO: We expect this to be beneficial regardless of arch,
5137   // but there are currently some unexplained performance artifacts on Atom.
5138   // As a temporary solution, disable on Atom.
5139   return !(ST->isAtom());
5140 }
5141 
5142 // Get estimation for interleaved load/store operations and strided load.
5143 // \p Indices contains indices for strided load.
5144 // \p Factor - the factor of interleaving.
5145 // AVX-512 provides 3-src shuffles that significantly reduces the cost.
5146 InstructionCost X86TTIImpl::getInterleavedMemoryOpCostAVX512(
5147     unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
5148     ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
5149     TTI::TargetCostKind CostKind, bool UseMaskForCond, bool UseMaskForGaps) {
5150   // VecTy for interleave memop is <VF*Factor x Elt>.
5151   // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
5152   // VecTy = <12 x i32>.
5153 
5154   // Calculate the number of memory operations (NumOfMemOps), required
5155   // for load/store the VecTy.
5156   MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
5157   unsigned VecTySize = DL.getTypeStoreSize(VecTy);
5158   unsigned LegalVTSize = LegalVT.getStoreSize();
5159   unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;
5160 
5161   // Get the cost of one memory operation.
5162   auto *SingleMemOpTy = FixedVectorType::get(VecTy->getElementType(),
5163                                              LegalVT.getVectorNumElements());
5164   InstructionCost MemOpCost;
5165   if (UseMaskForCond || UseMaskForGaps)
5166     MemOpCost = getMaskedMemoryOpCost(Opcode, SingleMemOpTy, Alignment,
5167                                       AddressSpace, CostKind);
5168   else
5169     MemOpCost = getMemoryOpCost(Opcode, SingleMemOpTy, MaybeAlign(Alignment),
5170                                 AddressSpace, CostKind);
5171 
5172   unsigned VF = VecTy->getNumElements() / Factor;
5173   MVT VT = MVT::getVectorVT(MVT::getVT(VecTy->getScalarType()), VF);
5174 
5175   // FIXME: this is the most conservative estimate for the mask cost.
5176   InstructionCost MaskCost;
5177   if (UseMaskForCond || UseMaskForGaps) {
5178     APInt DemandedLoadStoreElts = APInt::getZero(VecTy->getNumElements());
5179     for (unsigned Index : Indices) {
5180       assert(Index < Factor && "Invalid index for interleaved memory op");
5181       for (unsigned Elm = 0; Elm < VF; Elm++)
5182         DemandedLoadStoreElts.setBit(Index + Elm * Factor);
5183     }
5184 
5185     Type *I8Type = Type::getInt8Ty(VecTy->getContext());
5186 
5187     MaskCost = getReplicationShuffleCost(
5188         I8Type, Factor, VF,
5189         UseMaskForGaps ? DemandedLoadStoreElts
5190                        : APInt::getAllOnes(VecTy->getNumElements()),
5191         CostKind);
5192 
5193     // The Gaps mask is invariant and created outside the loop, therefore the
5194     // cost of creating it is not accounted for here. However if we have both
5195     // a MaskForGaps and some other mask that guards the execution of the
5196     // memory access, we need to account for the cost of And-ing the two masks
5197     // inside the loop.
5198     if (UseMaskForGaps) {
5199       auto *MaskVT = FixedVectorType::get(I8Type, VecTy->getNumElements());
5200       MaskCost += getArithmeticInstrCost(BinaryOperator::And, MaskVT, CostKind);
5201     }
5202   }
5203 
5204   if (Opcode == Instruction::Load) {
5205     // The tables (AVX512InterleavedLoadTbl and AVX512InterleavedStoreTbl)
5206     // contain the cost of the optimized shuffle sequence that the
5207     // X86InterleavedAccess pass will generate.
5208     // The cost of loads and stores are computed separately from the table.
5209 
5210     // X86InterleavedAccess support only the following interleaved-access group.
5211     static const CostTblEntry AVX512InterleavedLoadTbl[] = {
5212         {3, MVT::v16i8, 12}, //(load 48i8 and) deinterleave into 3 x 16i8
5213         {3, MVT::v32i8, 14}, //(load 96i8 and) deinterleave into 3 x 32i8
5214         {3, MVT::v64i8, 22}, //(load 96i8 and) deinterleave into 3 x 32i8
5215     };
5216 
5217     if (const auto *Entry =
5218             CostTableLookup(AVX512InterleavedLoadTbl, Factor, VT))
5219       return MaskCost + NumOfMemOps * MemOpCost + Entry->Cost;
5220     //If an entry does not exist, fallback to the default implementation.
5221 
5222     // Kind of shuffle depends on number of loaded values.
5223     // If we load the entire data in one register, we can use a 1-src shuffle.
5224     // Otherwise, we'll merge 2 sources in each operation.
5225     TTI::ShuffleKind ShuffleKind =
5226         (NumOfMemOps > 1) ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc;
5227 
5228     InstructionCost ShuffleCost =
5229         getShuffleCost(ShuffleKind, SingleMemOpTy, None, 0, nullptr);
5230 
5231     unsigned NumOfLoadsInInterleaveGrp =
5232         Indices.size() ? Indices.size() : Factor;
5233     auto *ResultTy = FixedVectorType::get(VecTy->getElementType(),
5234                                           VecTy->getNumElements() / Factor);
5235     InstructionCost NumOfResults =
5236         getTLI()->getTypeLegalizationCost(DL, ResultTy).first *
5237         NumOfLoadsInInterleaveGrp;
5238 
5239     // About a half of the loads may be folded in shuffles when we have only
5240     // one result. If we have more than one result, we do not fold loads at all.
5241     unsigned NumOfUnfoldedLoads =
5242         NumOfResults > 1 ? NumOfMemOps : NumOfMemOps / 2;
5243 
5244     // Get a number of shuffle operations per result.
5245     unsigned NumOfShufflesPerResult =
5246         std::max((unsigned)1, (unsigned)(NumOfMemOps - 1));
5247 
5248     // The SK_MergeTwoSrc shuffle clobbers one of src operands.
5249     // When we have more than one destination, we need additional instructions
5250     // to keep sources.
5251     InstructionCost NumOfMoves = 0;
5252     if (NumOfResults > 1 && ShuffleKind == TTI::SK_PermuteTwoSrc)
5253       NumOfMoves = NumOfResults * NumOfShufflesPerResult / 2;
5254 
5255     InstructionCost Cost = NumOfResults * NumOfShufflesPerResult * ShuffleCost +
5256                            MaskCost + NumOfUnfoldedLoads * MemOpCost +
5257                            NumOfMoves;
5258 
5259     return Cost;
5260   }
5261 
5262   // Store.
5263   assert(Opcode == Instruction::Store &&
5264          "Expected Store Instruction at this  point");
5265   // X86InterleavedAccess support only the following interleaved-access group.
5266   static const CostTblEntry AVX512InterleavedStoreTbl[] = {
5267       {3, MVT::v16i8, 12}, // interleave 3 x 16i8 into 48i8 (and store)
5268       {3, MVT::v32i8, 14}, // interleave 3 x 32i8 into 96i8 (and store)
5269       {3, MVT::v64i8, 26}, // interleave 3 x 64i8 into 96i8 (and store)
5270 
5271       {4, MVT::v8i8, 10},  // interleave 4 x 8i8  into 32i8  (and store)
5272       {4, MVT::v16i8, 11}, // interleave 4 x 16i8 into 64i8  (and store)
5273       {4, MVT::v32i8, 14}, // interleave 4 x 32i8 into 128i8 (and store)
5274       {4, MVT::v64i8, 24}  // interleave 4 x 32i8 into 256i8 (and store)
5275   };
5276 
5277   if (const auto *Entry =
5278           CostTableLookup(AVX512InterleavedStoreTbl, Factor, VT))
5279     return MaskCost + NumOfMemOps * MemOpCost + Entry->Cost;
5280   //If an entry does not exist, fallback to the default implementation.
5281 
5282   // There is no strided stores meanwhile. And store can't be folded in
5283   // shuffle.
5284   unsigned NumOfSources = Factor; // The number of values to be merged.
5285   InstructionCost ShuffleCost =
5286       getShuffleCost(TTI::SK_PermuteTwoSrc, SingleMemOpTy, None, 0, nullptr);
5287   unsigned NumOfShufflesPerStore = NumOfSources - 1;
5288 
5289   // The SK_MergeTwoSrc shuffle clobbers one of src operands.
5290   // We need additional instructions to keep sources.
5291   unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2;
5292   InstructionCost Cost =
5293       MaskCost +
5294       NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) +
5295       NumOfMoves;
5296   return Cost;
5297 }
5298 
5299 InstructionCost X86TTIImpl::getInterleavedMemoryOpCost(
5300     unsigned Opcode, Type *BaseTy, unsigned Factor, ArrayRef<unsigned> Indices,
5301     Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
5302     bool UseMaskForCond, bool UseMaskForGaps) {
5303   auto *VecTy = cast<FixedVectorType>(BaseTy);
5304 
5305   auto isSupportedOnAVX512 = [&](Type *VecTy, bool HasBW) {
5306     Type *EltTy = cast<VectorType>(VecTy)->getElementType();
5307     if (EltTy->isFloatTy() || EltTy->isDoubleTy() || EltTy->isIntegerTy(64) ||
5308         EltTy->isIntegerTy(32) || EltTy->isPointerTy())
5309       return true;
5310     if (EltTy->isIntegerTy(16) || EltTy->isIntegerTy(8) ||
5311         (!ST->useSoftFloat() && ST->hasFP16() && EltTy->isHalfTy()))
5312       return HasBW;
5313     return false;
5314   };
5315   if (ST->hasAVX512() && isSupportedOnAVX512(VecTy, ST->hasBWI()))
5316     return getInterleavedMemoryOpCostAVX512(
5317         Opcode, VecTy, Factor, Indices, Alignment,
5318         AddressSpace, CostKind, UseMaskForCond, UseMaskForGaps);
5319 
5320   if (UseMaskForCond || UseMaskForGaps)
5321     return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
5322                                              Alignment, AddressSpace, CostKind,
5323                                              UseMaskForCond, UseMaskForGaps);
5324 
5325   // Get estimation for interleaved load/store operations for SSE-AVX2.
5326   // As opposed to AVX-512, SSE-AVX2 do not have generic shuffles that allow
5327   // computing the cost using a generic formula as a function of generic
5328   // shuffles. We therefore use a lookup table instead, filled according to
5329   // the instruction sequences that codegen currently generates.
5330 
5331   // VecTy for interleave memop is <VF*Factor x Elt>.
5332   // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
5333   // VecTy = <12 x i32>.
5334   MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
5335 
5336   // This function can be called with VecTy=<6xi128>, Factor=3, in which case
5337   // the VF=2, while v2i128 is an unsupported MVT vector type
5338   // (see MachineValueType.h::getVectorVT()).
5339   if (!LegalVT.isVector())
5340     return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
5341                                              Alignment, AddressSpace, CostKind);
5342 
5343   unsigned VF = VecTy->getNumElements() / Factor;
5344   Type *ScalarTy = VecTy->getElementType();
5345   // Deduplicate entries, model floats/pointers as appropriately-sized integers.
5346   if (!ScalarTy->isIntegerTy())
5347     ScalarTy =
5348         Type::getIntNTy(ScalarTy->getContext(), DL.getTypeSizeInBits(ScalarTy));
5349 
5350   // Get the cost of all the memory operations.
5351   // FIXME: discount dead loads.
5352   InstructionCost MemOpCosts = getMemoryOpCost(
5353       Opcode, VecTy, MaybeAlign(Alignment), AddressSpace, CostKind);
5354 
5355   auto *VT = FixedVectorType::get(ScalarTy, VF);
5356   EVT ETy = TLI->getValueType(DL, VT);
5357   if (!ETy.isSimple())
5358     return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
5359                                              Alignment, AddressSpace, CostKind);
5360 
5361   // TODO: Complete for other data-types and strides.
5362   // Each combination of Stride, element bit width and VF results in a different
5363   // sequence; The cost tables are therefore accessed with:
5364   // Factor (stride) and VectorType=VFxiN.
5365   // The Cost accounts only for the shuffle sequence;
5366   // The cost of the loads/stores is accounted for separately.
5367   //
5368   static const CostTblEntry AVX2InterleavedLoadTbl[] = {
5369       {2, MVT::v2i8, 2},  // (load 4i8 and) deinterleave into 2 x 2i8
5370       {2, MVT::v4i8, 2},  // (load 8i8 and) deinterleave into 2 x 4i8
5371       {2, MVT::v8i8, 2},  // (load 16i8 and) deinterleave into 2 x 8i8
5372       {2, MVT::v16i8, 4}, // (load 32i8 and) deinterleave into 2 x 16i8
5373       {2, MVT::v32i8, 6}, // (load 64i8 and) deinterleave into 2 x 32i8
5374 
5375       {2, MVT::v8i16, 6},   // (load 16i16 and) deinterleave into 2 x 8i16
5376       {2, MVT::v16i16, 9},  // (load 32i16 and) deinterleave into 2 x 16i16
5377       {2, MVT::v32i16, 18}, // (load 64i16 and) deinterleave into 2 x 32i16
5378 
5379       {2, MVT::v8i32, 4},   // (load 16i32 and) deinterleave into 2 x 8i32
5380       {2, MVT::v16i32, 8},  // (load 32i32 and) deinterleave into 2 x 16i32
5381       {2, MVT::v32i32, 16}, // (load 64i32 and) deinterleave into 2 x 32i32
5382 
5383       {2, MVT::v4i64, 4},   // (load 8i64 and) deinterleave into 2 x 4i64
5384       {2, MVT::v8i64, 8},   // (load 16i64 and) deinterleave into 2 x 8i64
5385       {2, MVT::v16i64, 16}, // (load 32i64 and) deinterleave into 2 x 16i64
5386       {2, MVT::v32i64, 32}, // (load 64i64 and) deinterleave into 2 x 32i64
5387 
5388       {3, MVT::v2i8, 3},   // (load 6i8 and) deinterleave into 3 x 2i8
5389       {3, MVT::v4i8, 3},   // (load 12i8 and) deinterleave into 3 x 4i8
5390       {3, MVT::v8i8, 6},   // (load 24i8 and) deinterleave into 3 x 8i8
5391       {3, MVT::v16i8, 11}, // (load 48i8 and) deinterleave into 3 x 16i8
5392       {3, MVT::v32i8, 14}, // (load 96i8 and) deinterleave into 3 x 32i8
5393 
5394       {3, MVT::v2i16, 5},   // (load 6i16 and) deinterleave into 3 x 2i16
5395       {3, MVT::v4i16, 7},   // (load 12i16 and) deinterleave into 3 x 4i16
5396       {3, MVT::v8i16, 9},   // (load 24i16 and) deinterleave into 3 x 8i16
5397       {3, MVT::v16i16, 28}, // (load 48i16 and) deinterleave into 3 x 16i16
5398       {3, MVT::v32i16, 56}, // (load 96i16 and) deinterleave into 3 x 32i16
5399 
5400       {3, MVT::v2i32, 3},   // (load 6i32 and) deinterleave into 3 x 2i32
5401       {3, MVT::v4i32, 3},   // (load 12i32 and) deinterleave into 3 x 4i32
5402       {3, MVT::v8i32, 7},   // (load 24i32 and) deinterleave into 3 x 8i32
5403       {3, MVT::v16i32, 14}, // (load 48i32 and) deinterleave into 3 x 16i32
5404       {3, MVT::v32i32, 32}, // (load 96i32 and) deinterleave into 3 x 32i32
5405 
5406       {3, MVT::v2i64, 1},   // (load 6i64 and) deinterleave into 3 x 2i64
5407       {3, MVT::v4i64, 5},   // (load 12i64 and) deinterleave into 3 x 4i64
5408       {3, MVT::v8i64, 10},  // (load 24i64 and) deinterleave into 3 x 8i64
5409       {3, MVT::v16i64, 20}, // (load 48i64 and) deinterleave into 3 x 16i64
5410 
5411       {4, MVT::v2i8, 4},   // (load 8i8 and) deinterleave into 4 x 2i8
5412       {4, MVT::v4i8, 4},   // (load 16i8 and) deinterleave into 4 x 4i8
5413       {4, MVT::v8i8, 12},  // (load 32i8 and) deinterleave into 4 x 8i8
5414       {4, MVT::v16i8, 24}, // (load 64i8 and) deinterleave into 4 x 16i8
5415       {4, MVT::v32i8, 56}, // (load 128i8 and) deinterleave into 4 x 32i8
5416 
5417       {4, MVT::v2i16, 6},    // (load 8i16 and) deinterleave into 4 x 2i16
5418       {4, MVT::v4i16, 17},   // (load 16i16 and) deinterleave into 4 x 4i16
5419       {4, MVT::v8i16, 33},   // (load 32i16 and) deinterleave into 4 x 8i16
5420       {4, MVT::v16i16, 75},  // (load 64i16 and) deinterleave into 4 x 16i16
5421       {4, MVT::v32i16, 150}, // (load 128i16 and) deinterleave into 4 x 32i16
5422 
5423       {4, MVT::v2i32, 4},   // (load 8i32 and) deinterleave into 4 x 2i32
5424       {4, MVT::v4i32, 8},   // (load 16i32 and) deinterleave into 4 x 4i32
5425       {4, MVT::v8i32, 16},  // (load 32i32 and) deinterleave into 4 x 8i32
5426       {4, MVT::v16i32, 32}, // (load 64i32 and) deinterleave into 4 x 16i32
5427       {4, MVT::v32i32, 68}, // (load 128i32 and) deinterleave into 4 x 32i32
5428 
5429       {4, MVT::v2i64, 6},  // (load 8i64 and) deinterleave into 4 x 2i64
5430       {4, MVT::v4i64, 8},  // (load 16i64 and) deinterleave into 4 x 4i64
5431       {4, MVT::v8i64, 20}, // (load 32i64 and) deinterleave into 4 x 8i64
5432       {4, MVT::v16i64, 40}, // (load 64i64 and) deinterleave into 4 x 16i64
5433 
5434       {6, MVT::v2i8, 6},   // (load 12i8 and) deinterleave into 6 x 2i8
5435       {6, MVT::v4i8, 14},  // (load 24i8 and) deinterleave into 6 x 4i8
5436       {6, MVT::v8i8, 18},  // (load 48i8 and) deinterleave into 6 x 8i8
5437       {6, MVT::v16i8, 43}, // (load 96i8 and) deinterleave into 6 x 16i8
5438       {6, MVT::v32i8, 82}, // (load 192i8 and) deinterleave into 6 x 32i8
5439 
5440       {6, MVT::v2i16, 13},   // (load 12i16 and) deinterleave into 6 x 2i16
5441       {6, MVT::v4i16, 9},    // (load 24i16 and) deinterleave into 6 x 4i16
5442       {6, MVT::v8i16, 39},   // (load 48i16 and) deinterleave into 6 x 8i16
5443       {6, MVT::v16i16, 106}, // (load 96i16 and) deinterleave into 6 x 16i16
5444       {6, MVT::v32i16, 212}, // (load 192i16 and) deinterleave into 6 x 32i16
5445 
5446       {6, MVT::v2i32, 6},   // (load 12i32 and) deinterleave into 6 x 2i32
5447       {6, MVT::v4i32, 15},  // (load 24i32 and) deinterleave into 6 x 4i32
5448       {6, MVT::v8i32, 31},  // (load 48i32 and) deinterleave into 6 x 8i32
5449       {6, MVT::v16i32, 64}, // (load 96i32 and) deinterleave into 6 x 16i32
5450 
5451       {6, MVT::v2i64, 6},  // (load 12i64 and) deinterleave into 6 x 2i64
5452       {6, MVT::v4i64, 18}, // (load 24i64 and) deinterleave into 6 x 4i64
5453       {6, MVT::v8i64, 36}, // (load 48i64 and) deinterleave into 6 x 8i64
5454 
5455       {8, MVT::v8i32, 40} // (load 64i32 and) deinterleave into 8 x 8i32
5456   };
5457 
5458   static const CostTblEntry SSSE3InterleavedLoadTbl[] = {
5459       {2, MVT::v4i16, 2},   // (load 8i16 and) deinterleave into 2 x 4i16
5460   };
5461 
5462   static const CostTblEntry SSE2InterleavedLoadTbl[] = {
5463       {2, MVT::v2i16, 2},   // (load 4i16 and) deinterleave into 2 x 2i16
5464       {2, MVT::v4i16, 7},   // (load 8i16 and) deinterleave into 2 x 4i16
5465 
5466       {2, MVT::v2i32, 2},   // (load 4i32 and) deinterleave into 2 x 2i32
5467       {2, MVT::v4i32, 2},   // (load 8i32 and) deinterleave into 2 x 4i32
5468 
5469       {2, MVT::v2i64, 2},   // (load 4i64 and) deinterleave into 2 x 2i64
5470   };
5471 
5472   static const CostTblEntry AVX2InterleavedStoreTbl[] = {
5473       {2, MVT::v16i8, 3}, // interleave 2 x 16i8 into 32i8 (and store)
5474       {2, MVT::v32i8, 4}, // interleave 2 x 32i8 into 64i8 (and store)
5475 
5476       {2, MVT::v8i16, 3},  // interleave 2 x 8i16 into 16i16 (and store)
5477       {2, MVT::v16i16, 4}, // interleave 2 x 16i16 into 32i16 (and store)
5478       {2, MVT::v32i16, 8}, // interleave 2 x 32i16 into 64i16 (and store)
5479 
5480       {2, MVT::v4i32, 2},   // interleave 2 x 4i32 into 8i32 (and store)
5481       {2, MVT::v8i32, 4},   // interleave 2 x 8i32 into 16i32 (and store)
5482       {2, MVT::v16i32, 8},  // interleave 2 x 16i32 into 32i32 (and store)
5483       {2, MVT::v32i32, 16}, // interleave 2 x 32i32 into 64i32 (and store)
5484 
5485       {2, MVT::v2i64, 2},   // interleave 2 x 2i64 into 4i64 (and store)
5486       {2, MVT::v4i64, 4},   // interleave 2 x 4i64 into 8i64 (and store)
5487       {2, MVT::v8i64, 8},   // interleave 2 x 8i64 into 16i64 (and store)
5488       {2, MVT::v16i64, 16}, // interleave 2 x 16i64 into 32i64 (and store)
5489       {2, MVT::v32i64, 32}, // interleave 2 x 32i64 into 64i64 (and store)
5490 
5491       {3, MVT::v2i8, 4},   // interleave 3 x 2i8 into 6i8 (and store)
5492       {3, MVT::v4i8, 4},   // interleave 3 x 4i8 into 12i8 (and store)
5493       {3, MVT::v8i8, 6},   // interleave 3 x 8i8 into 24i8 (and store)
5494       {3, MVT::v16i8, 11}, // interleave 3 x 16i8 into 48i8 (and store)
5495       {3, MVT::v32i8, 13}, // interleave 3 x 32i8 into 96i8 (and store)
5496 
5497       {3, MVT::v2i16, 4},   // interleave 3 x 2i16 into 6i16 (and store)
5498       {3, MVT::v4i16, 6},   // interleave 3 x 4i16 into 12i16 (and store)
5499       {3, MVT::v8i16, 12},  // interleave 3 x 8i16 into 24i16 (and store)
5500       {3, MVT::v16i16, 27}, // interleave 3 x 16i16 into 48i16 (and store)
5501       {3, MVT::v32i16, 54}, // interleave 3 x 32i16 into 96i16 (and store)
5502 
5503       {3, MVT::v2i32, 4},   // interleave 3 x 2i32 into 6i32 (and store)
5504       {3, MVT::v4i32, 5},   // interleave 3 x 4i32 into 12i32 (and store)
5505       {3, MVT::v8i32, 11},  // interleave 3 x 8i32 into 24i32 (and store)
5506       {3, MVT::v16i32, 22}, // interleave 3 x 16i32 into 48i32 (and store)
5507       {3, MVT::v32i32, 48}, // interleave 3 x 32i32 into 96i32 (and store)
5508 
5509       {3, MVT::v2i64, 4},   // interleave 3 x 2i64 into 6i64 (and store)
5510       {3, MVT::v4i64, 6},   // interleave 3 x 4i64 into 12i64 (and store)
5511       {3, MVT::v8i64, 12},  // interleave 3 x 8i64 into 24i64 (and store)
5512       {3, MVT::v16i64, 24}, // interleave 3 x 16i64 into 48i64 (and store)
5513 
5514       {4, MVT::v2i8, 4},   // interleave 4 x 2i8 into 8i8 (and store)
5515       {4, MVT::v4i8, 4},   // interleave 4 x 4i8 into 16i8 (and store)
5516       {4, MVT::v8i8, 4},   // interleave 4 x 8i8 into 32i8 (and store)
5517       {4, MVT::v16i8, 8},  // interleave 4 x 16i8 into 64i8 (and store)
5518       {4, MVT::v32i8, 12}, // interleave 4 x 32i8 into 128i8 (and store)
5519 
5520       {4, MVT::v2i16, 2},   // interleave 4 x 2i16 into 8i16 (and store)
5521       {4, MVT::v4i16, 6},   // interleave 4 x 4i16 into 16i16 (and store)
5522       {4, MVT::v8i16, 10},  // interleave 4 x 8i16 into 32i16 (and store)
5523       {4, MVT::v16i16, 32}, // interleave 4 x 16i16 into 64i16 (and store)
5524       {4, MVT::v32i16, 64}, // interleave 4 x 32i16 into 128i16 (and store)
5525 
5526       {4, MVT::v2i32, 5},   // interleave 4 x 2i32 into 8i32 (and store)
5527       {4, MVT::v4i32, 6},   // interleave 4 x 4i32 into 16i32 (and store)
5528       {4, MVT::v8i32, 16},  // interleave 4 x 8i32 into 32i32 (and store)
5529       {4, MVT::v16i32, 32}, // interleave 4 x 16i32 into 64i32 (and store)
5530       {4, MVT::v32i32, 64}, // interleave 4 x 32i32 into 128i32 (and store)
5531 
5532       {4, MVT::v2i64, 6},  // interleave 4 x 2i64 into 8i64 (and store)
5533       {4, MVT::v4i64, 8},  // interleave 4 x 4i64 into 16i64 (and store)
5534       {4, MVT::v8i64, 20}, // interleave 4 x 8i64 into 32i64 (and store)
5535       {4, MVT::v16i64, 40}, // interleave 4 x 16i64 into 64i64 (and store)
5536 
5537       {6, MVT::v2i8, 7},   // interleave 6 x 2i8 into 12i8 (and store)
5538       {6, MVT::v4i8, 9},   // interleave 6 x 4i8 into 24i8 (and store)
5539       {6, MVT::v8i8, 16},  // interleave 6 x 8i8 into 48i8 (and store)
5540       {6, MVT::v16i8, 27}, // interleave 6 x 16i8 into 96i8 (and store)
5541       {6, MVT::v32i8, 90}, // interleave 6 x 32i8 into 192i8 (and store)
5542 
5543       {6, MVT::v2i16, 10},  // interleave 6 x 2i16 into 12i16 (and store)
5544       {6, MVT::v4i16, 15},  // interleave 6 x 4i16 into 24i16 (and store)
5545       {6, MVT::v8i16, 21},  // interleave 6 x 8i16 into 48i16 (and store)
5546       {6, MVT::v16i16, 58}, // interleave 6 x 16i16 into 96i16 (and store)
5547       {6, MVT::v32i16, 90}, // interleave 6 x 32i16 into 192i16 (and store)
5548 
5549       {6, MVT::v2i32, 9},   // interleave 6 x 2i32 into 12i32 (and store)
5550       {6, MVT::v4i32, 12},  // interleave 6 x 4i32 into 24i32 (and store)
5551       {6, MVT::v8i32, 33},  // interleave 6 x 8i32 into 48i32 (and store)
5552       {6, MVT::v16i32, 66}, // interleave 6 x 16i32 into 96i32 (and store)
5553 
5554       {6, MVT::v2i64, 8},  // interleave 6 x 2i64 into 12i64 (and store)
5555       {6, MVT::v4i64, 15}, // interleave 6 x 4i64 into 24i64 (and store)
5556       {6, MVT::v8i64, 30}, // interleave 6 x 8i64 into 48i64 (and store)
5557   };
5558 
5559   static const CostTblEntry SSE2InterleavedStoreTbl[] = {
5560       {2, MVT::v2i8, 1},   // interleave 2 x 2i8 into 4i8 (and store)
5561       {2, MVT::v4i8, 1},   // interleave 2 x 4i8 into 8i8 (and store)
5562       {2, MVT::v8i8, 1},   // interleave 2 x 8i8 into 16i8 (and store)
5563 
5564       {2, MVT::v2i16, 1},  // interleave 2 x 2i16 into 4i16 (and store)
5565       {2, MVT::v4i16, 1},  // interleave 2 x 4i16 into 8i16 (and store)
5566 
5567       {2, MVT::v2i32, 1},  // interleave 2 x 2i32 into 4i32 (and store)
5568   };
5569 
5570   if (Opcode == Instruction::Load) {
5571     auto GetDiscountedCost = [Factor, NumMembers = Indices.size(),
5572                               MemOpCosts](const CostTblEntry *Entry) {
5573       // NOTE: this is just an approximation!
5574       //       It can over/under -estimate the cost!
5575       return MemOpCosts + divideCeil(NumMembers * Entry->Cost, Factor);
5576     };
5577 
5578     if (ST->hasAVX2())
5579       if (const auto *Entry = CostTableLookup(AVX2InterleavedLoadTbl, Factor,
5580                                               ETy.getSimpleVT()))
5581         return GetDiscountedCost(Entry);
5582 
5583     if (ST->hasSSSE3())
5584       if (const auto *Entry = CostTableLookup(SSSE3InterleavedLoadTbl, Factor,
5585                                               ETy.getSimpleVT()))
5586         return GetDiscountedCost(Entry);
5587 
5588     if (ST->hasSSE2())
5589       if (const auto *Entry = CostTableLookup(SSE2InterleavedLoadTbl, Factor,
5590                                               ETy.getSimpleVT()))
5591         return GetDiscountedCost(Entry);
5592   } else {
5593     assert(Opcode == Instruction::Store &&
5594            "Expected Store Instruction at this point");
5595     assert((!Indices.size() || Indices.size() == Factor) &&
5596            "Interleaved store only supports fully-interleaved groups.");
5597     if (ST->hasAVX2())
5598       if (const auto *Entry = CostTableLookup(AVX2InterleavedStoreTbl, Factor,
5599                                               ETy.getSimpleVT()))
5600         return MemOpCosts + Entry->Cost;
5601 
5602     if (ST->hasSSE2())
5603       if (const auto *Entry = CostTableLookup(SSE2InterleavedStoreTbl, Factor,
5604                                               ETy.getSimpleVT()))
5605         return MemOpCosts + Entry->Cost;
5606   }
5607 
5608   return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
5609                                            Alignment, AddressSpace, CostKind,
5610                                            UseMaskForCond, UseMaskForGaps);
5611 }
5612