1 //===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements a TargetTransformInfo analysis pass specific to the
10 /// X86 target machine. It uses the target's detailed information to provide
11 /// more precise answers to certain TTI queries, while letting the target
12 /// independent and default TTI implementations handle the rest.
13 ///
14 //===----------------------------------------------------------------------===//
15 /// About Cost Model numbers used below it's necessary to say the following:
16 /// the numbers correspond to some "generic" X86 CPU instead of usage of
17 /// concrete CPU model. Usually the numbers correspond to CPU where the feature
18 /// apeared at the first time. For example, if we do Subtarget.hasSSE42() in
19 /// the lookups below the cost is based on Nehalem as that was the first CPU
20 /// to support that feature level and thus has most likely the worst case cost.
21 /// Some examples of other technologies/CPUs:
22 ///   SSE 3   - Pentium4 / Athlon64
23 ///   SSE 4.1 - Penryn
24 ///   SSE 4.2 - Nehalem
25 ///   AVX     - Sandy Bridge
26 ///   AVX2    - Haswell
27 ///   AVX-512 - Xeon Phi / Skylake
28 /// And some examples of instruction target dependent costs (latency)
29 ///                   divss     sqrtss          rsqrtss
30 ///   AMD K7            11-16     19              3
31 ///   Piledriver        9-24      13-15           5
32 ///   Jaguar            14        16              2
33 ///   Pentium II,III    18        30              2
34 ///   Nehalem           7-14      7-18            3
35 ///   Haswell           10-13     11              5
36 /// TODO: Develop and implement  the target dependent cost model and
37 /// specialize cost numbers for different Cost Model Targets such as throughput,
38 /// code size, latency and uop count.
39 //===----------------------------------------------------------------------===//
40 
41 #include "X86TargetTransformInfo.h"
42 #include "llvm/Analysis/TargetTransformInfo.h"
43 #include "llvm/CodeGen/BasicTTIImpl.h"
44 #include "llvm/CodeGen/CostTable.h"
45 #include "llvm/CodeGen/TargetLowering.h"
46 #include "llvm/IR/IntrinsicInst.h"
47 #include "llvm/Support/Debug.h"
48 
49 using namespace llvm;
50 
51 #define DEBUG_TYPE "x86tti"
52 
53 //===----------------------------------------------------------------------===//
54 //
55 // X86 cost model.
56 //
57 //===----------------------------------------------------------------------===//
58 
59 TargetTransformInfo::PopcntSupportKind
60 X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
61   assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
62   // TODO: Currently the __builtin_popcount() implementation using SSE3
63   //   instructions is inefficient. Once the problem is fixed, we should
64   //   call ST->hasSSE3() instead of ST->hasPOPCNT().
65   return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
66 }
67 
68 llvm::Optional<unsigned> X86TTIImpl::getCacheSize(
69   TargetTransformInfo::CacheLevel Level) const {
70   switch (Level) {
71   case TargetTransformInfo::CacheLevel::L1D:
72     //   - Penryn
73     //   - Nehalem
74     //   - Westmere
75     //   - Sandy Bridge
76     //   - Ivy Bridge
77     //   - Haswell
78     //   - Broadwell
79     //   - Skylake
80     //   - Kabylake
81     return 32 * 1024;  //  32 KByte
82   case TargetTransformInfo::CacheLevel::L2D:
83     //   - Penryn
84     //   - Nehalem
85     //   - Westmere
86     //   - Sandy Bridge
87     //   - Ivy Bridge
88     //   - Haswell
89     //   - Broadwell
90     //   - Skylake
91     //   - Kabylake
92     return 256 * 1024; // 256 KByte
93   }
94 
95   llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
96 }
97 
98 llvm::Optional<unsigned> X86TTIImpl::getCacheAssociativity(
99   TargetTransformInfo::CacheLevel Level) const {
100   //   - Penryn
101   //   - Nehalem
102   //   - Westmere
103   //   - Sandy Bridge
104   //   - Ivy Bridge
105   //   - Haswell
106   //   - Broadwell
107   //   - Skylake
108   //   - Kabylake
109   switch (Level) {
110   case TargetTransformInfo::CacheLevel::L1D:
111     LLVM_FALLTHROUGH;
112   case TargetTransformInfo::CacheLevel::L2D:
113     return 8;
114   }
115 
116   llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
117 }
118 
119 unsigned X86TTIImpl::getNumberOfRegisters(unsigned ClassID) const {
120   bool Vector = (ClassID == 1);
121   if (Vector && !ST->hasSSE1())
122     return 0;
123 
124   if (ST->is64Bit()) {
125     if (Vector && ST->hasAVX512())
126       return 32;
127     return 16;
128   }
129   return 8;
130 }
131 
132 TypeSize
133 X86TTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
134   unsigned PreferVectorWidth = ST->getPreferVectorWidth();
135   switch (K) {
136   case TargetTransformInfo::RGK_Scalar:
137     return TypeSize::getFixed(ST->is64Bit() ? 64 : 32);
138   case TargetTransformInfo::RGK_FixedWidthVector:
139     if (ST->hasAVX512() && PreferVectorWidth >= 512)
140       return TypeSize::getFixed(512);
141     if (ST->hasAVX() && PreferVectorWidth >= 256)
142       return TypeSize::getFixed(256);
143     if (ST->hasSSE1() && PreferVectorWidth >= 128)
144       return TypeSize::getFixed(128);
145     return TypeSize::getFixed(0);
146   case TargetTransformInfo::RGK_ScalableVector:
147     return TypeSize::getScalable(0);
148   }
149 
150   llvm_unreachable("Unsupported register kind");
151 }
152 
153 unsigned X86TTIImpl::getLoadStoreVecRegBitWidth(unsigned) const {
154   return getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
155       .getFixedSize();
156 }
157 
158 unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
159   // If the loop will not be vectorized, don't interleave the loop.
160   // Let regular unroll to unroll the loop, which saves the overflow
161   // check and memory check cost.
162   if (VF == 1)
163     return 1;
164 
165   if (ST->isAtom())
166     return 1;
167 
168   // Sandybridge and Haswell have multiple execution ports and pipelined
169   // vector units.
170   if (ST->hasAVX())
171     return 4;
172 
173   return 2;
174 }
175 
176 InstructionCost X86TTIImpl::getArithmeticInstrCost(
177     unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
178     TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
179     TTI::OperandValueProperties Opd1PropInfo,
180     TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
181     const Instruction *CxtI) {
182   // TODO: Handle more cost kinds.
183   if (CostKind != TTI::TCK_RecipThroughput)
184     return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
185                                          Op2Info, Opd1PropInfo,
186                                          Opd2PropInfo, Args, CxtI);
187 
188   // vXi8 multiplications are always promoted to vXi16.
189   if (Opcode == Instruction::Mul && Ty->isVectorTy() &&
190       Ty->getScalarSizeInBits() == 8) {
191     Type *WideVecTy =
192         VectorType::getExtendedElementVectorType(cast<VectorType>(Ty));
193     return getCastInstrCost(Instruction::ZExt, WideVecTy, Ty,
194                             TargetTransformInfo::CastContextHint::None,
195                             CostKind) +
196            getCastInstrCost(Instruction::Trunc, Ty, WideVecTy,
197                             TargetTransformInfo::CastContextHint::None,
198                             CostKind) +
199            getArithmeticInstrCost(Opcode, WideVecTy, CostKind, Op1Info, Op2Info,
200                                   Opd1PropInfo, Opd2PropInfo);
201   }
202 
203   // Legalize the type.
204   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
205 
206   int ISD = TLI->InstructionOpcodeToISD(Opcode);
207   assert(ISD && "Invalid opcode");
208 
209   if (ISD == ISD::MUL && Args.size() == 2 && LT.second.isVector() &&
210       LT.second.getScalarType() == MVT::i32) {
211     // Check if the operands can be represented as a smaller datatype.
212     bool Op1Signed = false, Op2Signed = false;
213     unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
214     unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);
215     unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);
216 
217     // If both are representable as i15 and at least one is constant,
218     // zero-extended, or sign-extended from vXi16 (or less pre-SSE41) then we
219     // can treat this as PMADDWD which has the same costs as a vXi16 multiply.
220     if (OpMinSize <= 15 && !ST->isPMADDWDSlow()) {
221       bool Op1Constant =
222           isa<ConstantDataVector>(Args[0]) || isa<ConstantVector>(Args[0]);
223       bool Op2Constant =
224           isa<ConstantDataVector>(Args[1]) || isa<ConstantVector>(Args[1]);
225       bool Op1Sext = isa<SExtInst>(Args[0]) &&
226                      (Op1MinSize == 15 || (Op1MinSize < 15 && !ST->hasSSE41()));
227       bool Op2Sext = isa<SExtInst>(Args[1]) &&
228                      (Op2MinSize == 15 || (Op2MinSize < 15 && !ST->hasSSE41()));
229 
230       bool IsZeroExtended = !Op1Signed || !Op2Signed;
231       bool IsConstant = Op1Constant || Op2Constant;
232       bool IsSext = Op1Sext || Op2Sext;
233       if (IsConstant || IsZeroExtended || IsSext)
234         LT.second =
235             MVT::getVectorVT(MVT::i16, 2 * LT.second.getVectorNumElements());
236     }
237   }
238 
239   if ((ISD == ISD::MUL || ISD == ISD::SDIV || ISD == ISD::SREM ||
240        ISD == ISD::UDIV || ISD == ISD::UREM) &&
241       (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
242        Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
243       Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
244     // Vector multiply by pow2 will be simplified to shifts.
245     if (ISD == ISD::MUL) {
246       InstructionCost Cost = getArithmeticInstrCost(
247           Instruction::Shl, Ty, CostKind, Op1Info, Op2Info,
248           TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
249       return Cost;
250     }
251 
252     if (ISD == ISD::SDIV || ISD == ISD::SREM) {
253       // On X86, vector signed division by constants power-of-two are
254       // normally expanded to the sequence SRA + SRL + ADD + SRA.
255       // The OperandValue properties may not be the same as that of the previous
256       // operation; conservatively assume OP_None.
257       InstructionCost Cost =
258           2 * getArithmeticInstrCost(Instruction::AShr, Ty, CostKind, Op1Info,
259                                      Op2Info, TargetTransformInfo::OP_None,
260                                      TargetTransformInfo::OP_None);
261       Cost += getArithmeticInstrCost(Instruction::LShr, Ty, CostKind, Op1Info,
262                                      Op2Info, TargetTransformInfo::OP_None,
263                                      TargetTransformInfo::OP_None);
264       Cost += getArithmeticInstrCost(Instruction::Add, Ty, CostKind, Op1Info,
265                                      Op2Info, TargetTransformInfo::OP_None,
266                                      TargetTransformInfo::OP_None);
267 
268       if (ISD == ISD::SREM) {
269         // For SREM: (X % C) is the equivalent of (X - (X/C)*C)
270         Cost += getArithmeticInstrCost(Instruction::Mul, Ty, CostKind, Op1Info,
271                                        Op2Info);
272         Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind, Op1Info,
273                                        Op2Info);
274       }
275 
276       return Cost;
277     }
278 
279     // Vector unsigned division/remainder will be simplified to shifts/masks.
280     if (ISD == ISD::UDIV)
281       return getArithmeticInstrCost(Instruction::LShr, Ty, CostKind, Op1Info,
282                                     Op2Info, TargetTransformInfo::OP_None,
283                                     TargetTransformInfo::OP_None);
284     // UREM
285     return getArithmeticInstrCost(Instruction::And, Ty, CostKind, Op1Info,
286                                   Op2Info, TargetTransformInfo::OP_None,
287                                   TargetTransformInfo::OP_None);
288   }
289 
290   static const CostTblEntry GLMCostTable[] = {
291     { ISD::FDIV,  MVT::f32,   18 }, // divss
292     { ISD::FDIV,  MVT::v4f32, 35 }, // divps
293     { ISD::FDIV,  MVT::f64,   33 }, // divsd
294     { ISD::FDIV,  MVT::v2f64, 65 }, // divpd
295   };
296 
297   if (ST->useGLMDivSqrtCosts())
298     if (const auto *Entry = CostTableLookup(GLMCostTable, ISD,
299                                             LT.second))
300       return LT.first * Entry->Cost;
301 
302   static const CostTblEntry SLMCostTable[] = {
303     { ISD::MUL,   MVT::v4i32, 11 }, // pmulld
304     { ISD::MUL,   MVT::v8i16, 2  }, // pmullw
305     { ISD::FMUL,  MVT::f64,   2  }, // mulsd
306     { ISD::FMUL,  MVT::v2f64, 4  }, // mulpd
307     { ISD::FMUL,  MVT::v4f32, 2  }, // mulps
308     { ISD::FDIV,  MVT::f32,   17 }, // divss
309     { ISD::FDIV,  MVT::v4f32, 39 }, // divps
310     { ISD::FDIV,  MVT::f64,   32 }, // divsd
311     { ISD::FDIV,  MVT::v2f64, 69 }, // divpd
312     { ISD::FADD,  MVT::v2f64, 2  }, // addpd
313     { ISD::FSUB,  MVT::v2f64, 2  }, // subpd
314     // v2i64/v4i64 mul is custom lowered as a series of long:
315     // multiplies(3), shifts(3) and adds(2)
316     // slm muldq version throughput is 2 and addq throughput 4
317     // thus: 3X2 (muldq throughput) + 3X1 (shift throughput) +
318     //       3X4 (addq throughput) = 17
319     { ISD::MUL,   MVT::v2i64, 17 },
320     // slm addq\subq throughput is 4
321     { ISD::ADD,   MVT::v2i64, 4  },
322     { ISD::SUB,   MVT::v2i64, 4  },
323   };
324 
325   if (ST->useSLMArithCosts()) {
326     if (Args.size() == 2 && ISD == ISD::MUL && LT.second == MVT::v4i32) {
327       // Check if the operands can be shrinked into a smaller datatype.
328       // TODO: Merge this into generiic vXi32 MUL patterns above.
329       bool Op1Signed = false;
330       unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
331       bool Op2Signed = false;
332       unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);
333 
334       bool SignedMode = Op1Signed || Op2Signed;
335       unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);
336 
337       if (OpMinSize <= 7)
338         return LT.first * 3; // pmullw/sext
339       if (!SignedMode && OpMinSize <= 8)
340         return LT.first * 3; // pmullw/zext
341       if (OpMinSize <= 15)
342         return LT.first * 5; // pmullw/pmulhw/pshuf
343       if (!SignedMode && OpMinSize <= 16)
344         return LT.first * 5; // pmullw/pmulhw/pshuf
345     }
346 
347     if (const auto *Entry = CostTableLookup(SLMCostTable, ISD,
348                                             LT.second)) {
349       return LT.first * Entry->Cost;
350     }
351   }
352 
353   static const CostTblEntry AVX512BWUniformConstCostTable[] = {
354     { ISD::SHL,  MVT::v64i8,   2 }, // psllw + pand.
355     { ISD::SRL,  MVT::v64i8,   2 }, // psrlw + pand.
356     { ISD::SRA,  MVT::v64i8,   4 }, // psrlw, pand, pxor, psubb.
357   };
358 
359   if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
360       ST->hasBWI()) {
361     if (const auto *Entry = CostTableLookup(AVX512BWUniformConstCostTable, ISD,
362                                             LT.second))
363       return LT.first * Entry->Cost;
364   }
365 
366   static const CostTblEntry AVX512UniformConstCostTable[] = {
367     { ISD::SRA,  MVT::v2i64,   1 },
368     { ISD::SRA,  MVT::v4i64,   1 },
369     { ISD::SRA,  MVT::v8i64,   1 },
370 
371     { ISD::SHL,  MVT::v64i8,   4 }, // psllw + pand.
372     { ISD::SRL,  MVT::v64i8,   4 }, // psrlw + pand.
373     { ISD::SRA,  MVT::v64i8,   8 }, // psrlw, pand, pxor, psubb.
374 
375     { ISD::SDIV, MVT::v16i32,  6 }, // pmuludq sequence
376     { ISD::SREM, MVT::v16i32,  8 }, // pmuludq+mul+sub sequence
377     { ISD::UDIV, MVT::v16i32,  5 }, // pmuludq sequence
378     { ISD::UREM, MVT::v16i32,  7 }, // pmuludq+mul+sub sequence
379   };
380 
381   if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
382       ST->hasAVX512()) {
383     if (const auto *Entry = CostTableLookup(AVX512UniformConstCostTable, ISD,
384                                             LT.second))
385       return LT.first * Entry->Cost;
386   }
387 
388   static const CostTblEntry AVX2UniformConstCostTable[] = {
389     { ISD::SHL,  MVT::v32i8,   2 }, // psllw + pand.
390     { ISD::SRL,  MVT::v32i8,   2 }, // psrlw + pand.
391     { ISD::SRA,  MVT::v32i8,   4 }, // psrlw, pand, pxor, psubb.
392 
393     { ISD::SRA,  MVT::v4i64,   4 }, // 2 x psrad + shuffle.
394 
395     { ISD::SDIV, MVT::v8i32,   6 }, // pmuludq sequence
396     { ISD::SREM, MVT::v8i32,   8 }, // pmuludq+mul+sub sequence
397     { ISD::UDIV, MVT::v8i32,   5 }, // pmuludq sequence
398     { ISD::UREM, MVT::v8i32,   7 }, // pmuludq+mul+sub sequence
399   };
400 
401   if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
402       ST->hasAVX2()) {
403     if (const auto *Entry = CostTableLookup(AVX2UniformConstCostTable, ISD,
404                                             LT.second))
405       return LT.first * Entry->Cost;
406   }
407 
408   static const CostTblEntry SSE2UniformConstCostTable[] = {
409     { ISD::SHL,  MVT::v16i8,     2 }, // psllw + pand.
410     { ISD::SRL,  MVT::v16i8,     2 }, // psrlw + pand.
411     { ISD::SRA,  MVT::v16i8,     4 }, // psrlw, pand, pxor, psubb.
412 
413     { ISD::SHL,  MVT::v32i8,   4+2 }, // 2*(psllw + pand) + split.
414     { ISD::SRL,  MVT::v32i8,   4+2 }, // 2*(psrlw + pand) + split.
415     { ISD::SRA,  MVT::v32i8,   8+2 }, // 2*(psrlw, pand, pxor, psubb) + split.
416 
417     { ISD::SDIV, MVT::v8i32,  12+2 }, // 2*pmuludq sequence + split.
418     { ISD::SREM, MVT::v8i32,  16+2 }, // 2*pmuludq+mul+sub sequence + split.
419     { ISD::SDIV, MVT::v4i32,     6 }, // pmuludq sequence
420     { ISD::SREM, MVT::v4i32,     8 }, // pmuludq+mul+sub sequence
421     { ISD::UDIV, MVT::v8i32,  10+2 }, // 2*pmuludq sequence + split.
422     { ISD::UREM, MVT::v8i32,  14+2 }, // 2*pmuludq+mul+sub sequence + split.
423     { ISD::UDIV, MVT::v4i32,     5 }, // pmuludq sequence
424     { ISD::UREM, MVT::v4i32,     7 }, // pmuludq+mul+sub sequence
425   };
426 
427   // XOP has faster vXi8 shifts.
428   if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
429       ST->hasSSE2() && !ST->hasXOP()) {
430     if (const auto *Entry =
431             CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second))
432       return LT.first * Entry->Cost;
433   }
434 
435   static const CostTblEntry AVX512BWConstCostTable[] = {
436     { ISD::SDIV, MVT::v64i8,  14 }, // 2*ext+2*pmulhw sequence
437     { ISD::SREM, MVT::v64i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
438     { ISD::UDIV, MVT::v64i8,  14 }, // 2*ext+2*pmulhw sequence
439     { ISD::UREM, MVT::v64i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
440     { ISD::SDIV, MVT::v32i16,  6 }, // vpmulhw sequence
441     { ISD::SREM, MVT::v32i16,  8 }, // vpmulhw+mul+sub sequence
442     { ISD::UDIV, MVT::v32i16,  6 }, // vpmulhuw sequence
443     { ISD::UREM, MVT::v32i16,  8 }, // vpmulhuw+mul+sub sequence
444   };
445 
446   if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
447        Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
448       ST->hasBWI()) {
449     if (const auto *Entry =
450             CostTableLookup(AVX512BWConstCostTable, ISD, LT.second))
451       return LT.first * Entry->Cost;
452   }
453 
454   static const CostTblEntry AVX512ConstCostTable[] = {
455     { ISD::SDIV, MVT::v16i32, 15 }, // vpmuldq sequence
456     { ISD::SREM, MVT::v16i32, 17 }, // vpmuldq+mul+sub sequence
457     { ISD::UDIV, MVT::v16i32, 15 }, // vpmuludq sequence
458     { ISD::UREM, MVT::v16i32, 17 }, // vpmuludq+mul+sub sequence
459     { ISD::SDIV, MVT::v64i8,  28 }, // 4*ext+4*pmulhw sequence
460     { ISD::SREM, MVT::v64i8,  32 }, // 4*ext+4*pmulhw+mul+sub sequence
461     { ISD::UDIV, MVT::v64i8,  28 }, // 4*ext+4*pmulhw sequence
462     { ISD::UREM, MVT::v64i8,  32 }, // 4*ext+4*pmulhw+mul+sub sequence
463     { ISD::SDIV, MVT::v32i16, 12 }, // 2*vpmulhw sequence
464     { ISD::SREM, MVT::v32i16, 16 }, // 2*vpmulhw+mul+sub sequence
465     { ISD::UDIV, MVT::v32i16, 12 }, // 2*vpmulhuw sequence
466     { ISD::UREM, MVT::v32i16, 16 }, // 2*vpmulhuw+mul+sub sequence
467   };
468 
469   if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
470        Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
471       ST->hasAVX512()) {
472     if (const auto *Entry =
473             CostTableLookup(AVX512ConstCostTable, ISD, LT.second))
474       return LT.first * Entry->Cost;
475   }
476 
477   static const CostTblEntry AVX2ConstCostTable[] = {
478     { ISD::SDIV, MVT::v32i8,  14 }, // 2*ext+2*pmulhw sequence
479     { ISD::SREM, MVT::v32i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
480     { ISD::UDIV, MVT::v32i8,  14 }, // 2*ext+2*pmulhw sequence
481     { ISD::UREM, MVT::v32i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
482     { ISD::SDIV, MVT::v16i16,  6 }, // vpmulhw sequence
483     { ISD::SREM, MVT::v16i16,  8 }, // vpmulhw+mul+sub sequence
484     { ISD::UDIV, MVT::v16i16,  6 }, // vpmulhuw sequence
485     { ISD::UREM, MVT::v16i16,  8 }, // vpmulhuw+mul+sub sequence
486     { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
487     { ISD::SREM, MVT::v8i32,  19 }, // vpmuldq+mul+sub sequence
488     { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
489     { ISD::UREM, MVT::v8i32,  19 }, // vpmuludq+mul+sub sequence
490   };
491 
492   if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
493        Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
494       ST->hasAVX2()) {
495     if (const auto *Entry = CostTableLookup(AVX2ConstCostTable, ISD, LT.second))
496       return LT.first * Entry->Cost;
497   }
498 
499   static const CostTblEntry SSE2ConstCostTable[] = {
500     { ISD::SDIV, MVT::v32i8,  28+2 }, // 4*ext+4*pmulhw sequence + split.
501     { ISD::SREM, MVT::v32i8,  32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
502     { ISD::SDIV, MVT::v16i8,    14 }, // 2*ext+2*pmulhw sequence
503     { ISD::SREM, MVT::v16i8,    16 }, // 2*ext+2*pmulhw+mul+sub sequence
504     { ISD::UDIV, MVT::v32i8,  28+2 }, // 4*ext+4*pmulhw sequence + split.
505     { ISD::UREM, MVT::v32i8,  32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
506     { ISD::UDIV, MVT::v16i8,    14 }, // 2*ext+2*pmulhw sequence
507     { ISD::UREM, MVT::v16i8,    16 }, // 2*ext+2*pmulhw+mul+sub sequence
508     { ISD::SDIV, MVT::v16i16, 12+2 }, // 2*pmulhw sequence + split.
509     { ISD::SREM, MVT::v16i16, 16+2 }, // 2*pmulhw+mul+sub sequence + split.
510     { ISD::SDIV, MVT::v8i16,     6 }, // pmulhw sequence
511     { ISD::SREM, MVT::v8i16,     8 }, // pmulhw+mul+sub sequence
512     { ISD::UDIV, MVT::v16i16, 12+2 }, // 2*pmulhuw sequence + split.
513     { ISD::UREM, MVT::v16i16, 16+2 }, // 2*pmulhuw+mul+sub sequence + split.
514     { ISD::UDIV, MVT::v8i16,     6 }, // pmulhuw sequence
515     { ISD::UREM, MVT::v8i16,     8 }, // pmulhuw+mul+sub sequence
516     { ISD::SDIV, MVT::v8i32,  38+2 }, // 2*pmuludq sequence + split.
517     { ISD::SREM, MVT::v8i32,  48+2 }, // 2*pmuludq+mul+sub sequence + split.
518     { ISD::SDIV, MVT::v4i32,    19 }, // pmuludq sequence
519     { ISD::SREM, MVT::v4i32,    24 }, // pmuludq+mul+sub sequence
520     { ISD::UDIV, MVT::v8i32,  30+2 }, // 2*pmuludq sequence + split.
521     { ISD::UREM, MVT::v8i32,  40+2 }, // 2*pmuludq+mul+sub sequence + split.
522     { ISD::UDIV, MVT::v4i32,    15 }, // pmuludq sequence
523     { ISD::UREM, MVT::v4i32,    20 }, // pmuludq+mul+sub sequence
524   };
525 
526   if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
527        Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
528       ST->hasSSE2()) {
529     // pmuldq sequence.
530     if (ISD == ISD::SDIV && LT.second == MVT::v8i32 && ST->hasAVX())
531       return LT.first * 32;
532     if (ISD == ISD::SREM && LT.second == MVT::v8i32 && ST->hasAVX())
533       return LT.first * 38;
534     if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
535       return LT.first * 15;
536     if (ISD == ISD::SREM && LT.second == MVT::v4i32 && ST->hasSSE41())
537       return LT.first * 20;
538 
539     if (const auto *Entry = CostTableLookup(SSE2ConstCostTable, ISD, LT.second))
540       return LT.first * Entry->Cost;
541   }
542 
543   static const CostTblEntry AVX512BWShiftCostTable[] = {
544     { ISD::SHL,   MVT::v16i8,      4 }, // extend/vpsllvw/pack sequence.
545     { ISD::SRL,   MVT::v16i8,      4 }, // extend/vpsrlvw/pack sequence.
546     { ISD::SRA,   MVT::v16i8,      4 }, // extend/vpsravw/pack sequence.
547     { ISD::SHL,   MVT::v32i8,      4 }, // extend/vpsllvw/pack sequence.
548     { ISD::SRL,   MVT::v32i8,      4 }, // extend/vpsrlvw/pack sequence.
549     { ISD::SRA,   MVT::v32i8,      6 }, // extend/vpsravw/pack sequence.
550     { ISD::SHL,   MVT::v64i8,      6 }, // extend/vpsllvw/pack sequence.
551     { ISD::SRL,   MVT::v64i8,      7 }, // extend/vpsrlvw/pack sequence.
552     { ISD::SRA,   MVT::v64i8,     15 }, // extend/vpsravw/pack sequence.
553 
554     { ISD::SHL,   MVT::v8i16,      1 }, // vpsllvw
555     { ISD::SRL,   MVT::v8i16,      1 }, // vpsrlvw
556     { ISD::SRA,   MVT::v8i16,      1 }, // vpsravw
557     { ISD::SHL,   MVT::v16i16,     1 }, // vpsllvw
558     { ISD::SRL,   MVT::v16i16,     1 }, // vpsrlvw
559     { ISD::SRA,   MVT::v16i16,     1 }, // vpsravw
560     { ISD::SHL,   MVT::v32i16,     1 }, // vpsllvw
561     { ISD::SRL,   MVT::v32i16,     1 }, // vpsrlvw
562     { ISD::SRA,   MVT::v32i16,     1 }, // vpsravw
563   };
564 
565   if (ST->hasBWI())
566     if (const auto *Entry = CostTableLookup(AVX512BWShiftCostTable, ISD, LT.second))
567       return LT.first * Entry->Cost;
568 
569   static const CostTblEntry AVX2UniformCostTable[] = {
570     // Uniform splats are cheaper for the following instructions.
571     { ISD::SHL,  MVT::v16i16, 1 }, // psllw.
572     { ISD::SRL,  MVT::v16i16, 1 }, // psrlw.
573     { ISD::SRA,  MVT::v16i16, 1 }, // psraw.
574     { ISD::SHL,  MVT::v32i16, 2 }, // 2*psllw.
575     { ISD::SRL,  MVT::v32i16, 2 }, // 2*psrlw.
576     { ISD::SRA,  MVT::v32i16, 2 }, // 2*psraw.
577 
578     { ISD::SHL,  MVT::v8i32,  1 }, // pslld
579     { ISD::SRL,  MVT::v8i32,  1 }, // psrld
580     { ISD::SRA,  MVT::v8i32,  1 }, // psrad
581     { ISD::SHL,  MVT::v4i64,  1 }, // psllq
582     { ISD::SRL,  MVT::v4i64,  1 }, // psrlq
583   };
584 
585   if (ST->hasAVX2() &&
586       ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
587        (Op2Info == TargetTransformInfo::OK_UniformValue))) {
588     if (const auto *Entry =
589             CostTableLookup(AVX2UniformCostTable, ISD, LT.second))
590       return LT.first * Entry->Cost;
591   }
592 
593   static const CostTblEntry SSE2UniformCostTable[] = {
594     // Uniform splats are cheaper for the following instructions.
595     { ISD::SHL,  MVT::v8i16,  1 }, // psllw.
596     { ISD::SHL,  MVT::v4i32,  1 }, // pslld
597     { ISD::SHL,  MVT::v2i64,  1 }, // psllq.
598 
599     { ISD::SRL,  MVT::v8i16,  1 }, // psrlw.
600     { ISD::SRL,  MVT::v4i32,  1 }, // psrld.
601     { ISD::SRL,  MVT::v2i64,  1 }, // psrlq.
602 
603     { ISD::SRA,  MVT::v8i16,  1 }, // psraw.
604     { ISD::SRA,  MVT::v4i32,  1 }, // psrad.
605   };
606 
607   if (ST->hasSSE2() &&
608       ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
609        (Op2Info == TargetTransformInfo::OK_UniformValue))) {
610     if (const auto *Entry =
611             CostTableLookup(SSE2UniformCostTable, ISD, LT.second))
612       return LT.first * Entry->Cost;
613   }
614 
615   static const CostTblEntry AVX512DQCostTable[] = {
616     { ISD::MUL,  MVT::v2i64, 2 }, // pmullq
617     { ISD::MUL,  MVT::v4i64, 2 }, // pmullq
618     { ISD::MUL,  MVT::v8i64, 2 }  // pmullq
619   };
620 
621   // Look for AVX512DQ lowering tricks for custom cases.
622   if (ST->hasDQI())
623     if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD, LT.second))
624       return LT.first * Entry->Cost;
625 
626   static const CostTblEntry AVX512BWCostTable[] = {
627     { ISD::SHL,   MVT::v64i8,     11 }, // vpblendvb sequence.
628     { ISD::SRL,   MVT::v64i8,     11 }, // vpblendvb sequence.
629     { ISD::SRA,   MVT::v64i8,     24 }, // vpblendvb sequence.
630   };
631 
632   // Look for AVX512BW lowering tricks for custom cases.
633   if (ST->hasBWI())
634     if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD, LT.second))
635       return LT.first * Entry->Cost;
636 
637   static const CostTblEntry AVX512CostTable[] = {
638     { ISD::SHL,     MVT::v4i32,      1 },
639     { ISD::SRL,     MVT::v4i32,      1 },
640     { ISD::SRA,     MVT::v4i32,      1 },
641     { ISD::SHL,     MVT::v8i32,      1 },
642     { ISD::SRL,     MVT::v8i32,      1 },
643     { ISD::SRA,     MVT::v8i32,      1 },
644     { ISD::SHL,     MVT::v16i32,     1 },
645     { ISD::SRL,     MVT::v16i32,     1 },
646     { ISD::SRA,     MVT::v16i32,     1 },
647 
648     { ISD::SHL,     MVT::v2i64,      1 },
649     { ISD::SRL,     MVT::v2i64,      1 },
650     { ISD::SHL,     MVT::v4i64,      1 },
651     { ISD::SRL,     MVT::v4i64,      1 },
652     { ISD::SHL,     MVT::v8i64,      1 },
653     { ISD::SRL,     MVT::v8i64,      1 },
654 
655     { ISD::SRA,     MVT::v2i64,      1 },
656     { ISD::SRA,     MVT::v4i64,      1 },
657     { ISD::SRA,     MVT::v8i64,      1 },
658 
659     { ISD::MUL,     MVT::v16i32,     1 }, // pmulld (Skylake from agner.org)
660     { ISD::MUL,     MVT::v8i32,      1 }, // pmulld (Skylake from agner.org)
661     { ISD::MUL,     MVT::v4i32,      1 }, // pmulld (Skylake from agner.org)
662     { ISD::MUL,     MVT::v8i64,      6 }, // 3*pmuludq/3*shift/2*add
663 
664     { ISD::FNEG,    MVT::v8f64,      1 }, // Skylake from http://www.agner.org/
665     { ISD::FADD,    MVT::v8f64,      1 }, // Skylake from http://www.agner.org/
666     { ISD::FSUB,    MVT::v8f64,      1 }, // Skylake from http://www.agner.org/
667     { ISD::FMUL,    MVT::v8f64,      1 }, // Skylake from http://www.agner.org/
668     { ISD::FDIV,    MVT::f64,        4 }, // Skylake from http://www.agner.org/
669     { ISD::FDIV,    MVT::v2f64,      4 }, // Skylake from http://www.agner.org/
670     { ISD::FDIV,    MVT::v4f64,      8 }, // Skylake from http://www.agner.org/
671     { ISD::FDIV,    MVT::v8f64,     16 }, // Skylake from http://www.agner.org/
672 
673     { ISD::FNEG,    MVT::v16f32,     1 }, // Skylake from http://www.agner.org/
674     { ISD::FADD,    MVT::v16f32,     1 }, // Skylake from http://www.agner.org/
675     { ISD::FSUB,    MVT::v16f32,     1 }, // Skylake from http://www.agner.org/
676     { ISD::FMUL,    MVT::v16f32,     1 }, // Skylake from http://www.agner.org/
677     { ISD::FDIV,    MVT::f32,        3 }, // Skylake from http://www.agner.org/
678     { ISD::FDIV,    MVT::v4f32,      3 }, // Skylake from http://www.agner.org/
679     { ISD::FDIV,    MVT::v8f32,      5 }, // Skylake from http://www.agner.org/
680     { ISD::FDIV,    MVT::v16f32,    10 }, // Skylake from http://www.agner.org/
681   };
682 
683   if (ST->hasAVX512())
684     if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
685       return LT.first * Entry->Cost;
686 
687   static const CostTblEntry AVX2ShiftCostTable[] = {
688     // Shifts on vXi64/vXi32 on AVX2 is legal even though we declare to
689     // customize them to detect the cases where shift amount is a scalar one.
690     { ISD::SHL,     MVT::v4i32,    2 }, // vpsllvd (Haswell from agner.org)
691     { ISD::SRL,     MVT::v4i32,    2 }, // vpsrlvd (Haswell from agner.org)
692     { ISD::SRA,     MVT::v4i32,    2 }, // vpsravd (Haswell from agner.org)
693     { ISD::SHL,     MVT::v8i32,    2 }, // vpsllvd (Haswell from agner.org)
694     { ISD::SRL,     MVT::v8i32,    2 }, // vpsrlvd (Haswell from agner.org)
695     { ISD::SRA,     MVT::v8i32,    2 }, // vpsravd (Haswell from agner.org)
696     { ISD::SHL,     MVT::v2i64,    1 }, // vpsllvq (Haswell from agner.org)
697     { ISD::SRL,     MVT::v2i64,    1 }, // vpsrlvq (Haswell from agner.org)
698     { ISD::SHL,     MVT::v4i64,    1 }, // vpsllvq (Haswell from agner.org)
699     { ISD::SRL,     MVT::v4i64,    1 }, // vpsrlvq (Haswell from agner.org)
700   };
701 
702   if (ST->hasAVX512()) {
703     if (ISD == ISD::SHL && LT.second == MVT::v32i16 &&
704         (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
705          Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
706       // On AVX512, a packed v32i16 shift left by a constant build_vector
707       // is lowered into a vector multiply (vpmullw).
708       return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
709                                     Op1Info, Op2Info,
710                                     TargetTransformInfo::OP_None,
711                                     TargetTransformInfo::OP_None);
712   }
713 
714   // Look for AVX2 lowering tricks (XOP is always better at v4i32 shifts).
715   if (ST->hasAVX2() && !(ST->hasXOP() && LT.second == MVT::v4i32)) {
716     if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
717         (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
718          Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
719       // On AVX2, a packed v16i16 shift left by a constant build_vector
720       // is lowered into a vector multiply (vpmullw).
721       return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
722                                     Op1Info, Op2Info,
723                                     TargetTransformInfo::OP_None,
724                                     TargetTransformInfo::OP_None);
725 
726     if (const auto *Entry = CostTableLookup(AVX2ShiftCostTable, ISD, LT.second))
727       return LT.first * Entry->Cost;
728   }
729 
730   static const CostTblEntry XOPShiftCostTable[] = {
731     // 128bit shifts take 1cy, but right shifts require negation beforehand.
732     { ISD::SHL,     MVT::v16i8,    1 },
733     { ISD::SRL,     MVT::v16i8,    2 },
734     { ISD::SRA,     MVT::v16i8,    2 },
735     { ISD::SHL,     MVT::v8i16,    1 },
736     { ISD::SRL,     MVT::v8i16,    2 },
737     { ISD::SRA,     MVT::v8i16,    2 },
738     { ISD::SHL,     MVT::v4i32,    1 },
739     { ISD::SRL,     MVT::v4i32,    2 },
740     { ISD::SRA,     MVT::v4i32,    2 },
741     { ISD::SHL,     MVT::v2i64,    1 },
742     { ISD::SRL,     MVT::v2i64,    2 },
743     { ISD::SRA,     MVT::v2i64,    2 },
744     // 256bit shifts require splitting if AVX2 didn't catch them above.
745     { ISD::SHL,     MVT::v32i8,  2+2 },
746     { ISD::SRL,     MVT::v32i8,  4+2 },
747     { ISD::SRA,     MVT::v32i8,  4+2 },
748     { ISD::SHL,     MVT::v16i16, 2+2 },
749     { ISD::SRL,     MVT::v16i16, 4+2 },
750     { ISD::SRA,     MVT::v16i16, 4+2 },
751     { ISD::SHL,     MVT::v8i32,  2+2 },
752     { ISD::SRL,     MVT::v8i32,  4+2 },
753     { ISD::SRA,     MVT::v8i32,  4+2 },
754     { ISD::SHL,     MVT::v4i64,  2+2 },
755     { ISD::SRL,     MVT::v4i64,  4+2 },
756     { ISD::SRA,     MVT::v4i64,  4+2 },
757   };
758 
759   // Look for XOP lowering tricks.
760   if (ST->hasXOP()) {
761     // If the right shift is constant then we'll fold the negation so
762     // it's as cheap as a left shift.
763     int ShiftISD = ISD;
764     if ((ShiftISD == ISD::SRL || ShiftISD == ISD::SRA) &&
765         (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
766          Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
767       ShiftISD = ISD::SHL;
768     if (const auto *Entry =
769             CostTableLookup(XOPShiftCostTable, ShiftISD, LT.second))
770       return LT.first * Entry->Cost;
771   }
772 
773   static const CostTblEntry SSE2UniformShiftCostTable[] = {
774     // Uniform splats are cheaper for the following instructions.
775     { ISD::SHL,  MVT::v16i16, 2+2 }, // 2*psllw + split.
776     { ISD::SHL,  MVT::v8i32,  2+2 }, // 2*pslld + split.
777     { ISD::SHL,  MVT::v4i64,  2+2 }, // 2*psllq + split.
778 
779     { ISD::SRL,  MVT::v16i16, 2+2 }, // 2*psrlw + split.
780     { ISD::SRL,  MVT::v8i32,  2+2 }, // 2*psrld + split.
781     { ISD::SRL,  MVT::v4i64,  2+2 }, // 2*psrlq + split.
782 
783     { ISD::SRA,  MVT::v16i16, 2+2 }, // 2*psraw + split.
784     { ISD::SRA,  MVT::v8i32,  2+2 }, // 2*psrad + split.
785     { ISD::SRA,  MVT::v2i64,    4 }, // 2*psrad + shuffle.
786     { ISD::SRA,  MVT::v4i64,  8+2 }, // 2*(2*psrad + shuffle) + split.
787   };
788 
789   if (ST->hasSSE2() &&
790       ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
791        (Op2Info == TargetTransformInfo::OK_UniformValue))) {
792 
793     // Handle AVX2 uniform v4i64 ISD::SRA, it's not worth a table.
794     if (ISD == ISD::SRA && LT.second == MVT::v4i64 && ST->hasAVX2())
795       return LT.first * 4; // 2*psrad + shuffle.
796 
797     if (const auto *Entry =
798             CostTableLookup(SSE2UniformShiftCostTable, ISD, LT.second))
799       return LT.first * Entry->Cost;
800   }
801 
802   if (ISD == ISD::SHL &&
803       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
804     MVT VT = LT.second;
805     // Vector shift left by non uniform constant can be lowered
806     // into vector multiply.
807     if (((VT == MVT::v8i16 || VT == MVT::v4i32) && ST->hasSSE2()) ||
808         ((VT == MVT::v16i16 || VT == MVT::v8i32) && ST->hasAVX()))
809       ISD = ISD::MUL;
810   }
811 
812   static const CostTblEntry AVX2CostTable[] = {
813     { ISD::SHL,  MVT::v16i8,      6 }, // vpblendvb sequence.
814     { ISD::SHL,  MVT::v32i8,      6 }, // vpblendvb sequence.
815     { ISD::SHL,  MVT::v64i8,     12 }, // 2*vpblendvb sequence.
816     { ISD::SHL,  MVT::v8i16,      5 }, // extend/vpsrlvd/pack sequence.
817     { ISD::SHL,  MVT::v16i16,     7 }, // extend/vpsrlvd/pack sequence.
818     { ISD::SHL,  MVT::v32i16,    14 }, // 2*extend/vpsrlvd/pack sequence.
819 
820     { ISD::SRL,  MVT::v16i8,      6 }, // vpblendvb sequence.
821     { ISD::SRL,  MVT::v32i8,      6 }, // vpblendvb sequence.
822     { ISD::SRL,  MVT::v64i8,     12 }, // 2*vpblendvb sequence.
823     { ISD::SRL,  MVT::v8i16,      5 }, // extend/vpsrlvd/pack sequence.
824     { ISD::SRL,  MVT::v16i16,     7 }, // extend/vpsrlvd/pack sequence.
825     { ISD::SRL,  MVT::v32i16,    14 }, // 2*extend/vpsrlvd/pack sequence.
826 
827     { ISD::SRA,  MVT::v16i8,     17 }, // vpblendvb sequence.
828     { ISD::SRA,  MVT::v32i8,     17 }, // vpblendvb sequence.
829     { ISD::SRA,  MVT::v64i8,     34 }, // 2*vpblendvb sequence.
830     { ISD::SRA,  MVT::v8i16,      5 }, // extend/vpsravd/pack sequence.
831     { ISD::SRA,  MVT::v16i16,     7 }, // extend/vpsravd/pack sequence.
832     { ISD::SRA,  MVT::v32i16,    14 }, // 2*extend/vpsravd/pack sequence.
833     { ISD::SRA,  MVT::v2i64,      2 }, // srl/xor/sub sequence.
834     { ISD::SRA,  MVT::v4i64,      2 }, // srl/xor/sub sequence.
835 
836     { ISD::SUB,  MVT::v32i8,      1 }, // psubb
837     { ISD::ADD,  MVT::v32i8,      1 }, // paddb
838     { ISD::SUB,  MVT::v16i16,     1 }, // psubw
839     { ISD::ADD,  MVT::v16i16,     1 }, // paddw
840     { ISD::SUB,  MVT::v8i32,      1 }, // psubd
841     { ISD::ADD,  MVT::v8i32,      1 }, // paddd
842     { ISD::SUB,  MVT::v4i64,      1 }, // psubq
843     { ISD::ADD,  MVT::v4i64,      1 }, // paddq
844 
845     { ISD::MUL,  MVT::v16i16,     1 }, // pmullw
846     { ISD::MUL,  MVT::v8i32,      2 }, // pmulld (Haswell from agner.org)
847     { ISD::MUL,  MVT::v4i64,      6 }, // 3*pmuludq/3*shift/2*add
848 
849     { ISD::FNEG, MVT::v4f64,      1 }, // Haswell from http://www.agner.org/
850     { ISD::FNEG, MVT::v8f32,      1 }, // Haswell from http://www.agner.org/
851     { ISD::FADD, MVT::v4f64,      1 }, // Haswell from http://www.agner.org/
852     { ISD::FADD, MVT::v8f32,      1 }, // Haswell from http://www.agner.org/
853     { ISD::FSUB, MVT::v4f64,      1 }, // Haswell from http://www.agner.org/
854     { ISD::FSUB, MVT::v8f32,      1 }, // Haswell from http://www.agner.org/
855     { ISD::FMUL, MVT::f64,        1 }, // Haswell from http://www.agner.org/
856     { ISD::FMUL, MVT::v2f64,      1 }, // Haswell from http://www.agner.org/
857     { ISD::FMUL, MVT::v4f64,      1 }, // Haswell from http://www.agner.org/
858     { ISD::FMUL, MVT::v8f32,      1 }, // Haswell from http://www.agner.org/
859 
860     { ISD::FDIV, MVT::f32,        7 }, // Haswell from http://www.agner.org/
861     { ISD::FDIV, MVT::v4f32,      7 }, // Haswell from http://www.agner.org/
862     { ISD::FDIV, MVT::v8f32,     14 }, // Haswell from http://www.agner.org/
863     { ISD::FDIV, MVT::f64,       14 }, // Haswell from http://www.agner.org/
864     { ISD::FDIV, MVT::v2f64,     14 }, // Haswell from http://www.agner.org/
865     { ISD::FDIV, MVT::v4f64,     28 }, // Haswell from http://www.agner.org/
866   };
867 
868   // Look for AVX2 lowering tricks for custom cases.
869   if (ST->hasAVX2())
870     if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second))
871       return LT.first * Entry->Cost;
872 
873   static const CostTblEntry AVX1CostTable[] = {
874     // We don't have to scalarize unsupported ops. We can issue two half-sized
875     // operations and we only need to extract the upper YMM half.
876     // Two ops + 1 extract + 1 insert = 4.
877     { ISD::MUL,     MVT::v16i16,     4 },
878     { ISD::MUL,     MVT::v8i32,      5 }, // BTVER2 from http://www.agner.org/
879     { ISD::MUL,     MVT::v4i64,     12 },
880 
881     { ISD::SUB,     MVT::v32i8,      4 },
882     { ISD::ADD,     MVT::v32i8,      4 },
883     { ISD::SUB,     MVT::v16i16,     4 },
884     { ISD::ADD,     MVT::v16i16,     4 },
885     { ISD::SUB,     MVT::v8i32,      4 },
886     { ISD::ADD,     MVT::v8i32,      4 },
887     { ISD::SUB,     MVT::v4i64,      4 },
888     { ISD::ADD,     MVT::v4i64,      4 },
889 
890     { ISD::SHL,     MVT::v32i8,     22 }, // pblendvb sequence + split.
891     { ISD::SHL,     MVT::v8i16,      6 }, // pblendvb sequence.
892     { ISD::SHL,     MVT::v16i16,    13 }, // pblendvb sequence + split.
893     { ISD::SHL,     MVT::v4i32,      3 }, // pslld/paddd/cvttps2dq/pmulld
894     { ISD::SHL,     MVT::v8i32,      9 }, // pslld/paddd/cvttps2dq/pmulld + split
895     { ISD::SHL,     MVT::v2i64,      2 }, // Shift each lane + blend.
896     { ISD::SHL,     MVT::v4i64,      6 }, // Shift each lane + blend + split.
897 
898     { ISD::SRL,     MVT::v32i8,     23 }, // pblendvb sequence + split.
899     { ISD::SRL,     MVT::v16i16,    28 }, // pblendvb sequence + split.
900     { ISD::SRL,     MVT::v4i32,      6 }, // Shift each lane + blend.
901     { ISD::SRL,     MVT::v8i32,     14 }, // Shift each lane + blend + split.
902     { ISD::SRL,     MVT::v2i64,      2 }, // Shift each lane + blend.
903     { ISD::SRL,     MVT::v4i64,      6 }, // Shift each lane + blend + split.
904 
905     { ISD::SRA,     MVT::v32i8,     44 }, // pblendvb sequence + split.
906     { ISD::SRA,     MVT::v16i16,    28 }, // pblendvb sequence + split.
907     { ISD::SRA,     MVT::v4i32,      6 }, // Shift each lane + blend.
908     { ISD::SRA,     MVT::v8i32,     14 }, // Shift each lane + blend + split.
909     { ISD::SRA,     MVT::v2i64,      5 }, // Shift each lane + blend.
910     { ISD::SRA,     MVT::v4i64,     12 }, // Shift each lane + blend + split.
911 
912     { ISD::FNEG,    MVT::v4f64,      2 }, // BTVER2 from http://www.agner.org/
913     { ISD::FNEG,    MVT::v8f32,      2 }, // BTVER2 from http://www.agner.org/
914 
915     { ISD::FMUL,    MVT::f64,        2 }, // BTVER2 from http://www.agner.org/
916     { ISD::FMUL,    MVT::v2f64,      2 }, // BTVER2 from http://www.agner.org/
917     { ISD::FMUL,    MVT::v4f64,      4 }, // BTVER2 from http://www.agner.org/
918 
919     { ISD::FDIV,    MVT::f32,       14 }, // SNB from http://www.agner.org/
920     { ISD::FDIV,    MVT::v4f32,     14 }, // SNB from http://www.agner.org/
921     { ISD::FDIV,    MVT::v8f32,     28 }, // SNB from http://www.agner.org/
922     { ISD::FDIV,    MVT::f64,       22 }, // SNB from http://www.agner.org/
923     { ISD::FDIV,    MVT::v2f64,     22 }, // SNB from http://www.agner.org/
924     { ISD::FDIV,    MVT::v4f64,     44 }, // SNB from http://www.agner.org/
925   };
926 
927   if (ST->hasAVX())
928     if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, LT.second))
929       return LT.first * Entry->Cost;
930 
931   static const CostTblEntry SSE42CostTable[] = {
932     { ISD::FADD, MVT::f64,     1 }, // Nehalem from http://www.agner.org/
933     { ISD::FADD, MVT::f32,     1 }, // Nehalem from http://www.agner.org/
934     { ISD::FADD, MVT::v2f64,   1 }, // Nehalem from http://www.agner.org/
935     { ISD::FADD, MVT::v4f32,   1 }, // Nehalem from http://www.agner.org/
936 
937     { ISD::FSUB, MVT::f64,     1 }, // Nehalem from http://www.agner.org/
938     { ISD::FSUB, MVT::f32 ,    1 }, // Nehalem from http://www.agner.org/
939     { ISD::FSUB, MVT::v2f64,   1 }, // Nehalem from http://www.agner.org/
940     { ISD::FSUB, MVT::v4f32,   1 }, // Nehalem from http://www.agner.org/
941 
942     { ISD::FMUL, MVT::f64,     1 }, // Nehalem from http://www.agner.org/
943     { ISD::FMUL, MVT::f32,     1 }, // Nehalem from http://www.agner.org/
944     { ISD::FMUL, MVT::v2f64,   1 }, // Nehalem from http://www.agner.org/
945     { ISD::FMUL, MVT::v4f32,   1 }, // Nehalem from http://www.agner.org/
946 
947     { ISD::FDIV,  MVT::f32,   14 }, // Nehalem from http://www.agner.org/
948     { ISD::FDIV,  MVT::v4f32, 14 }, // Nehalem from http://www.agner.org/
949     { ISD::FDIV,  MVT::f64,   22 }, // Nehalem from http://www.agner.org/
950     { ISD::FDIV,  MVT::v2f64, 22 }, // Nehalem from http://www.agner.org/
951 
952     { ISD::MUL,   MVT::v2i64,  6 }  // 3*pmuludq/3*shift/2*add
953   };
954 
955   if (ST->hasSSE42())
956     if (const auto *Entry = CostTableLookup(SSE42CostTable, ISD, LT.second))
957       return LT.first * Entry->Cost;
958 
959   static const CostTblEntry SSE41CostTable[] = {
960     { ISD::SHL,  MVT::v16i8,      10 }, // pblendvb sequence.
961     { ISD::SHL,  MVT::v8i16,      11 }, // pblendvb sequence.
962     { ISD::SHL,  MVT::v4i32,       4 }, // pslld/paddd/cvttps2dq/pmulld
963 
964     { ISD::SRL,  MVT::v16i8,      11 }, // pblendvb sequence.
965     { ISD::SRL,  MVT::v8i16,      13 }, // pblendvb sequence.
966     { ISD::SRL,  MVT::v4i32,      16 }, // Shift each lane + blend.
967 
968     { ISD::SRA,  MVT::v16i8,      21 }, // pblendvb sequence.
969     { ISD::SRA,  MVT::v8i16,      13 }, // pblendvb sequence.
970 
971     { ISD::MUL,  MVT::v4i32,       2 }  // pmulld (Nehalem from agner.org)
972   };
973 
974   if (ST->hasSSE41())
975     if (const auto *Entry = CostTableLookup(SSE41CostTable, ISD, LT.second))
976       return LT.first * Entry->Cost;
977 
978   static const CostTblEntry SSE2CostTable[] = {
979     // We don't correctly identify costs of casts because they are marked as
980     // custom.
981     { ISD::SHL,  MVT::v16i8,      13 }, // cmpgtb sequence.
982     { ISD::SHL,  MVT::v8i16,      25 }, // cmpgtw sequence.
983     { ISD::SHL,  MVT::v4i32,      16 }, // pslld/paddd/cvttps2dq/pmuludq.
984     { ISD::SHL,  MVT::v2i64,       4 }, // splat+shuffle sequence.
985 
986     { ISD::SRL,  MVT::v16i8,      14 }, // cmpgtb sequence.
987     { ISD::SRL,  MVT::v8i16,      16 }, // cmpgtw sequence.
988     { ISD::SRL,  MVT::v4i32,      12 }, // Shift each lane + blend.
989     { ISD::SRL,  MVT::v2i64,       4 }, // splat+shuffle sequence.
990 
991     { ISD::SRA,  MVT::v16i8,      27 }, // unpacked cmpgtb sequence.
992     { ISD::SRA,  MVT::v8i16,      16 }, // cmpgtw sequence.
993     { ISD::SRA,  MVT::v4i32,      12 }, // Shift each lane + blend.
994     { ISD::SRA,  MVT::v2i64,       8 }, // srl/xor/sub splat+shuffle sequence.
995 
996     { ISD::MUL,  MVT::v8i16,       1 }, // pmullw
997     { ISD::MUL,  MVT::v4i32,       6 }, // 3*pmuludq/4*shuffle
998     { ISD::MUL,  MVT::v2i64,       8 }, // 3*pmuludq/3*shift/2*add
999 
1000     { ISD::FDIV, MVT::f32,        23 }, // Pentium IV from http://www.agner.org/
1001     { ISD::FDIV, MVT::v4f32,      39 }, // Pentium IV from http://www.agner.org/
1002     { ISD::FDIV, MVT::f64,        38 }, // Pentium IV from http://www.agner.org/
1003     { ISD::FDIV, MVT::v2f64,      69 }, // Pentium IV from http://www.agner.org/
1004 
1005     { ISD::FNEG, MVT::f32,         1 }, // Pentium IV from http://www.agner.org/
1006     { ISD::FNEG, MVT::f64,         1 }, // Pentium IV from http://www.agner.org/
1007     { ISD::FNEG, MVT::v4f32,       1 }, // Pentium IV from http://www.agner.org/
1008     { ISD::FNEG, MVT::v2f64,       1 }, // Pentium IV from http://www.agner.org/
1009 
1010     { ISD::FADD, MVT::f32,         2 }, // Pentium IV from http://www.agner.org/
1011     { ISD::FADD, MVT::f64,         2 }, // Pentium IV from http://www.agner.org/
1012 
1013     { ISD::FSUB, MVT::f32,         2 }, // Pentium IV from http://www.agner.org/
1014     { ISD::FSUB, MVT::f64,         2 }, // Pentium IV from http://www.agner.org/
1015   };
1016 
1017   if (ST->hasSSE2())
1018     if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
1019       return LT.first * Entry->Cost;
1020 
1021   static const CostTblEntry SSE1CostTable[] = {
1022     { ISD::FDIV, MVT::f32,   17 }, // Pentium III from http://www.agner.org/
1023     { ISD::FDIV, MVT::v4f32, 34 }, // Pentium III from http://www.agner.org/
1024 
1025     { ISD::FNEG, MVT::f32,    2 }, // Pentium III from http://www.agner.org/
1026     { ISD::FNEG, MVT::v4f32,  2 }, // Pentium III from http://www.agner.org/
1027 
1028     { ISD::FADD, MVT::f32,    1 }, // Pentium III from http://www.agner.org/
1029     { ISD::FADD, MVT::v4f32,  2 }, // Pentium III from http://www.agner.org/
1030 
1031     { ISD::FSUB, MVT::f32,    1 }, // Pentium III from http://www.agner.org/
1032     { ISD::FSUB, MVT::v4f32,  2 }, // Pentium III from http://www.agner.org/
1033   };
1034 
1035   if (ST->hasSSE1())
1036     if (const auto *Entry = CostTableLookup(SSE1CostTable, ISD, LT.second))
1037       return LT.first * Entry->Cost;
1038 
1039   static const CostTblEntry X64CostTbl[] = { // 64-bit targets
1040     { ISD::ADD,  MVT::i64,    1 }, // Core (Merom) from http://www.agner.org/
1041     { ISD::SUB,  MVT::i64,    1 }, // Core (Merom) from http://www.agner.org/
1042     { ISD::MUL,  MVT::i64,    2 }, // Nehalem from http://www.agner.org/
1043   };
1044 
1045   if (ST->is64Bit())
1046     if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, LT.second))
1047       return LT.first * Entry->Cost;
1048 
1049   static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
1050     { ISD::ADD,  MVT::i8,    1 }, // Pentium III from http://www.agner.org/
1051     { ISD::ADD,  MVT::i16,   1 }, // Pentium III from http://www.agner.org/
1052     { ISD::ADD,  MVT::i32,   1 }, // Pentium III from http://www.agner.org/
1053 
1054     { ISD::SUB,  MVT::i8,    1 }, // Pentium III from http://www.agner.org/
1055     { ISD::SUB,  MVT::i16,   1 }, // Pentium III from http://www.agner.org/
1056     { ISD::SUB,  MVT::i32,   1 }, // Pentium III from http://www.agner.org/
1057   };
1058 
1059   if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, LT.second))
1060     return LT.first * Entry->Cost;
1061 
1062   // It is not a good idea to vectorize division. We have to scalarize it and
1063   // in the process we will often end up having to spilling regular
1064   // registers. The overhead of division is going to dominate most kernels
1065   // anyways so try hard to prevent vectorization of division - it is
1066   // generally a bad idea. Assume somewhat arbitrarily that we have to be able
1067   // to hide "20 cycles" for each lane.
1068   if (LT.second.isVector() && (ISD == ISD::SDIV || ISD == ISD::SREM ||
1069                                ISD == ISD::UDIV || ISD == ISD::UREM)) {
1070     InstructionCost ScalarCost = getArithmeticInstrCost(
1071         Opcode, Ty->getScalarType(), CostKind, Op1Info, Op2Info,
1072         TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
1073     return 20 * LT.first * LT.second.getVectorNumElements() * ScalarCost;
1074   }
1075 
1076   // Fallback to the default implementation.
1077   return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info);
1078 }
1079 
1080 InstructionCost X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
1081                                            VectorType *BaseTp,
1082                                            ArrayRef<int> Mask, int Index,
1083                                            VectorType *SubTp) {
1084   // 64-bit packed float vectors (v2f32) are widened to type v4f32.
1085   // 64-bit packed integer vectors (v2i32) are widened to type v4i32.
1086   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, BaseTp);
1087 
1088   Kind = improveShuffleKindFromMask(Kind, Mask);
1089   // Treat Transpose as 2-op shuffles - there's no difference in lowering.
1090   if (Kind == TTI::SK_Transpose)
1091     Kind = TTI::SK_PermuteTwoSrc;
1092 
1093   // For Broadcasts we are splatting the first element from the first input
1094   // register, so only need to reference that input and all the output
1095   // registers are the same.
1096   if (Kind == TTI::SK_Broadcast)
1097     LT.first = 1;
1098 
1099   // Subvector extractions are free if they start at the beginning of a
1100   // vector and cheap if the subvectors are aligned.
1101   if (Kind == TTI::SK_ExtractSubvector && LT.second.isVector()) {
1102     int NumElts = LT.second.getVectorNumElements();
1103     if ((Index % NumElts) == 0)
1104       return 0;
1105     std::pair<InstructionCost, MVT> SubLT =
1106         TLI->getTypeLegalizationCost(DL, SubTp);
1107     if (SubLT.second.isVector()) {
1108       int NumSubElts = SubLT.second.getVectorNumElements();
1109       if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
1110         return SubLT.first;
1111       // Handle some cases for widening legalization. For now we only handle
1112       // cases where the original subvector was naturally aligned and evenly
1113       // fit in its legalized subvector type.
1114       // FIXME: Remove some of the alignment restrictions.
1115       // FIXME: We can use permq for 64-bit or larger extracts from 256-bit
1116       // vectors.
1117       int OrigSubElts = cast<FixedVectorType>(SubTp)->getNumElements();
1118       if (NumSubElts > OrigSubElts && (Index % OrigSubElts) == 0 &&
1119           (NumSubElts % OrigSubElts) == 0 &&
1120           LT.second.getVectorElementType() ==
1121               SubLT.second.getVectorElementType() &&
1122           LT.second.getVectorElementType().getSizeInBits() ==
1123               BaseTp->getElementType()->getPrimitiveSizeInBits()) {
1124         assert(NumElts >= NumSubElts && NumElts > OrigSubElts &&
1125                "Unexpected number of elements!");
1126         auto *VecTy = FixedVectorType::get(BaseTp->getElementType(),
1127                                            LT.second.getVectorNumElements());
1128         auto *SubTy = FixedVectorType::get(BaseTp->getElementType(),
1129                                            SubLT.second.getVectorNumElements());
1130         int ExtractIndex = alignDown((Index % NumElts), NumSubElts);
1131         InstructionCost ExtractCost = getShuffleCost(
1132             TTI::SK_ExtractSubvector, VecTy, None, ExtractIndex, SubTy);
1133 
1134         // If the original size is 32-bits or more, we can use pshufd. Otherwise
1135         // if we have SSSE3 we can use pshufb.
1136         if (SubTp->getPrimitiveSizeInBits() >= 32 || ST->hasSSSE3())
1137           return ExtractCost + 1; // pshufd or pshufb
1138 
1139         assert(SubTp->getPrimitiveSizeInBits() == 16 &&
1140                "Unexpected vector size");
1141 
1142         return ExtractCost + 2; // worst case pshufhw + pshufd
1143       }
1144     }
1145   }
1146 
1147   // Subvector insertions are cheap if the subvectors are aligned.
1148   // Note that in general, the insertion starting at the beginning of a vector
1149   // isn't free, because we need to preserve the rest of the wide vector.
1150   if (Kind == TTI::SK_InsertSubvector && LT.second.isVector()) {
1151     int NumElts = LT.second.getVectorNumElements();
1152     std::pair<InstructionCost, MVT> SubLT =
1153         TLI->getTypeLegalizationCost(DL, SubTp);
1154     if (SubLT.second.isVector()) {
1155       int NumSubElts = SubLT.second.getVectorNumElements();
1156       if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
1157         return SubLT.first;
1158     }
1159 
1160     // If the insertion isn't aligned, treat it like a 2-op shuffle.
1161     Kind = TTI::SK_PermuteTwoSrc;
1162   }
1163 
1164   // Handle some common (illegal) sub-vector types as they are often very cheap
1165   // to shuffle even on targets without PSHUFB.
1166   EVT VT = TLI->getValueType(DL, BaseTp);
1167   if (VT.isSimple() && VT.isVector() && VT.getSizeInBits() < 128 &&
1168       !ST->hasSSSE3()) {
1169      static const CostTblEntry SSE2SubVectorShuffleTbl[] = {
1170       {TTI::SK_Broadcast,        MVT::v4i16, 1}, // pshuflw
1171       {TTI::SK_Broadcast,        MVT::v2i16, 1}, // pshuflw
1172       {TTI::SK_Broadcast,        MVT::v8i8,  2}, // punpck/pshuflw
1173       {TTI::SK_Broadcast,        MVT::v4i8,  2}, // punpck/pshuflw
1174       {TTI::SK_Broadcast,        MVT::v2i8,  1}, // punpck
1175 
1176       {TTI::SK_Reverse,          MVT::v4i16, 1}, // pshuflw
1177       {TTI::SK_Reverse,          MVT::v2i16, 1}, // pshuflw
1178       {TTI::SK_Reverse,          MVT::v4i8,  3}, // punpck/pshuflw/packus
1179       {TTI::SK_Reverse,          MVT::v2i8,  1}, // punpck
1180 
1181       {TTI::SK_PermuteTwoSrc,    MVT::v4i16, 2}, // punpck/pshuflw
1182       {TTI::SK_PermuteTwoSrc,    MVT::v2i16, 2}, // punpck/pshuflw
1183       {TTI::SK_PermuteTwoSrc,    MVT::v8i8,  7}, // punpck/pshuflw
1184       {TTI::SK_PermuteTwoSrc,    MVT::v4i8,  4}, // punpck/pshuflw
1185       {TTI::SK_PermuteTwoSrc,    MVT::v2i8,  2}, // punpck
1186 
1187       {TTI::SK_PermuteSingleSrc, MVT::v4i16, 1}, // pshuflw
1188       {TTI::SK_PermuteSingleSrc, MVT::v2i16, 1}, // pshuflw
1189       {TTI::SK_PermuteSingleSrc, MVT::v8i8,  5}, // punpck/pshuflw
1190       {TTI::SK_PermuteSingleSrc, MVT::v4i8,  3}, // punpck/pshuflw
1191       {TTI::SK_PermuteSingleSrc, MVT::v2i8,  1}, // punpck
1192     };
1193 
1194     if (ST->hasSSE2())
1195       if (const auto *Entry =
1196               CostTableLookup(SSE2SubVectorShuffleTbl, Kind, VT.getSimpleVT()))
1197         return Entry->Cost;
1198   }
1199 
1200   // We are going to permute multiple sources and the result will be in multiple
1201   // destinations. Providing an accurate cost only for splits where the element
1202   // type remains the same.
1203   if (Kind == TTI::SK_PermuteSingleSrc && LT.first != 1) {
1204     MVT LegalVT = LT.second;
1205     if (LegalVT.isVector() &&
1206         LegalVT.getVectorElementType().getSizeInBits() ==
1207             BaseTp->getElementType()->getPrimitiveSizeInBits() &&
1208         LegalVT.getVectorNumElements() <
1209             cast<FixedVectorType>(BaseTp)->getNumElements()) {
1210 
1211       unsigned VecTySize = DL.getTypeStoreSize(BaseTp);
1212       unsigned LegalVTSize = LegalVT.getStoreSize();
1213       // Number of source vectors after legalization:
1214       unsigned NumOfSrcs = (VecTySize + LegalVTSize - 1) / LegalVTSize;
1215       // Number of destination vectors after legalization:
1216       InstructionCost NumOfDests = LT.first;
1217 
1218       auto *SingleOpTy = FixedVectorType::get(BaseTp->getElementType(),
1219                                               LegalVT.getVectorNumElements());
1220 
1221       InstructionCost NumOfShuffles = (NumOfSrcs - 1) * NumOfDests;
1222       return NumOfShuffles * getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy,
1223                                             None, 0, nullptr);
1224     }
1225 
1226     return BaseT::getShuffleCost(Kind, BaseTp, Mask, Index, SubTp);
1227   }
1228 
1229   // For 2-input shuffles, we must account for splitting the 2 inputs into many.
1230   if (Kind == TTI::SK_PermuteTwoSrc && LT.first != 1) {
1231     // We assume that source and destination have the same vector type.
1232     InstructionCost NumOfDests = LT.first;
1233     InstructionCost NumOfShufflesPerDest = LT.first * 2 - 1;
1234     LT.first = NumOfDests * NumOfShufflesPerDest;
1235   }
1236 
1237   static const CostTblEntry AVX512FP16ShuffleTbl[] = {
1238       {TTI::SK_Broadcast, MVT::v32f16, 1}, // vpbroadcastw
1239       {TTI::SK_Broadcast, MVT::v16f16, 1}, // vpbroadcastw
1240       {TTI::SK_Broadcast, MVT::v8f16, 1},  // vpbroadcastw
1241 
1242       {TTI::SK_Reverse, MVT::v32f16, 2}, // vpermw
1243       {TTI::SK_Reverse, MVT::v16f16, 2}, // vpermw
1244       {TTI::SK_Reverse, MVT::v8f16, 1},  // vpshufb
1245 
1246       {TTI::SK_PermuteSingleSrc, MVT::v32f16, 2}, // vpermw
1247       {TTI::SK_PermuteSingleSrc, MVT::v16f16, 2}, // vpermw
1248       {TTI::SK_PermuteSingleSrc, MVT::v8f16, 1},  // vpshufb
1249 
1250       {TTI::SK_PermuteTwoSrc, MVT::v32f16, 2}, // vpermt2w
1251       {TTI::SK_PermuteTwoSrc, MVT::v16f16, 2}, // vpermt2w
1252       {TTI::SK_PermuteTwoSrc, MVT::v8f16, 2}   // vpermt2w
1253   };
1254 
1255   if (!ST->useSoftFloat() && ST->hasFP16())
1256     if (const auto *Entry =
1257             CostTableLookup(AVX512FP16ShuffleTbl, Kind, LT.second))
1258       return LT.first * Entry->Cost;
1259 
1260   static const CostTblEntry AVX512VBMIShuffleTbl[] = {
1261       {TTI::SK_Reverse, MVT::v64i8, 1}, // vpermb
1262       {TTI::SK_Reverse, MVT::v32i8, 1}, // vpermb
1263 
1264       {TTI::SK_PermuteSingleSrc, MVT::v64i8, 1}, // vpermb
1265       {TTI::SK_PermuteSingleSrc, MVT::v32i8, 1}, // vpermb
1266 
1267       {TTI::SK_PermuteTwoSrc, MVT::v64i8, 2}, // vpermt2b
1268       {TTI::SK_PermuteTwoSrc, MVT::v32i8, 2}, // vpermt2b
1269       {TTI::SK_PermuteTwoSrc, MVT::v16i8, 2}  // vpermt2b
1270   };
1271 
1272   if (ST->hasVBMI())
1273     if (const auto *Entry =
1274             CostTableLookup(AVX512VBMIShuffleTbl, Kind, LT.second))
1275       return LT.first * Entry->Cost;
1276 
1277   static const CostTblEntry AVX512BWShuffleTbl[] = {
1278       {TTI::SK_Broadcast, MVT::v32i16, 1}, // vpbroadcastw
1279       {TTI::SK_Broadcast, MVT::v64i8, 1},  // vpbroadcastb
1280 
1281       {TTI::SK_Reverse, MVT::v32i16, 2}, // vpermw
1282       {TTI::SK_Reverse, MVT::v16i16, 2}, // vpermw
1283       {TTI::SK_Reverse, MVT::v64i8, 2},  // pshufb + vshufi64x2
1284 
1285       {TTI::SK_PermuteSingleSrc, MVT::v32i16, 2}, // vpermw
1286       {TTI::SK_PermuteSingleSrc, MVT::v16i16, 2}, // vpermw
1287       {TTI::SK_PermuteSingleSrc, MVT::v64i8, 8},  // extend to v32i16
1288 
1289       {TTI::SK_PermuteTwoSrc, MVT::v32i16, 2}, // vpermt2w
1290       {TTI::SK_PermuteTwoSrc, MVT::v16i16, 2}, // vpermt2w
1291       {TTI::SK_PermuteTwoSrc, MVT::v8i16, 2},  // vpermt2w
1292       {TTI::SK_PermuteTwoSrc, MVT::v64i8, 19}, // 6 * v32i8 + 1
1293 
1294       {TTI::SK_Select, MVT::v32i16, 1}, // vblendmw
1295       {TTI::SK_Select, MVT::v64i8,  1}, // vblendmb
1296   };
1297 
1298   if (ST->hasBWI())
1299     if (const auto *Entry =
1300             CostTableLookup(AVX512BWShuffleTbl, Kind, LT.second))
1301       return LT.first * Entry->Cost;
1302 
1303   static const CostTblEntry AVX512ShuffleTbl[] = {
1304       {TTI::SK_Broadcast, MVT::v8f64, 1},  // vbroadcastpd
1305       {TTI::SK_Broadcast, MVT::v16f32, 1}, // vbroadcastps
1306       {TTI::SK_Broadcast, MVT::v8i64, 1},  // vpbroadcastq
1307       {TTI::SK_Broadcast, MVT::v16i32, 1}, // vpbroadcastd
1308       {TTI::SK_Broadcast, MVT::v32i16, 1}, // vpbroadcastw
1309       {TTI::SK_Broadcast, MVT::v64i8, 1},  // vpbroadcastb
1310 
1311       {TTI::SK_Reverse, MVT::v8f64, 1},  // vpermpd
1312       {TTI::SK_Reverse, MVT::v16f32, 1}, // vpermps
1313       {TTI::SK_Reverse, MVT::v8i64, 1},  // vpermq
1314       {TTI::SK_Reverse, MVT::v16i32, 1}, // vpermd
1315       {TTI::SK_Reverse, MVT::v32i16, 7}, // per mca
1316       {TTI::SK_Reverse, MVT::v64i8,  7}, // per mca
1317 
1318       {TTI::SK_PermuteSingleSrc, MVT::v8f64, 1},  // vpermpd
1319       {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1},  // vpermpd
1320       {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1},  // vpermpd
1321       {TTI::SK_PermuteSingleSrc, MVT::v16f32, 1}, // vpermps
1322       {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1},  // vpermps
1323       {TTI::SK_PermuteSingleSrc, MVT::v4f32, 1},  // vpermps
1324       {TTI::SK_PermuteSingleSrc, MVT::v8i64, 1},  // vpermq
1325       {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1},  // vpermq
1326       {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1},  // vpermq
1327       {TTI::SK_PermuteSingleSrc, MVT::v16i32, 1}, // vpermd
1328       {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1},  // vpermd
1329       {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1},  // vpermd
1330       {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1},  // pshufb
1331 
1332       {TTI::SK_PermuteTwoSrc, MVT::v8f64, 1},  // vpermt2pd
1333       {TTI::SK_PermuteTwoSrc, MVT::v16f32, 1}, // vpermt2ps
1334       {TTI::SK_PermuteTwoSrc, MVT::v8i64, 1},  // vpermt2q
1335       {TTI::SK_PermuteTwoSrc, MVT::v16i32, 1}, // vpermt2d
1336       {TTI::SK_PermuteTwoSrc, MVT::v4f64, 1},  // vpermt2pd
1337       {TTI::SK_PermuteTwoSrc, MVT::v8f32, 1},  // vpermt2ps
1338       {TTI::SK_PermuteTwoSrc, MVT::v4i64, 1},  // vpermt2q
1339       {TTI::SK_PermuteTwoSrc, MVT::v8i32, 1},  // vpermt2d
1340       {TTI::SK_PermuteTwoSrc, MVT::v2f64, 1},  // vpermt2pd
1341       {TTI::SK_PermuteTwoSrc, MVT::v4f32, 1},  // vpermt2ps
1342       {TTI::SK_PermuteTwoSrc, MVT::v2i64, 1},  // vpermt2q
1343       {TTI::SK_PermuteTwoSrc, MVT::v4i32, 1},  // vpermt2d
1344 
1345       // FIXME: This just applies the type legalization cost rules above
1346       // assuming these completely split.
1347       {TTI::SK_PermuteSingleSrc, MVT::v32i16, 14},
1348       {TTI::SK_PermuteSingleSrc, MVT::v64i8,  14},
1349       {TTI::SK_PermuteTwoSrc,    MVT::v32i16, 42},
1350       {TTI::SK_PermuteTwoSrc,    MVT::v64i8,  42},
1351 
1352       {TTI::SK_Select, MVT::v32i16, 1}, // vpternlogq
1353       {TTI::SK_Select, MVT::v64i8,  1}, // vpternlogq
1354       {TTI::SK_Select, MVT::v8f64,  1}, // vblendmpd
1355       {TTI::SK_Select, MVT::v16f32, 1}, // vblendmps
1356       {TTI::SK_Select, MVT::v8i64,  1}, // vblendmq
1357       {TTI::SK_Select, MVT::v16i32, 1}, // vblendmd
1358   };
1359 
1360   if (ST->hasAVX512())
1361     if (const auto *Entry = CostTableLookup(AVX512ShuffleTbl, Kind, LT.second))
1362       return LT.first * Entry->Cost;
1363 
1364   static const CostTblEntry AVX2ShuffleTbl[] = {
1365       {TTI::SK_Broadcast, MVT::v4f64, 1},  // vbroadcastpd
1366       {TTI::SK_Broadcast, MVT::v8f32, 1},  // vbroadcastps
1367       {TTI::SK_Broadcast, MVT::v4i64, 1},  // vpbroadcastq
1368       {TTI::SK_Broadcast, MVT::v8i32, 1},  // vpbroadcastd
1369       {TTI::SK_Broadcast, MVT::v16i16, 1}, // vpbroadcastw
1370       {TTI::SK_Broadcast, MVT::v32i8, 1},  // vpbroadcastb
1371 
1372       {TTI::SK_Reverse, MVT::v4f64, 1},  // vpermpd
1373       {TTI::SK_Reverse, MVT::v8f32, 1},  // vpermps
1374       {TTI::SK_Reverse, MVT::v4i64, 1},  // vpermq
1375       {TTI::SK_Reverse, MVT::v8i32, 1},  // vpermd
1376       {TTI::SK_Reverse, MVT::v16i16, 2}, // vperm2i128 + pshufb
1377       {TTI::SK_Reverse, MVT::v32i8, 2},  // vperm2i128 + pshufb
1378 
1379       {TTI::SK_Select, MVT::v16i16, 1}, // vpblendvb
1380       {TTI::SK_Select, MVT::v32i8, 1},  // vpblendvb
1381 
1382       {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1},  // vpermpd
1383       {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1},  // vpermps
1384       {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1},  // vpermq
1385       {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1},  // vpermd
1386       {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vperm2i128 + 2*vpshufb
1387                                                   // + vpblendvb
1388       {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4},  // vperm2i128 + 2*vpshufb
1389                                                   // + vpblendvb
1390 
1391       {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3},  // 2*vpermpd + vblendpd
1392       {TTI::SK_PermuteTwoSrc, MVT::v8f32, 3},  // 2*vpermps + vblendps
1393       {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3},  // 2*vpermq + vpblendd
1394       {TTI::SK_PermuteTwoSrc, MVT::v8i32, 3},  // 2*vpermd + vpblendd
1395       {TTI::SK_PermuteTwoSrc, MVT::v16i16, 7}, // 2*vperm2i128 + 4*vpshufb
1396                                                // + vpblendvb
1397       {TTI::SK_PermuteTwoSrc, MVT::v32i8, 7},  // 2*vperm2i128 + 4*vpshufb
1398                                                // + vpblendvb
1399   };
1400 
1401   if (ST->hasAVX2())
1402     if (const auto *Entry = CostTableLookup(AVX2ShuffleTbl, Kind, LT.second))
1403       return LT.first * Entry->Cost;
1404 
1405   static const CostTblEntry XOPShuffleTbl[] = {
1406       {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2},  // vperm2f128 + vpermil2pd
1407       {TTI::SK_PermuteSingleSrc, MVT::v8f32, 2},  // vperm2f128 + vpermil2ps
1408       {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2},  // vperm2f128 + vpermil2pd
1409       {TTI::SK_PermuteSingleSrc, MVT::v8i32, 2},  // vperm2f128 + vpermil2ps
1410       {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vextractf128 + 2*vpperm
1411                                                   // + vinsertf128
1412       {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4},  // vextractf128 + 2*vpperm
1413                                                   // + vinsertf128
1414 
1415       {TTI::SK_PermuteTwoSrc, MVT::v16i16, 9}, // 2*vextractf128 + 6*vpperm
1416                                                // + vinsertf128
1417       {TTI::SK_PermuteTwoSrc, MVT::v8i16, 1},  // vpperm
1418       {TTI::SK_PermuteTwoSrc, MVT::v32i8, 9},  // 2*vextractf128 + 6*vpperm
1419                                                // + vinsertf128
1420       {TTI::SK_PermuteTwoSrc, MVT::v16i8, 1},  // vpperm
1421   };
1422 
1423   if (ST->hasXOP())
1424     if (const auto *Entry = CostTableLookup(XOPShuffleTbl, Kind, LT.second))
1425       return LT.first * Entry->Cost;
1426 
1427   static const CostTblEntry AVX1ShuffleTbl[] = {
1428       {TTI::SK_Broadcast, MVT::v4f64, 2},  // vperm2f128 + vpermilpd
1429       {TTI::SK_Broadcast, MVT::v8f32, 2},  // vperm2f128 + vpermilps
1430       {TTI::SK_Broadcast, MVT::v4i64, 2},  // vperm2f128 + vpermilpd
1431       {TTI::SK_Broadcast, MVT::v8i32, 2},  // vperm2f128 + vpermilps
1432       {TTI::SK_Broadcast, MVT::v16i16, 3}, // vpshuflw + vpshufd + vinsertf128
1433       {TTI::SK_Broadcast, MVT::v32i8, 2},  // vpshufb + vinsertf128
1434 
1435       {TTI::SK_Reverse, MVT::v4f64, 2},  // vperm2f128 + vpermilpd
1436       {TTI::SK_Reverse, MVT::v8f32, 2},  // vperm2f128 + vpermilps
1437       {TTI::SK_Reverse, MVT::v4i64, 2},  // vperm2f128 + vpermilpd
1438       {TTI::SK_Reverse, MVT::v8i32, 2},  // vperm2f128 + vpermilps
1439       {TTI::SK_Reverse, MVT::v16i16, 4}, // vextractf128 + 2*pshufb
1440                                          // + vinsertf128
1441       {TTI::SK_Reverse, MVT::v32i8, 4},  // vextractf128 + 2*pshufb
1442                                          // + vinsertf128
1443 
1444       {TTI::SK_Select, MVT::v4i64, 1},  // vblendpd
1445       {TTI::SK_Select, MVT::v4f64, 1},  // vblendpd
1446       {TTI::SK_Select, MVT::v8i32, 1},  // vblendps
1447       {TTI::SK_Select, MVT::v8f32, 1},  // vblendps
1448       {TTI::SK_Select, MVT::v16i16, 3}, // vpand + vpandn + vpor
1449       {TTI::SK_Select, MVT::v32i8, 3},  // vpand + vpandn + vpor
1450 
1451       {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2},  // vperm2f128 + vshufpd
1452       {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2},  // vperm2f128 + vshufpd
1453       {TTI::SK_PermuteSingleSrc, MVT::v8f32, 4},  // 2*vperm2f128 + 2*vshufps
1454       {TTI::SK_PermuteSingleSrc, MVT::v8i32, 4},  // 2*vperm2f128 + 2*vshufps
1455       {TTI::SK_PermuteSingleSrc, MVT::v16i16, 8}, // vextractf128 + 4*pshufb
1456                                                   // + 2*por + vinsertf128
1457       {TTI::SK_PermuteSingleSrc, MVT::v32i8, 8},  // vextractf128 + 4*pshufb
1458                                                   // + 2*por + vinsertf128
1459 
1460       {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3},   // 2*vperm2f128 + vshufpd
1461       {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3},   // 2*vperm2f128 + vshufpd
1462       {TTI::SK_PermuteTwoSrc, MVT::v8f32, 4},   // 2*vperm2f128 + 2*vshufps
1463       {TTI::SK_PermuteTwoSrc, MVT::v8i32, 4},   // 2*vperm2f128 + 2*vshufps
1464       {TTI::SK_PermuteTwoSrc, MVT::v16i16, 15}, // 2*vextractf128 + 8*pshufb
1465                                                 // + 4*por + vinsertf128
1466       {TTI::SK_PermuteTwoSrc, MVT::v32i8, 15},  // 2*vextractf128 + 8*pshufb
1467                                                 // + 4*por + vinsertf128
1468   };
1469 
1470   if (ST->hasAVX())
1471     if (const auto *Entry = CostTableLookup(AVX1ShuffleTbl, Kind, LT.second))
1472       return LT.first * Entry->Cost;
1473 
1474   static const CostTblEntry SSE41ShuffleTbl[] = {
1475       {TTI::SK_Select, MVT::v2i64, 1}, // pblendw
1476       {TTI::SK_Select, MVT::v2f64, 1}, // movsd
1477       {TTI::SK_Select, MVT::v4i32, 1}, // pblendw
1478       {TTI::SK_Select, MVT::v4f32, 1}, // blendps
1479       {TTI::SK_Select, MVT::v8i16, 1}, // pblendw
1480       {TTI::SK_Select, MVT::v16i8, 1}  // pblendvb
1481   };
1482 
1483   if (ST->hasSSE41())
1484     if (const auto *Entry = CostTableLookup(SSE41ShuffleTbl, Kind, LT.second))
1485       return LT.first * Entry->Cost;
1486 
1487   static const CostTblEntry SSSE3ShuffleTbl[] = {
1488       {TTI::SK_Broadcast, MVT::v8i16, 1}, // pshufb
1489       {TTI::SK_Broadcast, MVT::v16i8, 1}, // pshufb
1490 
1491       {TTI::SK_Reverse, MVT::v8i16, 1}, // pshufb
1492       {TTI::SK_Reverse, MVT::v16i8, 1}, // pshufb
1493 
1494       {TTI::SK_Select, MVT::v8i16, 3}, // 2*pshufb + por
1495       {TTI::SK_Select, MVT::v16i8, 3}, // 2*pshufb + por
1496 
1497       {TTI::SK_PermuteSingleSrc, MVT::v8i16, 1}, // pshufb
1498       {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1}, // pshufb
1499 
1500       {TTI::SK_PermuteTwoSrc, MVT::v8i16, 3}, // 2*pshufb + por
1501       {TTI::SK_PermuteTwoSrc, MVT::v16i8, 3}, // 2*pshufb + por
1502   };
1503 
1504   if (ST->hasSSSE3())
1505     if (const auto *Entry = CostTableLookup(SSSE3ShuffleTbl, Kind, LT.second))
1506       return LT.first * Entry->Cost;
1507 
1508   static const CostTblEntry SSE2ShuffleTbl[] = {
1509       {TTI::SK_Broadcast, MVT::v2f64, 1}, // shufpd
1510       {TTI::SK_Broadcast, MVT::v2i64, 1}, // pshufd
1511       {TTI::SK_Broadcast, MVT::v4i32, 1}, // pshufd
1512       {TTI::SK_Broadcast, MVT::v8i16, 2}, // pshuflw + pshufd
1513       {TTI::SK_Broadcast, MVT::v16i8, 3}, // unpck + pshuflw + pshufd
1514 
1515       {TTI::SK_Reverse, MVT::v2f64, 1}, // shufpd
1516       {TTI::SK_Reverse, MVT::v2i64, 1}, // pshufd
1517       {TTI::SK_Reverse, MVT::v4i32, 1}, // pshufd
1518       {TTI::SK_Reverse, MVT::v8i16, 3}, // pshuflw + pshufhw + pshufd
1519       {TTI::SK_Reverse, MVT::v16i8, 9}, // 2*pshuflw + 2*pshufhw
1520                                         // + 2*pshufd + 2*unpck + packus
1521 
1522       {TTI::SK_Select, MVT::v2i64, 1}, // movsd
1523       {TTI::SK_Select, MVT::v2f64, 1}, // movsd
1524       {TTI::SK_Select, MVT::v4i32, 2}, // 2*shufps
1525       {TTI::SK_Select, MVT::v8i16, 3}, // pand + pandn + por
1526       {TTI::SK_Select, MVT::v16i8, 3}, // pand + pandn + por
1527 
1528       {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1}, // shufpd
1529       {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1}, // pshufd
1530       {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1}, // pshufd
1531       {TTI::SK_PermuteSingleSrc, MVT::v8i16, 5}, // 2*pshuflw + 2*pshufhw
1532                                                   // + pshufd/unpck
1533     { TTI::SK_PermuteSingleSrc, MVT::v16i8, 10 }, // 2*pshuflw + 2*pshufhw
1534                                                   // + 2*pshufd + 2*unpck + 2*packus
1535 
1536     { TTI::SK_PermuteTwoSrc,    MVT::v2f64,  1 }, // shufpd
1537     { TTI::SK_PermuteTwoSrc,    MVT::v2i64,  1 }, // shufpd
1538     { TTI::SK_PermuteTwoSrc,    MVT::v4i32,  2 }, // 2*{unpck,movsd,pshufd}
1539     { TTI::SK_PermuteTwoSrc,    MVT::v8i16,  8 }, // blend+permute
1540     { TTI::SK_PermuteTwoSrc,    MVT::v16i8, 13 }, // blend+permute
1541   };
1542 
1543   if (ST->hasSSE2())
1544     if (const auto *Entry = CostTableLookup(SSE2ShuffleTbl, Kind, LT.second))
1545       return LT.first * Entry->Cost;
1546 
1547   static const CostTblEntry SSE1ShuffleTbl[] = {
1548     { TTI::SK_Broadcast,        MVT::v4f32, 1 }, // shufps
1549     { TTI::SK_Reverse,          MVT::v4f32, 1 }, // shufps
1550     { TTI::SK_Select,           MVT::v4f32, 2 }, // 2*shufps
1551     { TTI::SK_PermuteSingleSrc, MVT::v4f32, 1 }, // shufps
1552     { TTI::SK_PermuteTwoSrc,    MVT::v4f32, 2 }, // 2*shufps
1553   };
1554 
1555   if (ST->hasSSE1())
1556     if (const auto *Entry = CostTableLookup(SSE1ShuffleTbl, Kind, LT.second))
1557       return LT.first * Entry->Cost;
1558 
1559   return BaseT::getShuffleCost(Kind, BaseTp, Mask, Index, SubTp);
1560 }
1561 
1562 InstructionCost X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
1563                                              Type *Src,
1564                                              TTI::CastContextHint CCH,
1565                                              TTI::TargetCostKind CostKind,
1566                                              const Instruction *I) {
1567   int ISD = TLI->InstructionOpcodeToISD(Opcode);
1568   assert(ISD && "Invalid opcode");
1569 
1570   // TODO: Allow non-throughput costs that aren't binary.
1571   auto AdjustCost = [&CostKind](InstructionCost Cost) -> InstructionCost {
1572     if (CostKind != TTI::TCK_RecipThroughput)
1573       return Cost == 0 ? 0 : 1;
1574     return Cost;
1575   };
1576 
1577   // The cost tables include both specific, custom (non-legal) src/dst type
1578   // conversions and generic, legalized types. We test for customs first, before
1579   // falling back to legalization.
1580   // FIXME: Need a better design of the cost table to handle non-simple types of
1581   // potential massive combinations (elem_num x src_type x dst_type).
1582   static const TypeConversionCostTblEntry AVX512BWConversionTbl[] {
1583     { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 1 },
1584     { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 1 },
1585 
1586     // Mask sign extend has an instruction.
1587     { ISD::SIGN_EXTEND, MVT::v2i8,   MVT::v2i1,  1 },
1588     { ISD::SIGN_EXTEND, MVT::v2i16,  MVT::v2i1,  1 },
1589     { ISD::SIGN_EXTEND, MVT::v4i8,   MVT::v4i1,  1 },
1590     { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i1,  1 },
1591     { ISD::SIGN_EXTEND, MVT::v8i8,   MVT::v8i1,  1 },
1592     { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i1,  1 },
1593     { ISD::SIGN_EXTEND, MVT::v16i8,  MVT::v16i1, 1 },
1594     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
1595     { ISD::SIGN_EXTEND, MVT::v32i8,  MVT::v32i1, 1 },
1596     { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i1, 1 },
1597     { ISD::SIGN_EXTEND, MVT::v64i8,  MVT::v64i1, 1 },
1598 
1599     // Mask zero extend is a sext + shift.
1600     { ISD::ZERO_EXTEND, MVT::v2i8,   MVT::v2i1,  2 },
1601     { ISD::ZERO_EXTEND, MVT::v2i16,  MVT::v2i1,  2 },
1602     { ISD::ZERO_EXTEND, MVT::v4i8,   MVT::v4i1,  2 },
1603     { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i1,  2 },
1604     { ISD::ZERO_EXTEND, MVT::v8i8,   MVT::v8i1,  2 },
1605     { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i1,  2 },
1606     { ISD::ZERO_EXTEND, MVT::v16i8,  MVT::v16i1, 2 },
1607     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 },
1608     { ISD::ZERO_EXTEND, MVT::v32i8,  MVT::v32i1, 2 },
1609     { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i1, 2 },
1610     { ISD::ZERO_EXTEND, MVT::v64i8,  MVT::v64i1, 2 },
1611 
1612     { ISD::TRUNCATE,    MVT::v32i8,  MVT::v32i16, 2 },
1613     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 2 }, // widen to zmm
1614     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i8,   2 }, // widen to zmm
1615     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i16,  2 }, // widen to zmm
1616     { ISD::TRUNCATE,    MVT::v2i8,   MVT::v2i16,  2 }, // vpmovwb
1617     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i8,   2 }, // widen to zmm
1618     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i16,  2 }, // widen to zmm
1619     { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i16,  2 }, // vpmovwb
1620     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i8,   2 }, // widen to zmm
1621     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i16,  2 }, // widen to zmm
1622     { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i16,  2 }, // vpmovwb
1623     { ISD::TRUNCATE,    MVT::v16i1,  MVT::v16i8,  2 }, // widen to zmm
1624     { ISD::TRUNCATE,    MVT::v16i1,  MVT::v16i16, 2 }, // widen to zmm
1625     { ISD::TRUNCATE,    MVT::v32i1,  MVT::v32i8,  2 }, // widen to zmm
1626     { ISD::TRUNCATE,    MVT::v32i1,  MVT::v32i16, 2 },
1627     { ISD::TRUNCATE,    MVT::v64i1,  MVT::v64i8,  2 },
1628   };
1629 
1630   static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = {
1631     { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i64,  1 },
1632     { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i64,  1 },
1633 
1634     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i64,  1 },
1635     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i64,  1 },
1636 
1637     { ISD::FP_TO_SINT,  MVT::v8i64,  MVT::v8f32,  1 },
1638     { ISD::FP_TO_SINT,  MVT::v8i64,  MVT::v8f64,  1 },
1639 
1640     { ISD::FP_TO_UINT,  MVT::v8i64,  MVT::v8f32,  1 },
1641     { ISD::FP_TO_UINT,  MVT::v8i64,  MVT::v8f64,  1 },
1642   };
1643 
1644   // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and
1645   // 256-bit wide vectors.
1646 
1647   static const TypeConversionCostTblEntry AVX512FConversionTbl[] = {
1648     { ISD::FP_EXTEND, MVT::v8f64,   MVT::v8f32,  1 },
1649     { ISD::FP_EXTEND, MVT::v8f64,   MVT::v16f32, 3 },
1650     { ISD::FP_ROUND,  MVT::v8f32,   MVT::v8f64,  1 },
1651 
1652     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i8,   3 }, // sext+vpslld+vptestmd
1653     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i8,   3 }, // sext+vpslld+vptestmd
1654     { ISD::TRUNCATE,  MVT::v8i1,    MVT::v8i8,   3 }, // sext+vpslld+vptestmd
1655     { ISD::TRUNCATE,  MVT::v16i1,   MVT::v16i8,  3 }, // sext+vpslld+vptestmd
1656     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i16,  3 }, // sext+vpsllq+vptestmq
1657     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i16,  3 }, // sext+vpsllq+vptestmq
1658     { ISD::TRUNCATE,  MVT::v8i1,    MVT::v8i16,  3 }, // sext+vpsllq+vptestmq
1659     { ISD::TRUNCATE,  MVT::v16i1,   MVT::v16i16, 3 }, // sext+vpslld+vptestmd
1660     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i32,  2 }, // zmm vpslld+vptestmd
1661     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i32,  2 }, // zmm vpslld+vptestmd
1662     { ISD::TRUNCATE,  MVT::v8i1,    MVT::v8i32,  2 }, // zmm vpslld+vptestmd
1663     { ISD::TRUNCATE,  MVT::v16i1,   MVT::v16i32, 2 }, // vpslld+vptestmd
1664     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i64,  2 }, // zmm vpsllq+vptestmq
1665     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i64,  2 }, // zmm vpsllq+vptestmq
1666     { ISD::TRUNCATE,  MVT::v8i1,    MVT::v8i64,  2 }, // vpsllq+vptestmq
1667     { ISD::TRUNCATE,  MVT::v2i8,    MVT::v2i32,  2 }, // vpmovdb
1668     { ISD::TRUNCATE,  MVT::v4i8,    MVT::v4i32,  2 }, // vpmovdb
1669     { ISD::TRUNCATE,  MVT::v16i8,   MVT::v16i32, 2 }, // vpmovdb
1670     { ISD::TRUNCATE,  MVT::v16i16,  MVT::v16i32, 2 }, // vpmovdw
1671     { ISD::TRUNCATE,  MVT::v32i16,  MVT::v16i32, 2 }, // vpmovdw
1672     { ISD::TRUNCATE,  MVT::v2i8,    MVT::v2i64,  2 }, // vpmovqb
1673     { ISD::TRUNCATE,  MVT::v2i16,   MVT::v2i64,  1 }, // vpshufb
1674     { ISD::TRUNCATE,  MVT::v8i8,    MVT::v8i64,  2 }, // vpmovqb
1675     { ISD::TRUNCATE,  MVT::v8i16,   MVT::v8i64,  2 }, // vpmovqw
1676     { ISD::TRUNCATE,  MVT::v16i16,  MVT::v8i64,  2 }, // vpmovqw
1677     { ISD::TRUNCATE,  MVT::v32i16,  MVT::v8i64,  2 }, // vpmovqw
1678     { ISD::TRUNCATE,  MVT::v8i32,   MVT::v8i64,  1 }, // vpmovqd
1679     { ISD::TRUNCATE,  MVT::v4i32,   MVT::v4i64,  1 }, // zmm vpmovqd
1680     { ISD::TRUNCATE,  MVT::v16i8,   MVT::v16i64, 5 },// 2*vpmovqd+concat+vpmovdb
1681 
1682     { ISD::TRUNCATE,  MVT::v16i8,  MVT::v16i16,  3 }, // extend to v16i32
1683     { ISD::TRUNCATE,  MVT::v32i8,  MVT::v32i16,  8 },
1684 
1685     // Sign extend is zmm vpternlogd+vptruncdb.
1686     // Zero extend is zmm broadcast load+vptruncdw.
1687     { ISD::SIGN_EXTEND, MVT::v2i8,   MVT::v2i1,   3 },
1688     { ISD::ZERO_EXTEND, MVT::v2i8,   MVT::v2i1,   4 },
1689     { ISD::SIGN_EXTEND, MVT::v4i8,   MVT::v4i1,   3 },
1690     { ISD::ZERO_EXTEND, MVT::v4i8,   MVT::v4i1,   4 },
1691     { ISD::SIGN_EXTEND, MVT::v8i8,   MVT::v8i1,   3 },
1692     { ISD::ZERO_EXTEND, MVT::v8i8,   MVT::v8i1,   4 },
1693     { ISD::SIGN_EXTEND, MVT::v16i8,  MVT::v16i1,  3 },
1694     { ISD::ZERO_EXTEND, MVT::v16i8,  MVT::v16i1,  4 },
1695 
1696     // Sign extend is zmm vpternlogd+vptruncdw.
1697     // Zero extend is zmm vpternlogd+vptruncdw+vpsrlw.
1698     { ISD::SIGN_EXTEND, MVT::v2i16,  MVT::v2i1,   3 },
1699     { ISD::ZERO_EXTEND, MVT::v2i16,  MVT::v2i1,   4 },
1700     { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i1,   3 },
1701     { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i1,   4 },
1702     { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i1,   3 },
1703     { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i1,   4 },
1704     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1,  3 },
1705     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1,  4 },
1706 
1707     { ISD::SIGN_EXTEND, MVT::v2i32,  MVT::v2i1,   1 }, // zmm vpternlogd
1708     { ISD::ZERO_EXTEND, MVT::v2i32,  MVT::v2i1,   2 }, // zmm vpternlogd+psrld
1709     { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i1,   1 }, // zmm vpternlogd
1710     { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i1,   2 }, // zmm vpternlogd+psrld
1711     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   1 }, // zmm vpternlogd
1712     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   2 }, // zmm vpternlogd+psrld
1713     { ISD::SIGN_EXTEND, MVT::v2i64,  MVT::v2i1,   1 }, // zmm vpternlogq
1714     { ISD::ZERO_EXTEND, MVT::v2i64,  MVT::v2i1,   2 }, // zmm vpternlogq+psrlq
1715     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   1 }, // zmm vpternlogq
1716     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   2 }, // zmm vpternlogq+psrlq
1717 
1718     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1,  1 }, // vpternlogd
1719     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1,  2 }, // vpternlogd+psrld
1720     { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i1,   1 }, // vpternlogq
1721     { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i1,   2 }, // vpternlogq+psrlq
1722 
1723     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
1724     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
1725     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
1726     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
1727     { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i8,   1 },
1728     { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i8,   1 },
1729     { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i16,  1 },
1730     { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i16,  1 },
1731     { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i32,  1 },
1732     { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i32,  1 },
1733 
1734     { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8,  3 }, // FIXME: May not be right
1735     { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8,  3 }, // FIXME: May not be right
1736 
1737     { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i1,   4 },
1738     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i1,  3 },
1739     { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v16i8,  2 },
1740     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i8,  1 },
1741     { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i16,  2 },
1742     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 1 },
1743     { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  1 },
1744     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 1 },
1745 
1746     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i1,   4 },
1747     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i1,  3 },
1748     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v16i8,  2 },
1749     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i8,  1 },
1750     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i16,  2 },
1751     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i16, 1 },
1752     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  1 },
1753     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i32, 1 },
1754     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i64, 26 },
1755     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i64,  5 },
1756 
1757     { ISD::FP_TO_SINT,  MVT::v16i8,  MVT::v16f32, 2 },
1758     { ISD::FP_TO_SINT,  MVT::v16i8,  MVT::v16f64, 7 },
1759     { ISD::FP_TO_SINT,  MVT::v32i8,  MVT::v32f64,15 },
1760     { ISD::FP_TO_SINT,  MVT::v64i8,  MVT::v64f32,11 },
1761     { ISD::FP_TO_SINT,  MVT::v64i8,  MVT::v64f64,31 },
1762     { ISD::FP_TO_SINT,  MVT::v8i16,  MVT::v8f64,  3 },
1763     { ISD::FP_TO_SINT,  MVT::v16i16, MVT::v16f64, 7 },
1764     { ISD::FP_TO_SINT,  MVT::v32i16, MVT::v32f32, 5 },
1765     { ISD::FP_TO_SINT,  MVT::v32i16, MVT::v32f64,15 },
1766     { ISD::FP_TO_SINT,  MVT::v8i32,  MVT::v8f64,  1 },
1767     { ISD::FP_TO_SINT,  MVT::v16i32, MVT::v16f64, 3 },
1768 
1769     { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f64,  1 },
1770     { ISD::FP_TO_UINT,  MVT::v8i16,  MVT::v8f64,  3 },
1771     { ISD::FP_TO_UINT,  MVT::v8i8,   MVT::v8f64,  3 },
1772     { ISD::FP_TO_UINT,  MVT::v16i32, MVT::v16f32, 1 },
1773     { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v16f32, 3 },
1774     { ISD::FP_TO_UINT,  MVT::v16i8,  MVT::v16f32, 3 },
1775   };
1776 
1777   static const TypeConversionCostTblEntry AVX512BWVLConversionTbl[] {
1778     // Mask sign extend has an instruction.
1779     { ISD::SIGN_EXTEND, MVT::v2i8,   MVT::v2i1,  1 },
1780     { ISD::SIGN_EXTEND, MVT::v2i16,  MVT::v2i1,  1 },
1781     { ISD::SIGN_EXTEND, MVT::v4i8,   MVT::v4i1,  1 },
1782     { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i1,  1 },
1783     { ISD::SIGN_EXTEND, MVT::v8i8,   MVT::v8i1,  1 },
1784     { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i1,  1 },
1785     { ISD::SIGN_EXTEND, MVT::v16i8,  MVT::v16i1, 1 },
1786     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
1787     { ISD::SIGN_EXTEND, MVT::v32i8,  MVT::v32i1, 1 },
1788 
1789     // Mask zero extend is a sext + shift.
1790     { ISD::ZERO_EXTEND, MVT::v2i8,   MVT::v2i1,  2 },
1791     { ISD::ZERO_EXTEND, MVT::v2i16,  MVT::v2i1,  2 },
1792     { ISD::ZERO_EXTEND, MVT::v4i8,   MVT::v4i1,  2 },
1793     { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i1,  2 },
1794     { ISD::ZERO_EXTEND, MVT::v8i8,   MVT::v8i1,  2 },
1795     { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i1,  2 },
1796     { ISD::ZERO_EXTEND, MVT::v16i8,  MVT::v16i1, 2 },
1797     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 },
1798     { ISD::ZERO_EXTEND, MVT::v32i8,  MVT::v32i1, 2 },
1799 
1800     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 2 },
1801     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i8,   2 }, // vpsllw+vptestmb
1802     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i16,  2 }, // vpsllw+vptestmw
1803     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i8,   2 }, // vpsllw+vptestmb
1804     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i16,  2 }, // vpsllw+vptestmw
1805     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i8,   2 }, // vpsllw+vptestmb
1806     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i16,  2 }, // vpsllw+vptestmw
1807     { ISD::TRUNCATE,    MVT::v16i1,  MVT::v16i8,  2 }, // vpsllw+vptestmb
1808     { ISD::TRUNCATE,    MVT::v16i1,  MVT::v16i16, 2 }, // vpsllw+vptestmw
1809     { ISD::TRUNCATE,    MVT::v32i1,  MVT::v32i8,  2 }, // vpsllw+vptestmb
1810   };
1811 
1812   static const TypeConversionCostTblEntry AVX512DQVLConversionTbl[] = {
1813     { ISD::SINT_TO_FP,  MVT::v2f32,  MVT::v2i64,  1 },
1814     { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  1 },
1815     { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i64,  1 },
1816     { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  1 },
1817 
1818     { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i64,  1 },
1819     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  1 },
1820     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i64,  1 },
1821     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  1 },
1822 
1823     { ISD::FP_TO_SINT,  MVT::v2i64,  MVT::v4f32,  1 },
1824     { ISD::FP_TO_SINT,  MVT::v4i64,  MVT::v4f32,  1 },
1825     { ISD::FP_TO_SINT,  MVT::v2i64,  MVT::v2f64,  1 },
1826     { ISD::FP_TO_SINT,  MVT::v4i64,  MVT::v4f64,  1 },
1827 
1828     { ISD::FP_TO_UINT,  MVT::v2i64,  MVT::v4f32,  1 },
1829     { ISD::FP_TO_UINT,  MVT::v4i64,  MVT::v4f32,  1 },
1830     { ISD::FP_TO_UINT,  MVT::v2i64,  MVT::v2f64,  1 },
1831     { ISD::FP_TO_UINT,  MVT::v4i64,  MVT::v4f64,  1 },
1832   };
1833 
1834   static const TypeConversionCostTblEntry AVX512VLConversionTbl[] = {
1835     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i8,   3 }, // sext+vpslld+vptestmd
1836     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i8,   3 }, // sext+vpslld+vptestmd
1837     { ISD::TRUNCATE,  MVT::v8i1,    MVT::v8i8,   3 }, // sext+vpslld+vptestmd
1838     { ISD::TRUNCATE,  MVT::v16i1,   MVT::v16i8,  8 }, // split+2*v8i8
1839     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i16,  3 }, // sext+vpsllq+vptestmq
1840     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i16,  3 }, // sext+vpsllq+vptestmq
1841     { ISD::TRUNCATE,  MVT::v8i1,    MVT::v8i16,  3 }, // sext+vpsllq+vptestmq
1842     { ISD::TRUNCATE,  MVT::v16i1,   MVT::v16i16, 8 }, // split+2*v8i16
1843     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i32,  2 }, // vpslld+vptestmd
1844     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i32,  2 }, // vpslld+vptestmd
1845     { ISD::TRUNCATE,  MVT::v8i1,    MVT::v8i32,  2 }, // vpslld+vptestmd
1846     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i64,  2 }, // vpsllq+vptestmq
1847     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i64,  2 }, // vpsllq+vptestmq
1848     { ISD::TRUNCATE,  MVT::v4i32,   MVT::v4i64,  1 }, // vpmovqd
1849     { ISD::TRUNCATE,  MVT::v4i8,    MVT::v4i64,  2 }, // vpmovqb
1850     { ISD::TRUNCATE,  MVT::v4i16,   MVT::v4i64,  2 }, // vpmovqw
1851     { ISD::TRUNCATE,  MVT::v8i8,    MVT::v8i32,  2 }, // vpmovwb
1852 
1853     // sign extend is vpcmpeq+maskedmove+vpmovdw+vpacksswb
1854     // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw+vpackuswb
1855     { ISD::SIGN_EXTEND, MVT::v2i8,   MVT::v2i1,   5 },
1856     { ISD::ZERO_EXTEND, MVT::v2i8,   MVT::v2i1,   6 },
1857     { ISD::SIGN_EXTEND, MVT::v4i8,   MVT::v4i1,   5 },
1858     { ISD::ZERO_EXTEND, MVT::v4i8,   MVT::v4i1,   6 },
1859     { ISD::SIGN_EXTEND, MVT::v8i8,   MVT::v8i1,   5 },
1860     { ISD::ZERO_EXTEND, MVT::v8i8,   MVT::v8i1,   6 },
1861     { ISD::SIGN_EXTEND, MVT::v16i8,  MVT::v16i1, 10 },
1862     { ISD::ZERO_EXTEND, MVT::v16i8,  MVT::v16i1, 12 },
1863 
1864     // sign extend is vpcmpeq+maskedmove+vpmovdw
1865     // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw
1866     { ISD::SIGN_EXTEND, MVT::v2i16,  MVT::v2i1,   4 },
1867     { ISD::ZERO_EXTEND, MVT::v2i16,  MVT::v2i1,   5 },
1868     { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i1,   4 },
1869     { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i1,   5 },
1870     { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i1,   4 },
1871     { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i1,   5 },
1872     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 10 },
1873     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 12 },
1874 
1875     { ISD::SIGN_EXTEND, MVT::v2i32,  MVT::v2i1,   1 }, // vpternlogd
1876     { ISD::ZERO_EXTEND, MVT::v2i32,  MVT::v2i1,   2 }, // vpternlogd+psrld
1877     { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i1,   1 }, // vpternlogd
1878     { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i1,   2 }, // vpternlogd+psrld
1879     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   1 }, // vpternlogd
1880     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   2 }, // vpternlogd+psrld
1881     { ISD::SIGN_EXTEND, MVT::v2i64,  MVT::v2i1,   1 }, // vpternlogq
1882     { ISD::ZERO_EXTEND, MVT::v2i64,  MVT::v2i1,   2 }, // vpternlogq+psrlq
1883     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   1 }, // vpternlogq
1884     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   2 }, // vpternlogq+psrlq
1885 
1886     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v16i8,  1 },
1887     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v16i8,  1 },
1888     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v16i8,  1 },
1889     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v16i8,  1 },
1890     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
1891     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
1892     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v8i16,  1 },
1893     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v8i16,  1 },
1894     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
1895     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
1896     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },
1897     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },
1898 
1899     { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v16i8,  1 },
1900     { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v16i8,  1 },
1901     { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v8i16,  1 },
1902     { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  1 },
1903 
1904     { ISD::UINT_TO_FP,  MVT::f32,    MVT::i64,    1 },
1905     { ISD::UINT_TO_FP,  MVT::f64,    MVT::i64,    1 },
1906     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v16i8,  1 },
1907     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v16i8,  1 },
1908     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v8i16,  1 },
1909     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  1 },
1910     { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i32,  1 },
1911     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  1 },
1912     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  1 },
1913     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  1 },
1914     { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i64,  5 },
1915     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  5 },
1916     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  5 },
1917 
1918     { ISD::FP_TO_SINT,  MVT::v16i8,  MVT::v8f32,  2 },
1919     { ISD::FP_TO_SINT,  MVT::v16i8,  MVT::v16f32, 2 },
1920     { ISD::FP_TO_SINT,  MVT::v32i8,  MVT::v32f32, 5 },
1921 
1922     { ISD::FP_TO_UINT,  MVT::i64,    MVT::f32,    1 },
1923     { ISD::FP_TO_UINT,  MVT::i64,    MVT::f64,    1 },
1924     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f32,  1 },
1925     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v2f64,  1 },
1926     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f64,  1 },
1927     { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f32,  1 },
1928     { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f64,  1 },
1929   };
1930 
1931   static const TypeConversionCostTblEntry AVX2ConversionTbl[] = {
1932     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
1933     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
1934     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
1935     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
1936     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1,  1 },
1937     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1,  1 },
1938 
1939     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v16i8,  2 },
1940     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v16i8,  2 },
1941     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v16i8,  2 },
1942     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v16i8,  2 },
1943     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  2 },
1944     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  2 },
1945     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v8i16,  2 },
1946     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v8i16,  2 },
1947     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  2 },
1948     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  2 },
1949     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 3 },
1950     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 3 },
1951     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  2 },
1952     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  2 },
1953 
1954     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i32,  2 },
1955 
1956     { ISD::TRUNCATE,    MVT::v16i16, MVT::v16i32, 4 },
1957     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i32, 4 },
1958     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v8i16,  1 },
1959     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v4i32,  1 },
1960     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v2i64,  1 },
1961     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v8i32,  4 },
1962     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v4i64,  4 },
1963     { ISD::TRUNCATE,    MVT::v8i16,  MVT::v4i32,  1 },
1964     { ISD::TRUNCATE,    MVT::v8i16,  MVT::v2i64,  1 },
1965     { ISD::TRUNCATE,    MVT::v8i16,  MVT::v4i64,  5 },
1966     { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  1 },
1967     { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  2 },
1968 
1969     { ISD::FP_EXTEND,   MVT::v8f64,  MVT::v8f32,  3 },
1970     { ISD::FP_ROUND,    MVT::v8f32,  MVT::v8f64,  3 },
1971 
1972     { ISD::FP_TO_SINT,  MVT::v16i16, MVT::v8f32,  1 },
1973     { ISD::FP_TO_SINT,  MVT::v4i32,  MVT::v4f64,  1 },
1974     { ISD::FP_TO_SINT,  MVT::v8i32,  MVT::v8f32,  1 },
1975     { ISD::FP_TO_SINT,  MVT::v8i32,  MVT::v8f64,  3 },
1976 
1977     { ISD::FP_TO_UINT,  MVT::i64,    MVT::f32,    3 },
1978     { ISD::FP_TO_UINT,  MVT::i64,    MVT::f64,    3 },
1979     { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v8f32,  1 },
1980     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f32,  3 },
1981     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v2f64,  4 },
1982     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f64,  4 },
1983     { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f32,  3 },
1984     { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v4f64,  4 },
1985 
1986     { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v16i8,  2 },
1987     { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v16i8,  2 },
1988     { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v8i16,  2 },
1989     { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  2 },
1990     { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  1 },
1991     { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  1 },
1992     { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  3 },
1993 
1994     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v16i8,  2 },
1995     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v16i8,  2 },
1996     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v8i16,  2 },
1997     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  2 },
1998     { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i32,  2 },
1999     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i32,  1 },
2000     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  2 },
2001     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  2 },
2002     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  2 },
2003     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  4 },
2004   };
2005 
2006   static const TypeConversionCostTblEntry AVXConversionTbl[] = {
2007     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   6 },
2008     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   4 },
2009     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   7 },
2010     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   4 },
2011     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1,  4 },
2012     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1,  4 },
2013 
2014     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v16i8,  3 },
2015     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v16i8,  3 },
2016     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v16i8,  3 },
2017     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v16i8,  3 },
2018     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  3 },
2019     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  3 },
2020     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v8i16,  3 },
2021     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v8i16,  3 },
2022     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  3 },
2023     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  3 },
2024     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  3 },
2025     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  3 },
2026 
2027     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i64,  4 },
2028     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i32,  5 },
2029     { ISD::TRUNCATE,    MVT::v16i1,  MVT::v16i16, 4 },
2030     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i64,  9 },
2031     { ISD::TRUNCATE,    MVT::v16i1,  MVT::v16i64, 11 },
2032 
2033     { ISD::TRUNCATE,    MVT::v16i16, MVT::v16i32, 6 },
2034     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i32, 6 },
2035     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 2 }, // and+extract+packuswb
2036     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v8i32,  5 },
2037     { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  5 },
2038     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v4i64,  5 },
2039     { ISD::TRUNCATE,    MVT::v8i16,  MVT::v4i64,  3 }, // and+extract+2*packusdw
2040     { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  2 },
2041 
2042     { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   3 },
2043     { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   3 },
2044     { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   8 },
2045     { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v16i8,  4 },
2046     { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v16i8,  2 },
2047     { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  4 },
2048     { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v8i16,  2 },
2049     { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  2 },
2050     { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  2 },
2051     { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  4 },
2052     { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v2i64,  5 },
2053     { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i64,  8 },
2054 
2055     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   7 },
2056     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   7 },
2057     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   6 },
2058     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v16i8,  4 },
2059     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v16i8,  2 },
2060     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  4 },
2061     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v8i16,  2 },
2062     { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i32,  4 },
2063     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i32,  4 },
2064     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  5 },
2065     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  6 },
2066     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  8 },
2067     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i32, 10 },
2068     { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i64, 10 },
2069     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i64, 18 },
2070     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  5 },
2071     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64, 10 },
2072 
2073     { ISD::FP_TO_SINT,  MVT::v16i8,  MVT::v8f32,  2 },
2074     { ISD::FP_TO_SINT,  MVT::v16i8,  MVT::v4f64,  2 },
2075     { ISD::FP_TO_SINT,  MVT::v32i8,  MVT::v8f32,  2 },
2076     { ISD::FP_TO_SINT,  MVT::v32i8,  MVT::v4f64,  2 },
2077     { ISD::FP_TO_SINT,  MVT::v8i16,  MVT::v8f32,  2 },
2078     { ISD::FP_TO_SINT,  MVT::v8i16,  MVT::v4f64,  2 },
2079     { ISD::FP_TO_SINT,  MVT::v16i16, MVT::v8f32,  2 },
2080     { ISD::FP_TO_SINT,  MVT::v16i16, MVT::v4f64,  2 },
2081     { ISD::FP_TO_SINT,  MVT::v4i32,  MVT::v4f64,  2 },
2082     { ISD::FP_TO_SINT,  MVT::v8i32,  MVT::v8f32,  2 },
2083     { ISD::FP_TO_SINT,  MVT::v8i32,  MVT::v8f64,  5 },
2084 
2085     { ISD::FP_TO_UINT,  MVT::v16i8,  MVT::v8f32,  2 },
2086     { ISD::FP_TO_UINT,  MVT::v16i8,  MVT::v4f64,  2 },
2087     { ISD::FP_TO_UINT,  MVT::v32i8,  MVT::v8f32,  2 },
2088     { ISD::FP_TO_UINT,  MVT::v32i8,  MVT::v4f64,  2 },
2089     { ISD::FP_TO_UINT,  MVT::v8i16,  MVT::v8f32,  2 },
2090     { ISD::FP_TO_UINT,  MVT::v8i16,  MVT::v4f64,  2 },
2091     { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v8f32,  2 },
2092     { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v4f64,  2 },
2093     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f32,  3 },
2094     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v2f64,  4 },
2095     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f64,  6 },
2096     { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f32,  7 },
2097     { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v4f64,  7 },
2098 
2099     { ISD::FP_EXTEND,   MVT::v4f64,  MVT::v4f32,  1 },
2100     { ISD::FP_ROUND,    MVT::v4f32,  MVT::v4f64,  1 },
2101   };
2102 
2103   static const TypeConversionCostTblEntry SSE41ConversionTbl[] = {
2104     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v16i8,   1 },
2105     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v16i8,   1 },
2106     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v16i8,   1 },
2107     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v16i8,   1 },
2108     { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v16i8,   1 },
2109     { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v16i8,   1 },
2110     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v8i16,   1 },
2111     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v8i16,   1 },
2112     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v8i16,   1 },
2113     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v8i16,   1 },
2114     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v4i32,   1 },
2115     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v4i32,   1 },
2116 
2117     // These truncates end up widening elements.
2118     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i8,   1 }, // PMOVXZBQ
2119     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i16,  1 }, // PMOVXZWQ
2120     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i8,   1 }, // PMOVXZBD
2121 
2122     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v4i32,  2 },
2123     { ISD::TRUNCATE,    MVT::v8i16,  MVT::v4i32,  2 },
2124     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v2i64,  2 },
2125 
2126     { ISD::SINT_TO_FP,  MVT::f32,    MVT::i32,    1 },
2127     { ISD::SINT_TO_FP,  MVT::f64,    MVT::i32,    1 },
2128     { ISD::SINT_TO_FP,  MVT::f32,    MVT::i64,    1 },
2129     { ISD::SINT_TO_FP,  MVT::f64,    MVT::i64,    1 },
2130     { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v16i8,  1 },
2131     { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v16i8,  1 },
2132     { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v8i16,  1 },
2133     { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v8i16,  1 },
2134     { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  1 },
2135     { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v4i32,  1 },
2136     { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  2 },
2137 
2138     { ISD::UINT_TO_FP,  MVT::f32,    MVT::i32,    1 },
2139     { ISD::UINT_TO_FP,  MVT::f64,    MVT::i32,    1 },
2140     { ISD::UINT_TO_FP,  MVT::f32,    MVT::i64,    4 },
2141     { ISD::UINT_TO_FP,  MVT::f64,    MVT::i64,    4 },
2142     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v16i8,  1 },
2143     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v16i8,  1 },
2144     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v8i16,  1 },
2145     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v8i16,  1 },
2146     { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i32,  3 },
2147     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  3 },
2148     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v4i32,  2 },
2149     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v2i64, 12 },
2150     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i64, 22 },
2151     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  4 },
2152 
2153     { ISD::FP_TO_SINT,  MVT::i32,    MVT::f32,    1 },
2154     { ISD::FP_TO_SINT,  MVT::i64,    MVT::f32,    1 },
2155     { ISD::FP_TO_SINT,  MVT::i32,    MVT::f64,    1 },
2156     { ISD::FP_TO_SINT,  MVT::i64,    MVT::f64,    1 },
2157     { ISD::FP_TO_SINT,  MVT::v16i8,  MVT::v4f32,  2 },
2158     { ISD::FP_TO_SINT,  MVT::v16i8,  MVT::v2f64,  2 },
2159     { ISD::FP_TO_SINT,  MVT::v8i16,  MVT::v4f32,  1 },
2160     { ISD::FP_TO_SINT,  MVT::v8i16,  MVT::v2f64,  1 },
2161     { ISD::FP_TO_SINT,  MVT::v4i32,  MVT::v4f32,  1 },
2162     { ISD::FP_TO_SINT,  MVT::v4i32,  MVT::v2f64,  1 },
2163 
2164     { ISD::FP_TO_UINT,  MVT::i32,    MVT::f32,    1 },
2165     { ISD::FP_TO_UINT,  MVT::i64,    MVT::f32,    4 },
2166     { ISD::FP_TO_UINT,  MVT::i32,    MVT::f64,    1 },
2167     { ISD::FP_TO_UINT,  MVT::i64,    MVT::f64,    4 },
2168     { ISD::FP_TO_UINT,  MVT::v16i8,  MVT::v4f32,  2 },
2169     { ISD::FP_TO_UINT,  MVT::v16i8,  MVT::v2f64,  2 },
2170     { ISD::FP_TO_UINT,  MVT::v8i16,  MVT::v4f32,  1 },
2171     { ISD::FP_TO_UINT,  MVT::v8i16,  MVT::v2f64,  1 },
2172     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f32,  4 },
2173     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v2f64,  4 },
2174   };
2175 
2176   static const TypeConversionCostTblEntry SSE2ConversionTbl[] = {
2177     // These are somewhat magic numbers justified by comparing the
2178     // output of llvm-mca for our various supported scheduler models
2179     // and basing it off the worst case scenario.
2180     { ISD::SINT_TO_FP,  MVT::f32,    MVT::i32,    3 },
2181     { ISD::SINT_TO_FP,  MVT::f64,    MVT::i32,    3 },
2182     { ISD::SINT_TO_FP,  MVT::f32,    MVT::i64,    3 },
2183     { ISD::SINT_TO_FP,  MVT::f64,    MVT::i64,    3 },
2184     { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v16i8,  3 },
2185     { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v16i8,  4 },
2186     { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v8i16,  3 },
2187     { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v8i16,  4 },
2188     { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  3 },
2189     { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v4i32,  4 },
2190     { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v2i64,  8 },
2191     { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  8 },
2192 
2193     { ISD::UINT_TO_FP,  MVT::f32,    MVT::i32,    3 },
2194     { ISD::UINT_TO_FP,  MVT::f64,    MVT::i32,    3 },
2195     { ISD::UINT_TO_FP,  MVT::f32,    MVT::i64,    8 },
2196     { ISD::UINT_TO_FP,  MVT::f64,    MVT::i64,    9 },
2197     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v16i8,  4 },
2198     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v16i8,  4 },
2199     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v8i16,  4 },
2200     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v8i16,  4 },
2201     { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i32,  7 },
2202     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v4i32,  7 },
2203     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  5 },
2204     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64, 15 },
2205     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v2i64, 18 },
2206 
2207     { ISD::FP_TO_SINT,  MVT::i32,    MVT::f32,    4 },
2208     { ISD::FP_TO_SINT,  MVT::i64,    MVT::f32,    4 },
2209     { ISD::FP_TO_SINT,  MVT::i32,    MVT::f64,    4 },
2210     { ISD::FP_TO_SINT,  MVT::i64,    MVT::f64,    4 },
2211     { ISD::FP_TO_SINT,  MVT::v16i8,  MVT::v4f32,  6 },
2212     { ISD::FP_TO_SINT,  MVT::v16i8,  MVT::v2f64,  6 },
2213     { ISD::FP_TO_SINT,  MVT::v8i16,  MVT::v4f32,  5 },
2214     { ISD::FP_TO_SINT,  MVT::v8i16,  MVT::v2f64,  5 },
2215     { ISD::FP_TO_SINT,  MVT::v4i32,  MVT::v4f32,  4 },
2216     { ISD::FP_TO_SINT,  MVT::v4i32,  MVT::v2f64,  4 },
2217 
2218     { ISD::FP_TO_UINT,  MVT::i32,    MVT::f32,    4 },
2219     { ISD::FP_TO_UINT,  MVT::i64,    MVT::f32,    4 },
2220     { ISD::FP_TO_UINT,  MVT::i32,    MVT::f64,    4 },
2221     { ISD::FP_TO_UINT,  MVT::i64,    MVT::f64,   15 },
2222     { ISD::FP_TO_UINT,  MVT::v16i8,  MVT::v4f32,  6 },
2223     { ISD::FP_TO_UINT,  MVT::v16i8,  MVT::v2f64,  6 },
2224     { ISD::FP_TO_UINT,  MVT::v8i16,  MVT::v4f32,  5 },
2225     { ISD::FP_TO_UINT,  MVT::v8i16,  MVT::v2f64,  5 },
2226     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f32,  8 },
2227     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v2f64,  8 },
2228 
2229     { ISD::ZERO_EXTEND, MVT::v2i64,  MVT::v16i8,  4 },
2230     { ISD::SIGN_EXTEND, MVT::v2i64,  MVT::v16i8,  4 },
2231     { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v16i8,  2 },
2232     { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v16i8,  3 },
2233     { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v16i8,  1 },
2234     { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v16i8,  2 },
2235     { ISD::ZERO_EXTEND, MVT::v2i64,  MVT::v8i16,  2 },
2236     { ISD::SIGN_EXTEND, MVT::v2i64,  MVT::v8i16,  3 },
2237     { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v8i16,  1 },
2238     { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v8i16,  2 },
2239     { ISD::ZERO_EXTEND, MVT::v2i64,  MVT::v4i32,  1 },
2240     { ISD::SIGN_EXTEND, MVT::v2i64,  MVT::v4i32,  2 },
2241 
2242     // These truncates are really widening elements.
2243     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i32,  1 }, // PSHUFD
2244     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i16,  2 }, // PUNPCKLWD+DQ
2245     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i8,   3 }, // PUNPCKLBW+WD+PSHUFD
2246     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i16,  1 }, // PUNPCKLWD
2247     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i8,   2 }, // PUNPCKLBW+WD
2248     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i8,   1 }, // PUNPCKLBW
2249 
2250     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v8i16,  2 }, // PAND+PACKUSWB
2251     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 3 },
2252     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v4i32,  3 }, // PAND+2*PACKUSWB
2253     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i32, 7 },
2254     { ISD::TRUNCATE,    MVT::v2i16,  MVT::v2i32,  1 },
2255     { ISD::TRUNCATE,    MVT::v8i16,  MVT::v4i32,  3 },
2256     { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  5 },
2257     { ISD::TRUNCATE,    MVT::v16i16, MVT::v16i32,10 },
2258     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v2i64,  4 }, // PAND+3*PACKUSWB
2259     { ISD::TRUNCATE,    MVT::v8i16,  MVT::v2i64,  2 }, // PSHUFD+PSHUFLW
2260     { ISD::TRUNCATE,    MVT::v4i32,  MVT::v2i64,  1 }, // PSHUFD
2261   };
2262 
2263   // Attempt to map directly to (simple) MVT types to let us match custom entries.
2264   EVT SrcTy = TLI->getValueType(DL, Src);
2265   EVT DstTy = TLI->getValueType(DL, Dst);
2266 
2267   // The function getSimpleVT only handles simple value types.
2268   if (SrcTy.isSimple() && DstTy.isSimple()) {
2269     MVT SimpleSrcTy = SrcTy.getSimpleVT();
2270     MVT SimpleDstTy = DstTy.getSimpleVT();
2271 
2272     if (ST->useAVX512Regs()) {
2273       if (ST->hasBWI())
2274         if (const auto *Entry = ConvertCostTableLookup(
2275                 AVX512BWConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
2276           return AdjustCost(Entry->Cost);
2277 
2278       if (ST->hasDQI())
2279         if (const auto *Entry = ConvertCostTableLookup(
2280                 AVX512DQConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
2281           return AdjustCost(Entry->Cost);
2282 
2283       if (ST->hasAVX512())
2284         if (const auto *Entry = ConvertCostTableLookup(
2285                 AVX512FConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
2286           return AdjustCost(Entry->Cost);
2287     }
2288 
2289     if (ST->hasBWI())
2290       if (const auto *Entry = ConvertCostTableLookup(
2291               AVX512BWVLConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
2292         return AdjustCost(Entry->Cost);
2293 
2294     if (ST->hasDQI())
2295       if (const auto *Entry = ConvertCostTableLookup(
2296               AVX512DQVLConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
2297         return AdjustCost(Entry->Cost);
2298 
2299     if (ST->hasAVX512())
2300       if (const auto *Entry = ConvertCostTableLookup(AVX512VLConversionTbl, ISD,
2301                                                      SimpleDstTy, SimpleSrcTy))
2302         return AdjustCost(Entry->Cost);
2303 
2304     if (ST->hasAVX2()) {
2305       if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
2306                                                      SimpleDstTy, SimpleSrcTy))
2307         return AdjustCost(Entry->Cost);
2308     }
2309 
2310     if (ST->hasAVX()) {
2311       if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
2312                                                      SimpleDstTy, SimpleSrcTy))
2313         return AdjustCost(Entry->Cost);
2314     }
2315 
2316     if (ST->hasSSE41()) {
2317       if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
2318                                                      SimpleDstTy, SimpleSrcTy))
2319         return AdjustCost(Entry->Cost);
2320     }
2321 
2322     if (ST->hasSSE2()) {
2323       if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
2324                                                      SimpleDstTy, SimpleSrcTy))
2325         return AdjustCost(Entry->Cost);
2326     }
2327   }
2328 
2329   // Fall back to legalized types.
2330   std::pair<InstructionCost, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
2331   std::pair<InstructionCost, MVT> LTDest =
2332       TLI->getTypeLegalizationCost(DL, Dst);
2333 
2334   if (ST->useAVX512Regs()) {
2335     if (ST->hasBWI())
2336       if (const auto *Entry = ConvertCostTableLookup(
2337               AVX512BWConversionTbl, ISD, LTDest.second, LTSrc.second))
2338         return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2339 
2340     if (ST->hasDQI())
2341       if (const auto *Entry = ConvertCostTableLookup(
2342               AVX512DQConversionTbl, ISD, LTDest.second, LTSrc.second))
2343         return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2344 
2345     if (ST->hasAVX512())
2346       if (const auto *Entry = ConvertCostTableLookup(
2347               AVX512FConversionTbl, ISD, LTDest.second, LTSrc.second))
2348         return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2349   }
2350 
2351   if (ST->hasBWI())
2352     if (const auto *Entry = ConvertCostTableLookup(AVX512BWVLConversionTbl, ISD,
2353                                                    LTDest.second, LTSrc.second))
2354       return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2355 
2356   if (ST->hasDQI())
2357     if (const auto *Entry = ConvertCostTableLookup(AVX512DQVLConversionTbl, ISD,
2358                                                    LTDest.second, LTSrc.second))
2359       return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2360 
2361   if (ST->hasAVX512())
2362     if (const auto *Entry = ConvertCostTableLookup(AVX512VLConversionTbl, ISD,
2363                                                    LTDest.second, LTSrc.second))
2364       return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2365 
2366   if (ST->hasAVX2())
2367     if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
2368                                                    LTDest.second, LTSrc.second))
2369       return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2370 
2371   if (ST->hasAVX())
2372     if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
2373                                                    LTDest.second, LTSrc.second))
2374       return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2375 
2376   if (ST->hasSSE41())
2377     if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
2378                                                    LTDest.second, LTSrc.second))
2379       return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2380 
2381   if (ST->hasSSE2())
2382     if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
2383                                                    LTDest.second, LTSrc.second))
2384       return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2385 
2386   // Fallback, for i8/i16 sitofp/uitofp cases we need to extend to i32 for
2387   // sitofp.
2388   if ((ISD == ISD::SINT_TO_FP || ISD == ISD::UINT_TO_FP) &&
2389       1 < Src->getScalarSizeInBits() && Src->getScalarSizeInBits() < 32) {
2390     Type *ExtSrc = Src->getWithNewBitWidth(32);
2391     unsigned ExtOpc =
2392         (ISD == ISD::SINT_TO_FP) ? Instruction::SExt : Instruction::ZExt;
2393 
2394     // For scalar loads the extend would be free.
2395     InstructionCost ExtCost = 0;
2396     if (!(Src->isIntegerTy() && I && isa<LoadInst>(I->getOperand(0))))
2397       ExtCost = getCastInstrCost(ExtOpc, ExtSrc, Src, CCH, CostKind);
2398 
2399     return ExtCost + getCastInstrCost(Instruction::SIToFP, Dst, ExtSrc,
2400                                       TTI::CastContextHint::None, CostKind);
2401   }
2402 
2403   // Fallback for fptosi/fptoui i8/i16 cases we need to truncate from fptosi
2404   // i32.
2405   if ((ISD == ISD::FP_TO_SINT || ISD == ISD::FP_TO_UINT) &&
2406       1 < Dst->getScalarSizeInBits() && Dst->getScalarSizeInBits() < 32) {
2407     Type *TruncDst = Dst->getWithNewBitWidth(32);
2408     return getCastInstrCost(Instruction::FPToSI, TruncDst, Src, CCH, CostKind) +
2409            getCastInstrCost(Instruction::Trunc, Dst, TruncDst,
2410                             TTI::CastContextHint::None, CostKind);
2411   }
2412 
2413   return AdjustCost(
2414       BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
2415 }
2416 
2417 InstructionCost X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
2418                                                Type *CondTy,
2419                                                CmpInst::Predicate VecPred,
2420                                                TTI::TargetCostKind CostKind,
2421                                                const Instruction *I) {
2422   // TODO: Handle other cost kinds.
2423   if (CostKind != TTI::TCK_RecipThroughput)
2424     return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
2425                                      I);
2426 
2427   // Legalize the type.
2428   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
2429 
2430   MVT MTy = LT.second;
2431 
2432   int ISD = TLI->InstructionOpcodeToISD(Opcode);
2433   assert(ISD && "Invalid opcode");
2434 
2435   unsigned ExtraCost = 0;
2436   if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) {
2437     // Some vector comparison predicates cost extra instructions.
2438     // TODO: Should we invert this and assume worst case cmp costs
2439     // and reduce for particular predicates?
2440     if (MTy.isVector() &&
2441         !((ST->hasXOP() && (!ST->hasAVX2() || MTy.is128BitVector())) ||
2442           (ST->hasAVX512() && 32 <= MTy.getScalarSizeInBits()) ||
2443           ST->hasBWI())) {
2444       // Fallback to I if a specific predicate wasn't specified.
2445       CmpInst::Predicate Pred = VecPred;
2446       if (I && (Pred == CmpInst::BAD_ICMP_PREDICATE ||
2447                 Pred == CmpInst::BAD_FCMP_PREDICATE))
2448         Pred = cast<CmpInst>(I)->getPredicate();
2449 
2450       switch (Pred) {
2451       case CmpInst::Predicate::ICMP_NE:
2452         // xor(cmpeq(x,y),-1)
2453         ExtraCost = 1;
2454         break;
2455       case CmpInst::Predicate::ICMP_SGE:
2456       case CmpInst::Predicate::ICMP_SLE:
2457         // xor(cmpgt(x,y),-1)
2458         ExtraCost = 1;
2459         break;
2460       case CmpInst::Predicate::ICMP_ULT:
2461       case CmpInst::Predicate::ICMP_UGT:
2462         // cmpgt(xor(x,signbit),xor(y,signbit))
2463         // xor(cmpeq(pmaxu(x,y),x),-1)
2464         ExtraCost = 2;
2465         break;
2466       case CmpInst::Predicate::ICMP_ULE:
2467       case CmpInst::Predicate::ICMP_UGE:
2468         if ((ST->hasSSE41() && MTy.getScalarSizeInBits() == 32) ||
2469             (ST->hasSSE2() && MTy.getScalarSizeInBits() < 32)) {
2470           // cmpeq(psubus(x,y),0)
2471           // cmpeq(pminu(x,y),x)
2472           ExtraCost = 1;
2473         } else {
2474           // xor(cmpgt(xor(x,signbit),xor(y,signbit)),-1)
2475           ExtraCost = 3;
2476         }
2477         break;
2478       case CmpInst::Predicate::BAD_ICMP_PREDICATE:
2479       case CmpInst::Predicate::BAD_FCMP_PREDICATE:
2480         // Assume worst case scenario and add the maximum extra cost.
2481         ExtraCost = 3;
2482         break;
2483       default:
2484         break;
2485       }
2486     }
2487   }
2488 
2489   static const CostTblEntry SLMCostTbl[] = {
2490     // slm pcmpeq/pcmpgt throughput is 2
2491     { ISD::SETCC,   MVT::v2i64,   2 },
2492   };
2493 
2494   static const CostTblEntry AVX512BWCostTbl[] = {
2495     { ISD::SETCC,   MVT::v32i16,  1 },
2496     { ISD::SETCC,   MVT::v64i8,   1 },
2497 
2498     { ISD::SELECT,  MVT::v32i16,  1 },
2499     { ISD::SELECT,  MVT::v64i8,   1 },
2500   };
2501 
2502   static const CostTblEntry AVX512CostTbl[] = {
2503     { ISD::SETCC,   MVT::v8i64,   1 },
2504     { ISD::SETCC,   MVT::v16i32,  1 },
2505     { ISD::SETCC,   MVT::v8f64,   1 },
2506     { ISD::SETCC,   MVT::v16f32,  1 },
2507 
2508     { ISD::SELECT,  MVT::v8i64,   1 },
2509     { ISD::SELECT,  MVT::v16i32,  1 },
2510     { ISD::SELECT,  MVT::v8f64,   1 },
2511     { ISD::SELECT,  MVT::v16f32,  1 },
2512 
2513     { ISD::SETCC,   MVT::v32i16,  2 }, // FIXME: should probably be 4
2514     { ISD::SETCC,   MVT::v64i8,   2 }, // FIXME: should probably be 4
2515 
2516     { ISD::SELECT,  MVT::v32i16,  2 }, // FIXME: should be 3
2517     { ISD::SELECT,  MVT::v64i8,   2 }, // FIXME: should be 3
2518   };
2519 
2520   static const CostTblEntry AVX2CostTbl[] = {
2521     { ISD::SETCC,   MVT::v4i64,   1 },
2522     { ISD::SETCC,   MVT::v8i32,   1 },
2523     { ISD::SETCC,   MVT::v16i16,  1 },
2524     { ISD::SETCC,   MVT::v32i8,   1 },
2525 
2526     { ISD::SELECT,  MVT::v4i64,   1 }, // pblendvb
2527     { ISD::SELECT,  MVT::v8i32,   1 }, // pblendvb
2528     { ISD::SELECT,  MVT::v16i16,  1 }, // pblendvb
2529     { ISD::SELECT,  MVT::v32i8,   1 }, // pblendvb
2530   };
2531 
2532   static const CostTblEntry AVX1CostTbl[] = {
2533     { ISD::SETCC,   MVT::v4f64,   1 },
2534     { ISD::SETCC,   MVT::v8f32,   1 },
2535     // AVX1 does not support 8-wide integer compare.
2536     { ISD::SETCC,   MVT::v4i64,   4 },
2537     { ISD::SETCC,   MVT::v8i32,   4 },
2538     { ISD::SETCC,   MVT::v16i16,  4 },
2539     { ISD::SETCC,   MVT::v32i8,   4 },
2540 
2541     { ISD::SELECT,  MVT::v4f64,   1 }, // vblendvpd
2542     { ISD::SELECT,  MVT::v8f32,   1 }, // vblendvps
2543     { ISD::SELECT,  MVT::v4i64,   1 }, // vblendvpd
2544     { ISD::SELECT,  MVT::v8i32,   1 }, // vblendvps
2545     { ISD::SELECT,  MVT::v16i16,  3 }, // vandps + vandnps + vorps
2546     { ISD::SELECT,  MVT::v32i8,   3 }, // vandps + vandnps + vorps
2547   };
2548 
2549   static const CostTblEntry SSE42CostTbl[] = {
2550     { ISD::SETCC,   MVT::v2f64,   1 },
2551     { ISD::SETCC,   MVT::v4f32,   1 },
2552     { ISD::SETCC,   MVT::v2i64,   1 },
2553   };
2554 
2555   static const CostTblEntry SSE41CostTbl[] = {
2556     { ISD::SELECT,  MVT::v2f64,   1 }, // blendvpd
2557     { ISD::SELECT,  MVT::v4f32,   1 }, // blendvps
2558     { ISD::SELECT,  MVT::v2i64,   1 }, // pblendvb
2559     { ISD::SELECT,  MVT::v4i32,   1 }, // pblendvb
2560     { ISD::SELECT,  MVT::v8i16,   1 }, // pblendvb
2561     { ISD::SELECT,  MVT::v16i8,   1 }, // pblendvb
2562   };
2563 
2564   static const CostTblEntry SSE2CostTbl[] = {
2565     { ISD::SETCC,   MVT::v2f64,   2 },
2566     { ISD::SETCC,   MVT::f64,     1 },
2567     { ISD::SETCC,   MVT::v2i64,   8 },
2568     { ISD::SETCC,   MVT::v4i32,   1 },
2569     { ISD::SETCC,   MVT::v8i16,   1 },
2570     { ISD::SETCC,   MVT::v16i8,   1 },
2571 
2572     { ISD::SELECT,  MVT::v2f64,   3 }, // andpd + andnpd + orpd
2573     { ISD::SELECT,  MVT::v2i64,   3 }, // pand + pandn + por
2574     { ISD::SELECT,  MVT::v4i32,   3 }, // pand + pandn + por
2575     { ISD::SELECT,  MVT::v8i16,   3 }, // pand + pandn + por
2576     { ISD::SELECT,  MVT::v16i8,   3 }, // pand + pandn + por
2577   };
2578 
2579   static const CostTblEntry SSE1CostTbl[] = {
2580     { ISD::SETCC,   MVT::v4f32,   2 },
2581     { ISD::SETCC,   MVT::f32,     1 },
2582 
2583     { ISD::SELECT,  MVT::v4f32,   3 }, // andps + andnps + orps
2584   };
2585 
2586   if (ST->useSLMArithCosts())
2587     if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
2588       return LT.first * (ExtraCost + Entry->Cost);
2589 
2590   if (ST->hasBWI())
2591     if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
2592       return LT.first * (ExtraCost + Entry->Cost);
2593 
2594   if (ST->hasAVX512())
2595     if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
2596       return LT.first * (ExtraCost + Entry->Cost);
2597 
2598   if (ST->hasAVX2())
2599     if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
2600       return LT.first * (ExtraCost + Entry->Cost);
2601 
2602   if (ST->hasAVX())
2603     if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
2604       return LT.first * (ExtraCost + Entry->Cost);
2605 
2606   if (ST->hasSSE42())
2607     if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
2608       return LT.first * (ExtraCost + Entry->Cost);
2609 
2610   if (ST->hasSSE41())
2611     if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
2612       return LT.first * (ExtraCost + Entry->Cost);
2613 
2614   if (ST->hasSSE2())
2615     if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
2616       return LT.first * (ExtraCost + Entry->Cost);
2617 
2618   if (ST->hasSSE1())
2619     if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
2620       return LT.first * (ExtraCost + Entry->Cost);
2621 
2622   return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
2623 }
2624 
2625 unsigned X86TTIImpl::getAtomicMemIntrinsicMaxElementSize() const { return 16; }
2626 
2627 InstructionCost
2628 X86TTIImpl::getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
2629                                            TTI::TargetCostKind CostKind) {
2630 
2631   // Costs should match the codegen from:
2632   // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll
2633   // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll
2634   // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll
2635   // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll
2636   // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll
2637 
2638   // TODO: Overflow intrinsics (*ADDO, *SUBO, *MULO) with vector types are not
2639   //       specialized in these tables yet.
2640   static const CostTblEntry AVX512BITALGCostTbl[] = {
2641     { ISD::CTPOP,      MVT::v32i16,  1 },
2642     { ISD::CTPOP,      MVT::v64i8,   1 },
2643     { ISD::CTPOP,      MVT::v16i16,  1 },
2644     { ISD::CTPOP,      MVT::v32i8,   1 },
2645     { ISD::CTPOP,      MVT::v8i16,   1 },
2646     { ISD::CTPOP,      MVT::v16i8,   1 },
2647   };
2648   static const CostTblEntry AVX512VPOPCNTDQCostTbl[] = {
2649     { ISD::CTPOP,      MVT::v8i64,   1 },
2650     { ISD::CTPOP,      MVT::v16i32,  1 },
2651     { ISD::CTPOP,      MVT::v4i64,   1 },
2652     { ISD::CTPOP,      MVT::v8i32,   1 },
2653     { ISD::CTPOP,      MVT::v2i64,   1 },
2654     { ISD::CTPOP,      MVT::v4i32,   1 },
2655   };
2656   static const CostTblEntry AVX512CDCostTbl[] = {
2657     { ISD::CTLZ,       MVT::v8i64,   1 },
2658     { ISD::CTLZ,       MVT::v16i32,  1 },
2659     { ISD::CTLZ,       MVT::v32i16,  8 },
2660     { ISD::CTLZ,       MVT::v64i8,  20 },
2661     { ISD::CTLZ,       MVT::v4i64,   1 },
2662     { ISD::CTLZ,       MVT::v8i32,   1 },
2663     { ISD::CTLZ,       MVT::v16i16,  4 },
2664     { ISD::CTLZ,       MVT::v32i8,  10 },
2665     { ISD::CTLZ,       MVT::v2i64,   1 },
2666     { ISD::CTLZ,       MVT::v4i32,   1 },
2667     { ISD::CTLZ,       MVT::v8i16,   4 },
2668     { ISD::CTLZ,       MVT::v16i8,   4 },
2669   };
2670   static const CostTblEntry AVX512BWCostTbl[] = {
2671     { ISD::ABS,        MVT::v32i16,  1 },
2672     { ISD::ABS,        MVT::v64i8,   1 },
2673     { ISD::BITREVERSE, MVT::v8i64,   3 },
2674     { ISD::BITREVERSE, MVT::v16i32,  3 },
2675     { ISD::BITREVERSE, MVT::v32i16,  3 },
2676     { ISD::BITREVERSE, MVT::v64i8,   2 },
2677     { ISD::BSWAP,      MVT::v8i64,   1 },
2678     { ISD::BSWAP,      MVT::v16i32,  1 },
2679     { ISD::BSWAP,      MVT::v32i16,  1 },
2680     { ISD::CTLZ,       MVT::v8i64,  23 },
2681     { ISD::CTLZ,       MVT::v16i32, 22 },
2682     { ISD::CTLZ,       MVT::v32i16, 18 },
2683     { ISD::CTLZ,       MVT::v64i8,  17 },
2684     { ISD::CTPOP,      MVT::v8i64,   7 },
2685     { ISD::CTPOP,      MVT::v16i32, 11 },
2686     { ISD::CTPOP,      MVT::v32i16,  9 },
2687     { ISD::CTPOP,      MVT::v64i8,   6 },
2688     { ISD::CTTZ,       MVT::v8i64,  10 },
2689     { ISD::CTTZ,       MVT::v16i32, 14 },
2690     { ISD::CTTZ,       MVT::v32i16, 12 },
2691     { ISD::CTTZ,       MVT::v64i8,   9 },
2692     { ISD::SADDSAT,    MVT::v32i16,  1 },
2693     { ISD::SADDSAT,    MVT::v64i8,   1 },
2694     { ISD::SMAX,       MVT::v32i16,  1 },
2695     { ISD::SMAX,       MVT::v64i8,   1 },
2696     { ISD::SMIN,       MVT::v32i16,  1 },
2697     { ISD::SMIN,       MVT::v64i8,   1 },
2698     { ISD::SSUBSAT,    MVT::v32i16,  1 },
2699     { ISD::SSUBSAT,    MVT::v64i8,   1 },
2700     { ISD::UADDSAT,    MVT::v32i16,  1 },
2701     { ISD::UADDSAT,    MVT::v64i8,   1 },
2702     { ISD::UMAX,       MVT::v32i16,  1 },
2703     { ISD::UMAX,       MVT::v64i8,   1 },
2704     { ISD::UMIN,       MVT::v32i16,  1 },
2705     { ISD::UMIN,       MVT::v64i8,   1 },
2706     { ISD::USUBSAT,    MVT::v32i16,  1 },
2707     { ISD::USUBSAT,    MVT::v64i8,   1 },
2708   };
2709   static const CostTblEntry AVX512CostTbl[] = {
2710     { ISD::ABS,        MVT::v8i64,   1 },
2711     { ISD::ABS,        MVT::v16i32,  1 },
2712     { ISD::ABS,        MVT::v32i16,  2 },
2713     { ISD::ABS,        MVT::v64i8,   2 },
2714     { ISD::ABS,        MVT::v4i64,   1 },
2715     { ISD::ABS,        MVT::v2i64,   1 },
2716     { ISD::BITREVERSE, MVT::v8i64,  36 },
2717     { ISD::BITREVERSE, MVT::v16i32, 24 },
2718     { ISD::BITREVERSE, MVT::v32i16, 10 },
2719     { ISD::BITREVERSE, MVT::v64i8,  10 },
2720     { ISD::BSWAP,      MVT::v8i64,   4 },
2721     { ISD::BSWAP,      MVT::v16i32,  4 },
2722     { ISD::BSWAP,      MVT::v32i16,  4 },
2723     { ISD::CTLZ,       MVT::v8i64,  29 },
2724     { ISD::CTLZ,       MVT::v16i32, 35 },
2725     { ISD::CTLZ,       MVT::v32i16, 28 },
2726     { ISD::CTLZ,       MVT::v64i8,  18 },
2727     { ISD::CTPOP,      MVT::v8i64,  16 },
2728     { ISD::CTPOP,      MVT::v16i32, 24 },
2729     { ISD::CTPOP,      MVT::v32i16, 18 },
2730     { ISD::CTPOP,      MVT::v64i8,  12 },
2731     { ISD::CTTZ,       MVT::v8i64,  20 },
2732     { ISD::CTTZ,       MVT::v16i32, 28 },
2733     { ISD::CTTZ,       MVT::v32i16, 24 },
2734     { ISD::CTTZ,       MVT::v64i8,  18 },
2735     { ISD::SMAX,       MVT::v8i64,   1 },
2736     { ISD::SMAX,       MVT::v16i32,  1 },
2737     { ISD::SMAX,       MVT::v32i16,  2 },
2738     { ISD::SMAX,       MVT::v64i8,   2 },
2739     { ISD::SMAX,       MVT::v4i64,   1 },
2740     { ISD::SMAX,       MVT::v2i64,   1 },
2741     { ISD::SMIN,       MVT::v8i64,   1 },
2742     { ISD::SMIN,       MVT::v16i32,  1 },
2743     { ISD::SMIN,       MVT::v32i16,  2 },
2744     { ISD::SMIN,       MVT::v64i8,   2 },
2745     { ISD::SMIN,       MVT::v4i64,   1 },
2746     { ISD::SMIN,       MVT::v2i64,   1 },
2747     { ISD::UMAX,       MVT::v8i64,   1 },
2748     { ISD::UMAX,       MVT::v16i32,  1 },
2749     { ISD::UMAX,       MVT::v32i16,  2 },
2750     { ISD::UMAX,       MVT::v64i8,   2 },
2751     { ISD::UMAX,       MVT::v4i64,   1 },
2752     { ISD::UMAX,       MVT::v2i64,   1 },
2753     { ISD::UMIN,       MVT::v8i64,   1 },
2754     { ISD::UMIN,       MVT::v16i32,  1 },
2755     { ISD::UMIN,       MVT::v32i16,  2 },
2756     { ISD::UMIN,       MVT::v64i8,   2 },
2757     { ISD::UMIN,       MVT::v4i64,   1 },
2758     { ISD::UMIN,       MVT::v2i64,   1 },
2759     { ISD::USUBSAT,    MVT::v16i32,  2 }, // pmaxud + psubd
2760     { ISD::USUBSAT,    MVT::v2i64,   2 }, // pmaxuq + psubq
2761     { ISD::USUBSAT,    MVT::v4i64,   2 }, // pmaxuq + psubq
2762     { ISD::USUBSAT,    MVT::v8i64,   2 }, // pmaxuq + psubq
2763     { ISD::UADDSAT,    MVT::v16i32,  3 }, // not + pminud + paddd
2764     { ISD::UADDSAT,    MVT::v2i64,   3 }, // not + pminuq + paddq
2765     { ISD::UADDSAT,    MVT::v4i64,   3 }, // not + pminuq + paddq
2766     { ISD::UADDSAT,    MVT::v8i64,   3 }, // not + pminuq + paddq
2767     { ISD::SADDSAT,    MVT::v32i16,  2 },
2768     { ISD::SADDSAT,    MVT::v64i8,   2 },
2769     { ISD::SSUBSAT,    MVT::v32i16,  2 },
2770     { ISD::SSUBSAT,    MVT::v64i8,   2 },
2771     { ISD::UADDSAT,    MVT::v32i16,  2 },
2772     { ISD::UADDSAT,    MVT::v64i8,   2 },
2773     { ISD::USUBSAT,    MVT::v32i16,  2 },
2774     { ISD::USUBSAT,    MVT::v64i8,   2 },
2775     { ISD::FMAXNUM,    MVT::f32,     2 },
2776     { ISD::FMAXNUM,    MVT::v4f32,   2 },
2777     { ISD::FMAXNUM,    MVT::v8f32,   2 },
2778     { ISD::FMAXNUM,    MVT::v16f32,  2 },
2779     { ISD::FMAXNUM,    MVT::f64,     2 },
2780     { ISD::FMAXNUM,    MVT::v2f64,   2 },
2781     { ISD::FMAXNUM,    MVT::v4f64,   2 },
2782     { ISD::FMAXNUM,    MVT::v8f64,   2 },
2783   };
2784   static const CostTblEntry XOPCostTbl[] = {
2785     { ISD::BITREVERSE, MVT::v4i64,   4 },
2786     { ISD::BITREVERSE, MVT::v8i32,   4 },
2787     { ISD::BITREVERSE, MVT::v16i16,  4 },
2788     { ISD::BITREVERSE, MVT::v32i8,   4 },
2789     { ISD::BITREVERSE, MVT::v2i64,   1 },
2790     { ISD::BITREVERSE, MVT::v4i32,   1 },
2791     { ISD::BITREVERSE, MVT::v8i16,   1 },
2792     { ISD::BITREVERSE, MVT::v16i8,   1 },
2793     { ISD::BITREVERSE, MVT::i64,     3 },
2794     { ISD::BITREVERSE, MVT::i32,     3 },
2795     { ISD::BITREVERSE, MVT::i16,     3 },
2796     { ISD::BITREVERSE, MVT::i8,      3 }
2797   };
2798   static const CostTblEntry AVX2CostTbl[] = {
2799     { ISD::ABS,        MVT::v4i64,   2 }, // VBLENDVPD(X,VPSUBQ(0,X),X)
2800     { ISD::ABS,        MVT::v8i32,   1 },
2801     { ISD::ABS,        MVT::v16i16,  1 },
2802     { ISD::ABS,        MVT::v32i8,   1 },
2803     { ISD::BITREVERSE, MVT::v2i64,   3 },
2804     { ISD::BITREVERSE, MVT::v4i64,   3 },
2805     { ISD::BITREVERSE, MVT::v4i32,   3 },
2806     { ISD::BITREVERSE, MVT::v8i32,   3 },
2807     { ISD::BITREVERSE, MVT::v8i16,   3 },
2808     { ISD::BITREVERSE, MVT::v16i16,  3 },
2809     { ISD::BITREVERSE, MVT::v16i8,   3 },
2810     { ISD::BITREVERSE, MVT::v32i8,   3 },
2811     { ISD::BSWAP,      MVT::v4i64,   1 },
2812     { ISD::BSWAP,      MVT::v8i32,   1 },
2813     { ISD::BSWAP,      MVT::v16i16,  1 },
2814     { ISD::CTLZ,       MVT::v2i64,   7 },
2815     { ISD::CTLZ,       MVT::v4i64,   7 },
2816     { ISD::CTLZ,       MVT::v4i32,   5 },
2817     { ISD::CTLZ,       MVT::v8i32,   5 },
2818     { ISD::CTLZ,       MVT::v8i16,   4 },
2819     { ISD::CTLZ,       MVT::v16i16,  4 },
2820     { ISD::CTLZ,       MVT::v16i8,   3 },
2821     { ISD::CTLZ,       MVT::v32i8,   3 },
2822     { ISD::CTPOP,      MVT::v2i64,   3 },
2823     { ISD::CTPOP,      MVT::v4i64,   3 },
2824     { ISD::CTPOP,      MVT::v4i32,   7 },
2825     { ISD::CTPOP,      MVT::v8i32,   7 },
2826     { ISD::CTPOP,      MVT::v8i16,   3 },
2827     { ISD::CTPOP,      MVT::v16i16,  3 },
2828     { ISD::CTPOP,      MVT::v16i8,   2 },
2829     { ISD::CTPOP,      MVT::v32i8,   2 },
2830     { ISD::CTTZ,       MVT::v2i64,   4 },
2831     { ISD::CTTZ,       MVT::v4i64,   4 },
2832     { ISD::CTTZ,       MVT::v4i32,   7 },
2833     { ISD::CTTZ,       MVT::v8i32,   7 },
2834     { ISD::CTTZ,       MVT::v8i16,   4 },
2835     { ISD::CTTZ,       MVT::v16i16,  4 },
2836     { ISD::CTTZ,       MVT::v16i8,   3 },
2837     { ISD::CTTZ,       MVT::v32i8,   3 },
2838     { ISD::SADDSAT,    MVT::v16i16,  1 },
2839     { ISD::SADDSAT,    MVT::v32i8,   1 },
2840     { ISD::SMAX,       MVT::v8i32,   1 },
2841     { ISD::SMAX,       MVT::v16i16,  1 },
2842     { ISD::SMAX,       MVT::v32i8,   1 },
2843     { ISD::SMIN,       MVT::v8i32,   1 },
2844     { ISD::SMIN,       MVT::v16i16,  1 },
2845     { ISD::SMIN,       MVT::v32i8,   1 },
2846     { ISD::SSUBSAT,    MVT::v16i16,  1 },
2847     { ISD::SSUBSAT,    MVT::v32i8,   1 },
2848     { ISD::UADDSAT,    MVT::v16i16,  1 },
2849     { ISD::UADDSAT,    MVT::v32i8,   1 },
2850     { ISD::UADDSAT,    MVT::v8i32,   3 }, // not + pminud + paddd
2851     { ISD::UMAX,       MVT::v8i32,   1 },
2852     { ISD::UMAX,       MVT::v16i16,  1 },
2853     { ISD::UMAX,       MVT::v32i8,   1 },
2854     { ISD::UMIN,       MVT::v8i32,   1 },
2855     { ISD::UMIN,       MVT::v16i16,  1 },
2856     { ISD::UMIN,       MVT::v32i8,   1 },
2857     { ISD::USUBSAT,    MVT::v16i16,  1 },
2858     { ISD::USUBSAT,    MVT::v32i8,   1 },
2859     { ISD::USUBSAT,    MVT::v8i32,   2 }, // pmaxud + psubd
2860     { ISD::FMAXNUM,    MVT::v8f32,   3 }, // MAXPS + CMPUNORDPS + BLENDVPS
2861     { ISD::FMAXNUM,    MVT::v4f64,   3 }, // MAXPD + CMPUNORDPD + BLENDVPD
2862     { ISD::FSQRT,      MVT::f32,     7 }, // Haswell from http://www.agner.org/
2863     { ISD::FSQRT,      MVT::v4f32,   7 }, // Haswell from http://www.agner.org/
2864     { ISD::FSQRT,      MVT::v8f32,  14 }, // Haswell from http://www.agner.org/
2865     { ISD::FSQRT,      MVT::f64,    14 }, // Haswell from http://www.agner.org/
2866     { ISD::FSQRT,      MVT::v2f64,  14 }, // Haswell from http://www.agner.org/
2867     { ISD::FSQRT,      MVT::v4f64,  28 }, // Haswell from http://www.agner.org/
2868   };
2869   static const CostTblEntry AVX1CostTbl[] = {
2870     { ISD::ABS,        MVT::v4i64,   5 }, // VBLENDVPD(X,VPSUBQ(0,X),X)
2871     { ISD::ABS,        MVT::v8i32,   3 },
2872     { ISD::ABS,        MVT::v16i16,  3 },
2873     { ISD::ABS,        MVT::v32i8,   3 },
2874     { ISD::BITREVERSE, MVT::v4i64,  12 }, // 2 x 128-bit Op + extract/insert
2875     { ISD::BITREVERSE, MVT::v8i32,  12 }, // 2 x 128-bit Op + extract/insert
2876     { ISD::BITREVERSE, MVT::v16i16, 12 }, // 2 x 128-bit Op + extract/insert
2877     { ISD::BITREVERSE, MVT::v32i8,  12 }, // 2 x 128-bit Op + extract/insert
2878     { ISD::BSWAP,      MVT::v4i64,   4 },
2879     { ISD::BSWAP,      MVT::v8i32,   4 },
2880     { ISD::BSWAP,      MVT::v16i16,  4 },
2881     { ISD::CTLZ,       MVT::v4i64,  48 }, // 2 x 128-bit Op + extract/insert
2882     { ISD::CTLZ,       MVT::v8i32,  38 }, // 2 x 128-bit Op + extract/insert
2883     { ISD::CTLZ,       MVT::v16i16, 30 }, // 2 x 128-bit Op + extract/insert
2884     { ISD::CTLZ,       MVT::v32i8,  20 }, // 2 x 128-bit Op + extract/insert
2885     { ISD::CTPOP,      MVT::v4i64,  16 }, // 2 x 128-bit Op + extract/insert
2886     { ISD::CTPOP,      MVT::v8i32,  24 }, // 2 x 128-bit Op + extract/insert
2887     { ISD::CTPOP,      MVT::v16i16, 20 }, // 2 x 128-bit Op + extract/insert
2888     { ISD::CTPOP,      MVT::v32i8,  14 }, // 2 x 128-bit Op + extract/insert
2889     { ISD::CTTZ,       MVT::v4i64,  22 }, // 2 x 128-bit Op + extract/insert
2890     { ISD::CTTZ,       MVT::v8i32,  30 }, // 2 x 128-bit Op + extract/insert
2891     { ISD::CTTZ,       MVT::v16i16, 26 }, // 2 x 128-bit Op + extract/insert
2892     { ISD::CTTZ,       MVT::v32i8,  20 }, // 2 x 128-bit Op + extract/insert
2893     { ISD::SADDSAT,    MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
2894     { ISD::SADDSAT,    MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
2895     { ISD::SMAX,       MVT::v8i32,   4 }, // 2 x 128-bit Op + extract/insert
2896     { ISD::SMAX,       MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
2897     { ISD::SMAX,       MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
2898     { ISD::SMIN,       MVT::v8i32,   4 }, // 2 x 128-bit Op + extract/insert
2899     { ISD::SMIN,       MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
2900     { ISD::SMIN,       MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
2901     { ISD::SSUBSAT,    MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
2902     { ISD::SSUBSAT,    MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
2903     { ISD::UADDSAT,    MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
2904     { ISD::UADDSAT,    MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
2905     { ISD::UADDSAT,    MVT::v8i32,   8 }, // 2 x 128-bit Op + extract/insert
2906     { ISD::UMAX,       MVT::v8i32,   4 }, // 2 x 128-bit Op + extract/insert
2907     { ISD::UMAX,       MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
2908     { ISD::UMAX,       MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
2909     { ISD::UMIN,       MVT::v8i32,   4 }, // 2 x 128-bit Op + extract/insert
2910     { ISD::UMIN,       MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
2911     { ISD::UMIN,       MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
2912     { ISD::USUBSAT,    MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
2913     { ISD::USUBSAT,    MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
2914     { ISD::USUBSAT,    MVT::v8i32,   6 }, // 2 x 128-bit Op + extract/insert
2915     { ISD::FMAXNUM,    MVT::f32,     3 }, // MAXSS + CMPUNORDSS + BLENDVPS
2916     { ISD::FMAXNUM,    MVT::v4f32,   3 }, // MAXPS + CMPUNORDPS + BLENDVPS
2917     { ISD::FMAXNUM,    MVT::v8f32,   5 }, // MAXPS + CMPUNORDPS + BLENDVPS + ?
2918     { ISD::FMAXNUM,    MVT::f64,     3 }, // MAXSD + CMPUNORDSD + BLENDVPD
2919     { ISD::FMAXNUM,    MVT::v2f64,   3 }, // MAXPD + CMPUNORDPD + BLENDVPD
2920     { ISD::FMAXNUM,    MVT::v4f64,   5 }, // MAXPD + CMPUNORDPD + BLENDVPD + ?
2921     { ISD::FSQRT,      MVT::f32,    14 }, // SNB from http://www.agner.org/
2922     { ISD::FSQRT,      MVT::v4f32,  14 }, // SNB from http://www.agner.org/
2923     { ISD::FSQRT,      MVT::v8f32,  28 }, // SNB from http://www.agner.org/
2924     { ISD::FSQRT,      MVT::f64,    21 }, // SNB from http://www.agner.org/
2925     { ISD::FSQRT,      MVT::v2f64,  21 }, // SNB from http://www.agner.org/
2926     { ISD::FSQRT,      MVT::v4f64,  43 }, // SNB from http://www.agner.org/
2927   };
2928   static const CostTblEntry GLMCostTbl[] = {
2929     { ISD::FSQRT, MVT::f32,   19 }, // sqrtss
2930     { ISD::FSQRT, MVT::v4f32, 37 }, // sqrtps
2931     { ISD::FSQRT, MVT::f64,   34 }, // sqrtsd
2932     { ISD::FSQRT, MVT::v2f64, 67 }, // sqrtpd
2933   };
2934   static const CostTblEntry SLMCostTbl[] = {
2935     { ISD::FSQRT, MVT::f32,   20 }, // sqrtss
2936     { ISD::FSQRT, MVT::v4f32, 40 }, // sqrtps
2937     { ISD::FSQRT, MVT::f64,   35 }, // sqrtsd
2938     { ISD::FSQRT, MVT::v2f64, 70 }, // sqrtpd
2939   };
2940   static const CostTblEntry SSE42CostTbl[] = {
2941     { ISD::USUBSAT,    MVT::v4i32,   2 }, // pmaxud + psubd
2942     { ISD::UADDSAT,    MVT::v4i32,   3 }, // not + pminud + paddd
2943     { ISD::FSQRT,      MVT::f32,    18 }, // Nehalem from http://www.agner.org/
2944     { ISD::FSQRT,      MVT::v4f32,  18 }, // Nehalem from http://www.agner.org/
2945   };
2946   static const CostTblEntry SSE41CostTbl[] = {
2947     { ISD::ABS,        MVT::v2i64,   2 }, // BLENDVPD(X,PSUBQ(0,X),X)
2948     { ISD::SMAX,       MVT::v4i32,   1 },
2949     { ISD::SMAX,       MVT::v16i8,   1 },
2950     { ISD::SMIN,       MVT::v4i32,   1 },
2951     { ISD::SMIN,       MVT::v16i8,   1 },
2952     { ISD::UMAX,       MVT::v4i32,   1 },
2953     { ISD::UMAX,       MVT::v8i16,   1 },
2954     { ISD::UMIN,       MVT::v4i32,   1 },
2955     { ISD::UMIN,       MVT::v8i16,   1 },
2956   };
2957   static const CostTblEntry SSSE3CostTbl[] = {
2958     { ISD::ABS,        MVT::v4i32,   1 },
2959     { ISD::ABS,        MVT::v8i16,   1 },
2960     { ISD::ABS,        MVT::v16i8,   1 },
2961     { ISD::BITREVERSE, MVT::v2i64,   5 },
2962     { ISD::BITREVERSE, MVT::v4i32,   5 },
2963     { ISD::BITREVERSE, MVT::v8i16,   5 },
2964     { ISD::BITREVERSE, MVT::v16i8,   5 },
2965     { ISD::BSWAP,      MVT::v2i64,   1 },
2966     { ISD::BSWAP,      MVT::v4i32,   1 },
2967     { ISD::BSWAP,      MVT::v8i16,   1 },
2968     { ISD::CTLZ,       MVT::v2i64,  23 },
2969     { ISD::CTLZ,       MVT::v4i32,  18 },
2970     { ISD::CTLZ,       MVT::v8i16,  14 },
2971     { ISD::CTLZ,       MVT::v16i8,   9 },
2972     { ISD::CTPOP,      MVT::v2i64,   7 },
2973     { ISD::CTPOP,      MVT::v4i32,  11 },
2974     { ISD::CTPOP,      MVT::v8i16,   9 },
2975     { ISD::CTPOP,      MVT::v16i8,   6 },
2976     { ISD::CTTZ,       MVT::v2i64,  10 },
2977     { ISD::CTTZ,       MVT::v4i32,  14 },
2978     { ISD::CTTZ,       MVT::v8i16,  12 },
2979     { ISD::CTTZ,       MVT::v16i8,   9 }
2980   };
2981   static const CostTblEntry SSE2CostTbl[] = {
2982     { ISD::ABS,        MVT::v2i64,   4 },
2983     { ISD::ABS,        MVT::v4i32,   3 },
2984     { ISD::ABS,        MVT::v8i16,   2 },
2985     { ISD::ABS,        MVT::v16i8,   2 },
2986     { ISD::BITREVERSE, MVT::v2i64,  29 },
2987     { ISD::BITREVERSE, MVT::v4i32,  27 },
2988     { ISD::BITREVERSE, MVT::v8i16,  27 },
2989     { ISD::BITREVERSE, MVT::v16i8,  20 },
2990     { ISD::BSWAP,      MVT::v2i64,   7 },
2991     { ISD::BSWAP,      MVT::v4i32,   7 },
2992     { ISD::BSWAP,      MVT::v8i16,   7 },
2993     { ISD::CTLZ,       MVT::v2i64,  25 },
2994     { ISD::CTLZ,       MVT::v4i32,  26 },
2995     { ISD::CTLZ,       MVT::v8i16,  20 },
2996     { ISD::CTLZ,       MVT::v16i8,  17 },
2997     { ISD::CTPOP,      MVT::v2i64,  12 },
2998     { ISD::CTPOP,      MVT::v4i32,  15 },
2999     { ISD::CTPOP,      MVT::v8i16,  13 },
3000     { ISD::CTPOP,      MVT::v16i8,  10 },
3001     { ISD::CTTZ,       MVT::v2i64,  14 },
3002     { ISD::CTTZ,       MVT::v4i32,  18 },
3003     { ISD::CTTZ,       MVT::v8i16,  16 },
3004     { ISD::CTTZ,       MVT::v16i8,  13 },
3005     { ISD::SADDSAT,    MVT::v8i16,   1 },
3006     { ISD::SADDSAT,    MVT::v16i8,   1 },
3007     { ISD::SMAX,       MVT::v8i16,   1 },
3008     { ISD::SMIN,       MVT::v8i16,   1 },
3009     { ISD::SSUBSAT,    MVT::v8i16,   1 },
3010     { ISD::SSUBSAT,    MVT::v16i8,   1 },
3011     { ISD::UADDSAT,    MVT::v8i16,   1 },
3012     { ISD::UADDSAT,    MVT::v16i8,   1 },
3013     { ISD::UMAX,       MVT::v8i16,   2 },
3014     { ISD::UMAX,       MVT::v16i8,   1 },
3015     { ISD::UMIN,       MVT::v8i16,   2 },
3016     { ISD::UMIN,       MVT::v16i8,   1 },
3017     { ISD::USUBSAT,    MVT::v8i16,   1 },
3018     { ISD::USUBSAT,    MVT::v16i8,   1 },
3019     { ISD::FMAXNUM,    MVT::f64,     4 },
3020     { ISD::FMAXNUM,    MVT::v2f64,   4 },
3021     { ISD::FSQRT,      MVT::f64,    32 }, // Nehalem from http://www.agner.org/
3022     { ISD::FSQRT,      MVT::v2f64,  32 }, // Nehalem from http://www.agner.org/
3023   };
3024   static const CostTblEntry SSE1CostTbl[] = {
3025     { ISD::FMAXNUM,    MVT::f32,     4 },
3026     { ISD::FMAXNUM,    MVT::v4f32,   4 },
3027     { ISD::FSQRT,      MVT::f32,    28 }, // Pentium III from http://www.agner.org/
3028     { ISD::FSQRT,      MVT::v4f32,  56 }, // Pentium III from http://www.agner.org/
3029   };
3030   static const CostTblEntry BMI64CostTbl[] = { // 64-bit targets
3031     { ISD::CTTZ,       MVT::i64,     1 },
3032   };
3033   static const CostTblEntry BMI32CostTbl[] = { // 32 or 64-bit targets
3034     { ISD::CTTZ,       MVT::i32,     1 },
3035     { ISD::CTTZ,       MVT::i16,     1 },
3036     { ISD::CTTZ,       MVT::i8,      1 },
3037   };
3038   static const CostTblEntry LZCNT64CostTbl[] = { // 64-bit targets
3039     { ISD::CTLZ,       MVT::i64,     1 },
3040   };
3041   static const CostTblEntry LZCNT32CostTbl[] = { // 32 or 64-bit targets
3042     { ISD::CTLZ,       MVT::i32,     1 },
3043     { ISD::CTLZ,       MVT::i16,     1 },
3044     { ISD::CTLZ,       MVT::i8,      1 },
3045   };
3046   static const CostTblEntry POPCNT64CostTbl[] = { // 64-bit targets
3047     { ISD::CTPOP,      MVT::i64,     1 },
3048   };
3049   static const CostTblEntry POPCNT32CostTbl[] = { // 32 or 64-bit targets
3050     { ISD::CTPOP,      MVT::i32,     1 },
3051     { ISD::CTPOP,      MVT::i16,     1 },
3052     { ISD::CTPOP,      MVT::i8,      1 },
3053   };
3054   static const CostTblEntry X64CostTbl[] = { // 64-bit targets
3055     { ISD::ABS,        MVT::i64,     2 }, // SUB+CMOV
3056     { ISD::BITREVERSE, MVT::i64,    14 },
3057     { ISD::BSWAP,      MVT::i64,     1 },
3058     { ISD::CTLZ,       MVT::i64,     4 }, // BSR+XOR or BSR+XOR+CMOV
3059     { ISD::CTTZ,       MVT::i64,     3 }, // TEST+BSF+CMOV/BRANCH
3060     { ISD::CTPOP,      MVT::i64,    10 },
3061     { ISD::SADDO,      MVT::i64,     1 },
3062     { ISD::UADDO,      MVT::i64,     1 },
3063     { ISD::UMULO,      MVT::i64,     2 }, // mulq + seto
3064   };
3065   static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
3066     { ISD::ABS,        MVT::i32,     2 }, // SUB+CMOV
3067     { ISD::ABS,        MVT::i16,     2 }, // SUB+CMOV
3068     { ISD::BITREVERSE, MVT::i32,    14 },
3069     { ISD::BITREVERSE, MVT::i16,    14 },
3070     { ISD::BITREVERSE, MVT::i8,     11 },
3071     { ISD::BSWAP,      MVT::i32,     1 },
3072     { ISD::BSWAP,      MVT::i16,     1 }, // ROL
3073     { ISD::CTLZ,       MVT::i32,     4 }, // BSR+XOR or BSR+XOR+CMOV
3074     { ISD::CTLZ,       MVT::i16,     4 }, // BSR+XOR or BSR+XOR+CMOV
3075     { ISD::CTLZ,       MVT::i8,      4 }, // BSR+XOR or BSR+XOR+CMOV
3076     { ISD::CTTZ,       MVT::i32,     3 }, // TEST+BSF+CMOV/BRANCH
3077     { ISD::CTTZ,       MVT::i16,     3 }, // TEST+BSF+CMOV/BRANCH
3078     { ISD::CTTZ,       MVT::i8,      3 }, // TEST+BSF+CMOV/BRANCH
3079     { ISD::CTPOP,      MVT::i32,     8 },
3080     { ISD::CTPOP,      MVT::i16,     9 },
3081     { ISD::CTPOP,      MVT::i8,      7 },
3082     { ISD::SADDO,      MVT::i32,     1 },
3083     { ISD::SADDO,      MVT::i16,     1 },
3084     { ISD::SADDO,      MVT::i8,      1 },
3085     { ISD::UADDO,      MVT::i32,     1 },
3086     { ISD::UADDO,      MVT::i16,     1 },
3087     { ISD::UADDO,      MVT::i8,      1 },
3088     { ISD::UMULO,      MVT::i32,     2 }, // mul + seto
3089     { ISD::UMULO,      MVT::i16,     2 },
3090     { ISD::UMULO,      MVT::i8,      2 },
3091   };
3092 
3093   Type *RetTy = ICA.getReturnType();
3094   Type *OpTy = RetTy;
3095   Intrinsic::ID IID = ICA.getID();
3096   unsigned ISD = ISD::DELETED_NODE;
3097   switch (IID) {
3098   default:
3099     break;
3100   case Intrinsic::abs:
3101     ISD = ISD::ABS;
3102     break;
3103   case Intrinsic::bitreverse:
3104     ISD = ISD::BITREVERSE;
3105     break;
3106   case Intrinsic::bswap:
3107     ISD = ISD::BSWAP;
3108     break;
3109   case Intrinsic::ctlz:
3110     ISD = ISD::CTLZ;
3111     break;
3112   case Intrinsic::ctpop:
3113     ISD = ISD::CTPOP;
3114     break;
3115   case Intrinsic::cttz:
3116     ISD = ISD::CTTZ;
3117     break;
3118   case Intrinsic::maxnum:
3119   case Intrinsic::minnum:
3120     // FMINNUM has same costs so don't duplicate.
3121     ISD = ISD::FMAXNUM;
3122     break;
3123   case Intrinsic::sadd_sat:
3124     ISD = ISD::SADDSAT;
3125     break;
3126   case Intrinsic::smax:
3127     ISD = ISD::SMAX;
3128     break;
3129   case Intrinsic::smin:
3130     ISD = ISD::SMIN;
3131     break;
3132   case Intrinsic::ssub_sat:
3133     ISD = ISD::SSUBSAT;
3134     break;
3135   case Intrinsic::uadd_sat:
3136     ISD = ISD::UADDSAT;
3137     break;
3138   case Intrinsic::umax:
3139     ISD = ISD::UMAX;
3140     break;
3141   case Intrinsic::umin:
3142     ISD = ISD::UMIN;
3143     break;
3144   case Intrinsic::usub_sat:
3145     ISD = ISD::USUBSAT;
3146     break;
3147   case Intrinsic::sqrt:
3148     ISD = ISD::FSQRT;
3149     break;
3150   case Intrinsic::sadd_with_overflow:
3151   case Intrinsic::ssub_with_overflow:
3152     // SSUBO has same costs so don't duplicate.
3153     ISD = ISD::SADDO;
3154     OpTy = RetTy->getContainedType(0);
3155     break;
3156   case Intrinsic::uadd_with_overflow:
3157   case Intrinsic::usub_with_overflow:
3158     // USUBO has same costs so don't duplicate.
3159     ISD = ISD::UADDO;
3160     OpTy = RetTy->getContainedType(0);
3161     break;
3162   case Intrinsic::umul_with_overflow:
3163   case Intrinsic::smul_with_overflow:
3164     // SMULO has same costs so don't duplicate.
3165     ISD = ISD::UMULO;
3166     OpTy = RetTy->getContainedType(0);
3167     break;
3168   }
3169 
3170   if (ISD != ISD::DELETED_NODE) {
3171     // Legalize the type.
3172     std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, OpTy);
3173     MVT MTy = LT.second;
3174 
3175     // Attempt to lookup cost.
3176     if (ISD == ISD::BITREVERSE && ST->hasGFNI() && ST->hasSSSE3() &&
3177         MTy.isVector()) {
3178       // With PSHUFB the code is very similar for all types. If we have integer
3179       // byte operations, we just need a GF2P8AFFINEQB for vXi8. For other types
3180       // we also need a PSHUFB.
3181       unsigned Cost = MTy.getVectorElementType() == MVT::i8 ? 1 : 2;
3182 
3183       // Without byte operations, we need twice as many GF2P8AFFINEQB and PSHUFB
3184       // instructions. We also need an extract and an insert.
3185       if (!(MTy.is128BitVector() || (ST->hasAVX2() && MTy.is256BitVector()) ||
3186             (ST->hasBWI() && MTy.is512BitVector())))
3187         Cost = Cost * 2 + 2;
3188 
3189       return LT.first * Cost;
3190     }
3191 
3192     auto adjustTableCost = [](const CostTblEntry &Entry,
3193                               InstructionCost LegalizationCost,
3194                               FastMathFlags FMF) {
3195       // If there are no NANs to deal with, then these are reduced to a
3196       // single MIN** or MAX** instruction instead of the MIN/CMP/SELECT that we
3197       // assume is used in the non-fast case.
3198       if (Entry.ISD == ISD::FMAXNUM || Entry.ISD == ISD::FMINNUM) {
3199         if (FMF.noNaNs())
3200           return LegalizationCost * 1;
3201       }
3202       return LegalizationCost * (int)Entry.Cost;
3203     };
3204 
3205     if (ST->useGLMDivSqrtCosts())
3206       if (const auto *Entry = CostTableLookup(GLMCostTbl, ISD, MTy))
3207         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3208 
3209     if (ST->useSLMArithCosts())
3210       if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
3211         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3212 
3213     if (ST->hasBITALG())
3214       if (const auto *Entry = CostTableLookup(AVX512BITALGCostTbl, ISD, MTy))
3215         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3216 
3217     if (ST->hasVPOPCNTDQ())
3218       if (const auto *Entry = CostTableLookup(AVX512VPOPCNTDQCostTbl, ISD, MTy))
3219         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3220 
3221     if (ST->hasCDI())
3222       if (const auto *Entry = CostTableLookup(AVX512CDCostTbl, ISD, MTy))
3223         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3224 
3225     if (ST->hasBWI())
3226       if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
3227         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3228 
3229     if (ST->hasAVX512())
3230       if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
3231         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3232 
3233     if (ST->hasXOP())
3234       if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
3235         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3236 
3237     if (ST->hasAVX2())
3238       if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
3239         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3240 
3241     if (ST->hasAVX())
3242       if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
3243         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3244 
3245     if (ST->hasSSE42())
3246       if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
3247         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3248 
3249     if (ST->hasSSE41())
3250       if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
3251         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3252 
3253     if (ST->hasSSSE3())
3254       if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy))
3255         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3256 
3257     if (ST->hasSSE2())
3258       if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
3259         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3260 
3261     if (ST->hasSSE1())
3262       if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
3263         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3264 
3265     if (ST->hasBMI()) {
3266       if (ST->is64Bit())
3267         if (const auto *Entry = CostTableLookup(BMI64CostTbl, ISD, MTy))
3268           return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3269 
3270       if (const auto *Entry = CostTableLookup(BMI32CostTbl, ISD, MTy))
3271         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3272     }
3273 
3274     if (ST->hasLZCNT()) {
3275       if (ST->is64Bit())
3276         if (const auto *Entry = CostTableLookup(LZCNT64CostTbl, ISD, MTy))
3277           return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3278 
3279       if (const auto *Entry = CostTableLookup(LZCNT32CostTbl, ISD, MTy))
3280         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3281     }
3282 
3283     if (ST->hasPOPCNT()) {
3284       if (ST->is64Bit())
3285         if (const auto *Entry = CostTableLookup(POPCNT64CostTbl, ISD, MTy))
3286           return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3287 
3288       if (const auto *Entry = CostTableLookup(POPCNT32CostTbl, ISD, MTy))
3289         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3290     }
3291 
3292     if (ISD == ISD::BSWAP && ST->hasMOVBE() && ST->hasFastMOVBE()) {
3293       if (const Instruction *II = ICA.getInst()) {
3294         if (II->hasOneUse() && isa<StoreInst>(II->user_back()))
3295           return TTI::TCC_Free;
3296         if (auto *LI = dyn_cast<LoadInst>(II->getOperand(0))) {
3297           if (LI->hasOneUse())
3298             return TTI::TCC_Free;
3299         }
3300       }
3301     }
3302 
3303     if (ST->is64Bit())
3304       if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
3305         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3306 
3307     if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
3308       return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3309   }
3310 
3311   return BaseT::getIntrinsicInstrCost(ICA, CostKind);
3312 }
3313 
3314 InstructionCost
3315 X86TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
3316                                   TTI::TargetCostKind CostKind) {
3317   if (ICA.isTypeBasedOnly())
3318     return getTypeBasedIntrinsicInstrCost(ICA, CostKind);
3319 
3320   static const CostTblEntry AVX512CostTbl[] = {
3321     { ISD::ROTL,       MVT::v8i64,   1 },
3322     { ISD::ROTL,       MVT::v4i64,   1 },
3323     { ISD::ROTL,       MVT::v2i64,   1 },
3324     { ISD::ROTL,       MVT::v16i32,  1 },
3325     { ISD::ROTL,       MVT::v8i32,   1 },
3326     { ISD::ROTL,       MVT::v4i32,   1 },
3327     { ISD::ROTR,       MVT::v8i64,   1 },
3328     { ISD::ROTR,       MVT::v4i64,   1 },
3329     { ISD::ROTR,       MVT::v2i64,   1 },
3330     { ISD::ROTR,       MVT::v16i32,  1 },
3331     { ISD::ROTR,       MVT::v8i32,   1 },
3332     { ISD::ROTR,       MVT::v4i32,   1 }
3333   };
3334   // XOP: ROTL = VPROT(X,Y), ROTR = VPROT(X,SUB(0,Y))
3335   static const CostTblEntry XOPCostTbl[] = {
3336     { ISD::ROTL,       MVT::v4i64,   4 },
3337     { ISD::ROTL,       MVT::v8i32,   4 },
3338     { ISD::ROTL,       MVT::v16i16,  4 },
3339     { ISD::ROTL,       MVT::v32i8,   4 },
3340     { ISD::ROTL,       MVT::v2i64,   1 },
3341     { ISD::ROTL,       MVT::v4i32,   1 },
3342     { ISD::ROTL,       MVT::v8i16,   1 },
3343     { ISD::ROTL,       MVT::v16i8,   1 },
3344     { ISD::ROTR,       MVT::v4i64,   6 },
3345     { ISD::ROTR,       MVT::v8i32,   6 },
3346     { ISD::ROTR,       MVT::v16i16,  6 },
3347     { ISD::ROTR,       MVT::v32i8,   6 },
3348     { ISD::ROTR,       MVT::v2i64,   2 },
3349     { ISD::ROTR,       MVT::v4i32,   2 },
3350     { ISD::ROTR,       MVT::v8i16,   2 },
3351     { ISD::ROTR,       MVT::v16i8,   2 }
3352   };
3353   static const CostTblEntry X64CostTbl[] = { // 64-bit targets
3354     { ISD::ROTL,       MVT::i64,     1 },
3355     { ISD::ROTR,       MVT::i64,     1 },
3356     { ISD::FSHL,       MVT::i64,     4 }
3357   };
3358   static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
3359     { ISD::ROTL,       MVT::i32,     1 },
3360     { ISD::ROTL,       MVT::i16,     1 },
3361     { ISD::ROTL,       MVT::i8,      1 },
3362     { ISD::ROTR,       MVT::i32,     1 },
3363     { ISD::ROTR,       MVT::i16,     1 },
3364     { ISD::ROTR,       MVT::i8,      1 },
3365     { ISD::FSHL,       MVT::i32,     4 },
3366     { ISD::FSHL,       MVT::i16,     4 },
3367     { ISD::FSHL,       MVT::i8,      4 }
3368   };
3369 
3370   Intrinsic::ID IID = ICA.getID();
3371   Type *RetTy = ICA.getReturnType();
3372   const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
3373   unsigned ISD = ISD::DELETED_NODE;
3374   switch (IID) {
3375   default:
3376     break;
3377   case Intrinsic::fshl:
3378     ISD = ISD::FSHL;
3379     if (Args[0] == Args[1])
3380       ISD = ISD::ROTL;
3381     break;
3382   case Intrinsic::fshr:
3383     // FSHR has same costs so don't duplicate.
3384     ISD = ISD::FSHL;
3385     if (Args[0] == Args[1])
3386       ISD = ISD::ROTR;
3387     break;
3388   }
3389 
3390   if (ISD != ISD::DELETED_NODE) {
3391     // Legalize the type.
3392     std::pair<InstructionCost, MVT> LT =
3393         TLI->getTypeLegalizationCost(DL, RetTy);
3394     MVT MTy = LT.second;
3395 
3396     // Attempt to lookup cost.
3397     if (ST->hasAVX512())
3398       if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
3399         return LT.first * Entry->Cost;
3400 
3401     if (ST->hasXOP())
3402       if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
3403         return LT.first * Entry->Cost;
3404 
3405     if (ST->is64Bit())
3406       if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
3407         return LT.first * Entry->Cost;
3408 
3409     if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
3410       return LT.first * Entry->Cost;
3411   }
3412 
3413   return BaseT::getIntrinsicInstrCost(ICA, CostKind);
3414 }
3415 
3416 InstructionCost X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
3417                                                unsigned Index) {
3418   static const CostTblEntry SLMCostTbl[] = {
3419      { ISD::EXTRACT_VECTOR_ELT,       MVT::i8,      4 },
3420      { ISD::EXTRACT_VECTOR_ELT,       MVT::i16,     4 },
3421      { ISD::EXTRACT_VECTOR_ELT,       MVT::i32,     4 },
3422      { ISD::EXTRACT_VECTOR_ELT,       MVT::i64,     7 }
3423    };
3424 
3425   assert(Val->isVectorTy() && "This must be a vector type");
3426   Type *ScalarType = Val->getScalarType();
3427   int RegisterFileMoveCost = 0;
3428 
3429   // Non-immediate extraction/insertion can be handled as a sequence of
3430   // aliased loads+stores via the stack.
3431   if (Index == -1U && (Opcode == Instruction::ExtractElement ||
3432                        Opcode == Instruction::InsertElement)) {
3433     // TODO: On some SSE41+ targets, we expand to cmp+splat+select patterns:
3434     // inselt N0, N1, N2 --> select (SplatN2 == {0,1,2...}) ? SplatN1 : N0.
3435 
3436     // TODO: Move this to BasicTTIImpl.h? We'd need better gep + index handling.
3437     assert(isa<FixedVectorType>(Val) && "Fixed vector type expected");
3438     Align VecAlign = DL.getPrefTypeAlign(Val);
3439     Align SclAlign = DL.getPrefTypeAlign(ScalarType);
3440 
3441     // Extract - store vector to stack, load scalar.
3442     if (Opcode == Instruction::ExtractElement) {
3443       return getMemoryOpCost(Instruction::Store, Val, VecAlign, 0,
3444                              TTI::TargetCostKind::TCK_RecipThroughput) +
3445              getMemoryOpCost(Instruction::Load, ScalarType, SclAlign, 0,
3446                              TTI::TargetCostKind::TCK_RecipThroughput);
3447     }
3448     // Insert - store vector to stack, store scalar, load vector.
3449     if (Opcode == Instruction::InsertElement) {
3450       return getMemoryOpCost(Instruction::Store, Val, VecAlign, 0,
3451                              TTI::TargetCostKind::TCK_RecipThroughput) +
3452              getMemoryOpCost(Instruction::Store, ScalarType, SclAlign, 0,
3453                              TTI::TargetCostKind::TCK_RecipThroughput) +
3454              getMemoryOpCost(Instruction::Load, Val, VecAlign, 0,
3455                              TTI::TargetCostKind::TCK_RecipThroughput);
3456     }
3457   }
3458 
3459   if (Index != -1U && (Opcode == Instruction::ExtractElement ||
3460                        Opcode == Instruction::InsertElement)) {
3461     // Legalize the type.
3462     std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);
3463 
3464     // This type is legalized to a scalar type.
3465     if (!LT.second.isVector())
3466       return 0;
3467 
3468     // The type may be split. Normalize the index to the new type.
3469     unsigned NumElts = LT.second.getVectorNumElements();
3470     unsigned SubNumElts = NumElts;
3471     Index = Index % NumElts;
3472 
3473     // For >128-bit vectors, we need to extract higher 128-bit subvectors.
3474     // For inserts, we also need to insert the subvector back.
3475     if (LT.second.getSizeInBits() > 128) {
3476       assert((LT.second.getSizeInBits() % 128) == 0 && "Illegal vector");
3477       unsigned NumSubVecs = LT.second.getSizeInBits() / 128;
3478       SubNumElts = NumElts / NumSubVecs;
3479       if (SubNumElts <= Index) {
3480         RegisterFileMoveCost += (Opcode == Instruction::InsertElement ? 2 : 1);
3481         Index %= SubNumElts;
3482       }
3483     }
3484 
3485     if (Index == 0) {
3486       // Floating point scalars are already located in index #0.
3487       // Many insertions to #0 can fold away for scalar fp-ops, so let's assume
3488       // true for all.
3489       if (ScalarType->isFloatingPointTy())
3490         return RegisterFileMoveCost;
3491 
3492       // Assume movd/movq XMM -> GPR is relatively cheap on all targets.
3493       if (ScalarType->isIntegerTy() && Opcode == Instruction::ExtractElement)
3494         return 1 + RegisterFileMoveCost;
3495     }
3496 
3497     int ISD = TLI->InstructionOpcodeToISD(Opcode);
3498     assert(ISD && "Unexpected vector opcode");
3499     MVT MScalarTy = LT.second.getScalarType();
3500     if (ST->useSLMArithCosts())
3501       if (auto *Entry = CostTableLookup(SLMCostTbl, ISD, MScalarTy))
3502         return Entry->Cost + RegisterFileMoveCost;
3503 
3504     // Assume pinsr/pextr XMM <-> GPR is relatively cheap on all targets.
3505     if ((MScalarTy == MVT::i16 && ST->hasSSE2()) ||
3506         (MScalarTy.isInteger() && ST->hasSSE41()))
3507       return 1 + RegisterFileMoveCost;
3508 
3509     // Assume insertps is relatively cheap on all targets.
3510     if (MScalarTy == MVT::f32 && ST->hasSSE41() &&
3511         Opcode == Instruction::InsertElement)
3512       return 1 + RegisterFileMoveCost;
3513 
3514     // For extractions we just need to shuffle the element to index 0, which
3515     // should be very cheap (assume cost = 1). For insertions we need to shuffle
3516     // the elements to its destination. In both cases we must handle the
3517     // subvector move(s).
3518     // If the vector type is already less than 128-bits then don't reduce it.
3519     // TODO: Under what circumstances should we shuffle using the full width?
3520     InstructionCost ShuffleCost = 1;
3521     if (Opcode == Instruction::InsertElement) {
3522       auto *SubTy = cast<VectorType>(Val);
3523       EVT VT = TLI->getValueType(DL, Val);
3524       if (VT.getScalarType() != MScalarTy || VT.getSizeInBits() >= 128)
3525         SubTy = FixedVectorType::get(ScalarType, SubNumElts);
3526       ShuffleCost =
3527           getShuffleCost(TTI::SK_PermuteTwoSrc, SubTy, None, 0, SubTy);
3528     }
3529     int IntOrFpCost = ScalarType->isFloatingPointTy() ? 0 : 1;
3530     return ShuffleCost + IntOrFpCost + RegisterFileMoveCost;
3531   }
3532 
3533   // Add to the base cost if we know that the extracted element of a vector is
3534   // destined to be moved to and used in the integer register file.
3535   if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy())
3536     RegisterFileMoveCost += 1;
3537 
3538   return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost;
3539 }
3540 
3541 InstructionCost X86TTIImpl::getScalarizationOverhead(VectorType *Ty,
3542                                                      const APInt &DemandedElts,
3543                                                      bool Insert,
3544                                                      bool Extract) {
3545   InstructionCost Cost = 0;
3546 
3547   // For insertions, a ISD::BUILD_VECTOR style vector initialization can be much
3548   // cheaper than an accumulation of ISD::INSERT_VECTOR_ELT.
3549   if (Insert) {
3550     std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
3551     MVT MScalarTy = LT.second.getScalarType();
3552 
3553     if ((MScalarTy == MVT::i16 && ST->hasSSE2()) ||
3554         (MScalarTy.isInteger() && ST->hasSSE41()) ||
3555         (MScalarTy == MVT::f32 && ST->hasSSE41())) {
3556       // For types we can insert directly, insertion into 128-bit sub vectors is
3557       // cheap, followed by a cheap chain of concatenations.
3558       if (LT.second.getSizeInBits() <= 128) {
3559         Cost +=
3560             BaseT::getScalarizationOverhead(Ty, DemandedElts, Insert, false);
3561       } else {
3562         // In each 128-lane, if at least one index is demanded but not all
3563         // indices are demanded and this 128-lane is not the first 128-lane of
3564         // the legalized-vector, then this 128-lane needs a extracti128; If in
3565         // each 128-lane, there is at least one demanded index, this 128-lane
3566         // needs a inserti128.
3567 
3568         // The following cases will help you build a better understanding:
3569         // Assume we insert several elements into a v8i32 vector in avx2,
3570         // Case#1: inserting into 1th index needs vpinsrd + inserti128.
3571         // Case#2: inserting into 5th index needs extracti128 + vpinsrd +
3572         // inserti128.
3573         // Case#3: inserting into 4,5,6,7 index needs 4*vpinsrd + inserti128.
3574         const int CostValue = *LT.first.getValue();
3575         assert(CostValue >= 0 && "Negative cost!");
3576         unsigned Num128Lanes = LT.second.getSizeInBits() / 128 * CostValue;
3577         unsigned NumElts = LT.second.getVectorNumElements() * CostValue;
3578         APInt WidenedDemandedElts = DemandedElts.zextOrSelf(NumElts);
3579         unsigned Scale = NumElts / Num128Lanes;
3580         // We iterate each 128-lane, and check if we need a
3581         // extracti128/inserti128 for this 128-lane.
3582         for (unsigned I = 0; I < NumElts; I += Scale) {
3583           APInt Mask = WidenedDemandedElts.getBitsSet(NumElts, I, I + Scale);
3584           APInt MaskedDE = Mask & WidenedDemandedElts;
3585           unsigned Population = MaskedDE.countPopulation();
3586           Cost += (Population > 0 && Population != Scale &&
3587                    I % LT.second.getVectorNumElements() != 0);
3588           Cost += Population > 0;
3589         }
3590         Cost += DemandedElts.countPopulation();
3591 
3592         // For vXf32 cases, insertion into the 0'th index in each v4f32
3593         // 128-bit vector is free.
3594         // NOTE: This assumes legalization widens vXf32 vectors.
3595         if (MScalarTy == MVT::f32)
3596           for (unsigned i = 0, e = cast<FixedVectorType>(Ty)->getNumElements();
3597                i < e; i += 4)
3598             if (DemandedElts[i])
3599               Cost--;
3600       }
3601     } else if (LT.second.isVector()) {
3602       // Without fast insertion, we need to use MOVD/MOVQ to pass each demanded
3603       // integer element as a SCALAR_TO_VECTOR, then we build the vector as a
3604       // series of UNPCK followed by CONCAT_VECTORS - all of these can be
3605       // considered cheap.
3606       if (Ty->isIntOrIntVectorTy())
3607         Cost += DemandedElts.countPopulation();
3608 
3609       // Get the smaller of the legalized or original pow2-extended number of
3610       // vector elements, which represents the number of unpacks we'll end up
3611       // performing.
3612       unsigned NumElts = LT.second.getVectorNumElements();
3613       unsigned Pow2Elts =
3614           PowerOf2Ceil(cast<FixedVectorType>(Ty)->getNumElements());
3615       Cost += (std::min<unsigned>(NumElts, Pow2Elts) - 1) * LT.first;
3616     }
3617   }
3618 
3619   // TODO: Use default extraction for now, but we should investigate extending this
3620   // to handle repeated subvector extraction.
3621   if (Extract)
3622     Cost += BaseT::getScalarizationOverhead(Ty, DemandedElts, false, Extract);
3623 
3624   return Cost;
3625 }
3626 
3627 InstructionCost
3628 X86TTIImpl::getReplicationShuffleCost(Type *EltTy, int ReplicationFactor,
3629                                       int VF, const APInt &DemandedDstElts,
3630                                       TTI::TargetCostKind CostKind) {
3631   const unsigned EltTyBits = DL.getTypeSizeInBits(EltTy);
3632   // We don't differentiate element types here, only element bit width.
3633   EltTy = IntegerType::getIntNTy(EltTy->getContext(), EltTyBits);
3634 
3635   auto bailout = [&]() {
3636     return BaseT::getReplicationShuffleCost(EltTy, ReplicationFactor, VF,
3637                                             DemandedDstElts, CostKind);
3638   };
3639 
3640   // For now, only deal with AVX512 cases.
3641   if (!ST->hasAVX512())
3642     return bailout();
3643 
3644   // Do we have a native shuffle for this element type, or should we promote?
3645   unsigned PromEltTyBits = EltTyBits;
3646   switch (EltTyBits) {
3647   case 32:
3648   case 64:
3649     break; // AVX512F.
3650   case 16:
3651     if (!ST->hasBWI())
3652       PromEltTyBits = 32; // promote to i32, AVX512F.
3653     break;                // AVX512BW
3654   case 8:
3655     if (!ST->hasVBMI())
3656       return bailout();
3657     break;
3658   default:
3659     return bailout();
3660   }
3661   auto *PromEltTy = IntegerType::getIntNTy(EltTy->getContext(), PromEltTyBits);
3662 
3663   auto *SrcVecTy = FixedVectorType::get(EltTy, VF);
3664   auto *PromSrcVecTy = FixedVectorType::get(PromEltTy, VF);
3665 
3666   int NumDstElements = VF * ReplicationFactor;
3667   auto *PromDstVecTy = FixedVectorType::get(PromEltTy, NumDstElements);
3668   auto *DstVecTy = FixedVectorType::get(EltTy, NumDstElements);
3669 
3670   // Legalize the types.
3671   MVT LegalSrcVecTy = TLI->getTypeLegalizationCost(DL, SrcVecTy).second;
3672   MVT LegalPromSrcVecTy = TLI->getTypeLegalizationCost(DL, PromSrcVecTy).second;
3673   MVT LegalPromDstVecTy = TLI->getTypeLegalizationCost(DL, PromDstVecTy).second;
3674   MVT LegalDstVecTy = TLI->getTypeLegalizationCost(DL, DstVecTy).second;
3675   // They should have legalized into vector types.
3676   if (!LegalSrcVecTy.isVector() || !LegalPromSrcVecTy.isVector() ||
3677       !LegalPromDstVecTy.isVector() || !LegalDstVecTy.isVector())
3678     return bailout();
3679 
3680   if (PromEltTyBits != EltTyBits) {
3681     // If we have to perform the shuffle with wider elt type than our data type,
3682     // then we will first need to anyext (we don't care about the new bits)
3683     // the source elements, and then truncate Dst elements.
3684     InstructionCost PromotionCost;
3685     PromotionCost += getCastInstrCost(
3686         Instruction::SExt, /*Dst=*/PromSrcVecTy, /*Src=*/SrcVecTy,
3687         TargetTransformInfo::CastContextHint::None, CostKind);
3688     PromotionCost +=
3689         getCastInstrCost(Instruction::Trunc, /*Dst=*/DstVecTy,
3690                          /*Src=*/PromDstVecTy,
3691                          TargetTransformInfo::CastContextHint::None, CostKind);
3692     return PromotionCost + getReplicationShuffleCost(PromEltTy,
3693                                                      ReplicationFactor, VF,
3694                                                      DemandedDstElts, CostKind);
3695   }
3696 
3697   assert(LegalSrcVecTy.getScalarSizeInBits() == EltTyBits &&
3698          LegalSrcVecTy.getScalarType() == LegalDstVecTy.getScalarType() &&
3699          "We expect that the legalization doesn't affect the element width, "
3700          "doesn't coalesce/split elements.");
3701 
3702   unsigned NumEltsPerDstVec = LegalDstVecTy.getVectorNumElements();
3703   unsigned NumDstVectors =
3704       divideCeil(DstVecTy->getNumElements(), NumEltsPerDstVec);
3705 
3706   auto *SingleDstVecTy = FixedVectorType::get(EltTy, NumEltsPerDstVec);
3707 
3708   // Not all the produced Dst elements may be demanded. In our case,
3709   // given that a single Dst vector is formed by a single shuffle,
3710   // if all elements that will form a single Dst vector aren't demanded,
3711   // then we won't need to do that shuffle, so adjust the cost accordingly.
3712   APInt DemandedDstVectors = APIntOps::ScaleBitMask(
3713       DemandedDstElts.zextOrSelf(NumDstVectors * NumEltsPerDstVec),
3714       NumDstVectors);
3715   unsigned NumDstVectorsDemanded = DemandedDstVectors.countPopulation();
3716 
3717   InstructionCost SingleShuffleCost =
3718       getShuffleCost(TTI::SK_PermuteSingleSrc, SingleDstVecTy,
3719                      /*Mask=*/None, /*Index=*/0, /*SubTp=*/nullptr);
3720   return NumDstVectorsDemanded * SingleShuffleCost;
3721 }
3722 
3723 InstructionCost X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
3724                                             MaybeAlign Alignment,
3725                                             unsigned AddressSpace,
3726                                             TTI::TargetCostKind CostKind,
3727                                             const Instruction *I) {
3728   // TODO: Handle other cost kinds.
3729   if (CostKind != TTI::TCK_RecipThroughput) {
3730     if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
3731       // Store instruction with index and scale costs 2 Uops.
3732       // Check the preceding GEP to identify non-const indices.
3733       if (auto *GEP = dyn_cast<GetElementPtrInst>(SI->getPointerOperand())) {
3734         if (!all_of(GEP->indices(), [](Value *V) { return isa<Constant>(V); }))
3735           return TTI::TCC_Basic * 2;
3736       }
3737     }
3738     return TTI::TCC_Basic;
3739   }
3740 
3741   assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
3742          "Invalid Opcode");
3743   // Type legalization can't handle structs
3744   if (TLI->getValueType(DL, Src, true) == MVT::Other)
3745     return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
3746                                   CostKind);
3747 
3748   // Legalize the type.
3749   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
3750 
3751   auto *VTy = dyn_cast<FixedVectorType>(Src);
3752 
3753   // Handle the simple case of non-vectors.
3754   // NOTE: this assumes that legalization never creates vector from scalars!
3755   if (!VTy || !LT.second.isVector())
3756     // Each load/store unit costs 1.
3757     return LT.first * 1;
3758 
3759   bool IsLoad = Opcode == Instruction::Load;
3760 
3761   Type *EltTy = VTy->getElementType();
3762 
3763   const int EltTyBits = DL.getTypeSizeInBits(EltTy);
3764 
3765   InstructionCost Cost = 0;
3766 
3767   // Source of truth: how many elements were there in the original IR vector?
3768   const unsigned SrcNumElt = VTy->getNumElements();
3769 
3770   // How far have we gotten?
3771   int NumEltRemaining = SrcNumElt;
3772   // Note that we intentionally capture by-reference, NumEltRemaining changes.
3773   auto NumEltDone = [&]() { return SrcNumElt - NumEltRemaining; };
3774 
3775   const int MaxLegalOpSizeBytes = divideCeil(LT.second.getSizeInBits(), 8);
3776 
3777   // Note that even if we can store 64 bits of an XMM, we still operate on XMM.
3778   const unsigned XMMBits = 128;
3779   if (XMMBits % EltTyBits != 0)
3780     // Vector size must be a multiple of the element size. I.e. no padding.
3781     return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
3782                                   CostKind);
3783   const int NumEltPerXMM = XMMBits / EltTyBits;
3784 
3785   auto *XMMVecTy = FixedVectorType::get(EltTy, NumEltPerXMM);
3786 
3787   for (int CurrOpSizeBytes = MaxLegalOpSizeBytes, SubVecEltsLeft = 0;
3788        NumEltRemaining > 0; CurrOpSizeBytes /= 2) {
3789     // How many elements would a single op deal with at once?
3790     if ((8 * CurrOpSizeBytes) % EltTyBits != 0)
3791       // Vector size must be a multiple of the element size. I.e. no padding.
3792       return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
3793                                     CostKind);
3794     int CurrNumEltPerOp = (8 * CurrOpSizeBytes) / EltTyBits;
3795 
3796     assert(CurrOpSizeBytes > 0 && CurrNumEltPerOp > 0 && "How'd we get here?");
3797     assert((((NumEltRemaining * EltTyBits) < (2 * 8 * CurrOpSizeBytes)) ||
3798             (CurrOpSizeBytes == MaxLegalOpSizeBytes)) &&
3799            "Unless we haven't halved the op size yet, "
3800            "we have less than two op's sized units of work left.");
3801 
3802     auto *CurrVecTy = CurrNumEltPerOp > NumEltPerXMM
3803                           ? FixedVectorType::get(EltTy, CurrNumEltPerOp)
3804                           : XMMVecTy;
3805 
3806     assert(CurrVecTy->getNumElements() % CurrNumEltPerOp == 0 &&
3807            "After halving sizes, the vector elt count is no longer a multiple "
3808            "of number of elements per operation?");
3809     auto *CoalescedVecTy =
3810         CurrNumEltPerOp == 1
3811             ? CurrVecTy
3812             : FixedVectorType::get(
3813                   IntegerType::get(Src->getContext(),
3814                                    EltTyBits * CurrNumEltPerOp),
3815                   CurrVecTy->getNumElements() / CurrNumEltPerOp);
3816     assert(DL.getTypeSizeInBits(CoalescedVecTy) ==
3817                DL.getTypeSizeInBits(CurrVecTy) &&
3818            "coalesciing elements doesn't change vector width.");
3819 
3820     while (NumEltRemaining > 0) {
3821       assert(SubVecEltsLeft >= 0 && "Subreg element count overconsumtion?");
3822 
3823       // Can we use this vector size, as per the remaining element count?
3824       // Iff the vector is naturally aligned, we can do a wide load regardless.
3825       if (NumEltRemaining < CurrNumEltPerOp &&
3826           (!IsLoad || Alignment.valueOrOne() < CurrOpSizeBytes) &&
3827           CurrOpSizeBytes != 1)
3828         break; // Try smalled vector size.
3829 
3830       bool Is0thSubVec = (NumEltDone() % LT.second.getVectorNumElements()) == 0;
3831 
3832       // If we have fully processed the previous reg, we need to replenish it.
3833       if (SubVecEltsLeft == 0) {
3834         SubVecEltsLeft += CurrVecTy->getNumElements();
3835         // And that's free only for the 0'th subvector of a legalized vector.
3836         if (!Is0thSubVec)
3837           Cost += getShuffleCost(IsLoad ? TTI::ShuffleKind::SK_InsertSubvector
3838                                         : TTI::ShuffleKind::SK_ExtractSubvector,
3839                                  VTy, None, NumEltDone(), CurrVecTy);
3840       }
3841 
3842       // While we can directly load/store ZMM, YMM, and 64-bit halves of XMM,
3843       // for smaller widths (32/16/8) we have to insert/extract them separately.
3844       // Again, it's free for the 0'th subreg (if op is 32/64 bit wide,
3845       // but let's pretend that it is also true for 16/8 bit wide ops...)
3846       if (CurrOpSizeBytes <= 32 / 8 && !Is0thSubVec) {
3847         int NumEltDoneInCurrXMM = NumEltDone() % NumEltPerXMM;
3848         assert(NumEltDoneInCurrXMM % CurrNumEltPerOp == 0 && "");
3849         int CoalescedVecEltIdx = NumEltDoneInCurrXMM / CurrNumEltPerOp;
3850         APInt DemandedElts =
3851             APInt::getBitsSet(CoalescedVecTy->getNumElements(),
3852                               CoalescedVecEltIdx, CoalescedVecEltIdx + 1);
3853         assert(DemandedElts.countPopulation() == 1 && "Inserting single value");
3854         Cost += getScalarizationOverhead(CoalescedVecTy, DemandedElts, IsLoad,
3855                                          !IsLoad);
3856       }
3857 
3858       // This isn't exactly right. We're using slow unaligned 32-byte accesses
3859       // as a proxy for a double-pumped AVX memory interface such as on
3860       // Sandybridge.
3861       if (CurrOpSizeBytes == 32 && ST->isUnalignedMem32Slow())
3862         Cost += 2;
3863       else
3864         Cost += 1;
3865 
3866       SubVecEltsLeft -= CurrNumEltPerOp;
3867       NumEltRemaining -= CurrNumEltPerOp;
3868       Alignment = commonAlignment(Alignment.valueOrOne(), CurrOpSizeBytes);
3869     }
3870   }
3871 
3872   assert(NumEltRemaining <= 0 && "Should have processed all the elements.");
3873 
3874   return Cost;
3875 }
3876 
3877 InstructionCost
3878 X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy, Align Alignment,
3879                                   unsigned AddressSpace,
3880                                   TTI::TargetCostKind CostKind) {
3881   bool IsLoad = (Instruction::Load == Opcode);
3882   bool IsStore = (Instruction::Store == Opcode);
3883 
3884   auto *SrcVTy = dyn_cast<FixedVectorType>(SrcTy);
3885   if (!SrcVTy)
3886     // To calculate scalar take the regular cost, without mask
3887     return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace, CostKind);
3888 
3889   unsigned NumElem = SrcVTy->getNumElements();
3890   auto *MaskTy =
3891       FixedVectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem);
3892   if ((IsLoad && !isLegalMaskedLoad(SrcVTy, Alignment)) ||
3893       (IsStore && !isLegalMaskedStore(SrcVTy, Alignment))) {
3894     // Scalarization
3895     APInt DemandedElts = APInt::getAllOnes(NumElem);
3896     InstructionCost MaskSplitCost =
3897         getScalarizationOverhead(MaskTy, DemandedElts, false, true);
3898     InstructionCost ScalarCompareCost = getCmpSelInstrCost(
3899         Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr,
3900         CmpInst::BAD_ICMP_PREDICATE, CostKind);
3901     InstructionCost BranchCost = getCFInstrCost(Instruction::Br, CostKind);
3902     InstructionCost MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);
3903     InstructionCost ValueSplitCost =
3904         getScalarizationOverhead(SrcVTy, DemandedElts, IsLoad, IsStore);
3905     InstructionCost MemopCost =
3906         NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
3907                                          Alignment, AddressSpace, CostKind);
3908     return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
3909   }
3910 
3911   // Legalize the type.
3912   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
3913   auto VT = TLI->getValueType(DL, SrcVTy);
3914   InstructionCost Cost = 0;
3915   if (VT.isSimple() && LT.second != VT.getSimpleVT() &&
3916       LT.second.getVectorNumElements() == NumElem)
3917     // Promotion requires extend/truncate for data and a shuffle for mask.
3918     Cost += getShuffleCost(TTI::SK_PermuteTwoSrc, SrcVTy, None, 0, nullptr) +
3919             getShuffleCost(TTI::SK_PermuteTwoSrc, MaskTy, None, 0, nullptr);
3920 
3921   else if (LT.first * LT.second.getVectorNumElements() > NumElem) {
3922     auto *NewMaskTy = FixedVectorType::get(MaskTy->getElementType(),
3923                                            LT.second.getVectorNumElements());
3924     // Expanding requires fill mask with zeroes
3925     Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, None, 0, MaskTy);
3926   }
3927 
3928   // Pre-AVX512 - each maskmov load costs 2 + store costs ~8.
3929   if (!ST->hasAVX512())
3930     return Cost + LT.first * (IsLoad ? 2 : 8);
3931 
3932   // AVX-512 masked load/store is cheapper
3933   return Cost + LT.first;
3934 }
3935 
3936 InstructionCost X86TTIImpl::getAddressComputationCost(Type *Ty,
3937                                                       ScalarEvolution *SE,
3938                                                       const SCEV *Ptr) {
3939   // Address computations in vectorized code with non-consecutive addresses will
3940   // likely result in more instructions compared to scalar code where the
3941   // computation can more often be merged into the index mode. The resulting
3942   // extra micro-ops can significantly decrease throughput.
3943   const unsigned NumVectorInstToHideOverhead = 10;
3944 
3945   // Cost modeling of Strided Access Computation is hidden by the indexing
3946   // modes of X86 regardless of the stride value. We dont believe that there
3947   // is a difference between constant strided access in gerenal and constant
3948   // strided value which is less than or equal to 64.
3949   // Even in the case of (loop invariant) stride whose value is not known at
3950   // compile time, the address computation will not incur more than one extra
3951   // ADD instruction.
3952   if (Ty->isVectorTy() && SE) {
3953     if (!BaseT::isStridedAccess(Ptr))
3954       return NumVectorInstToHideOverhead;
3955     if (!BaseT::getConstantStrideStep(SE, Ptr))
3956       return 1;
3957   }
3958 
3959   return BaseT::getAddressComputationCost(Ty, SE, Ptr);
3960 }
3961 
3962 InstructionCost
3963 X86TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
3964                                        Optional<FastMathFlags> FMF,
3965                                        TTI::TargetCostKind CostKind) {
3966   if (TTI::requiresOrderedReduction(FMF))
3967     return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
3968 
3969   // We use the Intel Architecture Code Analyzer(IACA) to measure the throughput
3970   // and make it as the cost.
3971 
3972   static const CostTblEntry SLMCostTblNoPairWise[] = {
3973     { ISD::FADD,  MVT::v2f64,   3 },
3974     { ISD::ADD,   MVT::v2i64,   5 },
3975   };
3976 
3977   static const CostTblEntry SSE2CostTblNoPairWise[] = {
3978     { ISD::FADD,  MVT::v2f64,   2 },
3979     { ISD::FADD,  MVT::v2f32,   2 },
3980     { ISD::FADD,  MVT::v4f32,   4 },
3981     { ISD::ADD,   MVT::v2i64,   2 },      // The data reported by the IACA tool is "1.6".
3982     { ISD::ADD,   MVT::v2i32,   2 }, // FIXME: chosen to be less than v4i32
3983     { ISD::ADD,   MVT::v4i32,   3 },      // The data reported by the IACA tool is "3.3".
3984     { ISD::ADD,   MVT::v2i16,   2 },      // The data reported by the IACA tool is "4.3".
3985     { ISD::ADD,   MVT::v4i16,   3 },      // The data reported by the IACA tool is "4.3".
3986     { ISD::ADD,   MVT::v8i16,   4 },      // The data reported by the IACA tool is "4.3".
3987     { ISD::ADD,   MVT::v2i8,    2 },
3988     { ISD::ADD,   MVT::v4i8,    2 },
3989     { ISD::ADD,   MVT::v8i8,    2 },
3990     { ISD::ADD,   MVT::v16i8,   3 },
3991   };
3992 
3993   static const CostTblEntry AVX1CostTblNoPairWise[] = {
3994     { ISD::FADD,  MVT::v4f64,   3 },
3995     { ISD::FADD,  MVT::v4f32,   3 },
3996     { ISD::FADD,  MVT::v8f32,   4 },
3997     { ISD::ADD,   MVT::v2i64,   1 },      // The data reported by the IACA tool is "1.5".
3998     { ISD::ADD,   MVT::v4i64,   3 },
3999     { ISD::ADD,   MVT::v8i32,   5 },
4000     { ISD::ADD,   MVT::v16i16,  5 },
4001     { ISD::ADD,   MVT::v32i8,   4 },
4002   };
4003 
4004   int ISD = TLI->InstructionOpcodeToISD(Opcode);
4005   assert(ISD && "Invalid opcode");
4006 
4007   // Before legalizing the type, give a chance to look up illegal narrow types
4008   // in the table.
4009   // FIXME: Is there a better way to do this?
4010   EVT VT = TLI->getValueType(DL, ValTy);
4011   if (VT.isSimple()) {
4012     MVT MTy = VT.getSimpleVT();
4013     if (ST->useSLMArithCosts())
4014       if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy))
4015         return Entry->Cost;
4016 
4017     if (ST->hasAVX())
4018       if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
4019         return Entry->Cost;
4020 
4021     if (ST->hasSSE2())
4022       if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
4023         return Entry->Cost;
4024   }
4025 
4026   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
4027 
4028   MVT MTy = LT.second;
4029 
4030   auto *ValVTy = cast<FixedVectorType>(ValTy);
4031 
4032   // Special case: vXi8 mul reductions are performed as vXi16.
4033   if (ISD == ISD::MUL && MTy.getScalarType() == MVT::i8) {
4034     auto *WideSclTy = IntegerType::get(ValVTy->getContext(), 16);
4035     auto *WideVecTy = FixedVectorType::get(WideSclTy, ValVTy->getNumElements());
4036     return getCastInstrCost(Instruction::ZExt, WideVecTy, ValTy,
4037                             TargetTransformInfo::CastContextHint::None,
4038                             CostKind) +
4039            getArithmeticReductionCost(Opcode, WideVecTy, FMF, CostKind);
4040   }
4041 
4042   InstructionCost ArithmeticCost = 0;
4043   if (LT.first != 1 && MTy.isVector() &&
4044       MTy.getVectorNumElements() < ValVTy->getNumElements()) {
4045     // Type needs to be split. We need LT.first - 1 arithmetic ops.
4046     auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(),
4047                                             MTy.getVectorNumElements());
4048     ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind);
4049     ArithmeticCost *= LT.first - 1;
4050   }
4051 
4052   if (ST->useSLMArithCosts())
4053     if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy))
4054       return ArithmeticCost + Entry->Cost;
4055 
4056   if (ST->hasAVX())
4057     if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
4058       return ArithmeticCost + Entry->Cost;
4059 
4060   if (ST->hasSSE2())
4061     if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
4062       return ArithmeticCost + Entry->Cost;
4063 
4064   // FIXME: These assume a naive kshift+binop lowering, which is probably
4065   // conservative in most cases.
4066   static const CostTblEntry AVX512BoolReduction[] = {
4067     { ISD::AND,  MVT::v2i1,   3 },
4068     { ISD::AND,  MVT::v4i1,   5 },
4069     { ISD::AND,  MVT::v8i1,   7 },
4070     { ISD::AND,  MVT::v16i1,  9 },
4071     { ISD::AND,  MVT::v32i1, 11 },
4072     { ISD::AND,  MVT::v64i1, 13 },
4073     { ISD::OR,   MVT::v2i1,   3 },
4074     { ISD::OR,   MVT::v4i1,   5 },
4075     { ISD::OR,   MVT::v8i1,   7 },
4076     { ISD::OR,   MVT::v16i1,  9 },
4077     { ISD::OR,   MVT::v32i1, 11 },
4078     { ISD::OR,   MVT::v64i1, 13 },
4079   };
4080 
4081   static const CostTblEntry AVX2BoolReduction[] = {
4082     { ISD::AND,  MVT::v16i16,  2 }, // vpmovmskb + cmp
4083     { ISD::AND,  MVT::v32i8,   2 }, // vpmovmskb + cmp
4084     { ISD::OR,   MVT::v16i16,  2 }, // vpmovmskb + cmp
4085     { ISD::OR,   MVT::v32i8,   2 }, // vpmovmskb + cmp
4086   };
4087 
4088   static const CostTblEntry AVX1BoolReduction[] = {
4089     { ISD::AND,  MVT::v4i64,   2 }, // vmovmskpd + cmp
4090     { ISD::AND,  MVT::v8i32,   2 }, // vmovmskps + cmp
4091     { ISD::AND,  MVT::v16i16,  4 }, // vextractf128 + vpand + vpmovmskb + cmp
4092     { ISD::AND,  MVT::v32i8,   4 }, // vextractf128 + vpand + vpmovmskb + cmp
4093     { ISD::OR,   MVT::v4i64,   2 }, // vmovmskpd + cmp
4094     { ISD::OR,   MVT::v8i32,   2 }, // vmovmskps + cmp
4095     { ISD::OR,   MVT::v16i16,  4 }, // vextractf128 + vpor + vpmovmskb + cmp
4096     { ISD::OR,   MVT::v32i8,   4 }, // vextractf128 + vpor + vpmovmskb + cmp
4097   };
4098 
4099   static const CostTblEntry SSE2BoolReduction[] = {
4100     { ISD::AND,  MVT::v2i64,   2 }, // movmskpd + cmp
4101     { ISD::AND,  MVT::v4i32,   2 }, // movmskps + cmp
4102     { ISD::AND,  MVT::v8i16,   2 }, // pmovmskb + cmp
4103     { ISD::AND,  MVT::v16i8,   2 }, // pmovmskb + cmp
4104     { ISD::OR,   MVT::v2i64,   2 }, // movmskpd + cmp
4105     { ISD::OR,   MVT::v4i32,   2 }, // movmskps + cmp
4106     { ISD::OR,   MVT::v8i16,   2 }, // pmovmskb + cmp
4107     { ISD::OR,   MVT::v16i8,   2 }, // pmovmskb + cmp
4108   };
4109 
4110   // Handle bool allof/anyof patterns.
4111   if (ValVTy->getElementType()->isIntegerTy(1)) {
4112     InstructionCost ArithmeticCost = 0;
4113     if (LT.first != 1 && MTy.isVector() &&
4114         MTy.getVectorNumElements() < ValVTy->getNumElements()) {
4115       // Type needs to be split. We need LT.first - 1 arithmetic ops.
4116       auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(),
4117                                               MTy.getVectorNumElements());
4118       ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind);
4119       ArithmeticCost *= LT.first - 1;
4120     }
4121 
4122     if (ST->hasAVX512())
4123       if (const auto *Entry = CostTableLookup(AVX512BoolReduction, ISD, MTy))
4124         return ArithmeticCost + Entry->Cost;
4125     if (ST->hasAVX2())
4126       if (const auto *Entry = CostTableLookup(AVX2BoolReduction, ISD, MTy))
4127         return ArithmeticCost + Entry->Cost;
4128     if (ST->hasAVX())
4129       if (const auto *Entry = CostTableLookup(AVX1BoolReduction, ISD, MTy))
4130         return ArithmeticCost + Entry->Cost;
4131     if (ST->hasSSE2())
4132       if (const auto *Entry = CostTableLookup(SSE2BoolReduction, ISD, MTy))
4133         return ArithmeticCost + Entry->Cost;
4134 
4135     return BaseT::getArithmeticReductionCost(Opcode, ValVTy, FMF, CostKind);
4136   }
4137 
4138   unsigned NumVecElts = ValVTy->getNumElements();
4139   unsigned ScalarSize = ValVTy->getScalarSizeInBits();
4140 
4141   // Special case power of 2 reductions where the scalar type isn't changed
4142   // by type legalization.
4143   if (!isPowerOf2_32(NumVecElts) || ScalarSize != MTy.getScalarSizeInBits())
4144     return BaseT::getArithmeticReductionCost(Opcode, ValVTy, FMF, CostKind);
4145 
4146   InstructionCost ReductionCost = 0;
4147 
4148   auto *Ty = ValVTy;
4149   if (LT.first != 1 && MTy.isVector() &&
4150       MTy.getVectorNumElements() < ValVTy->getNumElements()) {
4151     // Type needs to be split. We need LT.first - 1 arithmetic ops.
4152     Ty = FixedVectorType::get(ValVTy->getElementType(),
4153                               MTy.getVectorNumElements());
4154     ReductionCost = getArithmeticInstrCost(Opcode, Ty, CostKind);
4155     ReductionCost *= LT.first - 1;
4156     NumVecElts = MTy.getVectorNumElements();
4157   }
4158 
4159   // Now handle reduction with the legal type, taking into account size changes
4160   // at each level.
4161   while (NumVecElts > 1) {
4162     // Determine the size of the remaining vector we need to reduce.
4163     unsigned Size = NumVecElts * ScalarSize;
4164     NumVecElts /= 2;
4165     // If we're reducing from 256/512 bits, use an extract_subvector.
4166     if (Size > 128) {
4167       auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts);
4168       ReductionCost +=
4169           getShuffleCost(TTI::SK_ExtractSubvector, Ty, None, NumVecElts, SubTy);
4170       Ty = SubTy;
4171     } else if (Size == 128) {
4172       // Reducing from 128 bits is a permute of v2f64/v2i64.
4173       FixedVectorType *ShufTy;
4174       if (ValVTy->isFloatingPointTy())
4175         ShufTy =
4176             FixedVectorType::get(Type::getDoubleTy(ValVTy->getContext()), 2);
4177       else
4178         ShufTy =
4179             FixedVectorType::get(Type::getInt64Ty(ValVTy->getContext()), 2);
4180       ReductionCost +=
4181           getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
4182     } else if (Size == 64) {
4183       // Reducing from 64 bits is a shuffle of v4f32/v4i32.
4184       FixedVectorType *ShufTy;
4185       if (ValVTy->isFloatingPointTy())
4186         ShufTy =
4187             FixedVectorType::get(Type::getFloatTy(ValVTy->getContext()), 4);
4188       else
4189         ShufTy =
4190             FixedVectorType::get(Type::getInt32Ty(ValVTy->getContext()), 4);
4191       ReductionCost +=
4192           getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
4193     } else {
4194       // Reducing from smaller size is a shift by immediate.
4195       auto *ShiftTy = FixedVectorType::get(
4196           Type::getIntNTy(ValVTy->getContext(), Size), 128 / Size);
4197       ReductionCost += getArithmeticInstrCost(
4198           Instruction::LShr, ShiftTy, CostKind,
4199           TargetTransformInfo::OK_AnyValue,
4200           TargetTransformInfo::OK_UniformConstantValue,
4201           TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
4202     }
4203 
4204     // Add the arithmetic op for this level.
4205     ReductionCost += getArithmeticInstrCost(Opcode, Ty, CostKind);
4206   }
4207 
4208   // Add the final extract element to the cost.
4209   return ReductionCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
4210 }
4211 
4212 InstructionCost X86TTIImpl::getMinMaxCost(Type *Ty, Type *CondTy,
4213                                           bool IsUnsigned) {
4214   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
4215 
4216   MVT MTy = LT.second;
4217 
4218   int ISD;
4219   if (Ty->isIntOrIntVectorTy()) {
4220     ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
4221   } else {
4222     assert(Ty->isFPOrFPVectorTy() &&
4223            "Expected float point or integer vector type.");
4224     ISD = ISD::FMINNUM;
4225   }
4226 
4227   static const CostTblEntry SSE1CostTbl[] = {
4228     {ISD::FMINNUM, MVT::v4f32, 1},
4229   };
4230 
4231   static const CostTblEntry SSE2CostTbl[] = {
4232     {ISD::FMINNUM, MVT::v2f64, 1},
4233     {ISD::SMIN,    MVT::v8i16, 1},
4234     {ISD::UMIN,    MVT::v16i8, 1},
4235   };
4236 
4237   static const CostTblEntry SSE41CostTbl[] = {
4238     {ISD::SMIN,    MVT::v4i32, 1},
4239     {ISD::UMIN,    MVT::v4i32, 1},
4240     {ISD::UMIN,    MVT::v8i16, 1},
4241     {ISD::SMIN,    MVT::v16i8, 1},
4242   };
4243 
4244   static const CostTblEntry SSE42CostTbl[] = {
4245     {ISD::UMIN,    MVT::v2i64, 3}, // xor+pcmpgtq+blendvpd
4246   };
4247 
4248   static const CostTblEntry AVX1CostTbl[] = {
4249     {ISD::FMINNUM, MVT::v8f32,  1},
4250     {ISD::FMINNUM, MVT::v4f64,  1},
4251     {ISD::SMIN,    MVT::v8i32,  3},
4252     {ISD::UMIN,    MVT::v8i32,  3},
4253     {ISD::SMIN,    MVT::v16i16, 3},
4254     {ISD::UMIN,    MVT::v16i16, 3},
4255     {ISD::SMIN,    MVT::v32i8,  3},
4256     {ISD::UMIN,    MVT::v32i8,  3},
4257   };
4258 
4259   static const CostTblEntry AVX2CostTbl[] = {
4260     {ISD::SMIN,    MVT::v8i32,  1},
4261     {ISD::UMIN,    MVT::v8i32,  1},
4262     {ISD::SMIN,    MVT::v16i16, 1},
4263     {ISD::UMIN,    MVT::v16i16, 1},
4264     {ISD::SMIN,    MVT::v32i8,  1},
4265     {ISD::UMIN,    MVT::v32i8,  1},
4266   };
4267 
4268   static const CostTblEntry AVX512CostTbl[] = {
4269     {ISD::FMINNUM, MVT::v16f32, 1},
4270     {ISD::FMINNUM, MVT::v8f64,  1},
4271     {ISD::SMIN,    MVT::v2i64,  1},
4272     {ISD::UMIN,    MVT::v2i64,  1},
4273     {ISD::SMIN,    MVT::v4i64,  1},
4274     {ISD::UMIN,    MVT::v4i64,  1},
4275     {ISD::SMIN,    MVT::v8i64,  1},
4276     {ISD::UMIN,    MVT::v8i64,  1},
4277     {ISD::SMIN,    MVT::v16i32, 1},
4278     {ISD::UMIN,    MVT::v16i32, 1},
4279   };
4280 
4281   static const CostTblEntry AVX512BWCostTbl[] = {
4282     {ISD::SMIN,    MVT::v32i16, 1},
4283     {ISD::UMIN,    MVT::v32i16, 1},
4284     {ISD::SMIN,    MVT::v64i8,  1},
4285     {ISD::UMIN,    MVT::v64i8,  1},
4286   };
4287 
4288   // If we have a native MIN/MAX instruction for this type, use it.
4289   if (ST->hasBWI())
4290     if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
4291       return LT.first * Entry->Cost;
4292 
4293   if (ST->hasAVX512())
4294     if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
4295       return LT.first * Entry->Cost;
4296 
4297   if (ST->hasAVX2())
4298     if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
4299       return LT.first * Entry->Cost;
4300 
4301   if (ST->hasAVX())
4302     if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
4303       return LT.first * Entry->Cost;
4304 
4305   if (ST->hasSSE42())
4306     if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
4307       return LT.first * Entry->Cost;
4308 
4309   if (ST->hasSSE41())
4310     if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
4311       return LT.first * Entry->Cost;
4312 
4313   if (ST->hasSSE2())
4314     if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
4315       return LT.first * Entry->Cost;
4316 
4317   if (ST->hasSSE1())
4318     if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
4319       return LT.first * Entry->Cost;
4320 
4321   unsigned CmpOpcode;
4322   if (Ty->isFPOrFPVectorTy()) {
4323     CmpOpcode = Instruction::FCmp;
4324   } else {
4325     assert(Ty->isIntOrIntVectorTy() &&
4326            "expecting floating point or integer type for min/max reduction");
4327     CmpOpcode = Instruction::ICmp;
4328   }
4329 
4330   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
4331   // Otherwise fall back to cmp+select.
4332   InstructionCost Result =
4333       getCmpSelInstrCost(CmpOpcode, Ty, CondTy, CmpInst::BAD_ICMP_PREDICATE,
4334                          CostKind) +
4335       getCmpSelInstrCost(Instruction::Select, Ty, CondTy,
4336                          CmpInst::BAD_ICMP_PREDICATE, CostKind);
4337   return Result;
4338 }
4339 
4340 InstructionCost
4341 X86TTIImpl::getMinMaxReductionCost(VectorType *ValTy, VectorType *CondTy,
4342                                    bool IsUnsigned,
4343                                    TTI::TargetCostKind CostKind) {
4344   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
4345 
4346   MVT MTy = LT.second;
4347 
4348   int ISD;
4349   if (ValTy->isIntOrIntVectorTy()) {
4350     ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
4351   } else {
4352     assert(ValTy->isFPOrFPVectorTy() &&
4353            "Expected float point or integer vector type.");
4354     ISD = ISD::FMINNUM;
4355   }
4356 
4357   // We use the Intel Architecture Code Analyzer(IACA) to measure the throughput
4358   // and make it as the cost.
4359 
4360   static const CostTblEntry SSE2CostTblNoPairWise[] = {
4361       {ISD::UMIN, MVT::v2i16, 5}, // need pxors to use pminsw/pmaxsw
4362       {ISD::UMIN, MVT::v4i16, 7}, // need pxors to use pminsw/pmaxsw
4363       {ISD::UMIN, MVT::v8i16, 9}, // need pxors to use pminsw/pmaxsw
4364   };
4365 
4366   static const CostTblEntry SSE41CostTblNoPairWise[] = {
4367       {ISD::SMIN, MVT::v2i16, 3}, // same as sse2
4368       {ISD::SMIN, MVT::v4i16, 5}, // same as sse2
4369       {ISD::UMIN, MVT::v2i16, 5}, // same as sse2
4370       {ISD::UMIN, MVT::v4i16, 7}, // same as sse2
4371       {ISD::SMIN, MVT::v8i16, 4}, // phminposuw+xor
4372       {ISD::UMIN, MVT::v8i16, 4}, // FIXME: umin is cheaper than umax
4373       {ISD::SMIN, MVT::v2i8,  3}, // pminsb
4374       {ISD::SMIN, MVT::v4i8,  5}, // pminsb
4375       {ISD::SMIN, MVT::v8i8,  7}, // pminsb
4376       {ISD::SMIN, MVT::v16i8, 6},
4377       {ISD::UMIN, MVT::v2i8,  3}, // same as sse2
4378       {ISD::UMIN, MVT::v4i8,  5}, // same as sse2
4379       {ISD::UMIN, MVT::v8i8,  7}, // same as sse2
4380       {ISD::UMIN, MVT::v16i8, 6}, // FIXME: umin is cheaper than umax
4381   };
4382 
4383   static const CostTblEntry AVX1CostTblNoPairWise[] = {
4384       {ISD::SMIN, MVT::v16i16, 6},
4385       {ISD::UMIN, MVT::v16i16, 6}, // FIXME: umin is cheaper than umax
4386       {ISD::SMIN, MVT::v32i8, 8},
4387       {ISD::UMIN, MVT::v32i8, 8},
4388   };
4389 
4390   static const CostTblEntry AVX512BWCostTblNoPairWise[] = {
4391       {ISD::SMIN, MVT::v32i16, 8},
4392       {ISD::UMIN, MVT::v32i16, 8}, // FIXME: umin is cheaper than umax
4393       {ISD::SMIN, MVT::v64i8, 10},
4394       {ISD::UMIN, MVT::v64i8, 10},
4395   };
4396 
4397   // Before legalizing the type, give a chance to look up illegal narrow types
4398   // in the table.
4399   // FIXME: Is there a better way to do this?
4400   EVT VT = TLI->getValueType(DL, ValTy);
4401   if (VT.isSimple()) {
4402     MVT MTy = VT.getSimpleVT();
4403     if (ST->hasBWI())
4404       if (const auto *Entry = CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy))
4405         return Entry->Cost;
4406 
4407     if (ST->hasAVX())
4408       if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
4409         return Entry->Cost;
4410 
4411     if (ST->hasSSE41())
4412       if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy))
4413         return Entry->Cost;
4414 
4415     if (ST->hasSSE2())
4416       if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
4417         return Entry->Cost;
4418   }
4419 
4420   auto *ValVTy = cast<FixedVectorType>(ValTy);
4421   unsigned NumVecElts = ValVTy->getNumElements();
4422 
4423   auto *Ty = ValVTy;
4424   InstructionCost MinMaxCost = 0;
4425   if (LT.first != 1 && MTy.isVector() &&
4426       MTy.getVectorNumElements() < ValVTy->getNumElements()) {
4427     // Type needs to be split. We need LT.first - 1 operations ops.
4428     Ty = FixedVectorType::get(ValVTy->getElementType(),
4429                               MTy.getVectorNumElements());
4430     auto *SubCondTy = FixedVectorType::get(CondTy->getElementType(),
4431                                            MTy.getVectorNumElements());
4432     MinMaxCost = getMinMaxCost(Ty, SubCondTy, IsUnsigned);
4433     MinMaxCost *= LT.first - 1;
4434     NumVecElts = MTy.getVectorNumElements();
4435   }
4436 
4437   if (ST->hasBWI())
4438     if (const auto *Entry = CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy))
4439       return MinMaxCost + Entry->Cost;
4440 
4441   if (ST->hasAVX())
4442     if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
4443       return MinMaxCost + Entry->Cost;
4444 
4445   if (ST->hasSSE41())
4446     if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy))
4447       return MinMaxCost + Entry->Cost;
4448 
4449   if (ST->hasSSE2())
4450     if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
4451       return MinMaxCost + Entry->Cost;
4452 
4453   unsigned ScalarSize = ValTy->getScalarSizeInBits();
4454 
4455   // Special case power of 2 reductions where the scalar type isn't changed
4456   // by type legalization.
4457   if (!isPowerOf2_32(ValVTy->getNumElements()) ||
4458       ScalarSize != MTy.getScalarSizeInBits())
4459     return BaseT::getMinMaxReductionCost(ValTy, CondTy, IsUnsigned, CostKind);
4460 
4461   // Now handle reduction with the legal type, taking into account size changes
4462   // at each level.
4463   while (NumVecElts > 1) {
4464     // Determine the size of the remaining vector we need to reduce.
4465     unsigned Size = NumVecElts * ScalarSize;
4466     NumVecElts /= 2;
4467     // If we're reducing from 256/512 bits, use an extract_subvector.
4468     if (Size > 128) {
4469       auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts);
4470       MinMaxCost +=
4471           getShuffleCost(TTI::SK_ExtractSubvector, Ty, None, NumVecElts, SubTy);
4472       Ty = SubTy;
4473     } else if (Size == 128) {
4474       // Reducing from 128 bits is a permute of v2f64/v2i64.
4475       VectorType *ShufTy;
4476       if (ValTy->isFloatingPointTy())
4477         ShufTy =
4478             FixedVectorType::get(Type::getDoubleTy(ValTy->getContext()), 2);
4479       else
4480         ShufTy = FixedVectorType::get(Type::getInt64Ty(ValTy->getContext()), 2);
4481       MinMaxCost +=
4482           getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
4483     } else if (Size == 64) {
4484       // Reducing from 64 bits is a shuffle of v4f32/v4i32.
4485       FixedVectorType *ShufTy;
4486       if (ValTy->isFloatingPointTy())
4487         ShufTy = FixedVectorType::get(Type::getFloatTy(ValTy->getContext()), 4);
4488       else
4489         ShufTy = FixedVectorType::get(Type::getInt32Ty(ValTy->getContext()), 4);
4490       MinMaxCost +=
4491           getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
4492     } else {
4493       // Reducing from smaller size is a shift by immediate.
4494       auto *ShiftTy = FixedVectorType::get(
4495           Type::getIntNTy(ValTy->getContext(), Size), 128 / Size);
4496       MinMaxCost += getArithmeticInstrCost(
4497           Instruction::LShr, ShiftTy, TTI::TCK_RecipThroughput,
4498           TargetTransformInfo::OK_AnyValue,
4499           TargetTransformInfo::OK_UniformConstantValue,
4500           TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
4501     }
4502 
4503     // Add the arithmetic op for this level.
4504     auto *SubCondTy =
4505         FixedVectorType::get(CondTy->getElementType(), Ty->getNumElements());
4506     MinMaxCost += getMinMaxCost(Ty, SubCondTy, IsUnsigned);
4507   }
4508 
4509   // Add the final extract element to the cost.
4510   return MinMaxCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
4511 }
4512 
4513 /// Calculate the cost of materializing a 64-bit value. This helper
4514 /// method might only calculate a fraction of a larger immediate. Therefore it
4515 /// is valid to return a cost of ZERO.
4516 InstructionCost X86TTIImpl::getIntImmCost(int64_t Val) {
4517   if (Val == 0)
4518     return TTI::TCC_Free;
4519 
4520   if (isInt<32>(Val))
4521     return TTI::TCC_Basic;
4522 
4523   return 2 * TTI::TCC_Basic;
4524 }
4525 
4526 InstructionCost X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
4527                                           TTI::TargetCostKind CostKind) {
4528   assert(Ty->isIntegerTy());
4529 
4530   unsigned BitSize = Ty->getPrimitiveSizeInBits();
4531   if (BitSize == 0)
4532     return ~0U;
4533 
4534   // Never hoist constants larger than 128bit, because this might lead to
4535   // incorrect code generation or assertions in codegen.
4536   // Fixme: Create a cost model for types larger than i128 once the codegen
4537   // issues have been fixed.
4538   if (BitSize > 128)
4539     return TTI::TCC_Free;
4540 
4541   if (Imm == 0)
4542     return TTI::TCC_Free;
4543 
4544   // Sign-extend all constants to a multiple of 64-bit.
4545   APInt ImmVal = Imm;
4546   if (BitSize % 64 != 0)
4547     ImmVal = Imm.sext(alignTo(BitSize, 64));
4548 
4549   // Split the constant into 64-bit chunks and calculate the cost for each
4550   // chunk.
4551   InstructionCost Cost = 0;
4552   for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
4553     APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
4554     int64_t Val = Tmp.getSExtValue();
4555     Cost += getIntImmCost(Val);
4556   }
4557   // We need at least one instruction to materialize the constant.
4558   return std::max<InstructionCost>(1, Cost);
4559 }
4560 
4561 InstructionCost X86TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
4562                                               const APInt &Imm, Type *Ty,
4563                                               TTI::TargetCostKind CostKind,
4564                                               Instruction *Inst) {
4565   assert(Ty->isIntegerTy());
4566 
4567   unsigned BitSize = Ty->getPrimitiveSizeInBits();
4568   // There is no cost model for constants with a bit size of 0. Return TCC_Free
4569   // here, so that constant hoisting will ignore this constant.
4570   if (BitSize == 0)
4571     return TTI::TCC_Free;
4572 
4573   unsigned ImmIdx = ~0U;
4574   switch (Opcode) {
4575   default:
4576     return TTI::TCC_Free;
4577   case Instruction::GetElementPtr:
4578     // Always hoist the base address of a GetElementPtr. This prevents the
4579     // creation of new constants for every base constant that gets constant
4580     // folded with the offset.
4581     if (Idx == 0)
4582       return 2 * TTI::TCC_Basic;
4583     return TTI::TCC_Free;
4584   case Instruction::Store:
4585     ImmIdx = 0;
4586     break;
4587   case Instruction::ICmp:
4588     // This is an imperfect hack to prevent constant hoisting of
4589     // compares that might be trying to check if a 64-bit value fits in
4590     // 32-bits. The backend can optimize these cases using a right shift by 32.
4591     // Ideally we would check the compare predicate here. There also other
4592     // similar immediates the backend can use shifts for.
4593     if (Idx == 1 && Imm.getBitWidth() == 64) {
4594       uint64_t ImmVal = Imm.getZExtValue();
4595       if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff)
4596         return TTI::TCC_Free;
4597     }
4598     ImmIdx = 1;
4599     break;
4600   case Instruction::And:
4601     // We support 64-bit ANDs with immediates with 32-bits of leading zeroes
4602     // by using a 32-bit operation with implicit zero extension. Detect such
4603     // immediates here as the normal path expects bit 31 to be sign extended.
4604     if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
4605       return TTI::TCC_Free;
4606     ImmIdx = 1;
4607     break;
4608   case Instruction::Add:
4609   case Instruction::Sub:
4610     // For add/sub, we can use the opposite instruction for INT32_MIN.
4611     if (Idx == 1 && Imm.getBitWidth() == 64 && Imm.getZExtValue() == 0x80000000)
4612       return TTI::TCC_Free;
4613     ImmIdx = 1;
4614     break;
4615   case Instruction::UDiv:
4616   case Instruction::SDiv:
4617   case Instruction::URem:
4618   case Instruction::SRem:
4619     // Division by constant is typically expanded later into a different
4620     // instruction sequence. This completely changes the constants.
4621     // Report them as "free" to stop ConstantHoist from marking them as opaque.
4622     return TTI::TCC_Free;
4623   case Instruction::Mul:
4624   case Instruction::Or:
4625   case Instruction::Xor:
4626     ImmIdx = 1;
4627     break;
4628   // Always return TCC_Free for the shift value of a shift instruction.
4629   case Instruction::Shl:
4630   case Instruction::LShr:
4631   case Instruction::AShr:
4632     if (Idx == 1)
4633       return TTI::TCC_Free;
4634     break;
4635   case Instruction::Trunc:
4636   case Instruction::ZExt:
4637   case Instruction::SExt:
4638   case Instruction::IntToPtr:
4639   case Instruction::PtrToInt:
4640   case Instruction::BitCast:
4641   case Instruction::PHI:
4642   case Instruction::Call:
4643   case Instruction::Select:
4644   case Instruction::Ret:
4645   case Instruction::Load:
4646     break;
4647   }
4648 
4649   if (Idx == ImmIdx) {
4650     int NumConstants = divideCeil(BitSize, 64);
4651     InstructionCost Cost = X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
4652     return (Cost <= NumConstants * TTI::TCC_Basic)
4653                ? static_cast<int>(TTI::TCC_Free)
4654                : Cost;
4655   }
4656 
4657   return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
4658 }
4659 
4660 InstructionCost X86TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
4661                                                 const APInt &Imm, Type *Ty,
4662                                                 TTI::TargetCostKind CostKind) {
4663   assert(Ty->isIntegerTy());
4664 
4665   unsigned BitSize = Ty->getPrimitiveSizeInBits();
4666   // There is no cost model for constants with a bit size of 0. Return TCC_Free
4667   // here, so that constant hoisting will ignore this constant.
4668   if (BitSize == 0)
4669     return TTI::TCC_Free;
4670 
4671   switch (IID) {
4672   default:
4673     return TTI::TCC_Free;
4674   case Intrinsic::sadd_with_overflow:
4675   case Intrinsic::uadd_with_overflow:
4676   case Intrinsic::ssub_with_overflow:
4677   case Intrinsic::usub_with_overflow:
4678   case Intrinsic::smul_with_overflow:
4679   case Intrinsic::umul_with_overflow:
4680     if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
4681       return TTI::TCC_Free;
4682     break;
4683   case Intrinsic::experimental_stackmap:
4684     if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
4685       return TTI::TCC_Free;
4686     break;
4687   case Intrinsic::experimental_patchpoint_void:
4688   case Intrinsic::experimental_patchpoint_i64:
4689     if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
4690       return TTI::TCC_Free;
4691     break;
4692   }
4693   return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
4694 }
4695 
4696 InstructionCost X86TTIImpl::getCFInstrCost(unsigned Opcode,
4697                                            TTI::TargetCostKind CostKind,
4698                                            const Instruction *I) {
4699   if (CostKind != TTI::TCK_RecipThroughput)
4700     return Opcode == Instruction::PHI ? 0 : 1;
4701   // Branches are assumed to be predicted.
4702   return 0;
4703 }
4704 
4705 int X86TTIImpl::getGatherOverhead() const {
4706   // Some CPUs have more overhead for gather. The specified overhead is relative
4707   // to the Load operation. "2" is the number provided by Intel architects. This
4708   // parameter is used for cost estimation of Gather Op and comparison with
4709   // other alternatives.
4710   // TODO: Remove the explicit hasAVX512()?, That would mean we would only
4711   // enable gather with a -march.
4712   if (ST->hasAVX512() || (ST->hasAVX2() && ST->hasFastGather()))
4713     return 2;
4714 
4715   return 1024;
4716 }
4717 
4718 int X86TTIImpl::getScatterOverhead() const {
4719   if (ST->hasAVX512())
4720     return 2;
4721 
4722   return 1024;
4723 }
4724 
4725 // Return an average cost of Gather / Scatter instruction, maybe improved later.
4726 // FIXME: Add TargetCostKind support.
4727 InstructionCost X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy,
4728                                             const Value *Ptr, Align Alignment,
4729                                             unsigned AddressSpace) {
4730 
4731   assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost");
4732   unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();
4733 
4734   // Try to reduce index size from 64 bit (default for GEP)
4735   // to 32. It is essential for VF 16. If the index can't be reduced to 32, the
4736   // operation will use 16 x 64 indices which do not fit in a zmm and needs
4737   // to split. Also check that the base pointer is the same for all lanes,
4738   // and that there's at most one variable index.
4739   auto getIndexSizeInBits = [](const Value *Ptr, const DataLayout &DL) {
4740     unsigned IndexSize = DL.getPointerSizeInBits();
4741     const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
4742     if (IndexSize < 64 || !GEP)
4743       return IndexSize;
4744 
4745     unsigned NumOfVarIndices = 0;
4746     const Value *Ptrs = GEP->getPointerOperand();
4747     if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs))
4748       return IndexSize;
4749     for (unsigned i = 1; i < GEP->getNumOperands(); ++i) {
4750       if (isa<Constant>(GEP->getOperand(i)))
4751         continue;
4752       Type *IndxTy = GEP->getOperand(i)->getType();
4753       if (auto *IndexVTy = dyn_cast<VectorType>(IndxTy))
4754         IndxTy = IndexVTy->getElementType();
4755       if ((IndxTy->getPrimitiveSizeInBits() == 64 &&
4756           !isa<SExtInst>(GEP->getOperand(i))) ||
4757          ++NumOfVarIndices > 1)
4758         return IndexSize; // 64
4759     }
4760     return (unsigned)32;
4761   };
4762 
4763   // Trying to reduce IndexSize to 32 bits for vector 16.
4764   // By default the IndexSize is equal to pointer size.
4765   unsigned IndexSize = (ST->hasAVX512() && VF >= 16)
4766                            ? getIndexSizeInBits(Ptr, DL)
4767                            : DL.getPointerSizeInBits();
4768 
4769   auto *IndexVTy = FixedVectorType::get(
4770       IntegerType::get(SrcVTy->getContext(), IndexSize), VF);
4771   std::pair<InstructionCost, MVT> IdxsLT =
4772       TLI->getTypeLegalizationCost(DL, IndexVTy);
4773   std::pair<InstructionCost, MVT> SrcLT =
4774       TLI->getTypeLegalizationCost(DL, SrcVTy);
4775   InstructionCost::CostType SplitFactor =
4776       *std::max(IdxsLT.first, SrcLT.first).getValue();
4777   if (SplitFactor > 1) {
4778     // Handle splitting of vector of pointers
4779     auto *SplitSrcTy =
4780         FixedVectorType::get(SrcVTy->getScalarType(), VF / SplitFactor);
4781     return SplitFactor * getGSVectorCost(Opcode, SplitSrcTy, Ptr, Alignment,
4782                                          AddressSpace);
4783   }
4784 
4785   // The gather / scatter cost is given by Intel architects. It is a rough
4786   // number since we are looking at one instruction in a time.
4787   const int GSOverhead = (Opcode == Instruction::Load)
4788                              ? getGatherOverhead()
4789                              : getScatterOverhead();
4790   return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
4791                                            MaybeAlign(Alignment), AddressSpace,
4792                                            TTI::TCK_RecipThroughput);
4793 }
4794 
4795 /// Return the cost of full scalarization of gather / scatter operation.
4796 ///
4797 /// Opcode - Load or Store instruction.
4798 /// SrcVTy - The type of the data vector that should be gathered or scattered.
4799 /// VariableMask - The mask is non-constant at compile time.
4800 /// Alignment - Alignment for one element.
4801 /// AddressSpace - pointer[s] address space.
4802 ///
4803 /// FIXME: Add TargetCostKind support.
4804 InstructionCost X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy,
4805                                             bool VariableMask, Align Alignment,
4806                                             unsigned AddressSpace) {
4807   Type *ScalarTy = SrcVTy->getScalarType();
4808   unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();
4809   APInt DemandedElts = APInt::getAllOnes(VF);
4810   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
4811 
4812   InstructionCost MaskUnpackCost = 0;
4813   if (VariableMask) {
4814     auto *MaskTy =
4815         FixedVectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF);
4816     MaskUnpackCost = getScalarizationOverhead(
4817         MaskTy, DemandedElts, /*Insert=*/false, /*Extract=*/true);
4818     InstructionCost ScalarCompareCost = getCmpSelInstrCost(
4819         Instruction::ICmp, Type::getInt1Ty(SrcVTy->getContext()), nullptr,
4820         CmpInst::BAD_ICMP_PREDICATE, CostKind);
4821     InstructionCost BranchCost = getCFInstrCost(Instruction::Br, CostKind);
4822     MaskUnpackCost += VF * (BranchCost + ScalarCompareCost);
4823   }
4824 
4825   InstructionCost AddressUnpackCost = getScalarizationOverhead(
4826       FixedVectorType::get(ScalarTy->getPointerTo(), VF), DemandedElts,
4827       /*Insert=*/false, /*Extract=*/true);
4828 
4829   // The cost of the scalar loads/stores.
4830   InstructionCost MemoryOpCost =
4831       VF * getMemoryOpCost(Opcode, ScalarTy, MaybeAlign(Alignment),
4832                            AddressSpace, CostKind);
4833 
4834   // The cost of forming the vector from loaded scalars/
4835   // scalarizing the vector to perform scalar stores.
4836   InstructionCost InsertExtractCost =
4837       getScalarizationOverhead(cast<FixedVectorType>(SrcVTy), DemandedElts,
4838                                /*Insert=*/Opcode == Instruction::Load,
4839                                /*Extract=*/Opcode == Instruction::Store);
4840 
4841   return AddressUnpackCost + MemoryOpCost + MaskUnpackCost + InsertExtractCost;
4842 }
4843 
4844 /// Calculate the cost of Gather / Scatter operation
4845 InstructionCost X86TTIImpl::getGatherScatterOpCost(
4846     unsigned Opcode, Type *SrcVTy, const Value *Ptr, bool VariableMask,
4847     Align Alignment, TTI::TargetCostKind CostKind,
4848     const Instruction *I = nullptr) {
4849   if (CostKind != TTI::TCK_RecipThroughput) {
4850     if ((Opcode == Instruction::Load &&
4851          isLegalMaskedGather(SrcVTy, Align(Alignment))) ||
4852         (Opcode == Instruction::Store &&
4853          isLegalMaskedScatter(SrcVTy, Align(Alignment))))
4854       return 1;
4855     return BaseT::getGatherScatterOpCost(Opcode, SrcVTy, Ptr, VariableMask,
4856                                          Alignment, CostKind, I);
4857   }
4858 
4859   assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter");
4860   PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
4861   if (!PtrTy && Ptr->getType()->isVectorTy())
4862     PtrTy = dyn_cast<PointerType>(
4863         cast<VectorType>(Ptr->getType())->getElementType());
4864   assert(PtrTy && "Unexpected type for Ptr argument");
4865   unsigned AddressSpace = PtrTy->getAddressSpace();
4866 
4867   if ((Opcode == Instruction::Load &&
4868        !isLegalMaskedGather(SrcVTy, Align(Alignment))) ||
4869       (Opcode == Instruction::Store &&
4870        !isLegalMaskedScatter(SrcVTy, Align(Alignment))))
4871     return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment,
4872                            AddressSpace);
4873 
4874   return getGSVectorCost(Opcode, SrcVTy, Ptr, Alignment, AddressSpace);
4875 }
4876 
4877 bool X86TTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1,
4878                                TargetTransformInfo::LSRCost &C2) {
4879     // X86 specific here are "instruction number 1st priority".
4880     return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost,
4881                     C1.NumIVMuls, C1.NumBaseAdds,
4882                     C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
4883            std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost,
4884                     C2.NumIVMuls, C2.NumBaseAdds,
4885                     C2.ScaleCost, C2.ImmCost, C2.SetupCost);
4886 }
4887 
4888 bool X86TTIImpl::canMacroFuseCmp() {
4889   return ST->hasMacroFusion() || ST->hasBranchFusion();
4890 }
4891 
4892 bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) {
4893   if (!ST->hasAVX())
4894     return false;
4895 
4896   // The backend can't handle a single element vector.
4897   if (isa<VectorType>(DataTy) &&
4898       cast<FixedVectorType>(DataTy)->getNumElements() == 1)
4899     return false;
4900   Type *ScalarTy = DataTy->getScalarType();
4901 
4902   if (ScalarTy->isPointerTy())
4903     return true;
4904 
4905   if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
4906     return true;
4907 
4908   if (ScalarTy->isHalfTy() && ST->hasBWI() && ST->hasFP16())
4909     return true;
4910 
4911   if (!ScalarTy->isIntegerTy())
4912     return false;
4913 
4914   unsigned IntWidth = ScalarTy->getIntegerBitWidth();
4915   return IntWidth == 32 || IntWidth == 64 ||
4916          ((IntWidth == 8 || IntWidth == 16) && ST->hasBWI());
4917 }
4918 
4919 bool X86TTIImpl::isLegalMaskedStore(Type *DataType, Align Alignment) {
4920   return isLegalMaskedLoad(DataType, Alignment);
4921 }
4922 
4923 bool X86TTIImpl::isLegalNTLoad(Type *DataType, Align Alignment) {
4924   unsigned DataSize = DL.getTypeStoreSize(DataType);
4925   // The only supported nontemporal loads are for aligned vectors of 16 or 32
4926   // bytes.  Note that 32-byte nontemporal vector loads are supported by AVX2
4927   // (the equivalent stores only require AVX).
4928   if (Alignment >= DataSize && (DataSize == 16 || DataSize == 32))
4929     return DataSize == 16 ?  ST->hasSSE1() : ST->hasAVX2();
4930 
4931   return false;
4932 }
4933 
4934 bool X86TTIImpl::isLegalNTStore(Type *DataType, Align Alignment) {
4935   unsigned DataSize = DL.getTypeStoreSize(DataType);
4936 
4937   // SSE4A supports nontemporal stores of float and double at arbitrary
4938   // alignment.
4939   if (ST->hasSSE4A() && (DataType->isFloatTy() || DataType->isDoubleTy()))
4940     return true;
4941 
4942   // Besides the SSE4A subtarget exception above, only aligned stores are
4943   // available nontemporaly on any other subtarget.  And only stores with a size
4944   // of 4..32 bytes (powers of 2, only) are permitted.
4945   if (Alignment < DataSize || DataSize < 4 || DataSize > 32 ||
4946       !isPowerOf2_32(DataSize))
4947     return false;
4948 
4949   // 32-byte vector nontemporal stores are supported by AVX (the equivalent
4950   // loads require AVX2).
4951   if (DataSize == 32)
4952     return ST->hasAVX();
4953   if (DataSize == 16)
4954     return ST->hasSSE1();
4955   return true;
4956 }
4957 
4958 bool X86TTIImpl::isLegalMaskedExpandLoad(Type *DataTy) {
4959   if (!isa<VectorType>(DataTy))
4960     return false;
4961 
4962   if (!ST->hasAVX512())
4963     return false;
4964 
4965   // The backend can't handle a single element vector.
4966   if (cast<FixedVectorType>(DataTy)->getNumElements() == 1)
4967     return false;
4968 
4969   Type *ScalarTy = cast<VectorType>(DataTy)->getElementType();
4970 
4971   if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
4972     return true;
4973 
4974   if (!ScalarTy->isIntegerTy())
4975     return false;
4976 
4977   unsigned IntWidth = ScalarTy->getIntegerBitWidth();
4978   return IntWidth == 32 || IntWidth == 64 ||
4979          ((IntWidth == 8 || IntWidth == 16) && ST->hasVBMI2());
4980 }
4981 
4982 bool X86TTIImpl::isLegalMaskedCompressStore(Type *DataTy) {
4983   return isLegalMaskedExpandLoad(DataTy);
4984 }
4985 
4986 bool X86TTIImpl::supportsGather() const {
4987   // Some CPUs have better gather performance than others.
4988   // TODO: Remove the explicit ST->hasAVX512()?, That would mean we would only
4989   // enable gather with a -march.
4990   return ST->hasAVX512() || (ST->hasFastGather() && ST->hasAVX2());
4991 }
4992 
4993 bool X86TTIImpl::isLegalMaskedGather(Type *DataTy, Align Alignment) {
4994   if (!supportsGather())
4995     return false;
4996 
4997   // This function is called now in two cases: from the Loop Vectorizer
4998   // and from the Scalarizer.
4999   // When the Loop Vectorizer asks about legality of the feature,
5000   // the vectorization factor is not calculated yet. The Loop Vectorizer
5001   // sends a scalar type and the decision is based on the width of the
5002   // scalar element.
5003   // Later on, the cost model will estimate usage this intrinsic based on
5004   // the vector type.
5005   // The Scalarizer asks again about legality. It sends a vector type.
5006   // In this case we can reject non-power-of-2 vectors.
5007   // We also reject single element vectors as the type legalizer can't
5008   // scalarize it.
5009   if (auto *DataVTy = dyn_cast<FixedVectorType>(DataTy)) {
5010     unsigned NumElts = DataVTy->getNumElements();
5011     if (NumElts == 1)
5012       return false;
5013     // Gather / Scatter for vector 2 is not profitable on KNL / SKX
5014     // Vector-4 of gather/scatter instruction does not exist on KNL.
5015     // We can extend it to 8 elements, but zeroing upper bits of
5016     // the mask vector will add more instructions. Right now we give the scalar
5017     // cost of vector-4 for KNL. TODO: Check, maybe the gather/scatter
5018     // instruction is better in the VariableMask case.
5019     if (ST->hasAVX512() && (NumElts == 2 || (NumElts == 4 && !ST->hasVLX())))
5020       return false;
5021   }
5022   Type *ScalarTy = DataTy->getScalarType();
5023   if (ScalarTy->isPointerTy())
5024     return true;
5025 
5026   if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
5027     return true;
5028 
5029   if (!ScalarTy->isIntegerTy())
5030     return false;
5031 
5032   unsigned IntWidth = ScalarTy->getIntegerBitWidth();
5033   return IntWidth == 32 || IntWidth == 64;
5034 }
5035 
5036 bool X86TTIImpl::isLegalMaskedScatter(Type *DataType, Align Alignment) {
5037   // AVX2 doesn't support scatter
5038   if (!ST->hasAVX512())
5039     return false;
5040   return isLegalMaskedGather(DataType, Alignment);
5041 }
5042 
5043 bool X86TTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
5044   EVT VT = TLI->getValueType(DL, DataType);
5045   return TLI->isOperationLegal(IsSigned ? ISD::SDIVREM : ISD::UDIVREM, VT);
5046 }
5047 
5048 bool X86TTIImpl::isFCmpOrdCheaperThanFCmpZero(Type *Ty) {
5049   return false;
5050 }
5051 
5052 bool X86TTIImpl::areInlineCompatible(const Function *Caller,
5053                                      const Function *Callee) const {
5054   const TargetMachine &TM = getTLI()->getTargetMachine();
5055 
5056   // Work this as a subsetting of subtarget features.
5057   const FeatureBitset &CallerBits =
5058       TM.getSubtargetImpl(*Caller)->getFeatureBits();
5059   const FeatureBitset &CalleeBits =
5060       TM.getSubtargetImpl(*Callee)->getFeatureBits();
5061 
5062   FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
5063   FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
5064   return (RealCallerBits & RealCalleeBits) == RealCalleeBits;
5065 }
5066 
5067 bool X86TTIImpl::areFunctionArgsABICompatible(
5068     const Function *Caller, const Function *Callee,
5069     SmallPtrSetImpl<Argument *> &Args) const {
5070   if (!BaseT::areFunctionArgsABICompatible(Caller, Callee, Args))
5071     return false;
5072 
5073   // If we get here, we know the target features match. If one function
5074   // considers 512-bit vectors legal and the other does not, consider them
5075   // incompatible.
5076   const TargetMachine &TM = getTLI()->getTargetMachine();
5077 
5078   if (TM.getSubtarget<X86Subtarget>(*Caller).useAVX512Regs() ==
5079       TM.getSubtarget<X86Subtarget>(*Callee).useAVX512Regs())
5080     return true;
5081 
5082   // Consider the arguments compatible if they aren't vectors or aggregates.
5083   // FIXME: Look at the size of vectors.
5084   // FIXME: Look at the element types of aggregates to see if there are vectors.
5085   // FIXME: The API of this function seems intended to allow arguments
5086   // to be removed from the set, but the caller doesn't check if the set
5087   // becomes empty so that may not work in practice.
5088   return llvm::none_of(Args, [](Argument *A) {
5089     auto *EltTy = cast<PointerType>(A->getType())->getElementType();
5090     return EltTy->isVectorTy() || EltTy->isAggregateType();
5091   });
5092 }
5093 
5094 X86TTIImpl::TTI::MemCmpExpansionOptions
5095 X86TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
5096   TTI::MemCmpExpansionOptions Options;
5097   Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
5098   Options.NumLoadsPerBlock = 2;
5099   // All GPR and vector loads can be unaligned.
5100   Options.AllowOverlappingLoads = true;
5101   if (IsZeroCmp) {
5102     // Only enable vector loads for equality comparison. Right now the vector
5103     // version is not as fast for three way compare (see #33329).
5104     const unsigned PreferredWidth = ST->getPreferVectorWidth();
5105     if (PreferredWidth >= 512 && ST->hasAVX512()) Options.LoadSizes.push_back(64);
5106     if (PreferredWidth >= 256 && ST->hasAVX()) Options.LoadSizes.push_back(32);
5107     if (PreferredWidth >= 128 && ST->hasSSE2()) Options.LoadSizes.push_back(16);
5108   }
5109   if (ST->is64Bit()) {
5110     Options.LoadSizes.push_back(8);
5111   }
5112   Options.LoadSizes.push_back(4);
5113   Options.LoadSizes.push_back(2);
5114   Options.LoadSizes.push_back(1);
5115   return Options;
5116 }
5117 
5118 bool X86TTIImpl::prefersVectorizedAddressing() const {
5119   return supportsGather();
5120 }
5121 
5122 bool X86TTIImpl::supportsEfficientVectorElementLoadStore() const {
5123   return false;
5124 }
5125 
5126 bool X86TTIImpl::enableInterleavedAccessVectorization() {
5127   // TODO: We expect this to be beneficial regardless of arch,
5128   // but there are currently some unexplained performance artifacts on Atom.
5129   // As a temporary solution, disable on Atom.
5130   return !(ST->isAtom());
5131 }
5132 
5133 // Get estimation for interleaved load/store operations and strided load.
5134 // \p Indices contains indices for strided load.
5135 // \p Factor - the factor of interleaving.
5136 // AVX-512 provides 3-src shuffles that significantly reduces the cost.
5137 InstructionCost X86TTIImpl::getInterleavedMemoryOpCostAVX512(
5138     unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
5139     ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
5140     TTI::TargetCostKind CostKind, bool UseMaskForCond, bool UseMaskForGaps) {
5141   // VecTy for interleave memop is <VF*Factor x Elt>.
5142   // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
5143   // VecTy = <12 x i32>.
5144 
5145   // Calculate the number of memory operations (NumOfMemOps), required
5146   // for load/store the VecTy.
5147   MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
5148   unsigned VecTySize = DL.getTypeStoreSize(VecTy);
5149   unsigned LegalVTSize = LegalVT.getStoreSize();
5150   unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;
5151 
5152   // Get the cost of one memory operation.
5153   auto *SingleMemOpTy = FixedVectorType::get(VecTy->getElementType(),
5154                                              LegalVT.getVectorNumElements());
5155   InstructionCost MemOpCost;
5156   if (UseMaskForCond || UseMaskForGaps)
5157     MemOpCost = getMaskedMemoryOpCost(Opcode, SingleMemOpTy, Alignment,
5158                                       AddressSpace, CostKind);
5159   else
5160     MemOpCost = getMemoryOpCost(Opcode, SingleMemOpTy, MaybeAlign(Alignment),
5161                                 AddressSpace, CostKind);
5162 
5163   unsigned VF = VecTy->getNumElements() / Factor;
5164   MVT VT = MVT::getVectorVT(MVT::getVT(VecTy->getScalarType()), VF);
5165 
5166   // FIXME: this is the most conservative estimate for the mask cost.
5167   InstructionCost MaskCost;
5168   if (UseMaskForCond || UseMaskForGaps) {
5169     APInt DemandedLoadStoreElts = APInt::getZero(VecTy->getNumElements());
5170     for (unsigned Index : Indices) {
5171       assert(Index < Factor && "Invalid index for interleaved memory op");
5172       for (unsigned Elm = 0; Elm < VF; Elm++)
5173         DemandedLoadStoreElts.setBit(Index + Elm * Factor);
5174     }
5175 
5176     Type *I8Type = Type::getInt8Ty(VecTy->getContext());
5177 
5178     MaskCost = getReplicationShuffleCost(
5179         I8Type, Factor, VF,
5180         UseMaskForGaps ? DemandedLoadStoreElts
5181                        : APInt::getAllOnes(VecTy->getNumElements()),
5182         CostKind);
5183 
5184     // The Gaps mask is invariant and created outside the loop, therefore the
5185     // cost of creating it is not accounted for here. However if we have both
5186     // a MaskForGaps and some other mask that guards the execution of the
5187     // memory access, we need to account for the cost of And-ing the two masks
5188     // inside the loop.
5189     if (UseMaskForGaps) {
5190       auto *MaskVT = FixedVectorType::get(I8Type, VecTy->getNumElements());
5191       MaskCost += getArithmeticInstrCost(BinaryOperator::And, MaskVT, CostKind);
5192     }
5193   }
5194 
5195   if (Opcode == Instruction::Load) {
5196     // The tables (AVX512InterleavedLoadTbl and AVX512InterleavedStoreTbl)
5197     // contain the cost of the optimized shuffle sequence that the
5198     // X86InterleavedAccess pass will generate.
5199     // The cost of loads and stores are computed separately from the table.
5200 
5201     // X86InterleavedAccess support only the following interleaved-access group.
5202     static const CostTblEntry AVX512InterleavedLoadTbl[] = {
5203         {3, MVT::v16i8, 12}, //(load 48i8 and) deinterleave into 3 x 16i8
5204         {3, MVT::v32i8, 14}, //(load 96i8 and) deinterleave into 3 x 32i8
5205         {3, MVT::v64i8, 22}, //(load 96i8 and) deinterleave into 3 x 32i8
5206     };
5207 
5208     if (const auto *Entry =
5209             CostTableLookup(AVX512InterleavedLoadTbl, Factor, VT))
5210       return MaskCost + NumOfMemOps * MemOpCost + Entry->Cost;
5211     //If an entry does not exist, fallback to the default implementation.
5212 
5213     // Kind of shuffle depends on number of loaded values.
5214     // If we load the entire data in one register, we can use a 1-src shuffle.
5215     // Otherwise, we'll merge 2 sources in each operation.
5216     TTI::ShuffleKind ShuffleKind =
5217         (NumOfMemOps > 1) ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc;
5218 
5219     InstructionCost ShuffleCost =
5220         getShuffleCost(ShuffleKind, SingleMemOpTy, None, 0, nullptr);
5221 
5222     unsigned NumOfLoadsInInterleaveGrp =
5223         Indices.size() ? Indices.size() : Factor;
5224     auto *ResultTy = FixedVectorType::get(VecTy->getElementType(),
5225                                           VecTy->getNumElements() / Factor);
5226     InstructionCost NumOfResults =
5227         getTLI()->getTypeLegalizationCost(DL, ResultTy).first *
5228         NumOfLoadsInInterleaveGrp;
5229 
5230     // About a half of the loads may be folded in shuffles when we have only
5231     // one result. If we have more than one result, we do not fold loads at all.
5232     unsigned NumOfUnfoldedLoads =
5233         NumOfResults > 1 ? NumOfMemOps : NumOfMemOps / 2;
5234 
5235     // Get a number of shuffle operations per result.
5236     unsigned NumOfShufflesPerResult =
5237         std::max((unsigned)1, (unsigned)(NumOfMemOps - 1));
5238 
5239     // The SK_MergeTwoSrc shuffle clobbers one of src operands.
5240     // When we have more than one destination, we need additional instructions
5241     // to keep sources.
5242     InstructionCost NumOfMoves = 0;
5243     if (NumOfResults > 1 && ShuffleKind == TTI::SK_PermuteTwoSrc)
5244       NumOfMoves = NumOfResults * NumOfShufflesPerResult / 2;
5245 
5246     InstructionCost Cost = NumOfResults * NumOfShufflesPerResult * ShuffleCost +
5247                            MaskCost + NumOfUnfoldedLoads * MemOpCost +
5248                            NumOfMoves;
5249 
5250     return Cost;
5251   }
5252 
5253   // Store.
5254   assert(Opcode == Instruction::Store &&
5255          "Expected Store Instruction at this  point");
5256   // X86InterleavedAccess support only the following interleaved-access group.
5257   static const CostTblEntry AVX512InterleavedStoreTbl[] = {
5258       {3, MVT::v16i8, 12}, // interleave 3 x 16i8 into 48i8 (and store)
5259       {3, MVT::v32i8, 14}, // interleave 3 x 32i8 into 96i8 (and store)
5260       {3, MVT::v64i8, 26}, // interleave 3 x 64i8 into 96i8 (and store)
5261 
5262       {4, MVT::v8i8, 10},  // interleave 4 x 8i8  into 32i8  (and store)
5263       {4, MVT::v16i8, 11}, // interleave 4 x 16i8 into 64i8  (and store)
5264       {4, MVT::v32i8, 14}, // interleave 4 x 32i8 into 128i8 (and store)
5265       {4, MVT::v64i8, 24}  // interleave 4 x 32i8 into 256i8 (and store)
5266   };
5267 
5268   if (const auto *Entry =
5269           CostTableLookup(AVX512InterleavedStoreTbl, Factor, VT))
5270     return MaskCost + NumOfMemOps * MemOpCost + Entry->Cost;
5271   //If an entry does not exist, fallback to the default implementation.
5272 
5273   // There is no strided stores meanwhile. And store can't be folded in
5274   // shuffle.
5275   unsigned NumOfSources = Factor; // The number of values to be merged.
5276   InstructionCost ShuffleCost =
5277       getShuffleCost(TTI::SK_PermuteTwoSrc, SingleMemOpTy, None, 0, nullptr);
5278   unsigned NumOfShufflesPerStore = NumOfSources - 1;
5279 
5280   // The SK_MergeTwoSrc shuffle clobbers one of src operands.
5281   // We need additional instructions to keep sources.
5282   unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2;
5283   InstructionCost Cost =
5284       MaskCost +
5285       NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) +
5286       NumOfMoves;
5287   return Cost;
5288 }
5289 
5290 InstructionCost X86TTIImpl::getInterleavedMemoryOpCost(
5291     unsigned Opcode, Type *BaseTy, unsigned Factor, ArrayRef<unsigned> Indices,
5292     Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
5293     bool UseMaskForCond, bool UseMaskForGaps) {
5294   auto *VecTy = cast<FixedVectorType>(BaseTy);
5295 
5296   auto isSupportedOnAVX512 = [&](Type *VecTy, bool HasBW) {
5297     Type *EltTy = cast<VectorType>(VecTy)->getElementType();
5298     if (EltTy->isFloatTy() || EltTy->isDoubleTy() || EltTy->isIntegerTy(64) ||
5299         EltTy->isIntegerTy(32) || EltTy->isPointerTy())
5300       return true;
5301     if (EltTy->isIntegerTy(16) || EltTy->isIntegerTy(8) ||
5302         (!ST->useSoftFloat() && ST->hasFP16() && EltTy->isHalfTy()))
5303       return HasBW;
5304     return false;
5305   };
5306   if (ST->hasAVX512() && isSupportedOnAVX512(VecTy, ST->hasBWI()))
5307     return getInterleavedMemoryOpCostAVX512(
5308         Opcode, VecTy, Factor, Indices, Alignment,
5309         AddressSpace, CostKind, UseMaskForCond, UseMaskForGaps);
5310 
5311   if (UseMaskForCond || UseMaskForGaps)
5312     return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
5313                                              Alignment, AddressSpace, CostKind,
5314                                              UseMaskForCond, UseMaskForGaps);
5315 
5316   // Get estimation for interleaved load/store operations for SSE-AVX2.
5317   // As opposed to AVX-512, SSE-AVX2 do not have generic shuffles that allow
5318   // computing the cost using a generic formula as a function of generic
5319   // shuffles. We therefore use a lookup table instead, filled according to
5320   // the instruction sequences that codegen currently generates.
5321 
5322   // VecTy for interleave memop is <VF*Factor x Elt>.
5323   // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
5324   // VecTy = <12 x i32>.
5325   MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
5326 
5327   // This function can be called with VecTy=<6xi128>, Factor=3, in which case
5328   // the VF=2, while v2i128 is an unsupported MVT vector type
5329   // (see MachineValueType.h::getVectorVT()).
5330   if (!LegalVT.isVector())
5331     return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
5332                                              Alignment, AddressSpace, CostKind);
5333 
5334   unsigned VF = VecTy->getNumElements() / Factor;
5335   Type *ScalarTy = VecTy->getElementType();
5336   // Deduplicate entries, model floats/pointers as appropriately-sized integers.
5337   if (!ScalarTy->isIntegerTy())
5338     ScalarTy =
5339         Type::getIntNTy(ScalarTy->getContext(), DL.getTypeSizeInBits(ScalarTy));
5340 
5341   // Get the cost of all the memory operations.
5342   // FIXME: discount dead loads.
5343   InstructionCost MemOpCosts = getMemoryOpCost(
5344       Opcode, VecTy, MaybeAlign(Alignment), AddressSpace, CostKind);
5345 
5346   auto *VT = FixedVectorType::get(ScalarTy, VF);
5347   EVT ETy = TLI->getValueType(DL, VT);
5348   if (!ETy.isSimple())
5349     return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
5350                                              Alignment, AddressSpace, CostKind);
5351 
5352   // TODO: Complete for other data-types and strides.
5353   // Each combination of Stride, element bit width and VF results in a different
5354   // sequence; The cost tables are therefore accessed with:
5355   // Factor (stride) and VectorType=VFxiN.
5356   // The Cost accounts only for the shuffle sequence;
5357   // The cost of the loads/stores is accounted for separately.
5358   //
5359   static const CostTblEntry AVX2InterleavedLoadTbl[] = {
5360       {2, MVT::v2i8, 2},  // (load 4i8 and) deinterleave into 2 x 2i8
5361       {2, MVT::v4i8, 2},  // (load 8i8 and) deinterleave into 2 x 4i8
5362       {2, MVT::v8i8, 2},  // (load 16i8 and) deinterleave into 2 x 8i8
5363       {2, MVT::v16i8, 4}, // (load 32i8 and) deinterleave into 2 x 16i8
5364       {2, MVT::v32i8, 6}, // (load 64i8 and) deinterleave into 2 x 32i8
5365 
5366       {2, MVT::v8i16, 6},   // (load 16i16 and) deinterleave into 2 x 8i16
5367       {2, MVT::v16i16, 9},  // (load 32i16 and) deinterleave into 2 x 16i16
5368       {2, MVT::v32i16, 18}, // (load 64i16 and) deinterleave into 2 x 32i16
5369 
5370       {2, MVT::v8i32, 4},   // (load 16i32 and) deinterleave into 2 x 8i32
5371       {2, MVT::v16i32, 8},  // (load 32i32 and) deinterleave into 2 x 16i32
5372       {2, MVT::v32i32, 16}, // (load 64i32 and) deinterleave into 2 x 32i32
5373 
5374       {2, MVT::v4i64, 4},   // (load 8i64 and) deinterleave into 2 x 4i64
5375       {2, MVT::v8i64, 8},   // (load 16i64 and) deinterleave into 2 x 8i64
5376       {2, MVT::v16i64, 16}, // (load 32i64 and) deinterleave into 2 x 16i64
5377       {2, MVT::v32i64, 32}, // (load 64i64 and) deinterleave into 2 x 32i64
5378 
5379       {3, MVT::v2i8, 3},   // (load 6i8 and) deinterleave into 3 x 2i8
5380       {3, MVT::v4i8, 3},   // (load 12i8 and) deinterleave into 3 x 4i8
5381       {3, MVT::v8i8, 6},   // (load 24i8 and) deinterleave into 3 x 8i8
5382       {3, MVT::v16i8, 11}, // (load 48i8 and) deinterleave into 3 x 16i8
5383       {3, MVT::v32i8, 14}, // (load 96i8 and) deinterleave into 3 x 32i8
5384 
5385       {3, MVT::v2i16, 5},   // (load 6i16 and) deinterleave into 3 x 2i16
5386       {3, MVT::v4i16, 7},   // (load 12i16 and) deinterleave into 3 x 4i16
5387       {3, MVT::v8i16, 9},   // (load 24i16 and) deinterleave into 3 x 8i16
5388       {3, MVT::v16i16, 28}, // (load 48i16 and) deinterleave into 3 x 16i16
5389       {3, MVT::v32i16, 56}, // (load 96i16 and) deinterleave into 3 x 32i16
5390 
5391       {3, MVT::v2i32, 3},   // (load 6i32 and) deinterleave into 3 x 2i32
5392       {3, MVT::v4i32, 3},   // (load 12i32 and) deinterleave into 3 x 4i32
5393       {3, MVT::v8i32, 7},   // (load 24i32 and) deinterleave into 3 x 8i32
5394       {3, MVT::v16i32, 14}, // (load 48i32 and) deinterleave into 3 x 16i32
5395       {3, MVT::v32i32, 32}, // (load 96i32 and) deinterleave into 3 x 32i32
5396 
5397       {3, MVT::v2i64, 1},   // (load 6i64 and) deinterleave into 3 x 2i64
5398       {3, MVT::v4i64, 5},   // (load 12i64 and) deinterleave into 3 x 4i64
5399       {3, MVT::v8i64, 10},  // (load 24i64 and) deinterleave into 3 x 8i64
5400       {3, MVT::v16i64, 20}, // (load 48i64 and) deinterleave into 3 x 16i64
5401 
5402       {4, MVT::v2i8, 4},   // (load 8i8 and) deinterleave into 4 x 2i8
5403       {4, MVT::v4i8, 4},   // (load 16i8 and) deinterleave into 4 x 4i8
5404       {4, MVT::v8i8, 12},  // (load 32i8 and) deinterleave into 4 x 8i8
5405       {4, MVT::v16i8, 24}, // (load 64i8 and) deinterleave into 4 x 16i8
5406       {4, MVT::v32i8, 56}, // (load 128i8 and) deinterleave into 4 x 32i8
5407 
5408       {4, MVT::v2i16, 6},    // (load 8i16 and) deinterleave into 4 x 2i16
5409       {4, MVT::v4i16, 17},   // (load 16i16 and) deinterleave into 4 x 4i16
5410       {4, MVT::v8i16, 33},   // (load 32i16 and) deinterleave into 4 x 8i16
5411       {4, MVT::v16i16, 75},  // (load 64i16 and) deinterleave into 4 x 16i16
5412       {4, MVT::v32i16, 150}, // (load 128i16 and) deinterleave into 4 x 32i16
5413 
5414       {4, MVT::v2i32, 4},   // (load 8i32 and) deinterleave into 4 x 2i32
5415       {4, MVT::v4i32, 8},   // (load 16i32 and) deinterleave into 4 x 4i32
5416       {4, MVT::v8i32, 16},  // (load 32i32 and) deinterleave into 4 x 8i32
5417       {4, MVT::v16i32, 32}, // (load 64i32 and) deinterleave into 4 x 16i32
5418       {4, MVT::v32i32, 68}, // (load 128i32 and) deinterleave into 4 x 32i32
5419 
5420       {4, MVT::v2i64, 6},  // (load 8i64 and) deinterleave into 4 x 2i64
5421       {4, MVT::v4i64, 8},  // (load 16i64 and) deinterleave into 4 x 4i64
5422       {4, MVT::v8i64, 20}, // (load 32i64 and) deinterleave into 4 x 8i64
5423       {4, MVT::v16i64, 40}, // (load 64i64 and) deinterleave into 4 x 16i64
5424 
5425       {6, MVT::v2i8, 6},   // (load 12i8 and) deinterleave into 6 x 2i8
5426       {6, MVT::v4i8, 14},  // (load 24i8 and) deinterleave into 6 x 4i8
5427       {6, MVT::v8i8, 18},  // (load 48i8 and) deinterleave into 6 x 8i8
5428       {6, MVT::v16i8, 43}, // (load 96i8 and) deinterleave into 6 x 16i8
5429       {6, MVT::v32i8, 82}, // (load 192i8 and) deinterleave into 6 x 32i8
5430 
5431       {6, MVT::v2i16, 13},   // (load 12i16 and) deinterleave into 6 x 2i16
5432       {6, MVT::v4i16, 9},    // (load 24i16 and) deinterleave into 6 x 4i16
5433       {6, MVT::v8i16, 39},   // (load 48i16 and) deinterleave into 6 x 8i16
5434       {6, MVT::v16i16, 106}, // (load 96i16 and) deinterleave into 6 x 16i16
5435       {6, MVT::v32i16, 212}, // (load 192i16 and) deinterleave into 6 x 32i16
5436 
5437       {6, MVT::v2i32, 6},   // (load 12i32 and) deinterleave into 6 x 2i32
5438       {6, MVT::v4i32, 15},  // (load 24i32 and) deinterleave into 6 x 4i32
5439       {6, MVT::v8i32, 31},  // (load 48i32 and) deinterleave into 6 x 8i32
5440       {6, MVT::v16i32, 64}, // (load 96i32 and) deinterleave into 6 x 16i32
5441 
5442       {6, MVT::v2i64, 6},  // (load 12i64 and) deinterleave into 6 x 2i64
5443       {6, MVT::v4i64, 18}, // (load 24i64 and) deinterleave into 6 x 4i64
5444       {6, MVT::v8i64, 36}, // (load 48i64 and) deinterleave into 6 x 8i64
5445 
5446       {8, MVT::v8i32, 40} // (load 64i32 and) deinterleave into 8 x 8i32
5447   };
5448 
5449   static const CostTblEntry SSSE3InterleavedLoadTbl[] = {
5450       {2, MVT::v4i16, 2},   // (load 8i16 and) deinterleave into 2 x 4i16
5451   };
5452 
5453   static const CostTblEntry SSE2InterleavedLoadTbl[] = {
5454       {2, MVT::v2i16, 2},   // (load 4i16 and) deinterleave into 2 x 2i16
5455       {2, MVT::v4i16, 7},   // (load 8i16 and) deinterleave into 2 x 4i16
5456 
5457       {2, MVT::v2i32, 2},   // (load 4i32 and) deinterleave into 2 x 2i32
5458       {2, MVT::v4i32, 2},   // (load 8i32 and) deinterleave into 2 x 4i32
5459 
5460       {2, MVT::v2i64, 2},   // (load 4i64 and) deinterleave into 2 x 2i64
5461   };
5462 
5463   static const CostTblEntry AVX2InterleavedStoreTbl[] = {
5464       {2, MVT::v16i8, 3}, // interleave 2 x 16i8 into 32i8 (and store)
5465       {2, MVT::v32i8, 4}, // interleave 2 x 32i8 into 64i8 (and store)
5466 
5467       {2, MVT::v8i16, 3},  // interleave 2 x 8i16 into 16i16 (and store)
5468       {2, MVT::v16i16, 4}, // interleave 2 x 16i16 into 32i16 (and store)
5469       {2, MVT::v32i16, 8}, // interleave 2 x 32i16 into 64i16 (and store)
5470 
5471       {2, MVT::v4i32, 2},   // interleave 2 x 4i32 into 8i32 (and store)
5472       {2, MVT::v8i32, 4},   // interleave 2 x 8i32 into 16i32 (and store)
5473       {2, MVT::v16i32, 8},  // interleave 2 x 16i32 into 32i32 (and store)
5474       {2, MVT::v32i32, 16}, // interleave 2 x 32i32 into 64i32 (and store)
5475 
5476       {2, MVT::v2i64, 2},   // interleave 2 x 2i64 into 4i64 (and store)
5477       {2, MVT::v4i64, 4},   // interleave 2 x 4i64 into 8i64 (and store)
5478       {2, MVT::v8i64, 8},   // interleave 2 x 8i64 into 16i64 (and store)
5479       {2, MVT::v16i64, 16}, // interleave 2 x 16i64 into 32i64 (and store)
5480       {2, MVT::v32i64, 32}, // interleave 2 x 32i64 into 64i64 (and store)
5481 
5482       {3, MVT::v2i8, 4},   // interleave 3 x 2i8 into 6i8 (and store)
5483       {3, MVT::v4i8, 4},   // interleave 3 x 4i8 into 12i8 (and store)
5484       {3, MVT::v8i8, 6},   // interleave 3 x 8i8 into 24i8 (and store)
5485       {3, MVT::v16i8, 11}, // interleave 3 x 16i8 into 48i8 (and store)
5486       {3, MVT::v32i8, 13}, // interleave 3 x 32i8 into 96i8 (and store)
5487 
5488       {3, MVT::v2i16, 4},   // interleave 3 x 2i16 into 6i16 (and store)
5489       {3, MVT::v4i16, 6},   // interleave 3 x 4i16 into 12i16 (and store)
5490       {3, MVT::v8i16, 12},  // interleave 3 x 8i16 into 24i16 (and store)
5491       {3, MVT::v16i16, 27}, // interleave 3 x 16i16 into 48i16 (and store)
5492       {3, MVT::v32i16, 54}, // interleave 3 x 32i16 into 96i16 (and store)
5493 
5494       {3, MVT::v2i32, 4},   // interleave 3 x 2i32 into 6i32 (and store)
5495       {3, MVT::v4i32, 5},   // interleave 3 x 4i32 into 12i32 (and store)
5496       {3, MVT::v8i32, 11},  // interleave 3 x 8i32 into 24i32 (and store)
5497       {3, MVT::v16i32, 22}, // interleave 3 x 16i32 into 48i32 (and store)
5498       {3, MVT::v32i32, 48}, // interleave 3 x 32i32 into 96i32 (and store)
5499 
5500       {3, MVT::v2i64, 4},   // interleave 3 x 2i64 into 6i64 (and store)
5501       {3, MVT::v4i64, 6},   // interleave 3 x 4i64 into 12i64 (and store)
5502       {3, MVT::v8i64, 12},  // interleave 3 x 8i64 into 24i64 (and store)
5503       {3, MVT::v16i64, 24}, // interleave 3 x 16i64 into 48i64 (and store)
5504 
5505       {4, MVT::v2i8, 4},   // interleave 4 x 2i8 into 8i8 (and store)
5506       {4, MVT::v4i8, 4},   // interleave 4 x 4i8 into 16i8 (and store)
5507       {4, MVT::v8i8, 4},   // interleave 4 x 8i8 into 32i8 (and store)
5508       {4, MVT::v16i8, 8},  // interleave 4 x 16i8 into 64i8 (and store)
5509       {4, MVT::v32i8, 12}, // interleave 4 x 32i8 into 128i8 (and store)
5510 
5511       {4, MVT::v2i16, 2},   // interleave 4 x 2i16 into 8i16 (and store)
5512       {4, MVT::v4i16, 6},   // interleave 4 x 4i16 into 16i16 (and store)
5513       {4, MVT::v8i16, 10},  // interleave 4 x 8i16 into 32i16 (and store)
5514       {4, MVT::v16i16, 32}, // interleave 4 x 16i16 into 64i16 (and store)
5515       {4, MVT::v32i16, 64}, // interleave 4 x 32i16 into 128i16 (and store)
5516 
5517       {4, MVT::v2i32, 5},   // interleave 4 x 2i32 into 8i32 (and store)
5518       {4, MVT::v4i32, 6},   // interleave 4 x 4i32 into 16i32 (and store)
5519       {4, MVT::v8i32, 16},  // interleave 4 x 8i32 into 32i32 (and store)
5520       {4, MVT::v16i32, 32}, // interleave 4 x 16i32 into 64i32 (and store)
5521       {4, MVT::v32i32, 64}, // interleave 4 x 32i32 into 128i32 (and store)
5522 
5523       {4, MVT::v2i64, 6},  // interleave 4 x 2i64 into 8i64 (and store)
5524       {4, MVT::v4i64, 8},  // interleave 4 x 4i64 into 16i64 (and store)
5525       {4, MVT::v8i64, 20}, // interleave 4 x 8i64 into 32i64 (and store)
5526       {4, MVT::v16i64, 40}, // interleave 4 x 16i64 into 64i64 (and store)
5527 
5528       {6, MVT::v2i8, 7},   // interleave 6 x 2i8 into 12i8 (and store)
5529       {6, MVT::v4i8, 9},   // interleave 6 x 4i8 into 24i8 (and store)
5530       {6, MVT::v8i8, 16},  // interleave 6 x 8i8 into 48i8 (and store)
5531       {6, MVT::v16i8, 27}, // interleave 6 x 16i8 into 96i8 (and store)
5532       {6, MVT::v32i8, 90}, // interleave 6 x 32i8 into 192i8 (and store)
5533 
5534       {6, MVT::v2i16, 10},  // interleave 6 x 2i16 into 12i16 (and store)
5535       {6, MVT::v4i16, 15},  // interleave 6 x 4i16 into 24i16 (and store)
5536       {6, MVT::v8i16, 21},  // interleave 6 x 8i16 into 48i16 (and store)
5537       {6, MVT::v16i16, 58}, // interleave 6 x 16i16 into 96i16 (and store)
5538       {6, MVT::v32i16, 90}, // interleave 6 x 32i16 into 192i16 (and store)
5539 
5540       {6, MVT::v2i32, 9},   // interleave 6 x 2i32 into 12i32 (and store)
5541       {6, MVT::v4i32, 12},  // interleave 6 x 4i32 into 24i32 (and store)
5542       {6, MVT::v8i32, 33},  // interleave 6 x 8i32 into 48i32 (and store)
5543       {6, MVT::v16i32, 66}, // interleave 6 x 16i32 into 96i32 (and store)
5544 
5545       {6, MVT::v2i64, 8},  // interleave 6 x 2i64 into 12i64 (and store)
5546       {6, MVT::v4i64, 15}, // interleave 6 x 4i64 into 24i64 (and store)
5547       {6, MVT::v8i64, 30}, // interleave 6 x 8i64 into 48i64 (and store)
5548   };
5549 
5550   static const CostTblEntry SSE2InterleavedStoreTbl[] = {
5551       {2, MVT::v2i8, 1},   // interleave 2 x 2i8 into 4i8 (and store)
5552       {2, MVT::v4i8, 1},   // interleave 2 x 4i8 into 8i8 (and store)
5553       {2, MVT::v8i8, 1},   // interleave 2 x 8i8 into 16i8 (and store)
5554 
5555       {2, MVT::v2i16, 1},  // interleave 2 x 2i16 into 4i16 (and store)
5556       {2, MVT::v4i16, 1},  // interleave 2 x 4i16 into 8i16 (and store)
5557 
5558       {2, MVT::v2i32, 1},  // interleave 2 x 2i32 into 4i32 (and store)
5559   };
5560 
5561   if (Opcode == Instruction::Load) {
5562     auto GetDiscountedCost = [Factor, NumMembers = Indices.size(),
5563                               MemOpCosts](const CostTblEntry *Entry) {
5564       // NOTE: this is just an approximation!
5565       //       It can over/under -estimate the cost!
5566       return MemOpCosts + divideCeil(NumMembers * Entry->Cost, Factor);
5567     };
5568 
5569     if (ST->hasAVX2())
5570       if (const auto *Entry = CostTableLookup(AVX2InterleavedLoadTbl, Factor,
5571                                               ETy.getSimpleVT()))
5572         return GetDiscountedCost(Entry);
5573 
5574     if (ST->hasSSSE3())
5575       if (const auto *Entry = CostTableLookup(SSSE3InterleavedLoadTbl, Factor,
5576                                               ETy.getSimpleVT()))
5577         return GetDiscountedCost(Entry);
5578 
5579     if (ST->hasSSE2())
5580       if (const auto *Entry = CostTableLookup(SSE2InterleavedLoadTbl, Factor,
5581                                               ETy.getSimpleVT()))
5582         return GetDiscountedCost(Entry);
5583   } else {
5584     assert(Opcode == Instruction::Store &&
5585            "Expected Store Instruction at this point");
5586     assert((!Indices.size() || Indices.size() == Factor) &&
5587            "Interleaved store only supports fully-interleaved groups.");
5588     if (ST->hasAVX2())
5589       if (const auto *Entry = CostTableLookup(AVX2InterleavedStoreTbl, Factor,
5590                                               ETy.getSimpleVT()))
5591         return MemOpCosts + Entry->Cost;
5592 
5593     if (ST->hasSSE2())
5594       if (const auto *Entry = CostTableLookup(SSE2InterleavedStoreTbl, Factor,
5595                                               ETy.getSimpleVT()))
5596         return MemOpCosts + Entry->Cost;
5597   }
5598 
5599   return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
5600                                            Alignment, AddressSpace, CostKind,
5601                                            UseMaskForCond, UseMaskForGaps);
5602 }
5603