1 //===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements a TargetTransformInfo analysis pass specific to the
10 /// X86 target machine. It uses the target's detailed information to provide
11 /// more precise answers to certain TTI queries, while letting the target
12 /// independent and default TTI implementations handle the rest.
13 ///
14 //===----------------------------------------------------------------------===//
15 /// About Cost Model numbers used below it's necessary to say the following:
16 /// the numbers correspond to some "generic" X86 CPU instead of usage of
17 /// concrete CPU model. Usually the numbers correspond to CPU where the feature
18 /// apeared at the first time. For example, if we do Subtarget.hasSSE42() in
19 /// the lookups below the cost is based on Nehalem as that was the first CPU
20 /// to support that feature level and thus has most likely the worst case cost.
21 /// Some examples of other technologies/CPUs:
22 ///   SSE 3   - Pentium4 / Athlon64
23 ///   SSE 4.1 - Penryn
24 ///   SSE 4.2 - Nehalem
25 ///   AVX     - Sandy Bridge
26 ///   AVX2    - Haswell
27 ///   AVX-512 - Xeon Phi / Skylake
28 /// And some examples of instruction target dependent costs (latency)
29 ///                   divss     sqrtss          rsqrtss
30 ///   AMD K7            11-16     19              3
31 ///   Piledriver        9-24      13-15           5
32 ///   Jaguar            14        16              2
33 ///   Pentium II,III    18        30              2
34 ///   Nehalem           7-14      7-18            3
35 ///   Haswell           10-13     11              5
36 /// TODO: Develop and implement  the target dependent cost model and
37 /// specialize cost numbers for different Cost Model Targets such as throughput,
38 /// code size, latency and uop count.
39 //===----------------------------------------------------------------------===//
40 
41 #include "X86TargetTransformInfo.h"
42 #include "llvm/Analysis/TargetTransformInfo.h"
43 #include "llvm/CodeGen/BasicTTIImpl.h"
44 #include "llvm/CodeGen/CostTable.h"
45 #include "llvm/CodeGen/TargetLowering.h"
46 #include "llvm/IR/InstIterator.h"
47 #include "llvm/IR/IntrinsicInst.h"
48 #include "llvm/Support/Debug.h"
49 
50 using namespace llvm;
51 
52 #define DEBUG_TYPE "x86tti"
53 
54 //===----------------------------------------------------------------------===//
55 //
56 // X86 cost model.
57 //
58 //===----------------------------------------------------------------------===//
59 
60 TargetTransformInfo::PopcntSupportKind
61 X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
62   assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
63   // TODO: Currently the __builtin_popcount() implementation using SSE3
64   //   instructions is inefficient. Once the problem is fixed, we should
65   //   call ST->hasSSE3() instead of ST->hasPOPCNT().
66   return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
67 }
68 
69 llvm::Optional<unsigned> X86TTIImpl::getCacheSize(
70   TargetTransformInfo::CacheLevel Level) const {
71   switch (Level) {
72   case TargetTransformInfo::CacheLevel::L1D:
73     //   - Penryn
74     //   - Nehalem
75     //   - Westmere
76     //   - Sandy Bridge
77     //   - Ivy Bridge
78     //   - Haswell
79     //   - Broadwell
80     //   - Skylake
81     //   - Kabylake
82     return 32 * 1024;  //  32 KByte
83   case TargetTransformInfo::CacheLevel::L2D:
84     //   - Penryn
85     //   - Nehalem
86     //   - Westmere
87     //   - Sandy Bridge
88     //   - Ivy Bridge
89     //   - Haswell
90     //   - Broadwell
91     //   - Skylake
92     //   - Kabylake
93     return 256 * 1024; // 256 KByte
94   }
95 
96   llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
97 }
98 
99 llvm::Optional<unsigned> X86TTIImpl::getCacheAssociativity(
100   TargetTransformInfo::CacheLevel Level) const {
101   //   - Penryn
102   //   - Nehalem
103   //   - Westmere
104   //   - Sandy Bridge
105   //   - Ivy Bridge
106   //   - Haswell
107   //   - Broadwell
108   //   - Skylake
109   //   - Kabylake
110   switch (Level) {
111   case TargetTransformInfo::CacheLevel::L1D:
112     LLVM_FALLTHROUGH;
113   case TargetTransformInfo::CacheLevel::L2D:
114     return 8;
115   }
116 
117   llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
118 }
119 
120 unsigned X86TTIImpl::getNumberOfRegisters(unsigned ClassID) const {
121   bool Vector = (ClassID == 1);
122   if (Vector && !ST->hasSSE1())
123     return 0;
124 
125   if (ST->is64Bit()) {
126     if (Vector && ST->hasAVX512())
127       return 32;
128     return 16;
129   }
130   return 8;
131 }
132 
133 TypeSize
134 X86TTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
135   unsigned PreferVectorWidth = ST->getPreferVectorWidth();
136   switch (K) {
137   case TargetTransformInfo::RGK_Scalar:
138     return TypeSize::getFixed(ST->is64Bit() ? 64 : 32);
139   case TargetTransformInfo::RGK_FixedWidthVector:
140     if (ST->hasAVX512() && PreferVectorWidth >= 512)
141       return TypeSize::getFixed(512);
142     if (ST->hasAVX() && PreferVectorWidth >= 256)
143       return TypeSize::getFixed(256);
144     if (ST->hasSSE1() && PreferVectorWidth >= 128)
145       return TypeSize::getFixed(128);
146     return TypeSize::getFixed(0);
147   case TargetTransformInfo::RGK_ScalableVector:
148     return TypeSize::getScalable(0);
149   }
150 
151   llvm_unreachable("Unsupported register kind");
152 }
153 
154 unsigned X86TTIImpl::getLoadStoreVecRegBitWidth(unsigned) const {
155   return getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
156       .getFixedSize();
157 }
158 
159 unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
160   // If the loop will not be vectorized, don't interleave the loop.
161   // Let regular unroll to unroll the loop, which saves the overflow
162   // check and memory check cost.
163   if (VF == 1)
164     return 1;
165 
166   if (ST->isAtom())
167     return 1;
168 
169   // Sandybridge and Haswell have multiple execution ports and pipelined
170   // vector units.
171   if (ST->hasAVX())
172     return 4;
173 
174   return 2;
175 }
176 
177 InstructionCost X86TTIImpl::getArithmeticInstrCost(
178     unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
179     TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
180     TTI::OperandValueProperties Opd1PropInfo,
181     TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
182     const Instruction *CxtI) {
183   // TODO: Handle more cost kinds.
184   if (CostKind != TTI::TCK_RecipThroughput)
185     return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
186                                          Op2Info, Opd1PropInfo,
187                                          Opd2PropInfo, Args, CxtI);
188 
189   // vXi8 multiplications are always promoted to vXi16.
190   if (Opcode == Instruction::Mul && Ty->isVectorTy() &&
191       Ty->getScalarSizeInBits() == 8) {
192     Type *WideVecTy =
193         VectorType::getExtendedElementVectorType(cast<VectorType>(Ty));
194     return getCastInstrCost(Instruction::ZExt, WideVecTy, Ty,
195                             TargetTransformInfo::CastContextHint::None,
196                             CostKind) +
197            getCastInstrCost(Instruction::Trunc, Ty, WideVecTy,
198                             TargetTransformInfo::CastContextHint::None,
199                             CostKind) +
200            getArithmeticInstrCost(Opcode, WideVecTy, CostKind, Op1Info, Op2Info,
201                                   Opd1PropInfo, Opd2PropInfo);
202   }
203 
204   // Legalize the type.
205   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
206 
207   int ISD = TLI->InstructionOpcodeToISD(Opcode);
208   assert(ISD && "Invalid opcode");
209 
210   if (ISD == ISD::MUL && Args.size() == 2 && LT.second.isVector() &&
211       LT.second.getScalarType() == MVT::i32) {
212     // Check if the operands can be represented as a smaller datatype.
213     bool Op1Signed = false, Op2Signed = false;
214     unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
215     unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);
216     unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);
217 
218     // If both are representable as i15 and at least one is constant,
219     // zero-extended, or sign-extended from vXi16 (or less pre-SSE41) then we
220     // can treat this as PMADDWD which has the same costs as a vXi16 multiply.
221     if (OpMinSize <= 15 && !ST->isPMADDWDSlow()) {
222       bool Op1Constant =
223           isa<ConstantDataVector>(Args[0]) || isa<ConstantVector>(Args[0]);
224       bool Op2Constant =
225           isa<ConstantDataVector>(Args[1]) || isa<ConstantVector>(Args[1]);
226       bool Op1Sext = isa<SExtInst>(Args[0]) &&
227                      (Op1MinSize == 15 || (Op1MinSize < 15 && !ST->hasSSE41()));
228       bool Op2Sext = isa<SExtInst>(Args[1]) &&
229                      (Op2MinSize == 15 || (Op2MinSize < 15 && !ST->hasSSE41()));
230 
231       bool IsZeroExtended = !Op1Signed || !Op2Signed;
232       bool IsConstant = Op1Constant || Op2Constant;
233       bool IsSext = Op1Sext || Op2Sext;
234       if (IsConstant || IsZeroExtended || IsSext)
235         LT.second =
236             MVT::getVectorVT(MVT::i16, 2 * LT.second.getVectorNumElements());
237     }
238   }
239 
240   // Vector multiply by pow2 will be simplified to shifts.
241   if (ISD == ISD::MUL &&
242       (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
243        Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
244       Opd2PropInfo == TargetTransformInfo::OP_PowerOf2)
245     return getArithmeticInstrCost(Instruction::Shl, Ty, CostKind, Op1Info,
246                                   Op2Info, TargetTransformInfo::OP_None,
247                                   TargetTransformInfo::OP_None);
248 
249   // On X86, vector signed division by constants power-of-two are
250   // normally expanded to the sequence SRA + SRL + ADD + SRA.
251   // The OperandValue properties may not be the same as that of the previous
252   // operation; conservatively assume OP_None.
253   if ((ISD == ISD::SDIV || ISD == ISD::SREM) &&
254       (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
255        Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
256       Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
257     InstructionCost Cost =
258         2 * getArithmeticInstrCost(Instruction::AShr, Ty, CostKind, Op1Info,
259                                    Op2Info, TargetTransformInfo::OP_None,
260                                    TargetTransformInfo::OP_None);
261     Cost += getArithmeticInstrCost(Instruction::LShr, Ty, CostKind, Op1Info,
262                                    Op2Info, TargetTransformInfo::OP_None,
263                                    TargetTransformInfo::OP_None);
264     Cost += getArithmeticInstrCost(Instruction::Add, Ty, CostKind, Op1Info,
265                                    Op2Info, TargetTransformInfo::OP_None,
266                                    TargetTransformInfo::OP_None);
267 
268     if (ISD == ISD::SREM) {
269       // For SREM: (X % C) is the equivalent of (X - (X/C)*C)
270       Cost += getArithmeticInstrCost(Instruction::Mul, Ty, CostKind, Op1Info,
271                                      Op2Info);
272       Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind, Op1Info,
273                                      Op2Info);
274     }
275 
276     return Cost;
277   }
278 
279   // Vector unsigned division/remainder will be simplified to shifts/masks.
280   if ((ISD == ISD::UDIV || ISD == ISD::UREM) &&
281       (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
282        Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
283       Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
284     if (ISD == ISD::UDIV)
285       return getArithmeticInstrCost(Instruction::LShr, Ty, CostKind, Op1Info,
286                                     Op2Info, TargetTransformInfo::OP_None,
287                                     TargetTransformInfo::OP_None);
288     // UREM
289     return getArithmeticInstrCost(Instruction::And, Ty, CostKind, Op1Info,
290                                   Op2Info, TargetTransformInfo::OP_None,
291                                   TargetTransformInfo::OP_None);
292   }
293 
294   static const CostTblEntry GLMCostTable[] = {
295     { ISD::FDIV,  MVT::f32,   18 }, // divss
296     { ISD::FDIV,  MVT::v4f32, 35 }, // divps
297     { ISD::FDIV,  MVT::f64,   33 }, // divsd
298     { ISD::FDIV,  MVT::v2f64, 65 }, // divpd
299   };
300 
301   if (ST->useGLMDivSqrtCosts())
302     if (const auto *Entry = CostTableLookup(GLMCostTable, ISD,
303                                             LT.second))
304       return LT.first * Entry->Cost;
305 
306   static const CostTblEntry SLMCostTable[] = {
307     { ISD::MUL,   MVT::v4i32, 11 }, // pmulld
308     { ISD::MUL,   MVT::v8i16, 2  }, // pmullw
309     { ISD::FMUL,  MVT::f64,   2  }, // mulsd
310     { ISD::FMUL,  MVT::v2f64, 4  }, // mulpd
311     { ISD::FMUL,  MVT::v4f32, 2  }, // mulps
312     { ISD::FDIV,  MVT::f32,   17 }, // divss
313     { ISD::FDIV,  MVT::v4f32, 39 }, // divps
314     { ISD::FDIV,  MVT::f64,   32 }, // divsd
315     { ISD::FDIV,  MVT::v2f64, 69 }, // divpd
316     { ISD::FADD,  MVT::v2f64, 2  }, // addpd
317     { ISD::FSUB,  MVT::v2f64, 2  }, // subpd
318     // v2i64/v4i64 mul is custom lowered as a series of long:
319     // multiplies(3), shifts(3) and adds(2)
320     // slm muldq version throughput is 2 and addq throughput 4
321     // thus: 3X2 (muldq throughput) + 3X1 (shift throughput) +
322     //       3X4 (addq throughput) = 17
323     { ISD::MUL,   MVT::v2i64, 17 },
324     // slm addq\subq throughput is 4
325     { ISD::ADD,   MVT::v2i64, 4  },
326     { ISD::SUB,   MVT::v2i64, 4  },
327   };
328 
329   if (ST->useSLMArithCosts()) {
330     if (Args.size() == 2 && ISD == ISD::MUL && LT.second == MVT::v4i32) {
331       // Check if the operands can be shrinked into a smaller datatype.
332       // TODO: Merge this into generiic vXi32 MUL patterns above.
333       bool Op1Signed = false;
334       unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
335       bool Op2Signed = false;
336       unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);
337 
338       bool SignedMode = Op1Signed || Op2Signed;
339       unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);
340 
341       if (OpMinSize <= 7)
342         return LT.first * 3; // pmullw/sext
343       if (!SignedMode && OpMinSize <= 8)
344         return LT.first * 3; // pmullw/zext
345       if (OpMinSize <= 15)
346         return LT.first * 5; // pmullw/pmulhw/pshuf
347       if (!SignedMode && OpMinSize <= 16)
348         return LT.first * 5; // pmullw/pmulhw/pshuf
349     }
350 
351     if (const auto *Entry = CostTableLookup(SLMCostTable, ISD,
352                                             LT.second)) {
353       return LT.first * Entry->Cost;
354     }
355   }
356 
357   static const CostTblEntry AVX512BWUniformConstCostTable[] = {
358     { ISD::SHL,  MVT::v64i8,   2 }, // psllw + pand.
359     { ISD::SRL,  MVT::v64i8,   2 }, // psrlw + pand.
360     { ISD::SRA,  MVT::v64i8,   4 }, // psrlw, pand, pxor, psubb.
361   };
362 
363   if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
364       ST->hasBWI()) {
365     if (const auto *Entry = CostTableLookup(AVX512BWUniformConstCostTable, ISD,
366                                             LT.second))
367       return LT.first * Entry->Cost;
368   }
369 
370   static const CostTblEntry AVX512UniformConstCostTable[] = {
371     { ISD::SRA,  MVT::v2i64,   1 },
372     { ISD::SRA,  MVT::v4i64,   1 },
373     { ISD::SRA,  MVT::v8i64,   1 },
374 
375     { ISD::SHL,  MVT::v64i8,   4 }, // psllw + pand.
376     { ISD::SRL,  MVT::v64i8,   4 }, // psrlw + pand.
377     { ISD::SRA,  MVT::v64i8,   8 }, // psrlw, pand, pxor, psubb.
378 
379     { ISD::SDIV, MVT::v16i32,  6 }, // pmuludq sequence
380     { ISD::SREM, MVT::v16i32,  8 }, // pmuludq+mul+sub sequence
381     { ISD::UDIV, MVT::v16i32,  5 }, // pmuludq sequence
382     { ISD::UREM, MVT::v16i32,  7 }, // pmuludq+mul+sub sequence
383   };
384 
385   if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
386       ST->hasAVX512()) {
387     if (const auto *Entry = CostTableLookup(AVX512UniformConstCostTable, ISD,
388                                             LT.second))
389       return LT.first * Entry->Cost;
390   }
391 
392   static const CostTblEntry AVX2UniformConstCostTable[] = {
393     { ISD::SHL,  MVT::v32i8,   2 }, // psllw + pand.
394     { ISD::SRL,  MVT::v32i8,   2 }, // psrlw + pand.
395     { ISD::SRA,  MVT::v32i8,   4 }, // psrlw, pand, pxor, psubb.
396 
397     { ISD::SRA,  MVT::v4i64,   4 }, // 2 x psrad + shuffle.
398 
399     { ISD::SDIV, MVT::v8i32,   6 }, // pmuludq sequence
400     { ISD::SREM, MVT::v8i32,   8 }, // pmuludq+mul+sub sequence
401     { ISD::UDIV, MVT::v8i32,   5 }, // pmuludq sequence
402     { ISD::UREM, MVT::v8i32,   7 }, // pmuludq+mul+sub sequence
403   };
404 
405   if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
406       ST->hasAVX2()) {
407     if (const auto *Entry = CostTableLookup(AVX2UniformConstCostTable, ISD,
408                                             LT.second))
409       return LT.first * Entry->Cost;
410   }
411 
412   static const CostTblEntry SSE2UniformConstCostTable[] = {
413     { ISD::SHL,  MVT::v16i8,     2 }, // psllw + pand.
414     { ISD::SRL,  MVT::v16i8,     2 }, // psrlw + pand.
415     { ISD::SRA,  MVT::v16i8,     4 }, // psrlw, pand, pxor, psubb.
416 
417     { ISD::SHL,  MVT::v32i8,   4+2 }, // 2*(psllw + pand) + split.
418     { ISD::SRL,  MVT::v32i8,   4+2 }, // 2*(psrlw + pand) + split.
419     { ISD::SRA,  MVT::v32i8,   8+2 }, // 2*(psrlw, pand, pxor, psubb) + split.
420 
421     { ISD::SDIV, MVT::v8i32,  12+2 }, // 2*pmuludq sequence + split.
422     { ISD::SREM, MVT::v8i32,  16+2 }, // 2*pmuludq+mul+sub sequence + split.
423     { ISD::SDIV, MVT::v4i32,     6 }, // pmuludq sequence
424     { ISD::SREM, MVT::v4i32,     8 }, // pmuludq+mul+sub sequence
425     { ISD::UDIV, MVT::v8i32,  10+2 }, // 2*pmuludq sequence + split.
426     { ISD::UREM, MVT::v8i32,  14+2 }, // 2*pmuludq+mul+sub sequence + split.
427     { ISD::UDIV, MVT::v4i32,     5 }, // pmuludq sequence
428     { ISD::UREM, MVT::v4i32,     7 }, // pmuludq+mul+sub sequence
429   };
430 
431   // XOP has faster vXi8 shifts.
432   if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
433       ST->hasSSE2() && !ST->hasXOP()) {
434     if (const auto *Entry =
435             CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second))
436       return LT.first * Entry->Cost;
437   }
438 
439   static const CostTblEntry AVX512BWConstCostTable[] = {
440     { ISD::SDIV, MVT::v64i8,  14 }, // 2*ext+2*pmulhw sequence
441     { ISD::SREM, MVT::v64i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
442     { ISD::UDIV, MVT::v64i8,  14 }, // 2*ext+2*pmulhw sequence
443     { ISD::UREM, MVT::v64i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
444     { ISD::SDIV, MVT::v32i16,  6 }, // vpmulhw sequence
445     { ISD::SREM, MVT::v32i16,  8 }, // vpmulhw+mul+sub sequence
446     { ISD::UDIV, MVT::v32i16,  6 }, // vpmulhuw sequence
447     { ISD::UREM, MVT::v32i16,  8 }, // vpmulhuw+mul+sub sequence
448   };
449 
450   if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
451        Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
452       ST->hasBWI()) {
453     if (const auto *Entry =
454             CostTableLookup(AVX512BWConstCostTable, ISD, LT.second))
455       return LT.first * Entry->Cost;
456   }
457 
458   static const CostTblEntry AVX512ConstCostTable[] = {
459     { ISD::SDIV, MVT::v16i32, 15 }, // vpmuldq sequence
460     { ISD::SREM, MVT::v16i32, 17 }, // vpmuldq+mul+sub sequence
461     { ISD::UDIV, MVT::v16i32, 15 }, // vpmuludq sequence
462     { ISD::UREM, MVT::v16i32, 17 }, // vpmuludq+mul+sub sequence
463     { ISD::SDIV, MVT::v64i8,  28 }, // 4*ext+4*pmulhw sequence
464     { ISD::SREM, MVT::v64i8,  32 }, // 4*ext+4*pmulhw+mul+sub sequence
465     { ISD::UDIV, MVT::v64i8,  28 }, // 4*ext+4*pmulhw sequence
466     { ISD::UREM, MVT::v64i8,  32 }, // 4*ext+4*pmulhw+mul+sub sequence
467     { ISD::SDIV, MVT::v32i16, 12 }, // 2*vpmulhw sequence
468     { ISD::SREM, MVT::v32i16, 16 }, // 2*vpmulhw+mul+sub sequence
469     { ISD::UDIV, MVT::v32i16, 12 }, // 2*vpmulhuw sequence
470     { ISD::UREM, MVT::v32i16, 16 }, // 2*vpmulhuw+mul+sub sequence
471   };
472 
473   if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
474        Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
475       ST->hasAVX512()) {
476     if (const auto *Entry =
477             CostTableLookup(AVX512ConstCostTable, ISD, LT.second))
478       return LT.first * Entry->Cost;
479   }
480 
481   static const CostTblEntry AVX2ConstCostTable[] = {
482     { ISD::SDIV, MVT::v32i8,  14 }, // 2*ext+2*pmulhw sequence
483     { ISD::SREM, MVT::v32i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
484     { ISD::UDIV, MVT::v32i8,  14 }, // 2*ext+2*pmulhw sequence
485     { ISD::UREM, MVT::v32i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
486     { ISD::SDIV, MVT::v16i16,  6 }, // vpmulhw sequence
487     { ISD::SREM, MVT::v16i16,  8 }, // vpmulhw+mul+sub sequence
488     { ISD::UDIV, MVT::v16i16,  6 }, // vpmulhuw sequence
489     { ISD::UREM, MVT::v16i16,  8 }, // vpmulhuw+mul+sub sequence
490     { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
491     { ISD::SREM, MVT::v8i32,  19 }, // vpmuldq+mul+sub sequence
492     { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
493     { ISD::UREM, MVT::v8i32,  19 }, // vpmuludq+mul+sub sequence
494   };
495 
496   if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
497        Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
498       ST->hasAVX2()) {
499     if (const auto *Entry = CostTableLookup(AVX2ConstCostTable, ISD, LT.second))
500       return LT.first * Entry->Cost;
501   }
502 
503   static const CostTblEntry SSE2ConstCostTable[] = {
504     { ISD::SDIV, MVT::v32i8,  28+2 }, // 4*ext+4*pmulhw sequence + split.
505     { ISD::SREM, MVT::v32i8,  32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
506     { ISD::SDIV, MVT::v16i8,    14 }, // 2*ext+2*pmulhw sequence
507     { ISD::SREM, MVT::v16i8,    16 }, // 2*ext+2*pmulhw+mul+sub sequence
508     { ISD::UDIV, MVT::v32i8,  28+2 }, // 4*ext+4*pmulhw sequence + split.
509     { ISD::UREM, MVT::v32i8,  32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
510     { ISD::UDIV, MVT::v16i8,    14 }, // 2*ext+2*pmulhw sequence
511     { ISD::UREM, MVT::v16i8,    16 }, // 2*ext+2*pmulhw+mul+sub sequence
512     { ISD::SDIV, MVT::v16i16, 12+2 }, // 2*pmulhw sequence + split.
513     { ISD::SREM, MVT::v16i16, 16+2 }, // 2*pmulhw+mul+sub sequence + split.
514     { ISD::SDIV, MVT::v8i16,     6 }, // pmulhw sequence
515     { ISD::SREM, MVT::v8i16,     8 }, // pmulhw+mul+sub sequence
516     { ISD::UDIV, MVT::v16i16, 12+2 }, // 2*pmulhuw sequence + split.
517     { ISD::UREM, MVT::v16i16, 16+2 }, // 2*pmulhuw+mul+sub sequence + split.
518     { ISD::UDIV, MVT::v8i16,     6 }, // pmulhuw sequence
519     { ISD::UREM, MVT::v8i16,     8 }, // pmulhuw+mul+sub sequence
520     { ISD::SDIV, MVT::v8i32,  38+2 }, // 2*pmuludq sequence + split.
521     { ISD::SREM, MVT::v8i32,  48+2 }, // 2*pmuludq+mul+sub sequence + split.
522     { ISD::SDIV, MVT::v4i32,    19 }, // pmuludq sequence
523     { ISD::SREM, MVT::v4i32,    24 }, // pmuludq+mul+sub sequence
524     { ISD::UDIV, MVT::v8i32,  30+2 }, // 2*pmuludq sequence + split.
525     { ISD::UREM, MVT::v8i32,  40+2 }, // 2*pmuludq+mul+sub sequence + split.
526     { ISD::UDIV, MVT::v4i32,    15 }, // pmuludq sequence
527     { ISD::UREM, MVT::v4i32,    20 }, // pmuludq+mul+sub sequence
528   };
529 
530   if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
531        Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
532       ST->hasSSE2()) {
533     // pmuldq sequence.
534     if (ISD == ISD::SDIV && LT.second == MVT::v8i32 && ST->hasAVX())
535       return LT.first * 32;
536     if (ISD == ISD::SREM && LT.second == MVT::v8i32 && ST->hasAVX())
537       return LT.first * 38;
538     if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
539       return LT.first * 15;
540     if (ISD == ISD::SREM && LT.second == MVT::v4i32 && ST->hasSSE41())
541       return LT.first * 20;
542 
543     if (const auto *Entry = CostTableLookup(SSE2ConstCostTable, ISD, LT.second))
544       return LT.first * Entry->Cost;
545   }
546 
547   static const CostTblEntry AVX512BWShiftCostTable[] = {
548     { ISD::SHL,   MVT::v16i8,      4 }, // extend/vpsllvw/pack sequence.
549     { ISD::SRL,   MVT::v16i8,      4 }, // extend/vpsrlvw/pack sequence.
550     { ISD::SRA,   MVT::v16i8,      4 }, // extend/vpsravw/pack sequence.
551     { ISD::SHL,   MVT::v32i8,      4 }, // extend/vpsllvw/pack sequence.
552     { ISD::SRL,   MVT::v32i8,      4 }, // extend/vpsrlvw/pack sequence.
553     { ISD::SRA,   MVT::v32i8,      6 }, // extend/vpsravw/pack sequence.
554     { ISD::SHL,   MVT::v64i8,      6 }, // extend/vpsllvw/pack sequence.
555     { ISD::SRL,   MVT::v64i8,      7 }, // extend/vpsrlvw/pack sequence.
556     { ISD::SRA,   MVT::v64i8,     15 }, // extend/vpsravw/pack sequence.
557 
558     { ISD::SHL,   MVT::v8i16,      1 }, // vpsllvw
559     { ISD::SRL,   MVT::v8i16,      1 }, // vpsrlvw
560     { ISD::SRA,   MVT::v8i16,      1 }, // vpsravw
561     { ISD::SHL,   MVT::v16i16,     1 }, // vpsllvw
562     { ISD::SRL,   MVT::v16i16,     1 }, // vpsrlvw
563     { ISD::SRA,   MVT::v16i16,     1 }, // vpsravw
564     { ISD::SHL,   MVT::v32i16,     1 }, // vpsllvw
565     { ISD::SRL,   MVT::v32i16,     1 }, // vpsrlvw
566     { ISD::SRA,   MVT::v32i16,     1 }, // vpsravw
567   };
568 
569   if (ST->hasBWI())
570     if (const auto *Entry = CostTableLookup(AVX512BWShiftCostTable, ISD, LT.second))
571       return LT.first * Entry->Cost;
572 
573   static const CostTblEntry AVX2UniformCostTable[] = {
574     // Uniform splats are cheaper for the following instructions.
575     { ISD::SHL,  MVT::v16i16, 1 }, // psllw.
576     { ISD::SRL,  MVT::v16i16, 1 }, // psrlw.
577     { ISD::SRA,  MVT::v16i16, 1 }, // psraw.
578     { ISD::SHL,  MVT::v32i16, 2 }, // 2*psllw.
579     { ISD::SRL,  MVT::v32i16, 2 }, // 2*psrlw.
580     { ISD::SRA,  MVT::v32i16, 2 }, // 2*psraw.
581 
582     { ISD::SHL,  MVT::v8i32,  1 }, // pslld
583     { ISD::SRL,  MVT::v8i32,  1 }, // psrld
584     { ISD::SRA,  MVT::v8i32,  1 }, // psrad
585     { ISD::SHL,  MVT::v4i64,  1 }, // psllq
586     { ISD::SRL,  MVT::v4i64,  1 }, // psrlq
587   };
588 
589   if (ST->hasAVX2() &&
590       ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
591        (Op2Info == TargetTransformInfo::OK_UniformValue))) {
592     if (const auto *Entry =
593             CostTableLookup(AVX2UniformCostTable, ISD, LT.second))
594       return LT.first * Entry->Cost;
595   }
596 
597   static const CostTblEntry SSE2UniformCostTable[] = {
598     // Uniform splats are cheaper for the following instructions.
599     { ISD::SHL,  MVT::v8i16,  1 }, // psllw.
600     { ISD::SHL,  MVT::v4i32,  1 }, // pslld
601     { ISD::SHL,  MVT::v2i64,  1 }, // psllq.
602 
603     { ISD::SRL,  MVT::v8i16,  1 }, // psrlw.
604     { ISD::SRL,  MVT::v4i32,  1 }, // psrld.
605     { ISD::SRL,  MVT::v2i64,  1 }, // psrlq.
606 
607     { ISD::SRA,  MVT::v8i16,  1 }, // psraw.
608     { ISD::SRA,  MVT::v4i32,  1 }, // psrad.
609   };
610 
611   if (ST->hasSSE2() &&
612       ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
613        (Op2Info == TargetTransformInfo::OK_UniformValue))) {
614     if (const auto *Entry =
615             CostTableLookup(SSE2UniformCostTable, ISD, LT.second))
616       return LT.first * Entry->Cost;
617   }
618 
619   static const CostTblEntry AVX512DQCostTable[] = {
620     { ISD::MUL,  MVT::v2i64, 2 }, // pmullq
621     { ISD::MUL,  MVT::v4i64, 2 }, // pmullq
622     { ISD::MUL,  MVT::v8i64, 2 }  // pmullq
623   };
624 
625   // Look for AVX512DQ lowering tricks for custom cases.
626   if (ST->hasDQI())
627     if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD, LT.second))
628       return LT.first * Entry->Cost;
629 
630   static const CostTblEntry AVX512BWCostTable[] = {
631     { ISD::SHL,   MVT::v64i8,     11 }, // vpblendvb sequence.
632     { ISD::SRL,   MVT::v64i8,     11 }, // vpblendvb sequence.
633     { ISD::SRA,   MVT::v64i8,     24 }, // vpblendvb sequence.
634   };
635 
636   // Look for AVX512BW lowering tricks for custom cases.
637   if (ST->hasBWI())
638     if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD, LT.second))
639       return LT.first * Entry->Cost;
640 
641   static const CostTblEntry AVX512CostTable[] = {
642     { ISD::SHL,     MVT::v4i32,      1 },
643     { ISD::SRL,     MVT::v4i32,      1 },
644     { ISD::SRA,     MVT::v4i32,      1 },
645     { ISD::SHL,     MVT::v8i32,      1 },
646     { ISD::SRL,     MVT::v8i32,      1 },
647     { ISD::SRA,     MVT::v8i32,      1 },
648     { ISD::SHL,     MVT::v16i32,     1 },
649     { ISD::SRL,     MVT::v16i32,     1 },
650     { ISD::SRA,     MVT::v16i32,     1 },
651 
652     { ISD::SHL,     MVT::v2i64,      1 },
653     { ISD::SRL,     MVT::v2i64,      1 },
654     { ISD::SHL,     MVT::v4i64,      1 },
655     { ISD::SRL,     MVT::v4i64,      1 },
656     { ISD::SHL,     MVT::v8i64,      1 },
657     { ISD::SRL,     MVT::v8i64,      1 },
658 
659     { ISD::SRA,     MVT::v2i64,      1 },
660     { ISD::SRA,     MVT::v4i64,      1 },
661     { ISD::SRA,     MVT::v8i64,      1 },
662 
663     { ISD::MUL,     MVT::v16i32,     1 }, // pmulld (Skylake from agner.org)
664     { ISD::MUL,     MVT::v8i32,      1 }, // pmulld (Skylake from agner.org)
665     { ISD::MUL,     MVT::v4i32,      1 }, // pmulld (Skylake from agner.org)
666     { ISD::MUL,     MVT::v8i64,      6 }, // 3*pmuludq/3*shift/2*add
667     { ISD::MUL,     MVT::i64,        1 }, // Skylake from http://www.agner.org/
668 
669     { ISD::FNEG,    MVT::v8f64,      1 }, // Skylake from http://www.agner.org/
670     { ISD::FADD,    MVT::v8f64,      1 }, // Skylake from http://www.agner.org/
671     { ISD::FSUB,    MVT::v8f64,      1 }, // Skylake from http://www.agner.org/
672     { ISD::FMUL,    MVT::v8f64,      1 }, // Skylake from http://www.agner.org/
673     { ISD::FDIV,    MVT::f64,        4 }, // Skylake from http://www.agner.org/
674     { ISD::FDIV,    MVT::v2f64,      4 }, // Skylake from http://www.agner.org/
675     { ISD::FDIV,    MVT::v4f64,      8 }, // Skylake from http://www.agner.org/
676     { ISD::FDIV,    MVT::v8f64,     16 }, // Skylake from http://www.agner.org/
677 
678     { ISD::FNEG,    MVT::v16f32,     1 }, // Skylake from http://www.agner.org/
679     { ISD::FADD,    MVT::v16f32,     1 }, // Skylake from http://www.agner.org/
680     { ISD::FSUB,    MVT::v16f32,     1 }, // Skylake from http://www.agner.org/
681     { ISD::FMUL,    MVT::v16f32,     1 }, // Skylake from http://www.agner.org/
682     { ISD::FDIV,    MVT::f32,        3 }, // Skylake from http://www.agner.org/
683     { ISD::FDIV,    MVT::v4f32,      3 }, // Skylake from http://www.agner.org/
684     { ISD::FDIV,    MVT::v8f32,      5 }, // Skylake from http://www.agner.org/
685     { ISD::FDIV,    MVT::v16f32,    10 }, // Skylake from http://www.agner.org/
686   };
687 
688   if (ST->hasAVX512())
689     if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
690       return LT.first * Entry->Cost;
691 
692   static const CostTblEntry AVX2ShiftCostTable[] = {
693     // Shifts on vXi64/vXi32 on AVX2 is legal even though we declare to
694     // customize them to detect the cases where shift amount is a scalar one.
695     { ISD::SHL,     MVT::v4i32,    2 }, // vpsllvd (Haswell from agner.org)
696     { ISD::SRL,     MVT::v4i32,    2 }, // vpsrlvd (Haswell from agner.org)
697     { ISD::SRA,     MVT::v4i32,    2 }, // vpsravd (Haswell from agner.org)
698     { ISD::SHL,     MVT::v8i32,    2 }, // vpsllvd (Haswell from agner.org)
699     { ISD::SRL,     MVT::v8i32,    2 }, // vpsrlvd (Haswell from agner.org)
700     { ISD::SRA,     MVT::v8i32,    2 }, // vpsravd (Haswell from agner.org)
701     { ISD::SHL,     MVT::v2i64,    1 }, // vpsllvq (Haswell from agner.org)
702     { ISD::SRL,     MVT::v2i64,    1 }, // vpsrlvq (Haswell from agner.org)
703     { ISD::SHL,     MVT::v4i64,    1 }, // vpsllvq (Haswell from agner.org)
704     { ISD::SRL,     MVT::v4i64,    1 }, // vpsrlvq (Haswell from agner.org)
705   };
706 
707   if (ST->hasAVX512()) {
708     if (ISD == ISD::SHL && LT.second == MVT::v32i16 &&
709         (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
710          Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
711       // On AVX512, a packed v32i16 shift left by a constant build_vector
712       // is lowered into a vector multiply (vpmullw).
713       return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
714                                     Op1Info, Op2Info,
715                                     TargetTransformInfo::OP_None,
716                                     TargetTransformInfo::OP_None);
717   }
718 
719   // Look for AVX2 lowering tricks (XOP is always better at v4i32 shifts).
720   if (ST->hasAVX2() && !(ST->hasXOP() && LT.second == MVT::v4i32)) {
721     if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
722         (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
723          Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
724       // On AVX2, a packed v16i16 shift left by a constant build_vector
725       // is lowered into a vector multiply (vpmullw).
726       return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
727                                     Op1Info, Op2Info,
728                                     TargetTransformInfo::OP_None,
729                                     TargetTransformInfo::OP_None);
730 
731     if (const auto *Entry = CostTableLookup(AVX2ShiftCostTable, ISD, LT.second))
732       return LT.first * Entry->Cost;
733   }
734 
735   static const CostTblEntry XOPShiftCostTable[] = {
736     // 128bit shifts take 1cy, but right shifts require negation beforehand.
737     { ISD::SHL,     MVT::v16i8,    1 },
738     { ISD::SRL,     MVT::v16i8,    2 },
739     { ISD::SRA,     MVT::v16i8,    2 },
740     { ISD::SHL,     MVT::v8i16,    1 },
741     { ISD::SRL,     MVT::v8i16,    2 },
742     { ISD::SRA,     MVT::v8i16,    2 },
743     { ISD::SHL,     MVT::v4i32,    1 },
744     { ISD::SRL,     MVT::v4i32,    2 },
745     { ISD::SRA,     MVT::v4i32,    2 },
746     { ISD::SHL,     MVT::v2i64,    1 },
747     { ISD::SRL,     MVT::v2i64,    2 },
748     { ISD::SRA,     MVT::v2i64,    2 },
749     // 256bit shifts require splitting if AVX2 didn't catch them above.
750     { ISD::SHL,     MVT::v32i8,  2+2 },
751     { ISD::SRL,     MVT::v32i8,  4+2 },
752     { ISD::SRA,     MVT::v32i8,  4+2 },
753     { ISD::SHL,     MVT::v16i16, 2+2 },
754     { ISD::SRL,     MVT::v16i16, 4+2 },
755     { ISD::SRA,     MVT::v16i16, 4+2 },
756     { ISD::SHL,     MVT::v8i32,  2+2 },
757     { ISD::SRL,     MVT::v8i32,  4+2 },
758     { ISD::SRA,     MVT::v8i32,  4+2 },
759     { ISD::SHL,     MVT::v4i64,  2+2 },
760     { ISD::SRL,     MVT::v4i64,  4+2 },
761     { ISD::SRA,     MVT::v4i64,  4+2 },
762   };
763 
764   // Look for XOP lowering tricks.
765   if (ST->hasXOP()) {
766     // If the right shift is constant then we'll fold the negation so
767     // it's as cheap as a left shift.
768     int ShiftISD = ISD;
769     if ((ShiftISD == ISD::SRL || ShiftISD == ISD::SRA) &&
770         (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
771          Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
772       ShiftISD = ISD::SHL;
773     if (const auto *Entry =
774             CostTableLookup(XOPShiftCostTable, ShiftISD, LT.second))
775       return LT.first * Entry->Cost;
776   }
777 
778   static const CostTblEntry SSE2UniformShiftCostTable[] = {
779     // Uniform splats are cheaper for the following instructions.
780     { ISD::SHL,  MVT::v16i16, 2+2 }, // 2*psllw + split.
781     { ISD::SHL,  MVT::v8i32,  2+2 }, // 2*pslld + split.
782     { ISD::SHL,  MVT::v4i64,  2+2 }, // 2*psllq + split.
783 
784     { ISD::SRL,  MVT::v16i16, 2+2 }, // 2*psrlw + split.
785     { ISD::SRL,  MVT::v8i32,  2+2 }, // 2*psrld + split.
786     { ISD::SRL,  MVT::v4i64,  2+2 }, // 2*psrlq + split.
787 
788     { ISD::SRA,  MVT::v16i16, 2+2 }, // 2*psraw + split.
789     { ISD::SRA,  MVT::v8i32,  2+2 }, // 2*psrad + split.
790     { ISD::SRA,  MVT::v2i64,    4 }, // 2*psrad + shuffle.
791     { ISD::SRA,  MVT::v4i64,  8+2 }, // 2*(2*psrad + shuffle) + split.
792   };
793 
794   if (ST->hasSSE2() &&
795       ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
796        (Op2Info == TargetTransformInfo::OK_UniformValue))) {
797 
798     // Handle AVX2 uniform v4i64 ISD::SRA, it's not worth a table.
799     if (ISD == ISD::SRA && LT.second == MVT::v4i64 && ST->hasAVX2())
800       return LT.first * 4; // 2*psrad + shuffle.
801 
802     if (const auto *Entry =
803             CostTableLookup(SSE2UniformShiftCostTable, ISD, LT.second))
804       return LT.first * Entry->Cost;
805   }
806 
807   if (ISD == ISD::SHL &&
808       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
809     MVT VT = LT.second;
810     // Vector shift left by non uniform constant can be lowered
811     // into vector multiply.
812     if (((VT == MVT::v8i16 || VT == MVT::v4i32) && ST->hasSSE2()) ||
813         ((VT == MVT::v16i16 || VT == MVT::v8i32) && ST->hasAVX()))
814       ISD = ISD::MUL;
815   }
816 
817   static const CostTblEntry AVX2CostTable[] = {
818     { ISD::SHL,  MVT::v16i8,      6 }, // vpblendvb sequence.
819     { ISD::SHL,  MVT::v32i8,      6 }, // vpblendvb sequence.
820     { ISD::SHL,  MVT::v64i8,     12 }, // 2*vpblendvb sequence.
821     { ISD::SHL,  MVT::v8i16,      5 }, // extend/vpsrlvd/pack sequence.
822     { ISD::SHL,  MVT::v16i16,     7 }, // extend/vpsrlvd/pack sequence.
823     { ISD::SHL,  MVT::v32i16,    14 }, // 2*extend/vpsrlvd/pack sequence.
824 
825     { ISD::SRL,  MVT::v16i8,      6 }, // vpblendvb sequence.
826     { ISD::SRL,  MVT::v32i8,      6 }, // vpblendvb sequence.
827     { ISD::SRL,  MVT::v64i8,     12 }, // 2*vpblendvb sequence.
828     { ISD::SRL,  MVT::v8i16,      5 }, // extend/vpsrlvd/pack sequence.
829     { ISD::SRL,  MVT::v16i16,     7 }, // extend/vpsrlvd/pack sequence.
830     { ISD::SRL,  MVT::v32i16,    14 }, // 2*extend/vpsrlvd/pack sequence.
831 
832     { ISD::SRA,  MVT::v16i8,     17 }, // vpblendvb sequence.
833     { ISD::SRA,  MVT::v32i8,     17 }, // vpblendvb sequence.
834     { ISD::SRA,  MVT::v64i8,     34 }, // 2*vpblendvb sequence.
835     { ISD::SRA,  MVT::v8i16,      5 }, // extend/vpsravd/pack sequence.
836     { ISD::SRA,  MVT::v16i16,     7 }, // extend/vpsravd/pack sequence.
837     { ISD::SRA,  MVT::v32i16,    14 }, // 2*extend/vpsravd/pack sequence.
838     { ISD::SRA,  MVT::v2i64,      2 }, // srl/xor/sub sequence.
839     { ISD::SRA,  MVT::v4i64,      2 }, // srl/xor/sub sequence.
840 
841     { ISD::SUB,  MVT::v32i8,      1 }, // psubb
842     { ISD::ADD,  MVT::v32i8,      1 }, // paddb
843     { ISD::SUB,  MVT::v16i16,     1 }, // psubw
844     { ISD::ADD,  MVT::v16i16,     1 }, // paddw
845     { ISD::SUB,  MVT::v8i32,      1 }, // psubd
846     { ISD::ADD,  MVT::v8i32,      1 }, // paddd
847     { ISD::SUB,  MVT::v4i64,      1 }, // psubq
848     { ISD::ADD,  MVT::v4i64,      1 }, // paddq
849 
850     { ISD::MUL,  MVT::v16i16,     1 }, // pmullw
851     { ISD::MUL,  MVT::v8i32,      2 }, // pmulld (Haswell from agner.org)
852     { ISD::MUL,  MVT::v4i64,      6 }, // 3*pmuludq/3*shift/2*add
853 
854     { ISD::FNEG, MVT::v4f64,      1 }, // Haswell from http://www.agner.org/
855     { ISD::FNEG, MVT::v8f32,      1 }, // Haswell from http://www.agner.org/
856     { ISD::FADD, MVT::v4f64,      1 }, // Haswell from http://www.agner.org/
857     { ISD::FADD, MVT::v8f32,      1 }, // Haswell from http://www.agner.org/
858     { ISD::FSUB, MVT::v4f64,      1 }, // Haswell from http://www.agner.org/
859     { ISD::FSUB, MVT::v8f32,      1 }, // Haswell from http://www.agner.org/
860     { ISD::FMUL, MVT::f64,        1 }, // Haswell from http://www.agner.org/
861     { ISD::FMUL, MVT::v2f64,      1 }, // Haswell from http://www.agner.org/
862     { ISD::FMUL, MVT::v4f64,      1 }, // Haswell from http://www.agner.org/
863     { ISD::FMUL, MVT::v8f32,      1 }, // Haswell from http://www.agner.org/
864 
865     { ISD::FDIV, MVT::f32,        7 }, // Haswell from http://www.agner.org/
866     { ISD::FDIV, MVT::v4f32,      7 }, // Haswell from http://www.agner.org/
867     { ISD::FDIV, MVT::v8f32,     14 }, // Haswell from http://www.agner.org/
868     { ISD::FDIV, MVT::f64,       14 }, // Haswell from http://www.agner.org/
869     { ISD::FDIV, MVT::v2f64,     14 }, // Haswell from http://www.agner.org/
870     { ISD::FDIV, MVT::v4f64,     28 }, // Haswell from http://www.agner.org/
871   };
872 
873   // Look for AVX2 lowering tricks for custom cases.
874   if (ST->hasAVX2())
875     if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second))
876       return LT.first * Entry->Cost;
877 
878   static const CostTblEntry AVX1CostTable[] = {
879     // We don't have to scalarize unsupported ops. We can issue two half-sized
880     // operations and we only need to extract the upper YMM half.
881     // Two ops + 1 extract + 1 insert = 4.
882     { ISD::MUL,     MVT::v16i16,     4 },
883     { ISD::MUL,     MVT::v8i32,      5 }, // BTVER2 from http://www.agner.org/
884     { ISD::MUL,     MVT::v4i64,     12 },
885 
886     { ISD::SUB,     MVT::v32i8,      4 },
887     { ISD::ADD,     MVT::v32i8,      4 },
888     { ISD::SUB,     MVT::v16i16,     4 },
889     { ISD::ADD,     MVT::v16i16,     4 },
890     { ISD::SUB,     MVT::v8i32,      4 },
891     { ISD::ADD,     MVT::v8i32,      4 },
892     { ISD::SUB,     MVT::v4i64,      4 },
893     { ISD::ADD,     MVT::v4i64,      4 },
894 
895     { ISD::SHL,     MVT::v32i8,     22 }, // pblendvb sequence + split.
896     { ISD::SHL,     MVT::v8i16,      6 }, // pblendvb sequence.
897     { ISD::SHL,     MVT::v16i16,    13 }, // pblendvb sequence + split.
898     { ISD::SHL,     MVT::v4i32,      3 }, // pslld/paddd/cvttps2dq/pmulld
899     { ISD::SHL,     MVT::v8i32,      9 }, // pslld/paddd/cvttps2dq/pmulld + split
900     { ISD::SHL,     MVT::v2i64,      2 }, // Shift each lane + blend.
901     { ISD::SHL,     MVT::v4i64,      6 }, // Shift each lane + blend + split.
902 
903     { ISD::SRL,     MVT::v32i8,     23 }, // pblendvb sequence + split.
904     { ISD::SRL,     MVT::v16i16,    28 }, // pblendvb sequence + split.
905     { ISD::SRL,     MVT::v4i32,      6 }, // Shift each lane + blend.
906     { ISD::SRL,     MVT::v8i32,     14 }, // Shift each lane + blend + split.
907     { ISD::SRL,     MVT::v2i64,      2 }, // Shift each lane + blend.
908     { ISD::SRL,     MVT::v4i64,      6 }, // Shift each lane + blend + split.
909 
910     { ISD::SRA,     MVT::v32i8,     44 }, // pblendvb sequence + split.
911     { ISD::SRA,     MVT::v16i16,    28 }, // pblendvb sequence + split.
912     { ISD::SRA,     MVT::v4i32,      6 }, // Shift each lane + blend.
913     { ISD::SRA,     MVT::v8i32,     14 }, // Shift each lane + blend + split.
914     { ISD::SRA,     MVT::v2i64,      5 }, // Shift each lane + blend.
915     { ISD::SRA,     MVT::v4i64,     12 }, // Shift each lane + blend + split.
916 
917     { ISD::FNEG,    MVT::v4f64,      2 }, // BTVER2 from http://www.agner.org/
918     { ISD::FNEG,    MVT::v8f32,      2 }, // BTVER2 from http://www.agner.org/
919 
920     { ISD::FMUL,    MVT::f64,        2 }, // BTVER2 from http://www.agner.org/
921     { ISD::FMUL,    MVT::v2f64,      2 }, // BTVER2 from http://www.agner.org/
922     { ISD::FMUL,    MVT::v4f64,      4 }, // BTVER2 from http://www.agner.org/
923 
924     { ISD::FDIV,    MVT::f32,       14 }, // SNB from http://www.agner.org/
925     { ISD::FDIV,    MVT::v4f32,     14 }, // SNB from http://www.agner.org/
926     { ISD::FDIV,    MVT::v8f32,     28 }, // SNB from http://www.agner.org/
927     { ISD::FDIV,    MVT::f64,       22 }, // SNB from http://www.agner.org/
928     { ISD::FDIV,    MVT::v2f64,     22 }, // SNB from http://www.agner.org/
929     { ISD::FDIV,    MVT::v4f64,     44 }, // SNB from http://www.agner.org/
930   };
931 
932   if (ST->hasAVX())
933     if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, LT.second))
934       return LT.first * Entry->Cost;
935 
936   static const CostTblEntry SSE42CostTable[] = {
937     { ISD::FADD, MVT::f64,     1 }, // Nehalem from http://www.agner.org/
938     { ISD::FADD, MVT::f32,     1 }, // Nehalem from http://www.agner.org/
939     { ISD::FADD, MVT::v2f64,   1 }, // Nehalem from http://www.agner.org/
940     { ISD::FADD, MVT::v4f32,   1 }, // Nehalem from http://www.agner.org/
941 
942     { ISD::FSUB, MVT::f64,     1 }, // Nehalem from http://www.agner.org/
943     { ISD::FSUB, MVT::f32 ,    1 }, // Nehalem from http://www.agner.org/
944     { ISD::FSUB, MVT::v2f64,   1 }, // Nehalem from http://www.agner.org/
945     { ISD::FSUB, MVT::v4f32,   1 }, // Nehalem from http://www.agner.org/
946 
947     { ISD::FMUL, MVT::f64,     1 }, // Nehalem from http://www.agner.org/
948     { ISD::FMUL, MVT::f32,     1 }, // Nehalem from http://www.agner.org/
949     { ISD::FMUL, MVT::v2f64,   1 }, // Nehalem from http://www.agner.org/
950     { ISD::FMUL, MVT::v4f32,   1 }, // Nehalem from http://www.agner.org/
951 
952     { ISD::FDIV,  MVT::f32,   14 }, // Nehalem from http://www.agner.org/
953     { ISD::FDIV,  MVT::v4f32, 14 }, // Nehalem from http://www.agner.org/
954     { ISD::FDIV,  MVT::f64,   22 }, // Nehalem from http://www.agner.org/
955     { ISD::FDIV,  MVT::v2f64, 22 }, // Nehalem from http://www.agner.org/
956 
957     { ISD::MUL,   MVT::v2i64,  6 }  // 3*pmuludq/3*shift/2*add
958   };
959 
960   if (ST->hasSSE42())
961     if (const auto *Entry = CostTableLookup(SSE42CostTable, ISD, LT.second))
962       return LT.first * Entry->Cost;
963 
964   static const CostTblEntry SSE41CostTable[] = {
965     { ISD::SHL,  MVT::v16i8,      10 }, // pblendvb sequence.
966     { ISD::SHL,  MVT::v8i16,      11 }, // pblendvb sequence.
967     { ISD::SHL,  MVT::v4i32,       4 }, // pslld/paddd/cvttps2dq/pmulld
968 
969     { ISD::SRL,  MVT::v16i8,      11 }, // pblendvb sequence.
970     { ISD::SRL,  MVT::v8i16,      13 }, // pblendvb sequence.
971     { ISD::SRL,  MVT::v4i32,      16 }, // Shift each lane + blend.
972 
973     { ISD::SRA,  MVT::v16i8,      21 }, // pblendvb sequence.
974     { ISD::SRA,  MVT::v8i16,      13 }, // pblendvb sequence.
975 
976     { ISD::MUL,  MVT::v4i32,       2 }  // pmulld (Nehalem from agner.org)
977   };
978 
979   if (ST->hasSSE41())
980     if (const auto *Entry = CostTableLookup(SSE41CostTable, ISD, LT.second))
981       return LT.first * Entry->Cost;
982 
983   static const CostTblEntry SSE2CostTable[] = {
984     // We don't correctly identify costs of casts because they are marked as
985     // custom.
986     { ISD::SHL,  MVT::v16i8,      13 }, // cmpgtb sequence.
987     { ISD::SHL,  MVT::v8i16,      25 }, // cmpgtw sequence.
988     { ISD::SHL,  MVT::v4i32,      16 }, // pslld/paddd/cvttps2dq/pmuludq.
989     { ISD::SHL,  MVT::v2i64,       4 }, // splat+shuffle sequence.
990 
991     { ISD::SRL,  MVT::v16i8,      14 }, // cmpgtb sequence.
992     { ISD::SRL,  MVT::v8i16,      16 }, // cmpgtw sequence.
993     { ISD::SRL,  MVT::v4i32,      12 }, // Shift each lane + blend.
994     { ISD::SRL,  MVT::v2i64,       4 }, // splat+shuffle sequence.
995 
996     { ISD::SRA,  MVT::v16i8,      27 }, // unpacked cmpgtb sequence.
997     { ISD::SRA,  MVT::v8i16,      16 }, // cmpgtw sequence.
998     { ISD::SRA,  MVT::v4i32,      12 }, // Shift each lane + blend.
999     { ISD::SRA,  MVT::v2i64,       8 }, // srl/xor/sub splat+shuffle sequence.
1000 
1001     { ISD::MUL,  MVT::v8i16,       1 }, // pmullw
1002     { ISD::MUL,  MVT::v4i32,       6 }, // 3*pmuludq/4*shuffle
1003     { ISD::MUL,  MVT::v2i64,       8 }, // 3*pmuludq/3*shift/2*add
1004 
1005     { ISD::FDIV, MVT::f32,        23 }, // Pentium IV from http://www.agner.org/
1006     { ISD::FDIV, MVT::v4f32,      39 }, // Pentium IV from http://www.agner.org/
1007     { ISD::FDIV, MVT::f64,        38 }, // Pentium IV from http://www.agner.org/
1008     { ISD::FDIV, MVT::v2f64,      69 }, // Pentium IV from http://www.agner.org/
1009 
1010     { ISD::FNEG, MVT::f32,         1 }, // Pentium IV from http://www.agner.org/
1011     { ISD::FNEG, MVT::f64,         1 }, // Pentium IV from http://www.agner.org/
1012     { ISD::FNEG, MVT::v4f32,       1 }, // Pentium IV from http://www.agner.org/
1013     { ISD::FNEG, MVT::v2f64,       1 }, // Pentium IV from http://www.agner.org/
1014 
1015     { ISD::FADD, MVT::f32,         2 }, // Pentium IV from http://www.agner.org/
1016     { ISD::FADD, MVT::f64,         2 }, // Pentium IV from http://www.agner.org/
1017 
1018     { ISD::FSUB, MVT::f32,         2 }, // Pentium IV from http://www.agner.org/
1019     { ISD::FSUB, MVT::f64,         2 }, // Pentium IV from http://www.agner.org/
1020   };
1021 
1022   if (ST->hasSSE2())
1023     if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
1024       return LT.first * Entry->Cost;
1025 
1026   static const CostTblEntry SSE1CostTable[] = {
1027     { ISD::FDIV, MVT::f32,   17 }, // Pentium III from http://www.agner.org/
1028     { ISD::FDIV, MVT::v4f32, 34 }, // Pentium III from http://www.agner.org/
1029 
1030     { ISD::FNEG, MVT::f32,    2 }, // Pentium III from http://www.agner.org/
1031     { ISD::FNEG, MVT::v4f32,  2 }, // Pentium III from http://www.agner.org/
1032 
1033     { ISD::FADD, MVT::f32,    1 }, // Pentium III from http://www.agner.org/
1034     { ISD::FADD, MVT::v4f32,  2 }, // Pentium III from http://www.agner.org/
1035 
1036     { ISD::FSUB, MVT::f32,    1 }, // Pentium III from http://www.agner.org/
1037     { ISD::FSUB, MVT::v4f32,  2 }, // Pentium III from http://www.agner.org/
1038   };
1039 
1040   if (ST->hasSSE1())
1041     if (const auto *Entry = CostTableLookup(SSE1CostTable, ISD, LT.second))
1042       return LT.first * Entry->Cost;
1043 
1044   static const CostTblEntry X64CostTbl[] = { // 64-bit targets
1045     { ISD::ADD,  MVT::i64,    1 }, // Core (Merom) from http://www.agner.org/
1046     { ISD::SUB,  MVT::i64,    1 }, // Core (Merom) from http://www.agner.org/
1047     { ISD::MUL,  MVT::i64,    2 }, // Nehalem from http://www.agner.org/
1048   };
1049 
1050   if (ST->is64Bit())
1051     if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, LT.second))
1052       return LT.first * Entry->Cost;
1053 
1054   static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
1055     { ISD::ADD,  MVT::i8,    1 }, // Pentium III from http://www.agner.org/
1056     { ISD::ADD,  MVT::i16,   1 }, // Pentium III from http://www.agner.org/
1057     { ISD::ADD,  MVT::i32,   1 }, // Pentium III from http://www.agner.org/
1058 
1059     { ISD::SUB,  MVT::i8,    1 }, // Pentium III from http://www.agner.org/
1060     { ISD::SUB,  MVT::i16,   1 }, // Pentium III from http://www.agner.org/
1061     { ISD::SUB,  MVT::i32,   1 }, // Pentium III from http://www.agner.org/
1062   };
1063 
1064   if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, LT.second))
1065     return LT.first * Entry->Cost;
1066 
1067   // It is not a good idea to vectorize division. We have to scalarize it and
1068   // in the process we will often end up having to spilling regular
1069   // registers. The overhead of division is going to dominate most kernels
1070   // anyways so try hard to prevent vectorization of division - it is
1071   // generally a bad idea. Assume somewhat arbitrarily that we have to be able
1072   // to hide "20 cycles" for each lane.
1073   if (LT.second.isVector() && (ISD == ISD::SDIV || ISD == ISD::SREM ||
1074                                ISD == ISD::UDIV || ISD == ISD::UREM)) {
1075     InstructionCost ScalarCost = getArithmeticInstrCost(
1076         Opcode, Ty->getScalarType(), CostKind, Op1Info, Op2Info,
1077         TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
1078     return 20 * LT.first * LT.second.getVectorNumElements() * ScalarCost;
1079   }
1080 
1081   // Fallback to the default implementation.
1082   return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info);
1083 }
1084 
1085 InstructionCost X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
1086                                            VectorType *BaseTp,
1087                                            ArrayRef<int> Mask, int Index,
1088                                            VectorType *SubTp) {
1089   // 64-bit packed float vectors (v2f32) are widened to type v4f32.
1090   // 64-bit packed integer vectors (v2i32) are widened to type v4i32.
1091   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, BaseTp);
1092 
1093   Kind = improveShuffleKindFromMask(Kind, Mask);
1094   // Treat Transpose as 2-op shuffles - there's no difference in lowering.
1095   if (Kind == TTI::SK_Transpose)
1096     Kind = TTI::SK_PermuteTwoSrc;
1097 
1098   // For Broadcasts we are splatting the first element from the first input
1099   // register, so only need to reference that input and all the output
1100   // registers are the same.
1101   if (Kind == TTI::SK_Broadcast)
1102     LT.first = 1;
1103 
1104   // Subvector extractions are free if they start at the beginning of a
1105   // vector and cheap if the subvectors are aligned.
1106   if (Kind == TTI::SK_ExtractSubvector && LT.second.isVector()) {
1107     int NumElts = LT.second.getVectorNumElements();
1108     if ((Index % NumElts) == 0)
1109       return 0;
1110     std::pair<InstructionCost, MVT> SubLT =
1111         TLI->getTypeLegalizationCost(DL, SubTp);
1112     if (SubLT.second.isVector()) {
1113       int NumSubElts = SubLT.second.getVectorNumElements();
1114       if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
1115         return SubLT.first;
1116       // Handle some cases for widening legalization. For now we only handle
1117       // cases where the original subvector was naturally aligned and evenly
1118       // fit in its legalized subvector type.
1119       // FIXME: Remove some of the alignment restrictions.
1120       // FIXME: We can use permq for 64-bit or larger extracts from 256-bit
1121       // vectors.
1122       int OrigSubElts = cast<FixedVectorType>(SubTp)->getNumElements();
1123       if (NumSubElts > OrigSubElts && (Index % OrigSubElts) == 0 &&
1124           (NumSubElts % OrigSubElts) == 0 &&
1125           LT.second.getVectorElementType() ==
1126               SubLT.second.getVectorElementType() &&
1127           LT.second.getVectorElementType().getSizeInBits() ==
1128               BaseTp->getElementType()->getPrimitiveSizeInBits()) {
1129         assert(NumElts >= NumSubElts && NumElts > OrigSubElts &&
1130                "Unexpected number of elements!");
1131         auto *VecTy = FixedVectorType::get(BaseTp->getElementType(),
1132                                            LT.second.getVectorNumElements());
1133         auto *SubTy = FixedVectorType::get(BaseTp->getElementType(),
1134                                            SubLT.second.getVectorNumElements());
1135         int ExtractIndex = alignDown((Index % NumElts), NumSubElts);
1136         InstructionCost ExtractCost = getShuffleCost(
1137             TTI::SK_ExtractSubvector, VecTy, None, ExtractIndex, SubTy);
1138 
1139         // If the original size is 32-bits or more, we can use pshufd. Otherwise
1140         // if we have SSSE3 we can use pshufb.
1141         if (SubTp->getPrimitiveSizeInBits() >= 32 || ST->hasSSSE3())
1142           return ExtractCost + 1; // pshufd or pshufb
1143 
1144         assert(SubTp->getPrimitiveSizeInBits() == 16 &&
1145                "Unexpected vector size");
1146 
1147         return ExtractCost + 2; // worst case pshufhw + pshufd
1148       }
1149     }
1150   }
1151 
1152   // Subvector insertions are cheap if the subvectors are aligned.
1153   // Note that in general, the insertion starting at the beginning of a vector
1154   // isn't free, because we need to preserve the rest of the wide vector.
1155   if (Kind == TTI::SK_InsertSubvector && LT.second.isVector()) {
1156     int NumElts = LT.second.getVectorNumElements();
1157     std::pair<InstructionCost, MVT> SubLT =
1158         TLI->getTypeLegalizationCost(DL, SubTp);
1159     if (SubLT.second.isVector()) {
1160       int NumSubElts = SubLT.second.getVectorNumElements();
1161       if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
1162         return SubLT.first;
1163     }
1164 
1165     // If the insertion isn't aligned, treat it like a 2-op shuffle.
1166     Kind = TTI::SK_PermuteTwoSrc;
1167   }
1168 
1169   // Handle some common (illegal) sub-vector types as they are often very cheap
1170   // to shuffle even on targets without PSHUFB.
1171   EVT VT = TLI->getValueType(DL, BaseTp);
1172   if (VT.isSimple() && VT.isVector() && VT.getSizeInBits() < 128 &&
1173       !ST->hasSSSE3()) {
1174      static const CostTblEntry SSE2SubVectorShuffleTbl[] = {
1175       {TTI::SK_Broadcast,        MVT::v4i16, 1}, // pshuflw
1176       {TTI::SK_Broadcast,        MVT::v2i16, 1}, // pshuflw
1177       {TTI::SK_Broadcast,        MVT::v8i8,  2}, // punpck/pshuflw
1178       {TTI::SK_Broadcast,        MVT::v4i8,  2}, // punpck/pshuflw
1179       {TTI::SK_Broadcast,        MVT::v2i8,  1}, // punpck
1180 
1181       {TTI::SK_Reverse,          MVT::v4i16, 1}, // pshuflw
1182       {TTI::SK_Reverse,          MVT::v2i16, 1}, // pshuflw
1183       {TTI::SK_Reverse,          MVT::v4i8,  3}, // punpck/pshuflw/packus
1184       {TTI::SK_Reverse,          MVT::v2i8,  1}, // punpck
1185 
1186       {TTI::SK_PermuteTwoSrc,    MVT::v4i16, 2}, // punpck/pshuflw
1187       {TTI::SK_PermuteTwoSrc,    MVT::v2i16, 2}, // punpck/pshuflw
1188       {TTI::SK_PermuteTwoSrc,    MVT::v8i8,  7}, // punpck/pshuflw
1189       {TTI::SK_PermuteTwoSrc,    MVT::v4i8,  4}, // punpck/pshuflw
1190       {TTI::SK_PermuteTwoSrc,    MVT::v2i8,  2}, // punpck
1191 
1192       {TTI::SK_PermuteSingleSrc, MVT::v4i16, 1}, // pshuflw
1193       {TTI::SK_PermuteSingleSrc, MVT::v2i16, 1}, // pshuflw
1194       {TTI::SK_PermuteSingleSrc, MVT::v8i8,  5}, // punpck/pshuflw
1195       {TTI::SK_PermuteSingleSrc, MVT::v4i8,  3}, // punpck/pshuflw
1196       {TTI::SK_PermuteSingleSrc, MVT::v2i8,  1}, // punpck
1197     };
1198 
1199     if (ST->hasSSE2())
1200       if (const auto *Entry =
1201               CostTableLookup(SSE2SubVectorShuffleTbl, Kind, VT.getSimpleVT()))
1202         return Entry->Cost;
1203   }
1204 
1205   // We are going to permute multiple sources and the result will be in multiple
1206   // destinations. Providing an accurate cost only for splits where the element
1207   // type remains the same.
1208   if (Kind == TTI::SK_PermuteSingleSrc && LT.first != 1) {
1209     MVT LegalVT = LT.second;
1210     if (LegalVT.isVector() &&
1211         LegalVT.getVectorElementType().getSizeInBits() ==
1212             BaseTp->getElementType()->getPrimitiveSizeInBits() &&
1213         LegalVT.getVectorNumElements() <
1214             cast<FixedVectorType>(BaseTp)->getNumElements()) {
1215 
1216       unsigned VecTySize = DL.getTypeStoreSize(BaseTp);
1217       unsigned LegalVTSize = LegalVT.getStoreSize();
1218       // Number of source vectors after legalization:
1219       unsigned NumOfSrcs = (VecTySize + LegalVTSize - 1) / LegalVTSize;
1220       // Number of destination vectors after legalization:
1221       InstructionCost NumOfDests = LT.first;
1222 
1223       auto *SingleOpTy = FixedVectorType::get(BaseTp->getElementType(),
1224                                               LegalVT.getVectorNumElements());
1225 
1226       InstructionCost NumOfShuffles = (NumOfSrcs - 1) * NumOfDests;
1227       return NumOfShuffles * getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy,
1228                                             None, 0, nullptr);
1229     }
1230 
1231     return BaseT::getShuffleCost(Kind, BaseTp, Mask, Index, SubTp);
1232   }
1233 
1234   // For 2-input shuffles, we must account for splitting the 2 inputs into many.
1235   if (Kind == TTI::SK_PermuteTwoSrc && LT.first != 1) {
1236     // We assume that source and destination have the same vector type.
1237     InstructionCost NumOfDests = LT.first;
1238     InstructionCost NumOfShufflesPerDest = LT.first * 2 - 1;
1239     LT.first = NumOfDests * NumOfShufflesPerDest;
1240   }
1241 
1242   static const CostTblEntry AVX512FP16ShuffleTbl[] = {
1243       {TTI::SK_Broadcast, MVT::v32f16, 1}, // vpbroadcastw
1244       {TTI::SK_Broadcast, MVT::v16f16, 1}, // vpbroadcastw
1245       {TTI::SK_Broadcast, MVT::v8f16, 1},  // vpbroadcastw
1246 
1247       {TTI::SK_Reverse, MVT::v32f16, 2}, // vpermw
1248       {TTI::SK_Reverse, MVT::v16f16, 2}, // vpermw
1249       {TTI::SK_Reverse, MVT::v8f16, 1},  // vpshufb
1250 
1251       {TTI::SK_PermuteSingleSrc, MVT::v32f16, 2}, // vpermw
1252       {TTI::SK_PermuteSingleSrc, MVT::v16f16, 2}, // vpermw
1253       {TTI::SK_PermuteSingleSrc, MVT::v8f16, 1},  // vpshufb
1254 
1255       {TTI::SK_PermuteTwoSrc, MVT::v32f16, 2}, // vpermt2w
1256       {TTI::SK_PermuteTwoSrc, MVT::v16f16, 2}, // vpermt2w
1257       {TTI::SK_PermuteTwoSrc, MVT::v8f16, 2}   // vpermt2w
1258   };
1259 
1260   if (!ST->useSoftFloat() && ST->hasFP16())
1261     if (const auto *Entry =
1262             CostTableLookup(AVX512FP16ShuffleTbl, Kind, LT.second))
1263       return LT.first * Entry->Cost;
1264 
1265   static const CostTblEntry AVX512VBMIShuffleTbl[] = {
1266       {TTI::SK_Reverse, MVT::v64i8, 1}, // vpermb
1267       {TTI::SK_Reverse, MVT::v32i8, 1}, // vpermb
1268 
1269       {TTI::SK_PermuteSingleSrc, MVT::v64i8, 1}, // vpermb
1270       {TTI::SK_PermuteSingleSrc, MVT::v32i8, 1}, // vpermb
1271 
1272       {TTI::SK_PermuteTwoSrc, MVT::v64i8, 2}, // vpermt2b
1273       {TTI::SK_PermuteTwoSrc, MVT::v32i8, 2}, // vpermt2b
1274       {TTI::SK_PermuteTwoSrc, MVT::v16i8, 2}  // vpermt2b
1275   };
1276 
1277   if (ST->hasVBMI())
1278     if (const auto *Entry =
1279             CostTableLookup(AVX512VBMIShuffleTbl, Kind, LT.second))
1280       return LT.first * Entry->Cost;
1281 
1282   static const CostTblEntry AVX512BWShuffleTbl[] = {
1283       {TTI::SK_Broadcast, MVT::v32i16, 1}, // vpbroadcastw
1284       {TTI::SK_Broadcast, MVT::v64i8, 1},  // vpbroadcastb
1285 
1286       {TTI::SK_Reverse, MVT::v32i16, 2}, // vpermw
1287       {TTI::SK_Reverse, MVT::v16i16, 2}, // vpermw
1288       {TTI::SK_Reverse, MVT::v64i8, 2},  // pshufb + vshufi64x2
1289 
1290       {TTI::SK_PermuteSingleSrc, MVT::v32i16, 2}, // vpermw
1291       {TTI::SK_PermuteSingleSrc, MVT::v16i16, 2}, // vpermw
1292       {TTI::SK_PermuteSingleSrc, MVT::v64i8, 8},  // extend to v32i16
1293 
1294       {TTI::SK_PermuteTwoSrc, MVT::v32i16, 2}, // vpermt2w
1295       {TTI::SK_PermuteTwoSrc, MVT::v16i16, 2}, // vpermt2w
1296       {TTI::SK_PermuteTwoSrc, MVT::v8i16, 2},  // vpermt2w
1297       {TTI::SK_PermuteTwoSrc, MVT::v64i8, 19}, // 6 * v32i8 + 1
1298 
1299       {TTI::SK_Select, MVT::v32i16, 1}, // vblendmw
1300       {TTI::SK_Select, MVT::v64i8,  1}, // vblendmb
1301   };
1302 
1303   if (ST->hasBWI())
1304     if (const auto *Entry =
1305             CostTableLookup(AVX512BWShuffleTbl, Kind, LT.second))
1306       return LT.first * Entry->Cost;
1307 
1308   static const CostTblEntry AVX512ShuffleTbl[] = {
1309       {TTI::SK_Broadcast, MVT::v8f64, 1},  // vbroadcastpd
1310       {TTI::SK_Broadcast, MVT::v16f32, 1}, // vbroadcastps
1311       {TTI::SK_Broadcast, MVT::v8i64, 1},  // vpbroadcastq
1312       {TTI::SK_Broadcast, MVT::v16i32, 1}, // vpbroadcastd
1313       {TTI::SK_Broadcast, MVT::v32i16, 1}, // vpbroadcastw
1314       {TTI::SK_Broadcast, MVT::v64i8, 1},  // vpbroadcastb
1315 
1316       {TTI::SK_Reverse, MVT::v8f64, 1},  // vpermpd
1317       {TTI::SK_Reverse, MVT::v16f32, 1}, // vpermps
1318       {TTI::SK_Reverse, MVT::v8i64, 1},  // vpermq
1319       {TTI::SK_Reverse, MVT::v16i32, 1}, // vpermd
1320       {TTI::SK_Reverse, MVT::v32i16, 7}, // per mca
1321       {TTI::SK_Reverse, MVT::v64i8,  7}, // per mca
1322 
1323       {TTI::SK_PermuteSingleSrc, MVT::v8f64, 1},  // vpermpd
1324       {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1},  // vpermpd
1325       {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1},  // vpermpd
1326       {TTI::SK_PermuteSingleSrc, MVT::v16f32, 1}, // vpermps
1327       {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1},  // vpermps
1328       {TTI::SK_PermuteSingleSrc, MVT::v4f32, 1},  // vpermps
1329       {TTI::SK_PermuteSingleSrc, MVT::v8i64, 1},  // vpermq
1330       {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1},  // vpermq
1331       {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1},  // vpermq
1332       {TTI::SK_PermuteSingleSrc, MVT::v16i32, 1}, // vpermd
1333       {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1},  // vpermd
1334       {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1},  // vpermd
1335       {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1},  // pshufb
1336 
1337       {TTI::SK_PermuteTwoSrc, MVT::v8f64, 1},  // vpermt2pd
1338       {TTI::SK_PermuteTwoSrc, MVT::v16f32, 1}, // vpermt2ps
1339       {TTI::SK_PermuteTwoSrc, MVT::v8i64, 1},  // vpermt2q
1340       {TTI::SK_PermuteTwoSrc, MVT::v16i32, 1}, // vpermt2d
1341       {TTI::SK_PermuteTwoSrc, MVT::v4f64, 1},  // vpermt2pd
1342       {TTI::SK_PermuteTwoSrc, MVT::v8f32, 1},  // vpermt2ps
1343       {TTI::SK_PermuteTwoSrc, MVT::v4i64, 1},  // vpermt2q
1344       {TTI::SK_PermuteTwoSrc, MVT::v8i32, 1},  // vpermt2d
1345       {TTI::SK_PermuteTwoSrc, MVT::v2f64, 1},  // vpermt2pd
1346       {TTI::SK_PermuteTwoSrc, MVT::v4f32, 1},  // vpermt2ps
1347       {TTI::SK_PermuteTwoSrc, MVT::v2i64, 1},  // vpermt2q
1348       {TTI::SK_PermuteTwoSrc, MVT::v4i32, 1},  // vpermt2d
1349 
1350       // FIXME: This just applies the type legalization cost rules above
1351       // assuming these completely split.
1352       {TTI::SK_PermuteSingleSrc, MVT::v32i16, 14},
1353       {TTI::SK_PermuteSingleSrc, MVT::v64i8,  14},
1354       {TTI::SK_PermuteTwoSrc,    MVT::v32i16, 42},
1355       {TTI::SK_PermuteTwoSrc,    MVT::v64i8,  42},
1356 
1357       {TTI::SK_Select, MVT::v32i16, 1}, // vpternlogq
1358       {TTI::SK_Select, MVT::v64i8,  1}, // vpternlogq
1359       {TTI::SK_Select, MVT::v8f64,  1}, // vblendmpd
1360       {TTI::SK_Select, MVT::v16f32, 1}, // vblendmps
1361       {TTI::SK_Select, MVT::v8i64,  1}, // vblendmq
1362       {TTI::SK_Select, MVT::v16i32, 1}, // vblendmd
1363   };
1364 
1365   if (ST->hasAVX512())
1366     if (const auto *Entry = CostTableLookup(AVX512ShuffleTbl, Kind, LT.second))
1367       return LT.first * Entry->Cost;
1368 
1369   static const CostTblEntry AVX2ShuffleTbl[] = {
1370       {TTI::SK_Broadcast, MVT::v4f64, 1},  // vbroadcastpd
1371       {TTI::SK_Broadcast, MVT::v8f32, 1},  // vbroadcastps
1372       {TTI::SK_Broadcast, MVT::v4i64, 1},  // vpbroadcastq
1373       {TTI::SK_Broadcast, MVT::v8i32, 1},  // vpbroadcastd
1374       {TTI::SK_Broadcast, MVT::v16i16, 1}, // vpbroadcastw
1375       {TTI::SK_Broadcast, MVT::v32i8, 1},  // vpbroadcastb
1376 
1377       {TTI::SK_Reverse, MVT::v4f64, 1},  // vpermpd
1378       {TTI::SK_Reverse, MVT::v8f32, 1},  // vpermps
1379       {TTI::SK_Reverse, MVT::v4i64, 1},  // vpermq
1380       {TTI::SK_Reverse, MVT::v8i32, 1},  // vpermd
1381       {TTI::SK_Reverse, MVT::v16i16, 2}, // vperm2i128 + pshufb
1382       {TTI::SK_Reverse, MVT::v32i8, 2},  // vperm2i128 + pshufb
1383 
1384       {TTI::SK_Select, MVT::v16i16, 1}, // vpblendvb
1385       {TTI::SK_Select, MVT::v32i8, 1},  // vpblendvb
1386 
1387       {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1},  // vpermpd
1388       {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1},  // vpermps
1389       {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1},  // vpermq
1390       {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1},  // vpermd
1391       {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vperm2i128 + 2*vpshufb
1392                                                   // + vpblendvb
1393       {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4},  // vperm2i128 + 2*vpshufb
1394                                                   // + vpblendvb
1395 
1396       {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3},  // 2*vpermpd + vblendpd
1397       {TTI::SK_PermuteTwoSrc, MVT::v8f32, 3},  // 2*vpermps + vblendps
1398       {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3},  // 2*vpermq + vpblendd
1399       {TTI::SK_PermuteTwoSrc, MVT::v8i32, 3},  // 2*vpermd + vpblendd
1400       {TTI::SK_PermuteTwoSrc, MVT::v16i16, 7}, // 2*vperm2i128 + 4*vpshufb
1401                                                // + vpblendvb
1402       {TTI::SK_PermuteTwoSrc, MVT::v32i8, 7},  // 2*vperm2i128 + 4*vpshufb
1403                                                // + vpblendvb
1404   };
1405 
1406   if (ST->hasAVX2())
1407     if (const auto *Entry = CostTableLookup(AVX2ShuffleTbl, Kind, LT.second))
1408       return LT.first * Entry->Cost;
1409 
1410   static const CostTblEntry XOPShuffleTbl[] = {
1411       {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2},  // vperm2f128 + vpermil2pd
1412       {TTI::SK_PermuteSingleSrc, MVT::v8f32, 2},  // vperm2f128 + vpermil2ps
1413       {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2},  // vperm2f128 + vpermil2pd
1414       {TTI::SK_PermuteSingleSrc, MVT::v8i32, 2},  // vperm2f128 + vpermil2ps
1415       {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vextractf128 + 2*vpperm
1416                                                   // + vinsertf128
1417       {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4},  // vextractf128 + 2*vpperm
1418                                                   // + vinsertf128
1419 
1420       {TTI::SK_PermuteTwoSrc, MVT::v16i16, 9}, // 2*vextractf128 + 6*vpperm
1421                                                // + vinsertf128
1422       {TTI::SK_PermuteTwoSrc, MVT::v8i16, 1},  // vpperm
1423       {TTI::SK_PermuteTwoSrc, MVT::v32i8, 9},  // 2*vextractf128 + 6*vpperm
1424                                                // + vinsertf128
1425       {TTI::SK_PermuteTwoSrc, MVT::v16i8, 1},  // vpperm
1426   };
1427 
1428   if (ST->hasXOP())
1429     if (const auto *Entry = CostTableLookup(XOPShuffleTbl, Kind, LT.second))
1430       return LT.first * Entry->Cost;
1431 
1432   static const CostTblEntry AVX1ShuffleTbl[] = {
1433       {TTI::SK_Broadcast, MVT::v4f64, 2},  // vperm2f128 + vpermilpd
1434       {TTI::SK_Broadcast, MVT::v8f32, 2},  // vperm2f128 + vpermilps
1435       {TTI::SK_Broadcast, MVT::v4i64, 2},  // vperm2f128 + vpermilpd
1436       {TTI::SK_Broadcast, MVT::v8i32, 2},  // vperm2f128 + vpermilps
1437       {TTI::SK_Broadcast, MVT::v16i16, 3}, // vpshuflw + vpshufd + vinsertf128
1438       {TTI::SK_Broadcast, MVT::v32i8, 2},  // vpshufb + vinsertf128
1439 
1440       {TTI::SK_Reverse, MVT::v4f64, 2},  // vperm2f128 + vpermilpd
1441       {TTI::SK_Reverse, MVT::v8f32, 2},  // vperm2f128 + vpermilps
1442       {TTI::SK_Reverse, MVT::v4i64, 2},  // vperm2f128 + vpermilpd
1443       {TTI::SK_Reverse, MVT::v8i32, 2},  // vperm2f128 + vpermilps
1444       {TTI::SK_Reverse, MVT::v16i16, 4}, // vextractf128 + 2*pshufb
1445                                          // + vinsertf128
1446       {TTI::SK_Reverse, MVT::v32i8, 4},  // vextractf128 + 2*pshufb
1447                                          // + vinsertf128
1448 
1449       {TTI::SK_Select, MVT::v4i64, 1},  // vblendpd
1450       {TTI::SK_Select, MVT::v4f64, 1},  // vblendpd
1451       {TTI::SK_Select, MVT::v8i32, 1},  // vblendps
1452       {TTI::SK_Select, MVT::v8f32, 1},  // vblendps
1453       {TTI::SK_Select, MVT::v16i16, 3}, // vpand + vpandn + vpor
1454       {TTI::SK_Select, MVT::v32i8, 3},  // vpand + vpandn + vpor
1455 
1456       {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2},  // vperm2f128 + vshufpd
1457       {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2},  // vperm2f128 + vshufpd
1458       {TTI::SK_PermuteSingleSrc, MVT::v8f32, 4},  // 2*vperm2f128 + 2*vshufps
1459       {TTI::SK_PermuteSingleSrc, MVT::v8i32, 4},  // 2*vperm2f128 + 2*vshufps
1460       {TTI::SK_PermuteSingleSrc, MVT::v16i16, 8}, // vextractf128 + 4*pshufb
1461                                                   // + 2*por + vinsertf128
1462       {TTI::SK_PermuteSingleSrc, MVT::v32i8, 8},  // vextractf128 + 4*pshufb
1463                                                   // + 2*por + vinsertf128
1464 
1465       {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3},   // 2*vperm2f128 + vshufpd
1466       {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3},   // 2*vperm2f128 + vshufpd
1467       {TTI::SK_PermuteTwoSrc, MVT::v8f32, 4},   // 2*vperm2f128 + 2*vshufps
1468       {TTI::SK_PermuteTwoSrc, MVT::v8i32, 4},   // 2*vperm2f128 + 2*vshufps
1469       {TTI::SK_PermuteTwoSrc, MVT::v16i16, 15}, // 2*vextractf128 + 8*pshufb
1470                                                 // + 4*por + vinsertf128
1471       {TTI::SK_PermuteTwoSrc, MVT::v32i8, 15},  // 2*vextractf128 + 8*pshufb
1472                                                 // + 4*por + vinsertf128
1473   };
1474 
1475   if (ST->hasAVX())
1476     if (const auto *Entry = CostTableLookup(AVX1ShuffleTbl, Kind, LT.second))
1477       return LT.first * Entry->Cost;
1478 
1479   static const CostTblEntry SSE41ShuffleTbl[] = {
1480       {TTI::SK_Select, MVT::v2i64, 1}, // pblendw
1481       {TTI::SK_Select, MVT::v2f64, 1}, // movsd
1482       {TTI::SK_Select, MVT::v4i32, 1}, // pblendw
1483       {TTI::SK_Select, MVT::v4f32, 1}, // blendps
1484       {TTI::SK_Select, MVT::v8i16, 1}, // pblendw
1485       {TTI::SK_Select, MVT::v16i8, 1}  // pblendvb
1486   };
1487 
1488   if (ST->hasSSE41())
1489     if (const auto *Entry = CostTableLookup(SSE41ShuffleTbl, Kind, LT.second))
1490       return LT.first * Entry->Cost;
1491 
1492   static const CostTblEntry SSSE3ShuffleTbl[] = {
1493       {TTI::SK_Broadcast, MVT::v8i16, 1}, // pshufb
1494       {TTI::SK_Broadcast, MVT::v16i8, 1}, // pshufb
1495 
1496       {TTI::SK_Reverse, MVT::v8i16, 1}, // pshufb
1497       {TTI::SK_Reverse, MVT::v16i8, 1}, // pshufb
1498 
1499       {TTI::SK_Select, MVT::v8i16, 3}, // 2*pshufb + por
1500       {TTI::SK_Select, MVT::v16i8, 3}, // 2*pshufb + por
1501 
1502       {TTI::SK_PermuteSingleSrc, MVT::v8i16, 1}, // pshufb
1503       {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1}, // pshufb
1504 
1505       {TTI::SK_PermuteTwoSrc, MVT::v8i16, 3}, // 2*pshufb + por
1506       {TTI::SK_PermuteTwoSrc, MVT::v16i8, 3}, // 2*pshufb + por
1507   };
1508 
1509   if (ST->hasSSSE3())
1510     if (const auto *Entry = CostTableLookup(SSSE3ShuffleTbl, Kind, LT.second))
1511       return LT.first * Entry->Cost;
1512 
1513   static const CostTblEntry SSE2ShuffleTbl[] = {
1514       {TTI::SK_Broadcast, MVT::v2f64, 1}, // shufpd
1515       {TTI::SK_Broadcast, MVT::v2i64, 1}, // pshufd
1516       {TTI::SK_Broadcast, MVT::v4i32, 1}, // pshufd
1517       {TTI::SK_Broadcast, MVT::v8i16, 2}, // pshuflw + pshufd
1518       {TTI::SK_Broadcast, MVT::v16i8, 3}, // unpck + pshuflw + pshufd
1519 
1520       {TTI::SK_Reverse, MVT::v2f64, 1}, // shufpd
1521       {TTI::SK_Reverse, MVT::v2i64, 1}, // pshufd
1522       {TTI::SK_Reverse, MVT::v4i32, 1}, // pshufd
1523       {TTI::SK_Reverse, MVT::v8i16, 3}, // pshuflw + pshufhw + pshufd
1524       {TTI::SK_Reverse, MVT::v16i8, 9}, // 2*pshuflw + 2*pshufhw
1525                                         // + 2*pshufd + 2*unpck + packus
1526 
1527       {TTI::SK_Select, MVT::v2i64, 1}, // movsd
1528       {TTI::SK_Select, MVT::v2f64, 1}, // movsd
1529       {TTI::SK_Select, MVT::v4i32, 2}, // 2*shufps
1530       {TTI::SK_Select, MVT::v8i16, 3}, // pand + pandn + por
1531       {TTI::SK_Select, MVT::v16i8, 3}, // pand + pandn + por
1532 
1533       {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1}, // shufpd
1534       {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1}, // pshufd
1535       {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1}, // pshufd
1536       {TTI::SK_PermuteSingleSrc, MVT::v8i16, 5}, // 2*pshuflw + 2*pshufhw
1537                                                   // + pshufd/unpck
1538     { TTI::SK_PermuteSingleSrc, MVT::v16i8, 10 }, // 2*pshuflw + 2*pshufhw
1539                                                   // + 2*pshufd + 2*unpck + 2*packus
1540 
1541     { TTI::SK_PermuteTwoSrc,    MVT::v2f64,  1 }, // shufpd
1542     { TTI::SK_PermuteTwoSrc,    MVT::v2i64,  1 }, // shufpd
1543     { TTI::SK_PermuteTwoSrc,    MVT::v4i32,  2 }, // 2*{unpck,movsd,pshufd}
1544     { TTI::SK_PermuteTwoSrc,    MVT::v8i16,  8 }, // blend+permute
1545     { TTI::SK_PermuteTwoSrc,    MVT::v16i8, 13 }, // blend+permute
1546   };
1547 
1548   if (ST->hasSSE2())
1549     if (const auto *Entry = CostTableLookup(SSE2ShuffleTbl, Kind, LT.second))
1550       return LT.first * Entry->Cost;
1551 
1552   static const CostTblEntry SSE1ShuffleTbl[] = {
1553     { TTI::SK_Broadcast,        MVT::v4f32, 1 }, // shufps
1554     { TTI::SK_Reverse,          MVT::v4f32, 1 }, // shufps
1555     { TTI::SK_Select,           MVT::v4f32, 2 }, // 2*shufps
1556     { TTI::SK_PermuteSingleSrc, MVT::v4f32, 1 }, // shufps
1557     { TTI::SK_PermuteTwoSrc,    MVT::v4f32, 2 }, // 2*shufps
1558   };
1559 
1560   if (ST->hasSSE1())
1561     if (const auto *Entry = CostTableLookup(SSE1ShuffleTbl, Kind, LT.second))
1562       return LT.first * Entry->Cost;
1563 
1564   return BaseT::getShuffleCost(Kind, BaseTp, Mask, Index, SubTp);
1565 }
1566 
1567 InstructionCost X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
1568                                              Type *Src,
1569                                              TTI::CastContextHint CCH,
1570                                              TTI::TargetCostKind CostKind,
1571                                              const Instruction *I) {
1572   int ISD = TLI->InstructionOpcodeToISD(Opcode);
1573   assert(ISD && "Invalid opcode");
1574 
1575   // TODO: Allow non-throughput costs that aren't binary.
1576   auto AdjustCost = [&CostKind](InstructionCost Cost) -> InstructionCost {
1577     if (CostKind != TTI::TCK_RecipThroughput)
1578       return Cost == 0 ? 0 : 1;
1579     return Cost;
1580   };
1581 
1582   // The cost tables include both specific, custom (non-legal) src/dst type
1583   // conversions and generic, legalized types. We test for customs first, before
1584   // falling back to legalization.
1585   // FIXME: Need a better design of the cost table to handle non-simple types of
1586   // potential massive combinations (elem_num x src_type x dst_type).
1587   static const TypeConversionCostTblEntry AVX512BWConversionTbl[] {
1588     { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 1 },
1589     { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 1 },
1590 
1591     // Mask sign extend has an instruction.
1592     { ISD::SIGN_EXTEND, MVT::v2i8,   MVT::v2i1,   1 },
1593     { ISD::SIGN_EXTEND, MVT::v16i8,  MVT::v2i1,   1 },
1594     { ISD::SIGN_EXTEND, MVT::v2i16,  MVT::v2i1,   1 },
1595     { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v2i1,   1 },
1596     { ISD::SIGN_EXTEND, MVT::v4i8,   MVT::v4i1,   1 },
1597     { ISD::SIGN_EXTEND, MVT::v16i8,  MVT::v4i1,   1 },
1598     { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i1,   1 },
1599     { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v4i1,   1 },
1600     { ISD::SIGN_EXTEND, MVT::v8i8,   MVT::v8i1,   1 },
1601     { ISD::SIGN_EXTEND, MVT::v16i8,  MVT::v8i1,   1 },
1602     { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i1,   1 },
1603     { ISD::SIGN_EXTEND, MVT::v16i8,  MVT::v16i1,  1 },
1604     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1,  1 },
1605     { ISD::SIGN_EXTEND, MVT::v32i8,  MVT::v32i1,  1 },
1606     { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i1,  1 },
1607     { ISD::SIGN_EXTEND, MVT::v64i8,  MVT::v64i1,  1 },
1608     { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v64i1,  1 },
1609 
1610     // Mask zero extend is a sext + shift.
1611     { ISD::ZERO_EXTEND, MVT::v2i8,   MVT::v2i1,   2 },
1612     { ISD::ZERO_EXTEND, MVT::v16i8,  MVT::v2i1,   2 },
1613     { ISD::ZERO_EXTEND, MVT::v2i16,  MVT::v2i1,   2 },
1614     { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v2i1,   2 },
1615     { ISD::ZERO_EXTEND, MVT::v4i8,   MVT::v4i1,   2 },
1616     { ISD::ZERO_EXTEND, MVT::v16i8,  MVT::v4i1,   2 },
1617     { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i1,   2 },
1618     { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v4i1,   2 },
1619     { ISD::ZERO_EXTEND, MVT::v8i8,   MVT::v8i1,   2 },
1620     { ISD::ZERO_EXTEND, MVT::v16i8,  MVT::v8i1,   2 },
1621     { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i1,   2 },
1622     { ISD::ZERO_EXTEND, MVT::v16i8,  MVT::v16i1,  2 },
1623     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1,  2 },
1624     { ISD::ZERO_EXTEND, MVT::v32i8,  MVT::v32i1,  2 },
1625     { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i1,  2 },
1626     { ISD::ZERO_EXTEND, MVT::v64i8,  MVT::v64i1,  2 },
1627     { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v64i1,  2 },
1628 
1629     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i8,   2 },
1630     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v16i8,  2 },
1631     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i16,  2 },
1632     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v8i16,  2 },
1633     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i8,   2 },
1634     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v16i8,  2 },
1635     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i16,  2 },
1636     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v8i16,  2 },
1637     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i8,   2 },
1638     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v16i8,  2 },
1639     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i16,  2 },
1640     { ISD::TRUNCATE,    MVT::v16i1,  MVT::v16i8,  2 },
1641     { ISD::TRUNCATE,    MVT::v16i1,  MVT::v16i16, 2 },
1642     { ISD::TRUNCATE,    MVT::v32i1,  MVT::v32i8,  2 },
1643     { ISD::TRUNCATE,    MVT::v32i1,  MVT::v32i16, 2 },
1644     { ISD::TRUNCATE,    MVT::v64i1,  MVT::v64i8,  2 },
1645     { ISD::TRUNCATE,    MVT::v64i1,  MVT::v32i16, 2 },
1646 
1647     { ISD::TRUNCATE,    MVT::v32i8,  MVT::v32i16, 2 },
1648     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 2 }, // widen to zmm
1649     { ISD::TRUNCATE,    MVT::v2i8,   MVT::v2i16,  2 }, // vpmovwb
1650     { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i16,  2 }, // vpmovwb
1651     { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i16,  2 }, // vpmovwb
1652   };
1653 
1654   static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = {
1655     // Mask sign extend has an instruction.
1656     { ISD::SIGN_EXTEND, MVT::v2i64,  MVT::v2i1,   1 },
1657     { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v2i1,   1 },
1658     { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i1,   1 },
1659     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   1 },
1660     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   1 },
1661     { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v16i1,  1 },
1662     { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i1,   1 },
1663     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1,  1 },
1664 
1665     // Mask zero extend is a sext + shift.
1666     { ISD::ZERO_EXTEND, MVT::v2i64,  MVT::v2i1,   2 },
1667     { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v2i1,   2 },
1668     { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i1,   2 },
1669     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   2 },
1670     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   2 },
1671     { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v16i1,  2 },
1672     { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i1,   2 },
1673     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1,  2 },
1674 
1675     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i64,  2 },
1676     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v4i32,  2 },
1677     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i32,  2 },
1678     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i64,  2 },
1679     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i32,  2 },
1680     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i64,  2 },
1681     { ISD::TRUNCATE,    MVT::v16i1,  MVT::v16i32, 2 },
1682     { ISD::TRUNCATE,    MVT::v16i1,  MVT::v8i64,  2 },
1683 
1684     { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i64,  1 },
1685     { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i64,  1 },
1686 
1687     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i64,  1 },
1688     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i64,  1 },
1689 
1690     { ISD::FP_TO_SINT,  MVT::v8i64,  MVT::v8f32,  1 },
1691     { ISD::FP_TO_SINT,  MVT::v8i64,  MVT::v8f64,  1 },
1692 
1693     { ISD::FP_TO_UINT,  MVT::v8i64,  MVT::v8f32,  1 },
1694     { ISD::FP_TO_UINT,  MVT::v8i64,  MVT::v8f64,  1 },
1695   };
1696 
1697   // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and
1698   // 256-bit wide vectors.
1699 
1700   static const TypeConversionCostTblEntry AVX512FConversionTbl[] = {
1701     { ISD::FP_EXTEND, MVT::v8f64,   MVT::v8f32,  1 },
1702     { ISD::FP_EXTEND, MVT::v8f64,   MVT::v16f32, 3 },
1703     { ISD::FP_ROUND,  MVT::v8f32,   MVT::v8f64,  1 },
1704 
1705     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i8,   3 }, // sext+vpslld+vptestmd
1706     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i8,   3 }, // sext+vpslld+vptestmd
1707     { ISD::TRUNCATE,  MVT::v8i1,    MVT::v8i8,   3 }, // sext+vpslld+vptestmd
1708     { ISD::TRUNCATE,  MVT::v16i1,   MVT::v16i8,  3 }, // sext+vpslld+vptestmd
1709     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i16,  3 }, // sext+vpsllq+vptestmq
1710     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i16,  3 }, // sext+vpsllq+vptestmq
1711     { ISD::TRUNCATE,  MVT::v8i1,    MVT::v8i16,  3 }, // sext+vpsllq+vptestmq
1712     { ISD::TRUNCATE,  MVT::v16i1,   MVT::v16i16, 3 }, // sext+vpslld+vptestmd
1713     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i32,  2 }, // zmm vpslld+vptestmd
1714     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i32,  2 }, // zmm vpslld+vptestmd
1715     { ISD::TRUNCATE,  MVT::v8i1,    MVT::v8i32,  2 }, // zmm vpslld+vptestmd
1716     { ISD::TRUNCATE,  MVT::v16i1,   MVT::v16i32, 2 }, // vpslld+vptestmd
1717     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i64,  2 }, // zmm vpsllq+vptestmq
1718     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i64,  2 }, // zmm vpsllq+vptestmq
1719     { ISD::TRUNCATE,  MVT::v8i1,    MVT::v8i64,  2 }, // vpsllq+vptestmq
1720     { ISD::TRUNCATE,  MVT::v2i8,    MVT::v2i32,  2 }, // vpmovdb
1721     { ISD::TRUNCATE,  MVT::v4i8,    MVT::v4i32,  2 }, // vpmovdb
1722     { ISD::TRUNCATE,  MVT::v16i8,   MVT::v16i32, 2 }, // vpmovdb
1723     { ISD::TRUNCATE,  MVT::v32i8,   MVT::v16i32, 2 }, // vpmovdb
1724     { ISD::TRUNCATE,  MVT::v64i8,   MVT::v16i32, 2 }, // vpmovdb
1725     { ISD::TRUNCATE,  MVT::v16i16,  MVT::v16i32, 2 }, // vpmovdw
1726     { ISD::TRUNCATE,  MVT::v32i16,  MVT::v16i32, 2 }, // vpmovdw
1727     { ISD::TRUNCATE,  MVT::v2i8,    MVT::v2i64,  2 }, // vpmovqb
1728     { ISD::TRUNCATE,  MVT::v2i16,   MVT::v2i64,  1 }, // vpshufb
1729     { ISD::TRUNCATE,  MVT::v8i8,    MVT::v8i64,  2 }, // vpmovqb
1730     { ISD::TRUNCATE,  MVT::v16i8,   MVT::v8i64,  2 }, // vpmovqb
1731     { ISD::TRUNCATE,  MVT::v32i8,   MVT::v8i64,  2 }, // vpmovqb
1732     { ISD::TRUNCATE,  MVT::v64i8,   MVT::v8i64,  2 }, // vpmovqb
1733     { ISD::TRUNCATE,  MVT::v8i16,   MVT::v8i64,  2 }, // vpmovqw
1734     { ISD::TRUNCATE,  MVT::v16i16,  MVT::v8i64,  2 }, // vpmovqw
1735     { ISD::TRUNCATE,  MVT::v32i16,  MVT::v8i64,  2 }, // vpmovqw
1736     { ISD::TRUNCATE,  MVT::v8i32,   MVT::v8i64,  1 }, // vpmovqd
1737     { ISD::TRUNCATE,  MVT::v4i32,   MVT::v4i64,  1 }, // zmm vpmovqd
1738     { ISD::TRUNCATE,  MVT::v16i8,   MVT::v16i64, 5 },// 2*vpmovqd+concat+vpmovdb
1739 
1740     { ISD::TRUNCATE,  MVT::v16i8,  MVT::v16i16,  3 }, // extend to v16i32
1741     { ISD::TRUNCATE,  MVT::v32i8,  MVT::v32i16,  8 },
1742     { ISD::TRUNCATE,  MVT::v64i8,  MVT::v32i16,  8 },
1743 
1744     // Sign extend is zmm vpternlogd+vptruncdb.
1745     // Zero extend is zmm broadcast load+vptruncdw.
1746     { ISD::SIGN_EXTEND, MVT::v2i8,   MVT::v2i1,   3 },
1747     { ISD::ZERO_EXTEND, MVT::v2i8,   MVT::v2i1,   4 },
1748     { ISD::SIGN_EXTEND, MVT::v4i8,   MVT::v4i1,   3 },
1749     { ISD::ZERO_EXTEND, MVT::v4i8,   MVT::v4i1,   4 },
1750     { ISD::SIGN_EXTEND, MVT::v8i8,   MVT::v8i1,   3 },
1751     { ISD::ZERO_EXTEND, MVT::v8i8,   MVT::v8i1,   4 },
1752     { ISD::SIGN_EXTEND, MVT::v16i8,  MVT::v16i1,  3 },
1753     { ISD::ZERO_EXTEND, MVT::v16i8,  MVT::v16i1,  4 },
1754 
1755     // Sign extend is zmm vpternlogd+vptruncdw.
1756     // Zero extend is zmm vpternlogd+vptruncdw+vpsrlw.
1757     { ISD::SIGN_EXTEND, MVT::v2i16,  MVT::v2i1,   3 },
1758     { ISD::ZERO_EXTEND, MVT::v2i16,  MVT::v2i1,   4 },
1759     { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i1,   3 },
1760     { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i1,   4 },
1761     { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i1,   3 },
1762     { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i1,   4 },
1763     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1,  3 },
1764     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1,  4 },
1765 
1766     { ISD::SIGN_EXTEND, MVT::v2i32,  MVT::v2i1,   1 }, // zmm vpternlogd
1767     { ISD::ZERO_EXTEND, MVT::v2i32,  MVT::v2i1,   2 }, // zmm vpternlogd+psrld
1768     { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i1,   1 }, // zmm vpternlogd
1769     { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i1,   2 }, // zmm vpternlogd+psrld
1770     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   1 }, // zmm vpternlogd
1771     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   2 }, // zmm vpternlogd+psrld
1772     { ISD::SIGN_EXTEND, MVT::v2i64,  MVT::v2i1,   1 }, // zmm vpternlogq
1773     { ISD::ZERO_EXTEND, MVT::v2i64,  MVT::v2i1,   2 }, // zmm vpternlogq+psrlq
1774     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   1 }, // zmm vpternlogq
1775     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   2 }, // zmm vpternlogq+psrlq
1776 
1777     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1,  1 }, // vpternlogd
1778     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1,  2 }, // vpternlogd+psrld
1779     { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i1,   1 }, // vpternlogq
1780     { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i1,   2 }, // vpternlogq+psrlq
1781 
1782     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
1783     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
1784     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
1785     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
1786     { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i8,   1 },
1787     { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i8,   1 },
1788     { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i16,  1 },
1789     { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i16,  1 },
1790     { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i32,  1 },
1791     { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i32,  1 },
1792 
1793     { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8,  3 }, // FIXME: May not be right
1794     { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8,  3 }, // FIXME: May not be right
1795 
1796     { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i1,   4 },
1797     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i1,  3 },
1798     { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v16i8,  2 },
1799     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i8,  1 },
1800     { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i16,  2 },
1801     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 1 },
1802     { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  1 },
1803     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 1 },
1804 
1805     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i1,   4 },
1806     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i1,  3 },
1807     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v16i8,  2 },
1808     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i8,  1 },
1809     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i16,  2 },
1810     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i16, 1 },
1811     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  1 },
1812     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i32, 1 },
1813     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i64, 26 },
1814     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i64,  5 },
1815 
1816     { ISD::FP_TO_SINT,  MVT::v16i8,  MVT::v16f32, 2 },
1817     { ISD::FP_TO_SINT,  MVT::v16i8,  MVT::v16f64, 7 },
1818     { ISD::FP_TO_SINT,  MVT::v32i8,  MVT::v32f64,15 },
1819     { ISD::FP_TO_SINT,  MVT::v64i8,  MVT::v64f32,11 },
1820     { ISD::FP_TO_SINT,  MVT::v64i8,  MVT::v64f64,31 },
1821     { ISD::FP_TO_SINT,  MVT::v8i16,  MVT::v8f64,  3 },
1822     { ISD::FP_TO_SINT,  MVT::v16i16, MVT::v16f64, 7 },
1823     { ISD::FP_TO_SINT,  MVT::v32i16, MVT::v32f32, 5 },
1824     { ISD::FP_TO_SINT,  MVT::v32i16, MVT::v32f64,15 },
1825     { ISD::FP_TO_SINT,  MVT::v8i32,  MVT::v8f64,  1 },
1826     { ISD::FP_TO_SINT,  MVT::v16i32, MVT::v16f64, 3 },
1827 
1828     { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f64,  1 },
1829     { ISD::FP_TO_UINT,  MVT::v8i16,  MVT::v8f64,  3 },
1830     { ISD::FP_TO_UINT,  MVT::v8i8,   MVT::v8f64,  3 },
1831     { ISD::FP_TO_UINT,  MVT::v16i32, MVT::v16f32, 1 },
1832     { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v16f32, 3 },
1833     { ISD::FP_TO_UINT,  MVT::v16i8,  MVT::v16f32, 3 },
1834   };
1835 
1836   static const TypeConversionCostTblEntry AVX512BWVLConversionTbl[] {
1837     // Mask sign extend has an instruction.
1838     { ISD::SIGN_EXTEND, MVT::v2i8,   MVT::v2i1,   1 },
1839     { ISD::SIGN_EXTEND, MVT::v16i8,  MVT::v2i1,   1 },
1840     { ISD::SIGN_EXTEND, MVT::v2i16,  MVT::v2i1,   1 },
1841     { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v2i1,   1 },
1842     { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i1,   1 },
1843     { ISD::SIGN_EXTEND, MVT::v16i8,  MVT::v4i1,   1 },
1844     { ISD::SIGN_EXTEND, MVT::v4i8,   MVT::v4i1,   1 },
1845     { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v4i1,   1 },
1846     { ISD::SIGN_EXTEND, MVT::v8i8,   MVT::v8i1,   1 },
1847     { ISD::SIGN_EXTEND, MVT::v16i8,  MVT::v8i1,   1 },
1848     { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i1,   1 },
1849     { ISD::SIGN_EXTEND, MVT::v16i8,  MVT::v16i1,  1 },
1850     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1,  1 },
1851     { ISD::SIGN_EXTEND, MVT::v32i8,  MVT::v32i1,  1 },
1852     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v32i1,  1 },
1853     { ISD::SIGN_EXTEND, MVT::v32i8,  MVT::v64i1,  1 },
1854     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v64i1,  1 },
1855 
1856     // Mask zero extend is a sext + shift.
1857     { ISD::ZERO_EXTEND, MVT::v2i8,   MVT::v2i1,   2 },
1858     { ISD::ZERO_EXTEND, MVT::v16i8,  MVT::v2i1,   2 },
1859     { ISD::ZERO_EXTEND, MVT::v2i16,  MVT::v2i1,   2 },
1860     { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v2i1,   2 },
1861     { ISD::ZERO_EXTEND, MVT::v4i8,   MVT::v4i1,   2 },
1862     { ISD::ZERO_EXTEND, MVT::v16i8,  MVT::v4i1,   2 },
1863     { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i1,   2 },
1864     { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v4i1,   2 },
1865     { ISD::ZERO_EXTEND, MVT::v8i8,   MVT::v8i1,   2 },
1866     { ISD::ZERO_EXTEND, MVT::v16i8,  MVT::v8i1,   2 },
1867     { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i1,   2 },
1868     { ISD::ZERO_EXTEND, MVT::v16i8,  MVT::v16i1,  2 },
1869     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1,  2 },
1870     { ISD::ZERO_EXTEND, MVT::v32i8,  MVT::v32i1,  2 },
1871     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v32i1,  2 },
1872     { ISD::ZERO_EXTEND, MVT::v32i8,  MVT::v64i1,  2 },
1873     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v64i1,  2 },
1874 
1875     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i8,   2 },
1876     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v16i8,  2 },
1877     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i16,  2 },
1878     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v8i16,  2 },
1879     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i8,   2 },
1880     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v16i8,  2 },
1881     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i16,  2 },
1882     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v8i16,  2 },
1883     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i8,   2 },
1884     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v16i8,  2 },
1885     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i16,  2 },
1886     { ISD::TRUNCATE,    MVT::v16i1,  MVT::v16i8,  2 },
1887     { ISD::TRUNCATE,    MVT::v16i1,  MVT::v16i16, 2 },
1888     { ISD::TRUNCATE,    MVT::v32i1,  MVT::v32i8,  2 },
1889     { ISD::TRUNCATE,    MVT::v32i1,  MVT::v16i16, 2 },
1890     { ISD::TRUNCATE,    MVT::v64i1,  MVT::v32i8,  2 },
1891     { ISD::TRUNCATE,    MVT::v64i1,  MVT::v16i16, 2 },
1892 
1893     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 2 },
1894   };
1895 
1896   static const TypeConversionCostTblEntry AVX512DQVLConversionTbl[] = {
1897     // Mask sign extend has an instruction.
1898     { ISD::SIGN_EXTEND, MVT::v2i64,  MVT::v2i1,   1 },
1899     { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v2i1,   1 },
1900     { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i1,   1 },
1901     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v16i1,  1 },
1902     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   1 },
1903     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v8i1,   1 },
1904     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v16i1,  1 },
1905     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   1 },
1906 
1907     // Mask zero extend is a sext + shift.
1908     { ISD::ZERO_EXTEND, MVT::v2i64,  MVT::v2i1,   2 },
1909     { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v2i1,   2 },
1910     { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i1,   2 },
1911     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v16i1,  2 },
1912     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   2 },
1913     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v8i1,   2 },
1914     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v16i1,  2 },
1915     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   2 },
1916 
1917     { ISD::TRUNCATE,    MVT::v16i1,  MVT::v4i64,  2 },
1918     { ISD::TRUNCATE,    MVT::v16i1,  MVT::v8i32,  2 },
1919     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i64,  2 },
1920     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v4i32,  2 },
1921     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i32,  2 },
1922     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i64,  2 },
1923     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v4i64,  2 },
1924     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i32,  2 },
1925 
1926     { ISD::SINT_TO_FP,  MVT::v2f32,  MVT::v2i64,  1 },
1927     { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  1 },
1928     { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i64,  1 },
1929     { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  1 },
1930 
1931     { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i64,  1 },
1932     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  1 },
1933     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i64,  1 },
1934     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  1 },
1935 
1936     { ISD::FP_TO_SINT,  MVT::v2i64,  MVT::v4f32,  1 },
1937     { ISD::FP_TO_SINT,  MVT::v4i64,  MVT::v4f32,  1 },
1938     { ISD::FP_TO_SINT,  MVT::v2i64,  MVT::v2f64,  1 },
1939     { ISD::FP_TO_SINT,  MVT::v4i64,  MVT::v4f64,  1 },
1940 
1941     { ISD::FP_TO_UINT,  MVT::v2i64,  MVT::v4f32,  1 },
1942     { ISD::FP_TO_UINT,  MVT::v4i64,  MVT::v4f32,  1 },
1943     { ISD::FP_TO_UINT,  MVT::v2i64,  MVT::v2f64,  1 },
1944     { ISD::FP_TO_UINT,  MVT::v4i64,  MVT::v4f64,  1 },
1945   };
1946 
1947   static const TypeConversionCostTblEntry AVX512VLConversionTbl[] = {
1948     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i8,   3 }, // sext+vpslld+vptestmd
1949     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i8,   3 }, // sext+vpslld+vptestmd
1950     { ISD::TRUNCATE,  MVT::v8i1,    MVT::v8i8,   3 }, // sext+vpslld+vptestmd
1951     { ISD::TRUNCATE,  MVT::v16i1,   MVT::v16i8,  8 }, // split+2*v8i8
1952     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i16,  3 }, // sext+vpsllq+vptestmq
1953     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i16,  3 }, // sext+vpsllq+vptestmq
1954     { ISD::TRUNCATE,  MVT::v8i1,    MVT::v8i16,  3 }, // sext+vpsllq+vptestmq
1955     { ISD::TRUNCATE,  MVT::v16i1,   MVT::v16i16, 8 }, // split+2*v8i16
1956     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i32,  2 }, // vpslld+vptestmd
1957     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i32,  2 }, // vpslld+vptestmd
1958     { ISD::TRUNCATE,  MVT::v8i1,    MVT::v8i32,  2 }, // vpslld+vptestmd
1959     { ISD::TRUNCATE,  MVT::v2i1,    MVT::v2i64,  2 }, // vpsllq+vptestmq
1960     { ISD::TRUNCATE,  MVT::v4i1,    MVT::v4i64,  2 }, // vpsllq+vptestmq
1961     { ISD::TRUNCATE,  MVT::v4i32,   MVT::v4i64,  1 }, // vpmovqd
1962     { ISD::TRUNCATE,  MVT::v4i8,    MVT::v4i64,  2 }, // vpmovqb
1963     { ISD::TRUNCATE,  MVT::v4i16,   MVT::v4i64,  2 }, // vpmovqw
1964     { ISD::TRUNCATE,  MVT::v8i8,    MVT::v8i32,  2 }, // vpmovwb
1965 
1966     // sign extend is vpcmpeq+maskedmove+vpmovdw+vpacksswb
1967     // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw+vpackuswb
1968     { ISD::SIGN_EXTEND, MVT::v2i8,   MVT::v2i1,   5 },
1969     { ISD::ZERO_EXTEND, MVT::v2i8,   MVT::v2i1,   6 },
1970     { ISD::SIGN_EXTEND, MVT::v4i8,   MVT::v4i1,   5 },
1971     { ISD::ZERO_EXTEND, MVT::v4i8,   MVT::v4i1,   6 },
1972     { ISD::SIGN_EXTEND, MVT::v8i8,   MVT::v8i1,   5 },
1973     { ISD::ZERO_EXTEND, MVT::v8i8,   MVT::v8i1,   6 },
1974     { ISD::SIGN_EXTEND, MVT::v16i8,  MVT::v16i1, 10 },
1975     { ISD::ZERO_EXTEND, MVT::v16i8,  MVT::v16i1, 12 },
1976 
1977     // sign extend is vpcmpeq+maskedmove+vpmovdw
1978     // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw
1979     { ISD::SIGN_EXTEND, MVT::v2i16,  MVT::v2i1,   4 },
1980     { ISD::ZERO_EXTEND, MVT::v2i16,  MVT::v2i1,   5 },
1981     { ISD::SIGN_EXTEND, MVT::v4i16,  MVT::v4i1,   4 },
1982     { ISD::ZERO_EXTEND, MVT::v4i16,  MVT::v4i1,   5 },
1983     { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i1,   4 },
1984     { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i1,   5 },
1985     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 10 },
1986     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 12 },
1987 
1988     { ISD::SIGN_EXTEND, MVT::v2i32,  MVT::v2i1,   1 }, // vpternlogd
1989     { ISD::ZERO_EXTEND, MVT::v2i32,  MVT::v2i1,   2 }, // vpternlogd+psrld
1990     { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v4i1,   1 }, // vpternlogd
1991     { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v4i1,   2 }, // vpternlogd+psrld
1992     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   1 }, // vpternlogd
1993     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   2 }, // vpternlogd+psrld
1994     { ISD::SIGN_EXTEND, MVT::v2i64,  MVT::v2i1,   1 }, // vpternlogq
1995     { ISD::ZERO_EXTEND, MVT::v2i64,  MVT::v2i1,   2 }, // vpternlogq+psrlq
1996     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   1 }, // vpternlogq
1997     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   2 }, // vpternlogq+psrlq
1998 
1999     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v16i8,  1 },
2000     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v16i8,  1 },
2001     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v16i8,  1 },
2002     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v16i8,  1 },
2003     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
2004     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
2005     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v8i16,  1 },
2006     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v8i16,  1 },
2007     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
2008     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
2009     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },
2010     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },
2011 
2012     { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v16i8,  1 },
2013     { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v16i8,  1 },
2014     { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v8i16,  1 },
2015     { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  1 },
2016 
2017     { ISD::UINT_TO_FP,  MVT::f32,    MVT::i64,    1 },
2018     { ISD::UINT_TO_FP,  MVT::f64,    MVT::i64,    1 },
2019     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v16i8,  1 },
2020     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v16i8,  1 },
2021     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v8i16,  1 },
2022     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  1 },
2023     { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i32,  1 },
2024     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  1 },
2025     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  1 },
2026     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  1 },
2027     { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i64,  5 },
2028     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  5 },
2029     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  5 },
2030 
2031     { ISD::FP_TO_SINT,  MVT::v16i8,  MVT::v8f32,  2 },
2032     { ISD::FP_TO_SINT,  MVT::v16i8,  MVT::v16f32, 2 },
2033     { ISD::FP_TO_SINT,  MVT::v32i8,  MVT::v32f32, 5 },
2034 
2035     { ISD::FP_TO_UINT,  MVT::i64,    MVT::f32,    1 },
2036     { ISD::FP_TO_UINT,  MVT::i64,    MVT::f64,    1 },
2037     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f32,  1 },
2038     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v2f64,  1 },
2039     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f64,  1 },
2040     { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f32,  1 },
2041     { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f64,  1 },
2042   };
2043 
2044   static const TypeConversionCostTblEntry AVX2ConversionTbl[] = {
2045     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
2046     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
2047     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
2048     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
2049     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1,  1 },
2050     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1,  1 },
2051 
2052     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v16i8,  2 },
2053     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v16i8,  2 },
2054     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v16i8,  2 },
2055     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v16i8,  2 },
2056     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  2 },
2057     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  2 },
2058     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v8i16,  2 },
2059     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v8i16,  2 },
2060     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  2 },
2061     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  2 },
2062     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 3 },
2063     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 3 },
2064     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  2 },
2065     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  2 },
2066 
2067     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i32,  2 },
2068 
2069     { ISD::TRUNCATE,    MVT::v16i16, MVT::v16i32, 4 },
2070     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i32, 4 },
2071     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v8i16,  1 },
2072     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v4i32,  1 },
2073     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v2i64,  1 },
2074     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v8i32,  4 },
2075     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v4i64,  4 },
2076     { ISD::TRUNCATE,    MVT::v8i16,  MVT::v4i32,  1 },
2077     { ISD::TRUNCATE,    MVT::v8i16,  MVT::v2i64,  1 },
2078     { ISD::TRUNCATE,    MVT::v8i16,  MVT::v4i64,  5 },
2079     { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  1 },
2080     { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  2 },
2081 
2082     { ISD::FP_EXTEND,   MVT::v8f64,  MVT::v8f32,  3 },
2083     { ISD::FP_ROUND,    MVT::v8f32,  MVT::v8f64,  3 },
2084 
2085     { ISD::FP_TO_SINT,  MVT::v16i16, MVT::v8f32,  1 },
2086     { ISD::FP_TO_SINT,  MVT::v4i32,  MVT::v4f64,  1 },
2087     { ISD::FP_TO_SINT,  MVT::v8i32,  MVT::v8f32,  1 },
2088     { ISD::FP_TO_SINT,  MVT::v8i32,  MVT::v8f64,  3 },
2089 
2090     { ISD::FP_TO_UINT,  MVT::i64,    MVT::f32,    3 },
2091     { ISD::FP_TO_UINT,  MVT::i64,    MVT::f64,    3 },
2092     { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v8f32,  1 },
2093     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f32,  3 },
2094     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v2f64,  4 },
2095     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f64,  4 },
2096     { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f32,  3 },
2097     { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v4f64,  4 },
2098 
2099     { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v16i8,  2 },
2100     { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v16i8,  2 },
2101     { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v8i16,  2 },
2102     { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  2 },
2103     { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  1 },
2104     { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  1 },
2105     { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  3 },
2106 
2107     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v16i8,  2 },
2108     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v16i8,  2 },
2109     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v8i16,  2 },
2110     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  2 },
2111     { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i32,  2 },
2112     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i32,  1 },
2113     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  2 },
2114     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  2 },
2115     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  2 },
2116     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  4 },
2117   };
2118 
2119   static const TypeConversionCostTblEntry AVXConversionTbl[] = {
2120     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   6 },
2121     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   4 },
2122     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   7 },
2123     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   4 },
2124     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1,  4 },
2125     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1,  4 },
2126 
2127     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v16i8,  3 },
2128     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v16i8,  3 },
2129     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v16i8,  3 },
2130     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v16i8,  3 },
2131     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  3 },
2132     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  3 },
2133     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v8i16,  3 },
2134     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v8i16,  3 },
2135     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  3 },
2136     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  3 },
2137     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  3 },
2138     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  3 },
2139 
2140     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i64,  4 },
2141     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i32,  5 },
2142     { ISD::TRUNCATE,    MVT::v16i1,  MVT::v16i16, 4 },
2143     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i64,  9 },
2144     { ISD::TRUNCATE,    MVT::v16i1,  MVT::v16i64, 11 },
2145 
2146     { ISD::TRUNCATE,    MVT::v16i16, MVT::v16i32, 6 },
2147     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i32, 6 },
2148     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 2 }, // and+extract+packuswb
2149     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v8i32,  5 },
2150     { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  5 },
2151     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v4i64,  5 },
2152     { ISD::TRUNCATE,    MVT::v8i16,  MVT::v4i64,  3 }, // and+extract+2*packusdw
2153     { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  2 },
2154 
2155     { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   3 },
2156     { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   3 },
2157     { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   8 },
2158     { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v16i8,  4 },
2159     { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v16i8,  2 },
2160     { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  4 },
2161     { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v8i16,  2 },
2162     { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  2 },
2163     { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  2 },
2164     { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  4 },
2165     { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v2i64,  5 },
2166     { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i64,  8 },
2167 
2168     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   7 },
2169     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   7 },
2170     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   6 },
2171     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v16i8,  4 },
2172     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v16i8,  2 },
2173     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  4 },
2174     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v8i16,  2 },
2175     { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i32,  4 },
2176     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i32,  4 },
2177     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  5 },
2178     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  6 },
2179     { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  8 },
2180     { ISD::UINT_TO_FP,  MVT::v8f64,  MVT::v8i32, 10 },
2181     { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i64, 10 },
2182     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i64, 18 },
2183     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  5 },
2184     { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64, 10 },
2185 
2186     { ISD::FP_TO_SINT,  MVT::v16i8,  MVT::v8f32,  2 },
2187     { ISD::FP_TO_SINT,  MVT::v16i8,  MVT::v4f64,  2 },
2188     { ISD::FP_TO_SINT,  MVT::v32i8,  MVT::v8f32,  2 },
2189     { ISD::FP_TO_SINT,  MVT::v32i8,  MVT::v4f64,  2 },
2190     { ISD::FP_TO_SINT,  MVT::v8i16,  MVT::v8f32,  2 },
2191     { ISD::FP_TO_SINT,  MVT::v8i16,  MVT::v4f64,  2 },
2192     { ISD::FP_TO_SINT,  MVT::v16i16, MVT::v8f32,  2 },
2193     { ISD::FP_TO_SINT,  MVT::v16i16, MVT::v4f64,  2 },
2194     { ISD::FP_TO_SINT,  MVT::v4i32,  MVT::v4f64,  2 },
2195     { ISD::FP_TO_SINT,  MVT::v8i32,  MVT::v8f32,  2 },
2196     { ISD::FP_TO_SINT,  MVT::v8i32,  MVT::v8f64,  5 },
2197 
2198     { ISD::FP_TO_UINT,  MVT::v16i8,  MVT::v8f32,  2 },
2199     { ISD::FP_TO_UINT,  MVT::v16i8,  MVT::v4f64,  2 },
2200     { ISD::FP_TO_UINT,  MVT::v32i8,  MVT::v8f32,  2 },
2201     { ISD::FP_TO_UINT,  MVT::v32i8,  MVT::v4f64,  2 },
2202     { ISD::FP_TO_UINT,  MVT::v8i16,  MVT::v8f32,  2 },
2203     { ISD::FP_TO_UINT,  MVT::v8i16,  MVT::v4f64,  2 },
2204     { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v8f32,  2 },
2205     { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v4f64,  2 },
2206     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f32,  3 },
2207     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v2f64,  4 },
2208     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f64,  6 },
2209     { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f32,  7 },
2210     { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v4f64,  7 },
2211 
2212     { ISD::FP_EXTEND,   MVT::v4f64,  MVT::v4f32,  1 },
2213     { ISD::FP_ROUND,    MVT::v4f32,  MVT::v4f64,  1 },
2214   };
2215 
2216   static const TypeConversionCostTblEntry SSE41ConversionTbl[] = {
2217     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v16i8,   1 },
2218     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v16i8,   1 },
2219     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v16i8,   1 },
2220     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v16i8,   1 },
2221     { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v16i8,   1 },
2222     { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v16i8,   1 },
2223     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v8i16,   1 },
2224     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v8i16,   1 },
2225     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v8i16,   1 },
2226     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v8i16,   1 },
2227     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v4i32,   1 },
2228     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v4i32,   1 },
2229 
2230     // These truncates end up widening elements.
2231     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i8,   1 }, // PMOVXZBQ
2232     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i16,  1 }, // PMOVXZWQ
2233     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i8,   1 }, // PMOVXZBD
2234 
2235     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v4i32,  2 },
2236     { ISD::TRUNCATE,    MVT::v8i16,  MVT::v4i32,  2 },
2237     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v2i64,  2 },
2238 
2239     { ISD::SINT_TO_FP,  MVT::f32,    MVT::i32,    1 },
2240     { ISD::SINT_TO_FP,  MVT::f64,    MVT::i32,    1 },
2241     { ISD::SINT_TO_FP,  MVT::f32,    MVT::i64,    1 },
2242     { ISD::SINT_TO_FP,  MVT::f64,    MVT::i64,    1 },
2243     { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v16i8,  1 },
2244     { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v16i8,  1 },
2245     { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v8i16,  1 },
2246     { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v8i16,  1 },
2247     { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  1 },
2248     { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v4i32,  1 },
2249     { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  2 },
2250 
2251     { ISD::UINT_TO_FP,  MVT::f32,    MVT::i32,    1 },
2252     { ISD::UINT_TO_FP,  MVT::f64,    MVT::i32,    1 },
2253     { ISD::UINT_TO_FP,  MVT::f32,    MVT::i64,    4 },
2254     { ISD::UINT_TO_FP,  MVT::f64,    MVT::i64,    4 },
2255     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v16i8,  1 },
2256     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v16i8,  1 },
2257     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v8i16,  1 },
2258     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v8i16,  1 },
2259     { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i32,  3 },
2260     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  3 },
2261     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v4i32,  2 },
2262     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v2i64, 12 },
2263     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i64, 22 },
2264     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  4 },
2265 
2266     { ISD::FP_TO_SINT,  MVT::i32,    MVT::f32,    1 },
2267     { ISD::FP_TO_SINT,  MVT::i64,    MVT::f32,    1 },
2268     { ISD::FP_TO_SINT,  MVT::i32,    MVT::f64,    1 },
2269     { ISD::FP_TO_SINT,  MVT::i64,    MVT::f64,    1 },
2270     { ISD::FP_TO_SINT,  MVT::v16i8,  MVT::v4f32,  2 },
2271     { ISD::FP_TO_SINT,  MVT::v16i8,  MVT::v2f64,  2 },
2272     { ISD::FP_TO_SINT,  MVT::v8i16,  MVT::v4f32,  1 },
2273     { ISD::FP_TO_SINT,  MVT::v8i16,  MVT::v2f64,  1 },
2274     { ISD::FP_TO_SINT,  MVT::v4i32,  MVT::v4f32,  1 },
2275     { ISD::FP_TO_SINT,  MVT::v4i32,  MVT::v2f64,  1 },
2276 
2277     { ISD::FP_TO_UINT,  MVT::i32,    MVT::f32,    1 },
2278     { ISD::FP_TO_UINT,  MVT::i64,    MVT::f32,    4 },
2279     { ISD::FP_TO_UINT,  MVT::i32,    MVT::f64,    1 },
2280     { ISD::FP_TO_UINT,  MVT::i64,    MVT::f64,    4 },
2281     { ISD::FP_TO_UINT,  MVT::v16i8,  MVT::v4f32,  2 },
2282     { ISD::FP_TO_UINT,  MVT::v16i8,  MVT::v2f64,  2 },
2283     { ISD::FP_TO_UINT,  MVT::v8i16,  MVT::v4f32,  1 },
2284     { ISD::FP_TO_UINT,  MVT::v8i16,  MVT::v2f64,  1 },
2285     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f32,  4 },
2286     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v2f64,  4 },
2287   };
2288 
2289   static const TypeConversionCostTblEntry SSE2ConversionTbl[] = {
2290     // These are somewhat magic numbers justified by comparing the
2291     // output of llvm-mca for our various supported scheduler models
2292     // and basing it off the worst case scenario.
2293     { ISD::SINT_TO_FP,  MVT::f32,    MVT::i32,    3 },
2294     { ISD::SINT_TO_FP,  MVT::f64,    MVT::i32,    3 },
2295     { ISD::SINT_TO_FP,  MVT::f32,    MVT::i64,    3 },
2296     { ISD::SINT_TO_FP,  MVT::f64,    MVT::i64,    3 },
2297     { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v16i8,  3 },
2298     { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v16i8,  4 },
2299     { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v8i16,  3 },
2300     { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v8i16,  4 },
2301     { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  3 },
2302     { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v4i32,  4 },
2303     { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v2i64,  8 },
2304     { ISD::SINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  8 },
2305 
2306     { ISD::UINT_TO_FP,  MVT::f32,    MVT::i32,    3 },
2307     { ISD::UINT_TO_FP,  MVT::f64,    MVT::i32,    3 },
2308     { ISD::UINT_TO_FP,  MVT::f32,    MVT::i64,    8 },
2309     { ISD::UINT_TO_FP,  MVT::f64,    MVT::i64,    9 },
2310     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v16i8,  4 },
2311     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v16i8,  4 },
2312     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v8i16,  4 },
2313     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v8i16,  4 },
2314     { ISD::UINT_TO_FP,  MVT::v2f32,  MVT::v2i32,  7 },
2315     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v4i32,  7 },
2316     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  5 },
2317     { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64, 15 },
2318     { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v2i64, 18 },
2319 
2320     { ISD::FP_TO_SINT,  MVT::i32,    MVT::f32,    4 },
2321     { ISD::FP_TO_SINT,  MVT::i64,    MVT::f32,    4 },
2322     { ISD::FP_TO_SINT,  MVT::i32,    MVT::f64,    4 },
2323     { ISD::FP_TO_SINT,  MVT::i64,    MVT::f64,    4 },
2324     { ISD::FP_TO_SINT,  MVT::v16i8,  MVT::v4f32,  6 },
2325     { ISD::FP_TO_SINT,  MVT::v16i8,  MVT::v2f64,  6 },
2326     { ISD::FP_TO_SINT,  MVT::v8i16,  MVT::v4f32,  5 },
2327     { ISD::FP_TO_SINT,  MVT::v8i16,  MVT::v2f64,  5 },
2328     { ISD::FP_TO_SINT,  MVT::v4i32,  MVT::v4f32,  4 },
2329     { ISD::FP_TO_SINT,  MVT::v4i32,  MVT::v2f64,  4 },
2330 
2331     { ISD::FP_TO_UINT,  MVT::i32,    MVT::f32,    4 },
2332     { ISD::FP_TO_UINT,  MVT::i64,    MVT::f32,    4 },
2333     { ISD::FP_TO_UINT,  MVT::i32,    MVT::f64,    4 },
2334     { ISD::FP_TO_UINT,  MVT::i64,    MVT::f64,   15 },
2335     { ISD::FP_TO_UINT,  MVT::v16i8,  MVT::v4f32,  6 },
2336     { ISD::FP_TO_UINT,  MVT::v16i8,  MVT::v2f64,  6 },
2337     { ISD::FP_TO_UINT,  MVT::v8i16,  MVT::v4f32,  5 },
2338     { ISD::FP_TO_UINT,  MVT::v8i16,  MVT::v2f64,  5 },
2339     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f32,  8 },
2340     { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v2f64,  8 },
2341 
2342     { ISD::ZERO_EXTEND, MVT::v2i64,  MVT::v16i8,  4 },
2343     { ISD::SIGN_EXTEND, MVT::v2i64,  MVT::v16i8,  4 },
2344     { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v16i8,  2 },
2345     { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v16i8,  3 },
2346     { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v16i8,  1 },
2347     { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v16i8,  2 },
2348     { ISD::ZERO_EXTEND, MVT::v2i64,  MVT::v8i16,  2 },
2349     { ISD::SIGN_EXTEND, MVT::v2i64,  MVT::v8i16,  3 },
2350     { ISD::ZERO_EXTEND, MVT::v4i32,  MVT::v8i16,  1 },
2351     { ISD::SIGN_EXTEND, MVT::v4i32,  MVT::v8i16,  2 },
2352     { ISD::ZERO_EXTEND, MVT::v2i64,  MVT::v4i32,  1 },
2353     { ISD::SIGN_EXTEND, MVT::v2i64,  MVT::v4i32,  2 },
2354 
2355     // These truncates are really widening elements.
2356     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i32,  1 }, // PSHUFD
2357     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i16,  2 }, // PUNPCKLWD+DQ
2358     { ISD::TRUNCATE,    MVT::v2i1,   MVT::v2i8,   3 }, // PUNPCKLBW+WD+PSHUFD
2359     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i16,  1 }, // PUNPCKLWD
2360     { ISD::TRUNCATE,    MVT::v4i1,   MVT::v4i8,   2 }, // PUNPCKLBW+WD
2361     { ISD::TRUNCATE,    MVT::v8i1,   MVT::v8i8,   1 }, // PUNPCKLBW
2362 
2363     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v8i16,  2 }, // PAND+PACKUSWB
2364     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 3 },
2365     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v4i32,  3 }, // PAND+2*PACKUSWB
2366     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i32, 7 },
2367     { ISD::TRUNCATE,    MVT::v2i16,  MVT::v2i32,  1 },
2368     { ISD::TRUNCATE,    MVT::v8i16,  MVT::v4i32,  3 },
2369     { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  5 },
2370     { ISD::TRUNCATE,    MVT::v16i16, MVT::v16i32,10 },
2371     { ISD::TRUNCATE,    MVT::v16i8,  MVT::v2i64,  4 }, // PAND+3*PACKUSWB
2372     { ISD::TRUNCATE,    MVT::v8i16,  MVT::v2i64,  2 }, // PSHUFD+PSHUFLW
2373     { ISD::TRUNCATE,    MVT::v4i32,  MVT::v2i64,  1 }, // PSHUFD
2374   };
2375 
2376   // Attempt to map directly to (simple) MVT types to let us match custom entries.
2377   EVT SrcTy = TLI->getValueType(DL, Src);
2378   EVT DstTy = TLI->getValueType(DL, Dst);
2379 
2380   // The function getSimpleVT only handles simple value types.
2381   if (SrcTy.isSimple() && DstTy.isSimple()) {
2382     MVT SimpleSrcTy = SrcTy.getSimpleVT();
2383     MVT SimpleDstTy = DstTy.getSimpleVT();
2384 
2385     if (ST->useAVX512Regs()) {
2386       if (ST->hasBWI())
2387         if (const auto *Entry = ConvertCostTableLookup(
2388                 AVX512BWConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
2389           return AdjustCost(Entry->Cost);
2390 
2391       if (ST->hasDQI())
2392         if (const auto *Entry = ConvertCostTableLookup(
2393                 AVX512DQConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
2394           return AdjustCost(Entry->Cost);
2395 
2396       if (ST->hasAVX512())
2397         if (const auto *Entry = ConvertCostTableLookup(
2398                 AVX512FConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
2399           return AdjustCost(Entry->Cost);
2400     }
2401 
2402     if (ST->hasBWI())
2403       if (const auto *Entry = ConvertCostTableLookup(
2404               AVX512BWVLConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
2405         return AdjustCost(Entry->Cost);
2406 
2407     if (ST->hasDQI())
2408       if (const auto *Entry = ConvertCostTableLookup(
2409               AVX512DQVLConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
2410         return AdjustCost(Entry->Cost);
2411 
2412     if (ST->hasAVX512())
2413       if (const auto *Entry = ConvertCostTableLookup(AVX512VLConversionTbl, ISD,
2414                                                      SimpleDstTy, SimpleSrcTy))
2415         return AdjustCost(Entry->Cost);
2416 
2417     if (ST->hasAVX2()) {
2418       if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
2419                                                      SimpleDstTy, SimpleSrcTy))
2420         return AdjustCost(Entry->Cost);
2421     }
2422 
2423     if (ST->hasAVX()) {
2424       if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
2425                                                      SimpleDstTy, SimpleSrcTy))
2426         return AdjustCost(Entry->Cost);
2427     }
2428 
2429     if (ST->hasSSE41()) {
2430       if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
2431                                                      SimpleDstTy, SimpleSrcTy))
2432         return AdjustCost(Entry->Cost);
2433     }
2434 
2435     if (ST->hasSSE2()) {
2436       if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
2437                                                      SimpleDstTy, SimpleSrcTy))
2438         return AdjustCost(Entry->Cost);
2439     }
2440   }
2441 
2442   // Fall back to legalized types.
2443   std::pair<InstructionCost, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
2444   std::pair<InstructionCost, MVT> LTDest =
2445       TLI->getTypeLegalizationCost(DL, Dst);
2446 
2447   if (ST->useAVX512Regs()) {
2448     if (ST->hasBWI())
2449       if (const auto *Entry = ConvertCostTableLookup(
2450               AVX512BWConversionTbl, ISD, LTDest.second, LTSrc.second))
2451         return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2452 
2453     if (ST->hasDQI())
2454       if (const auto *Entry = ConvertCostTableLookup(
2455               AVX512DQConversionTbl, ISD, LTDest.second, LTSrc.second))
2456         return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2457 
2458     if (ST->hasAVX512())
2459       if (const auto *Entry = ConvertCostTableLookup(
2460               AVX512FConversionTbl, ISD, LTDest.second, LTSrc.second))
2461         return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2462   }
2463 
2464   if (ST->hasBWI())
2465     if (const auto *Entry = ConvertCostTableLookup(AVX512BWVLConversionTbl, ISD,
2466                                                    LTDest.second, LTSrc.second))
2467       return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2468 
2469   if (ST->hasDQI())
2470     if (const auto *Entry = ConvertCostTableLookup(AVX512DQVLConversionTbl, ISD,
2471                                                    LTDest.second, LTSrc.second))
2472       return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2473 
2474   if (ST->hasAVX512())
2475     if (const auto *Entry = ConvertCostTableLookup(AVX512VLConversionTbl, ISD,
2476                                                    LTDest.second, LTSrc.second))
2477       return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2478 
2479   if (ST->hasAVX2())
2480     if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
2481                                                    LTDest.second, LTSrc.second))
2482       return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2483 
2484   if (ST->hasAVX())
2485     if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
2486                                                    LTDest.second, LTSrc.second))
2487       return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2488 
2489   if (ST->hasSSE41())
2490     if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
2491                                                    LTDest.second, LTSrc.second))
2492       return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2493 
2494   if (ST->hasSSE2())
2495     if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
2496                                                    LTDest.second, LTSrc.second))
2497       return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2498 
2499   // Fallback, for i8/i16 sitofp/uitofp cases we need to extend to i32 for
2500   // sitofp.
2501   if ((ISD == ISD::SINT_TO_FP || ISD == ISD::UINT_TO_FP) &&
2502       1 < Src->getScalarSizeInBits() && Src->getScalarSizeInBits() < 32) {
2503     Type *ExtSrc = Src->getWithNewBitWidth(32);
2504     unsigned ExtOpc =
2505         (ISD == ISD::SINT_TO_FP) ? Instruction::SExt : Instruction::ZExt;
2506 
2507     // For scalar loads the extend would be free.
2508     InstructionCost ExtCost = 0;
2509     if (!(Src->isIntegerTy() && I && isa<LoadInst>(I->getOperand(0))))
2510       ExtCost = getCastInstrCost(ExtOpc, ExtSrc, Src, CCH, CostKind);
2511 
2512     return ExtCost + getCastInstrCost(Instruction::SIToFP, Dst, ExtSrc,
2513                                       TTI::CastContextHint::None, CostKind);
2514   }
2515 
2516   // Fallback for fptosi/fptoui i8/i16 cases we need to truncate from fptosi
2517   // i32.
2518   if ((ISD == ISD::FP_TO_SINT || ISD == ISD::FP_TO_UINT) &&
2519       1 < Dst->getScalarSizeInBits() && Dst->getScalarSizeInBits() < 32) {
2520     Type *TruncDst = Dst->getWithNewBitWidth(32);
2521     return getCastInstrCost(Instruction::FPToSI, TruncDst, Src, CCH, CostKind) +
2522            getCastInstrCost(Instruction::Trunc, Dst, TruncDst,
2523                             TTI::CastContextHint::None, CostKind);
2524   }
2525 
2526   return AdjustCost(
2527       BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
2528 }
2529 
2530 InstructionCost X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
2531                                                Type *CondTy,
2532                                                CmpInst::Predicate VecPred,
2533                                                TTI::TargetCostKind CostKind,
2534                                                const Instruction *I) {
2535   // TODO: Handle other cost kinds.
2536   if (CostKind != TTI::TCK_RecipThroughput)
2537     return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
2538                                      I);
2539 
2540   // Legalize the type.
2541   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
2542 
2543   MVT MTy = LT.second;
2544 
2545   int ISD = TLI->InstructionOpcodeToISD(Opcode);
2546   assert(ISD && "Invalid opcode");
2547 
2548   unsigned ExtraCost = 0;
2549   if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) {
2550     // Some vector comparison predicates cost extra instructions.
2551     // TODO: Should we invert this and assume worst case cmp costs
2552     // and reduce for particular predicates?
2553     if (MTy.isVector() &&
2554         !((ST->hasXOP() && (!ST->hasAVX2() || MTy.is128BitVector())) ||
2555           (ST->hasAVX512() && 32 <= MTy.getScalarSizeInBits()) ||
2556           ST->hasBWI())) {
2557       // Fallback to I if a specific predicate wasn't specified.
2558       CmpInst::Predicate Pred = VecPred;
2559       if (I && (Pred == CmpInst::BAD_ICMP_PREDICATE ||
2560                 Pred == CmpInst::BAD_FCMP_PREDICATE))
2561         Pred = cast<CmpInst>(I)->getPredicate();
2562 
2563       switch (Pred) {
2564       case CmpInst::Predicate::ICMP_NE:
2565         // xor(cmpeq(x,y),-1)
2566         ExtraCost = 1;
2567         break;
2568       case CmpInst::Predicate::ICMP_SGE:
2569       case CmpInst::Predicate::ICMP_SLE:
2570         // xor(cmpgt(x,y),-1)
2571         ExtraCost = 1;
2572         break;
2573       case CmpInst::Predicate::ICMP_ULT:
2574       case CmpInst::Predicate::ICMP_UGT:
2575         // cmpgt(xor(x,signbit),xor(y,signbit))
2576         // xor(cmpeq(pmaxu(x,y),x),-1)
2577         ExtraCost = 2;
2578         break;
2579       case CmpInst::Predicate::ICMP_ULE:
2580       case CmpInst::Predicate::ICMP_UGE:
2581         if ((ST->hasSSE41() && MTy.getScalarSizeInBits() == 32) ||
2582             (ST->hasSSE2() && MTy.getScalarSizeInBits() < 32)) {
2583           // cmpeq(psubus(x,y),0)
2584           // cmpeq(pminu(x,y),x)
2585           ExtraCost = 1;
2586         } else {
2587           // xor(cmpgt(xor(x,signbit),xor(y,signbit)),-1)
2588           ExtraCost = 3;
2589         }
2590         break;
2591       case CmpInst::Predicate::BAD_ICMP_PREDICATE:
2592       case CmpInst::Predicate::BAD_FCMP_PREDICATE:
2593         // Assume worst case scenario and add the maximum extra cost.
2594         ExtraCost = 3;
2595         break;
2596       default:
2597         break;
2598       }
2599     }
2600   }
2601 
2602   static const CostTblEntry SLMCostTbl[] = {
2603     // slm pcmpeq/pcmpgt throughput is 2
2604     { ISD::SETCC,   MVT::v2i64,   2 },
2605   };
2606 
2607   static const CostTblEntry AVX512BWCostTbl[] = {
2608     { ISD::SETCC,   MVT::v32i16,  1 },
2609     { ISD::SETCC,   MVT::v64i8,   1 },
2610 
2611     { ISD::SELECT,  MVT::v32i16,  1 },
2612     { ISD::SELECT,  MVT::v64i8,   1 },
2613   };
2614 
2615   static const CostTblEntry AVX512CostTbl[] = {
2616     { ISD::SETCC,   MVT::v8i64,   1 },
2617     { ISD::SETCC,   MVT::v16i32,  1 },
2618     { ISD::SETCC,   MVT::v8f64,   1 },
2619     { ISD::SETCC,   MVT::v16f32,  1 },
2620 
2621     { ISD::SELECT,  MVT::v8i64,   1 },
2622     { ISD::SELECT,  MVT::v16i32,  1 },
2623     { ISD::SELECT,  MVT::v8f64,   1 },
2624     { ISD::SELECT,  MVT::v16f32,  1 },
2625 
2626     { ISD::SETCC,   MVT::v32i16,  2 }, // FIXME: should probably be 4
2627     { ISD::SETCC,   MVT::v64i8,   2 }, // FIXME: should probably be 4
2628 
2629     { ISD::SELECT,  MVT::v32i16,  2 }, // FIXME: should be 3
2630     { ISD::SELECT,  MVT::v64i8,   2 }, // FIXME: should be 3
2631   };
2632 
2633   static const CostTblEntry AVX2CostTbl[] = {
2634     { ISD::SETCC,   MVT::v4i64,   1 },
2635     { ISD::SETCC,   MVT::v8i32,   1 },
2636     { ISD::SETCC,   MVT::v16i16,  1 },
2637     { ISD::SETCC,   MVT::v32i8,   1 },
2638 
2639     { ISD::SELECT,  MVT::v4i64,   1 }, // pblendvb
2640     { ISD::SELECT,  MVT::v8i32,   1 }, // pblendvb
2641     { ISD::SELECT,  MVT::v16i16,  1 }, // pblendvb
2642     { ISD::SELECT,  MVT::v32i8,   1 }, // pblendvb
2643   };
2644 
2645   static const CostTblEntry AVX1CostTbl[] = {
2646     { ISD::SETCC,   MVT::v4f64,   1 },
2647     { ISD::SETCC,   MVT::v8f32,   1 },
2648     // AVX1 does not support 8-wide integer compare.
2649     { ISD::SETCC,   MVT::v4i64,   4 },
2650     { ISD::SETCC,   MVT::v8i32,   4 },
2651     { ISD::SETCC,   MVT::v16i16,  4 },
2652     { ISD::SETCC,   MVT::v32i8,   4 },
2653 
2654     { ISD::SELECT,  MVT::v4f64,   1 }, // vblendvpd
2655     { ISD::SELECT,  MVT::v8f32,   1 }, // vblendvps
2656     { ISD::SELECT,  MVT::v4i64,   1 }, // vblendvpd
2657     { ISD::SELECT,  MVT::v8i32,   1 }, // vblendvps
2658     { ISD::SELECT,  MVT::v16i16,  3 }, // vandps + vandnps + vorps
2659     { ISD::SELECT,  MVT::v32i8,   3 }, // vandps + vandnps + vorps
2660   };
2661 
2662   static const CostTblEntry SSE42CostTbl[] = {
2663     { ISD::SETCC,   MVT::v2f64,   1 },
2664     { ISD::SETCC,   MVT::v4f32,   1 },
2665     { ISD::SETCC,   MVT::v2i64,   1 },
2666   };
2667 
2668   static const CostTblEntry SSE41CostTbl[] = {
2669     { ISD::SELECT,  MVT::v2f64,   1 }, // blendvpd
2670     { ISD::SELECT,  MVT::v4f32,   1 }, // blendvps
2671     { ISD::SELECT,  MVT::v2i64,   1 }, // pblendvb
2672     { ISD::SELECT,  MVT::v4i32,   1 }, // pblendvb
2673     { ISD::SELECT,  MVT::v8i16,   1 }, // pblendvb
2674     { ISD::SELECT,  MVT::v16i8,   1 }, // pblendvb
2675   };
2676 
2677   static const CostTblEntry SSE2CostTbl[] = {
2678     { ISD::SETCC,   MVT::v2f64,   2 },
2679     { ISD::SETCC,   MVT::f64,     1 },
2680     { ISD::SETCC,   MVT::v2i64,   8 },
2681     { ISD::SETCC,   MVT::v4i32,   1 },
2682     { ISD::SETCC,   MVT::v8i16,   1 },
2683     { ISD::SETCC,   MVT::v16i8,   1 },
2684 
2685     { ISD::SELECT,  MVT::v2f64,   3 }, // andpd + andnpd + orpd
2686     { ISD::SELECT,  MVT::v2i64,   3 }, // pand + pandn + por
2687     { ISD::SELECT,  MVT::v4i32,   3 }, // pand + pandn + por
2688     { ISD::SELECT,  MVT::v8i16,   3 }, // pand + pandn + por
2689     { ISD::SELECT,  MVT::v16i8,   3 }, // pand + pandn + por
2690   };
2691 
2692   static const CostTblEntry SSE1CostTbl[] = {
2693     { ISD::SETCC,   MVT::v4f32,   2 },
2694     { ISD::SETCC,   MVT::f32,     1 },
2695 
2696     { ISD::SELECT,  MVT::v4f32,   3 }, // andps + andnps + orps
2697   };
2698 
2699   if (ST->useSLMArithCosts())
2700     if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
2701       return LT.first * (ExtraCost + Entry->Cost);
2702 
2703   if (ST->hasBWI())
2704     if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
2705       return LT.first * (ExtraCost + Entry->Cost);
2706 
2707   if (ST->hasAVX512())
2708     if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
2709       return LT.first * (ExtraCost + Entry->Cost);
2710 
2711   if (ST->hasAVX2())
2712     if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
2713       return LT.first * (ExtraCost + Entry->Cost);
2714 
2715   if (ST->hasAVX())
2716     if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
2717       return LT.first * (ExtraCost + Entry->Cost);
2718 
2719   if (ST->hasSSE42())
2720     if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
2721       return LT.first * (ExtraCost + Entry->Cost);
2722 
2723   if (ST->hasSSE41())
2724     if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
2725       return LT.first * (ExtraCost + Entry->Cost);
2726 
2727   if (ST->hasSSE2())
2728     if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
2729       return LT.first * (ExtraCost + Entry->Cost);
2730 
2731   if (ST->hasSSE1())
2732     if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
2733       return LT.first * (ExtraCost + Entry->Cost);
2734 
2735   return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
2736 }
2737 
2738 unsigned X86TTIImpl::getAtomicMemIntrinsicMaxElementSize() const { return 16; }
2739 
2740 InstructionCost
2741 X86TTIImpl::getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
2742                                            TTI::TargetCostKind CostKind) {
2743 
2744   // Costs should match the codegen from:
2745   // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll
2746   // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll
2747   // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll
2748   // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll
2749   // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll
2750 
2751   // TODO: Overflow intrinsics (*ADDO, *SUBO, *MULO) with vector types are not
2752   //       specialized in these tables yet.
2753   static const CostTblEntry AVX512BITALGCostTbl[] = {
2754     { ISD::CTPOP,      MVT::v32i16,  1 },
2755     { ISD::CTPOP,      MVT::v64i8,   1 },
2756     { ISD::CTPOP,      MVT::v16i16,  1 },
2757     { ISD::CTPOP,      MVT::v32i8,   1 },
2758     { ISD::CTPOP,      MVT::v8i16,   1 },
2759     { ISD::CTPOP,      MVT::v16i8,   1 },
2760   };
2761   static const CostTblEntry AVX512VPOPCNTDQCostTbl[] = {
2762     { ISD::CTPOP,      MVT::v8i64,   1 },
2763     { ISD::CTPOP,      MVT::v16i32,  1 },
2764     { ISD::CTPOP,      MVT::v4i64,   1 },
2765     { ISD::CTPOP,      MVT::v8i32,   1 },
2766     { ISD::CTPOP,      MVT::v2i64,   1 },
2767     { ISD::CTPOP,      MVT::v4i32,   1 },
2768   };
2769   static const CostTblEntry AVX512CDCostTbl[] = {
2770     { ISD::CTLZ,       MVT::v8i64,   1 },
2771     { ISD::CTLZ,       MVT::v16i32,  1 },
2772     { ISD::CTLZ,       MVT::v32i16,  8 },
2773     { ISD::CTLZ,       MVT::v64i8,  20 },
2774     { ISD::CTLZ,       MVT::v4i64,   1 },
2775     { ISD::CTLZ,       MVT::v8i32,   1 },
2776     { ISD::CTLZ,       MVT::v16i16,  4 },
2777     { ISD::CTLZ,       MVT::v32i8,  10 },
2778     { ISD::CTLZ,       MVT::v2i64,   1 },
2779     { ISD::CTLZ,       MVT::v4i32,   1 },
2780     { ISD::CTLZ,       MVT::v8i16,   4 },
2781     { ISD::CTLZ,       MVT::v16i8,   4 },
2782   };
2783   static const CostTblEntry AVX512BWCostTbl[] = {
2784     { ISD::ABS,        MVT::v32i16,  1 },
2785     { ISD::ABS,        MVT::v64i8,   1 },
2786     { ISD::BITREVERSE, MVT::v8i64,   3 },
2787     { ISD::BITREVERSE, MVT::v16i32,  3 },
2788     { ISD::BITREVERSE, MVT::v32i16,  3 },
2789     { ISD::BITREVERSE, MVT::v64i8,   2 },
2790     { ISD::BSWAP,      MVT::v8i64,   1 },
2791     { ISD::BSWAP,      MVT::v16i32,  1 },
2792     { ISD::BSWAP,      MVT::v32i16,  1 },
2793     { ISD::CTLZ,       MVT::v8i64,  23 },
2794     { ISD::CTLZ,       MVT::v16i32, 22 },
2795     { ISD::CTLZ,       MVT::v32i16, 18 },
2796     { ISD::CTLZ,       MVT::v64i8,  17 },
2797     { ISD::CTPOP,      MVT::v8i64,   7 },
2798     { ISD::CTPOP,      MVT::v16i32, 11 },
2799     { ISD::CTPOP,      MVT::v32i16,  9 },
2800     { ISD::CTPOP,      MVT::v64i8,   6 },
2801     { ISD::CTTZ,       MVT::v8i64,  10 },
2802     { ISD::CTTZ,       MVT::v16i32, 14 },
2803     { ISD::CTTZ,       MVT::v32i16, 12 },
2804     { ISD::CTTZ,       MVT::v64i8,   9 },
2805     { ISD::SADDSAT,    MVT::v32i16,  1 },
2806     { ISD::SADDSAT,    MVT::v64i8,   1 },
2807     { ISD::SMAX,       MVT::v32i16,  1 },
2808     { ISD::SMAX,       MVT::v64i8,   1 },
2809     { ISD::SMIN,       MVT::v32i16,  1 },
2810     { ISD::SMIN,       MVT::v64i8,   1 },
2811     { ISD::SSUBSAT,    MVT::v32i16,  1 },
2812     { ISD::SSUBSAT,    MVT::v64i8,   1 },
2813     { ISD::UADDSAT,    MVT::v32i16,  1 },
2814     { ISD::UADDSAT,    MVT::v64i8,   1 },
2815     { ISD::UMAX,       MVT::v32i16,  1 },
2816     { ISD::UMAX,       MVT::v64i8,   1 },
2817     { ISD::UMIN,       MVT::v32i16,  1 },
2818     { ISD::UMIN,       MVT::v64i8,   1 },
2819     { ISD::USUBSAT,    MVT::v32i16,  1 },
2820     { ISD::USUBSAT,    MVT::v64i8,   1 },
2821   };
2822   static const CostTblEntry AVX512CostTbl[] = {
2823     { ISD::ABS,        MVT::v8i64,   1 },
2824     { ISD::ABS,        MVT::v16i32,  1 },
2825     { ISD::ABS,        MVT::v32i16,  2 },
2826     { ISD::ABS,        MVT::v64i8,   2 },
2827     { ISD::ABS,        MVT::v4i64,   1 },
2828     { ISD::ABS,        MVT::v2i64,   1 },
2829     { ISD::BITREVERSE, MVT::v8i64,  36 },
2830     { ISD::BITREVERSE, MVT::v16i32, 24 },
2831     { ISD::BITREVERSE, MVT::v32i16, 10 },
2832     { ISD::BITREVERSE, MVT::v64i8,  10 },
2833     { ISD::BSWAP,      MVT::v8i64,   4 },
2834     { ISD::BSWAP,      MVT::v16i32,  4 },
2835     { ISD::BSWAP,      MVT::v32i16,  4 },
2836     { ISD::CTLZ,       MVT::v8i64,  29 },
2837     { ISD::CTLZ,       MVT::v16i32, 35 },
2838     { ISD::CTLZ,       MVT::v32i16, 28 },
2839     { ISD::CTLZ,       MVT::v64i8,  18 },
2840     { ISD::CTPOP,      MVT::v8i64,  16 },
2841     { ISD::CTPOP,      MVT::v16i32, 24 },
2842     { ISD::CTPOP,      MVT::v32i16, 18 },
2843     { ISD::CTPOP,      MVT::v64i8,  12 },
2844     { ISD::CTTZ,       MVT::v8i64,  20 },
2845     { ISD::CTTZ,       MVT::v16i32, 28 },
2846     { ISD::CTTZ,       MVT::v32i16, 24 },
2847     { ISD::CTTZ,       MVT::v64i8,  18 },
2848     { ISD::SMAX,       MVT::v8i64,   1 },
2849     { ISD::SMAX,       MVT::v16i32,  1 },
2850     { ISD::SMAX,       MVT::v32i16,  2 },
2851     { ISD::SMAX,       MVT::v64i8,   2 },
2852     { ISD::SMAX,       MVT::v4i64,   1 },
2853     { ISD::SMAX,       MVT::v2i64,   1 },
2854     { ISD::SMIN,       MVT::v8i64,   1 },
2855     { ISD::SMIN,       MVT::v16i32,  1 },
2856     { ISD::SMIN,       MVT::v32i16,  2 },
2857     { ISD::SMIN,       MVT::v64i8,   2 },
2858     { ISD::SMIN,       MVT::v4i64,   1 },
2859     { ISD::SMIN,       MVT::v2i64,   1 },
2860     { ISD::UMAX,       MVT::v8i64,   1 },
2861     { ISD::UMAX,       MVT::v16i32,  1 },
2862     { ISD::UMAX,       MVT::v32i16,  2 },
2863     { ISD::UMAX,       MVT::v64i8,   2 },
2864     { ISD::UMAX,       MVT::v4i64,   1 },
2865     { ISD::UMAX,       MVT::v2i64,   1 },
2866     { ISD::UMIN,       MVT::v8i64,   1 },
2867     { ISD::UMIN,       MVT::v16i32,  1 },
2868     { ISD::UMIN,       MVT::v32i16,  2 },
2869     { ISD::UMIN,       MVT::v64i8,   2 },
2870     { ISD::UMIN,       MVT::v4i64,   1 },
2871     { ISD::UMIN,       MVT::v2i64,   1 },
2872     { ISD::USUBSAT,    MVT::v16i32,  2 }, // pmaxud + psubd
2873     { ISD::USUBSAT,    MVT::v2i64,   2 }, // pmaxuq + psubq
2874     { ISD::USUBSAT,    MVT::v4i64,   2 }, // pmaxuq + psubq
2875     { ISD::USUBSAT,    MVT::v8i64,   2 }, // pmaxuq + psubq
2876     { ISD::UADDSAT,    MVT::v16i32,  3 }, // not + pminud + paddd
2877     { ISD::UADDSAT,    MVT::v2i64,   3 }, // not + pminuq + paddq
2878     { ISD::UADDSAT,    MVT::v4i64,   3 }, // not + pminuq + paddq
2879     { ISD::UADDSAT,    MVT::v8i64,   3 }, // not + pminuq + paddq
2880     { ISD::SADDSAT,    MVT::v32i16,  2 },
2881     { ISD::SADDSAT,    MVT::v64i8,   2 },
2882     { ISD::SSUBSAT,    MVT::v32i16,  2 },
2883     { ISD::SSUBSAT,    MVT::v64i8,   2 },
2884     { ISD::UADDSAT,    MVT::v32i16,  2 },
2885     { ISD::UADDSAT,    MVT::v64i8,   2 },
2886     { ISD::USUBSAT,    MVT::v32i16,  2 },
2887     { ISD::USUBSAT,    MVT::v64i8,   2 },
2888     { ISD::FMAXNUM,    MVT::f32,     2 },
2889     { ISD::FMAXNUM,    MVT::v4f32,   2 },
2890     { ISD::FMAXNUM,    MVT::v8f32,   2 },
2891     { ISD::FMAXNUM,    MVT::v16f32,  2 },
2892     { ISD::FMAXNUM,    MVT::f64,     2 },
2893     { ISD::FMAXNUM,    MVT::v2f64,   2 },
2894     { ISD::FMAXNUM,    MVT::v4f64,   2 },
2895     { ISD::FMAXNUM,    MVT::v8f64,   2 },
2896   };
2897   static const CostTblEntry XOPCostTbl[] = {
2898     { ISD::BITREVERSE, MVT::v4i64,   4 },
2899     { ISD::BITREVERSE, MVT::v8i32,   4 },
2900     { ISD::BITREVERSE, MVT::v16i16,  4 },
2901     { ISD::BITREVERSE, MVT::v32i8,   4 },
2902     { ISD::BITREVERSE, MVT::v2i64,   1 },
2903     { ISD::BITREVERSE, MVT::v4i32,   1 },
2904     { ISD::BITREVERSE, MVT::v8i16,   1 },
2905     { ISD::BITREVERSE, MVT::v16i8,   1 },
2906     { ISD::BITREVERSE, MVT::i64,     3 },
2907     { ISD::BITREVERSE, MVT::i32,     3 },
2908     { ISD::BITREVERSE, MVT::i16,     3 },
2909     { ISD::BITREVERSE, MVT::i8,      3 }
2910   };
2911   static const CostTblEntry AVX2CostTbl[] = {
2912     { ISD::ABS,        MVT::v4i64,   2 }, // VBLENDVPD(X,VPSUBQ(0,X),X)
2913     { ISD::ABS,        MVT::v8i32,   1 },
2914     { ISD::ABS,        MVT::v16i16,  1 },
2915     { ISD::ABS,        MVT::v32i8,   1 },
2916     { ISD::BITREVERSE, MVT::v2i64,   3 },
2917     { ISD::BITREVERSE, MVT::v4i64,   3 },
2918     { ISD::BITREVERSE, MVT::v4i32,   3 },
2919     { ISD::BITREVERSE, MVT::v8i32,   3 },
2920     { ISD::BITREVERSE, MVT::v8i16,   3 },
2921     { ISD::BITREVERSE, MVT::v16i16,  3 },
2922     { ISD::BITREVERSE, MVT::v16i8,   3 },
2923     { ISD::BITREVERSE, MVT::v32i8,   3 },
2924     { ISD::BSWAP,      MVT::v4i64,   1 },
2925     { ISD::BSWAP,      MVT::v8i32,   1 },
2926     { ISD::BSWAP,      MVT::v16i16,  1 },
2927     { ISD::CTLZ,       MVT::v2i64,   7 },
2928     { ISD::CTLZ,       MVT::v4i64,   7 },
2929     { ISD::CTLZ,       MVT::v4i32,   5 },
2930     { ISD::CTLZ,       MVT::v8i32,   5 },
2931     { ISD::CTLZ,       MVT::v8i16,   4 },
2932     { ISD::CTLZ,       MVT::v16i16,  4 },
2933     { ISD::CTLZ,       MVT::v16i8,   3 },
2934     { ISD::CTLZ,       MVT::v32i8,   3 },
2935     { ISD::CTPOP,      MVT::v2i64,   3 },
2936     { ISD::CTPOP,      MVT::v4i64,   3 },
2937     { ISD::CTPOP,      MVT::v4i32,   7 },
2938     { ISD::CTPOP,      MVT::v8i32,   7 },
2939     { ISD::CTPOP,      MVT::v8i16,   3 },
2940     { ISD::CTPOP,      MVT::v16i16,  3 },
2941     { ISD::CTPOP,      MVT::v16i8,   2 },
2942     { ISD::CTPOP,      MVT::v32i8,   2 },
2943     { ISD::CTTZ,       MVT::v2i64,   4 },
2944     { ISD::CTTZ,       MVT::v4i64,   4 },
2945     { ISD::CTTZ,       MVT::v4i32,   7 },
2946     { ISD::CTTZ,       MVT::v8i32,   7 },
2947     { ISD::CTTZ,       MVT::v8i16,   4 },
2948     { ISD::CTTZ,       MVT::v16i16,  4 },
2949     { ISD::CTTZ,       MVT::v16i8,   3 },
2950     { ISD::CTTZ,       MVT::v32i8,   3 },
2951     { ISD::SADDSAT,    MVT::v16i16,  1 },
2952     { ISD::SADDSAT,    MVT::v32i8,   1 },
2953     { ISD::SMAX,       MVT::v8i32,   1 },
2954     { ISD::SMAX,       MVT::v16i16,  1 },
2955     { ISD::SMAX,       MVT::v32i8,   1 },
2956     { ISD::SMIN,       MVT::v8i32,   1 },
2957     { ISD::SMIN,       MVT::v16i16,  1 },
2958     { ISD::SMIN,       MVT::v32i8,   1 },
2959     { ISD::SSUBSAT,    MVT::v16i16,  1 },
2960     { ISD::SSUBSAT,    MVT::v32i8,   1 },
2961     { ISD::UADDSAT,    MVT::v16i16,  1 },
2962     { ISD::UADDSAT,    MVT::v32i8,   1 },
2963     { ISD::UADDSAT,    MVT::v8i32,   3 }, // not + pminud + paddd
2964     { ISD::UMAX,       MVT::v8i32,   1 },
2965     { ISD::UMAX,       MVT::v16i16,  1 },
2966     { ISD::UMAX,       MVT::v32i8,   1 },
2967     { ISD::UMIN,       MVT::v8i32,   1 },
2968     { ISD::UMIN,       MVT::v16i16,  1 },
2969     { ISD::UMIN,       MVT::v32i8,   1 },
2970     { ISD::USUBSAT,    MVT::v16i16,  1 },
2971     { ISD::USUBSAT,    MVT::v32i8,   1 },
2972     { ISD::USUBSAT,    MVT::v8i32,   2 }, // pmaxud + psubd
2973     { ISD::FMAXNUM,    MVT::v8f32,   3 }, // MAXPS + CMPUNORDPS + BLENDVPS
2974     { ISD::FMAXNUM,    MVT::v4f64,   3 }, // MAXPD + CMPUNORDPD + BLENDVPD
2975     { ISD::FSQRT,      MVT::f32,     7 }, // Haswell from http://www.agner.org/
2976     { ISD::FSQRT,      MVT::v4f32,   7 }, // Haswell from http://www.agner.org/
2977     { ISD::FSQRT,      MVT::v8f32,  14 }, // Haswell from http://www.agner.org/
2978     { ISD::FSQRT,      MVT::f64,    14 }, // Haswell from http://www.agner.org/
2979     { ISD::FSQRT,      MVT::v2f64,  14 }, // Haswell from http://www.agner.org/
2980     { ISD::FSQRT,      MVT::v4f64,  28 }, // Haswell from http://www.agner.org/
2981   };
2982   static const CostTblEntry AVX1CostTbl[] = {
2983     { ISD::ABS,        MVT::v4i64,   5 }, // VBLENDVPD(X,VPSUBQ(0,X),X)
2984     { ISD::ABS,        MVT::v8i32,   3 },
2985     { ISD::ABS,        MVT::v16i16,  3 },
2986     { ISD::ABS,        MVT::v32i8,   3 },
2987     { ISD::BITREVERSE, MVT::v4i64,  12 }, // 2 x 128-bit Op + extract/insert
2988     { ISD::BITREVERSE, MVT::v8i32,  12 }, // 2 x 128-bit Op + extract/insert
2989     { ISD::BITREVERSE, MVT::v16i16, 12 }, // 2 x 128-bit Op + extract/insert
2990     { ISD::BITREVERSE, MVT::v32i8,  12 }, // 2 x 128-bit Op + extract/insert
2991     { ISD::BSWAP,      MVT::v4i64,   4 },
2992     { ISD::BSWAP,      MVT::v8i32,   4 },
2993     { ISD::BSWAP,      MVT::v16i16,  4 },
2994     { ISD::CTLZ,       MVT::v4i64,  48 }, // 2 x 128-bit Op + extract/insert
2995     { ISD::CTLZ,       MVT::v8i32,  38 }, // 2 x 128-bit Op + extract/insert
2996     { ISD::CTLZ,       MVT::v16i16, 30 }, // 2 x 128-bit Op + extract/insert
2997     { ISD::CTLZ,       MVT::v32i8,  20 }, // 2 x 128-bit Op + extract/insert
2998     { ISD::CTPOP,      MVT::v4i64,  16 }, // 2 x 128-bit Op + extract/insert
2999     { ISD::CTPOP,      MVT::v8i32,  24 }, // 2 x 128-bit Op + extract/insert
3000     { ISD::CTPOP,      MVT::v16i16, 20 }, // 2 x 128-bit Op + extract/insert
3001     { ISD::CTPOP,      MVT::v32i8,  14 }, // 2 x 128-bit Op + extract/insert
3002     { ISD::CTTZ,       MVT::v4i64,  22 }, // 2 x 128-bit Op + extract/insert
3003     { ISD::CTTZ,       MVT::v8i32,  30 }, // 2 x 128-bit Op + extract/insert
3004     { ISD::CTTZ,       MVT::v16i16, 26 }, // 2 x 128-bit Op + extract/insert
3005     { ISD::CTTZ,       MVT::v32i8,  20 }, // 2 x 128-bit Op + extract/insert
3006     { ISD::SADDSAT,    MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
3007     { ISD::SADDSAT,    MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
3008     { ISD::SMAX,       MVT::v8i32,   4 }, // 2 x 128-bit Op + extract/insert
3009     { ISD::SMAX,       MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
3010     { ISD::SMAX,       MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
3011     { ISD::SMIN,       MVT::v8i32,   4 }, // 2 x 128-bit Op + extract/insert
3012     { ISD::SMIN,       MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
3013     { ISD::SMIN,       MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
3014     { ISD::SSUBSAT,    MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
3015     { ISD::SSUBSAT,    MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
3016     { ISD::UADDSAT,    MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
3017     { ISD::UADDSAT,    MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
3018     { ISD::UADDSAT,    MVT::v8i32,   8 }, // 2 x 128-bit Op + extract/insert
3019     { ISD::UMAX,       MVT::v8i32,   4 }, // 2 x 128-bit Op + extract/insert
3020     { ISD::UMAX,       MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
3021     { ISD::UMAX,       MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
3022     { ISD::UMIN,       MVT::v8i32,   4 }, // 2 x 128-bit Op + extract/insert
3023     { ISD::UMIN,       MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
3024     { ISD::UMIN,       MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
3025     { ISD::USUBSAT,    MVT::v16i16,  4 }, // 2 x 128-bit Op + extract/insert
3026     { ISD::USUBSAT,    MVT::v32i8,   4 }, // 2 x 128-bit Op + extract/insert
3027     { ISD::USUBSAT,    MVT::v8i32,   6 }, // 2 x 128-bit Op + extract/insert
3028     { ISD::FMAXNUM,    MVT::f32,     3 }, // MAXSS + CMPUNORDSS + BLENDVPS
3029     { ISD::FMAXNUM,    MVT::v4f32,   3 }, // MAXPS + CMPUNORDPS + BLENDVPS
3030     { ISD::FMAXNUM,    MVT::v8f32,   5 }, // MAXPS + CMPUNORDPS + BLENDVPS + ?
3031     { ISD::FMAXNUM,    MVT::f64,     3 }, // MAXSD + CMPUNORDSD + BLENDVPD
3032     { ISD::FMAXNUM,    MVT::v2f64,   3 }, // MAXPD + CMPUNORDPD + BLENDVPD
3033     { ISD::FMAXNUM,    MVT::v4f64,   5 }, // MAXPD + CMPUNORDPD + BLENDVPD + ?
3034     { ISD::FSQRT,      MVT::f32,    14 }, // SNB from http://www.agner.org/
3035     { ISD::FSQRT,      MVT::v4f32,  14 }, // SNB from http://www.agner.org/
3036     { ISD::FSQRT,      MVT::v8f32,  28 }, // SNB from http://www.agner.org/
3037     { ISD::FSQRT,      MVT::f64,    21 }, // SNB from http://www.agner.org/
3038     { ISD::FSQRT,      MVT::v2f64,  21 }, // SNB from http://www.agner.org/
3039     { ISD::FSQRT,      MVT::v4f64,  43 }, // SNB from http://www.agner.org/
3040   };
3041   static const CostTblEntry GLMCostTbl[] = {
3042     { ISD::FSQRT, MVT::f32,   19 }, // sqrtss
3043     { ISD::FSQRT, MVT::v4f32, 37 }, // sqrtps
3044     { ISD::FSQRT, MVT::f64,   34 }, // sqrtsd
3045     { ISD::FSQRT, MVT::v2f64, 67 }, // sqrtpd
3046   };
3047   static const CostTblEntry SLMCostTbl[] = {
3048     { ISD::FSQRT, MVT::f32,   20 }, // sqrtss
3049     { ISD::FSQRT, MVT::v4f32, 40 }, // sqrtps
3050     { ISD::FSQRT, MVT::f64,   35 }, // sqrtsd
3051     { ISD::FSQRT, MVT::v2f64, 70 }, // sqrtpd
3052   };
3053   static const CostTblEntry SSE42CostTbl[] = {
3054     { ISD::USUBSAT,    MVT::v4i32,   2 }, // pmaxud + psubd
3055     { ISD::UADDSAT,    MVT::v4i32,   3 }, // not + pminud + paddd
3056     { ISD::FSQRT,      MVT::f32,    18 }, // Nehalem from http://www.agner.org/
3057     { ISD::FSQRT,      MVT::v4f32,  18 }, // Nehalem from http://www.agner.org/
3058   };
3059   static const CostTblEntry SSE41CostTbl[] = {
3060     { ISD::ABS,        MVT::v2i64,   2 }, // BLENDVPD(X,PSUBQ(0,X),X)
3061     { ISD::SMAX,       MVT::v4i32,   1 },
3062     { ISD::SMAX,       MVT::v16i8,   1 },
3063     { ISD::SMIN,       MVT::v4i32,   1 },
3064     { ISD::SMIN,       MVT::v16i8,   1 },
3065     { ISD::UMAX,       MVT::v4i32,   1 },
3066     { ISD::UMAX,       MVT::v8i16,   1 },
3067     { ISD::UMIN,       MVT::v4i32,   1 },
3068     { ISD::UMIN,       MVT::v8i16,   1 },
3069   };
3070   static const CostTblEntry SSSE3CostTbl[] = {
3071     { ISD::ABS,        MVT::v4i32,   1 },
3072     { ISD::ABS,        MVT::v8i16,   1 },
3073     { ISD::ABS,        MVT::v16i8,   1 },
3074     { ISD::BITREVERSE, MVT::v2i64,   5 },
3075     { ISD::BITREVERSE, MVT::v4i32,   5 },
3076     { ISD::BITREVERSE, MVT::v8i16,   5 },
3077     { ISD::BITREVERSE, MVT::v16i8,   5 },
3078     { ISD::BSWAP,      MVT::v2i64,   1 },
3079     { ISD::BSWAP,      MVT::v4i32,   1 },
3080     { ISD::BSWAP,      MVT::v8i16,   1 },
3081     { ISD::CTLZ,       MVT::v2i64,  23 },
3082     { ISD::CTLZ,       MVT::v4i32,  18 },
3083     { ISD::CTLZ,       MVT::v8i16,  14 },
3084     { ISD::CTLZ,       MVT::v16i8,   9 },
3085     { ISD::CTPOP,      MVT::v2i64,   7 },
3086     { ISD::CTPOP,      MVT::v4i32,  11 },
3087     { ISD::CTPOP,      MVT::v8i16,   9 },
3088     { ISD::CTPOP,      MVT::v16i8,   6 },
3089     { ISD::CTTZ,       MVT::v2i64,  10 },
3090     { ISD::CTTZ,       MVT::v4i32,  14 },
3091     { ISD::CTTZ,       MVT::v8i16,  12 },
3092     { ISD::CTTZ,       MVT::v16i8,   9 }
3093   };
3094   static const CostTblEntry SSE2CostTbl[] = {
3095     { ISD::ABS,        MVT::v2i64,   4 },
3096     { ISD::ABS,        MVT::v4i32,   3 },
3097     { ISD::ABS,        MVT::v8i16,   2 },
3098     { ISD::ABS,        MVT::v16i8,   2 },
3099     { ISD::BITREVERSE, MVT::v2i64,  29 },
3100     { ISD::BITREVERSE, MVT::v4i32,  27 },
3101     { ISD::BITREVERSE, MVT::v8i16,  27 },
3102     { ISD::BITREVERSE, MVT::v16i8,  20 },
3103     { ISD::BSWAP,      MVT::v2i64,   7 },
3104     { ISD::BSWAP,      MVT::v4i32,   7 },
3105     { ISD::BSWAP,      MVT::v8i16,   7 },
3106     { ISD::CTLZ,       MVT::v2i64,  25 },
3107     { ISD::CTLZ,       MVT::v4i32,  26 },
3108     { ISD::CTLZ,       MVT::v8i16,  20 },
3109     { ISD::CTLZ,       MVT::v16i8,  17 },
3110     { ISD::CTPOP,      MVT::v2i64,  12 },
3111     { ISD::CTPOP,      MVT::v4i32,  15 },
3112     { ISD::CTPOP,      MVT::v8i16,  13 },
3113     { ISD::CTPOP,      MVT::v16i8,  10 },
3114     { ISD::CTTZ,       MVT::v2i64,  14 },
3115     { ISD::CTTZ,       MVT::v4i32,  18 },
3116     { ISD::CTTZ,       MVT::v8i16,  16 },
3117     { ISD::CTTZ,       MVT::v16i8,  13 },
3118     { ISD::SADDSAT,    MVT::v8i16,   1 },
3119     { ISD::SADDSAT,    MVT::v16i8,   1 },
3120     { ISD::SMAX,       MVT::v8i16,   1 },
3121     { ISD::SMIN,       MVT::v8i16,   1 },
3122     { ISD::SSUBSAT,    MVT::v8i16,   1 },
3123     { ISD::SSUBSAT,    MVT::v16i8,   1 },
3124     { ISD::UADDSAT,    MVT::v8i16,   1 },
3125     { ISD::UADDSAT,    MVT::v16i8,   1 },
3126     { ISD::UMAX,       MVT::v8i16,   2 },
3127     { ISD::UMAX,       MVT::v16i8,   1 },
3128     { ISD::UMIN,       MVT::v8i16,   2 },
3129     { ISD::UMIN,       MVT::v16i8,   1 },
3130     { ISD::USUBSAT,    MVT::v8i16,   1 },
3131     { ISD::USUBSAT,    MVT::v16i8,   1 },
3132     { ISD::FMAXNUM,    MVT::f64,     4 },
3133     { ISD::FMAXNUM,    MVT::v2f64,   4 },
3134     { ISD::FSQRT,      MVT::f64,    32 }, // Nehalem from http://www.agner.org/
3135     { ISD::FSQRT,      MVT::v2f64,  32 }, // Nehalem from http://www.agner.org/
3136   };
3137   static const CostTblEntry SSE1CostTbl[] = {
3138     { ISD::FMAXNUM,    MVT::f32,     4 },
3139     { ISD::FMAXNUM,    MVT::v4f32,   4 },
3140     { ISD::FSQRT,      MVT::f32,    28 }, // Pentium III from http://www.agner.org/
3141     { ISD::FSQRT,      MVT::v4f32,  56 }, // Pentium III from http://www.agner.org/
3142   };
3143   static const CostTblEntry BMI64CostTbl[] = { // 64-bit targets
3144     { ISD::CTTZ,       MVT::i64,     1 },
3145   };
3146   static const CostTblEntry BMI32CostTbl[] = { // 32 or 64-bit targets
3147     { ISD::CTTZ,       MVT::i32,     1 },
3148     { ISD::CTTZ,       MVT::i16,     1 },
3149     { ISD::CTTZ,       MVT::i8,      1 },
3150   };
3151   static const CostTblEntry LZCNT64CostTbl[] = { // 64-bit targets
3152     { ISD::CTLZ,       MVT::i64,     1 },
3153   };
3154   static const CostTblEntry LZCNT32CostTbl[] = { // 32 or 64-bit targets
3155     { ISD::CTLZ,       MVT::i32,     1 },
3156     { ISD::CTLZ,       MVT::i16,     1 },
3157     { ISD::CTLZ,       MVT::i8,      1 },
3158   };
3159   static const CostTblEntry POPCNT64CostTbl[] = { // 64-bit targets
3160     { ISD::CTPOP,      MVT::i64,     1 },
3161   };
3162   static const CostTblEntry POPCNT32CostTbl[] = { // 32 or 64-bit targets
3163     { ISD::CTPOP,      MVT::i32,     1 },
3164     { ISD::CTPOP,      MVT::i16,     1 },
3165     { ISD::CTPOP,      MVT::i8,      1 },
3166   };
3167   static const CostTblEntry X64CostTbl[] = { // 64-bit targets
3168     { ISD::ABS,        MVT::i64,     2 }, // SUB+CMOV
3169     { ISD::BITREVERSE, MVT::i64,    14 },
3170     { ISD::BSWAP,      MVT::i64,     1 },
3171     { ISD::CTLZ,       MVT::i64,     4 }, // BSR+XOR or BSR+XOR+CMOV
3172     { ISD::CTTZ,       MVT::i64,     3 }, // TEST+BSF+CMOV/BRANCH
3173     { ISD::CTPOP,      MVT::i64,    10 },
3174     { ISD::SADDO,      MVT::i64,     1 },
3175     { ISD::UADDO,      MVT::i64,     1 },
3176     { ISD::UMULO,      MVT::i64,     2 }, // mulq + seto
3177   };
3178   static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
3179     { ISD::ABS,        MVT::i32,     2 }, // SUB+CMOV
3180     { ISD::ABS,        MVT::i16,     2 }, // SUB+CMOV
3181     { ISD::BITREVERSE, MVT::i32,    14 },
3182     { ISD::BITREVERSE, MVT::i16,    14 },
3183     { ISD::BITREVERSE, MVT::i8,     11 },
3184     { ISD::BSWAP,      MVT::i32,     1 },
3185     { ISD::BSWAP,      MVT::i16,     1 }, // ROL
3186     { ISD::CTLZ,       MVT::i32,     4 }, // BSR+XOR or BSR+XOR+CMOV
3187     { ISD::CTLZ,       MVT::i16,     4 }, // BSR+XOR or BSR+XOR+CMOV
3188     { ISD::CTLZ,       MVT::i8,      4 }, // BSR+XOR or BSR+XOR+CMOV
3189     { ISD::CTTZ,       MVT::i32,     3 }, // TEST+BSF+CMOV/BRANCH
3190     { ISD::CTTZ,       MVT::i16,     3 }, // TEST+BSF+CMOV/BRANCH
3191     { ISD::CTTZ,       MVT::i8,      3 }, // TEST+BSF+CMOV/BRANCH
3192     { ISD::CTPOP,      MVT::i32,     8 },
3193     { ISD::CTPOP,      MVT::i16,     9 },
3194     { ISD::CTPOP,      MVT::i8,      7 },
3195     { ISD::SADDO,      MVT::i32,     1 },
3196     { ISD::SADDO,      MVT::i16,     1 },
3197     { ISD::SADDO,      MVT::i8,      1 },
3198     { ISD::UADDO,      MVT::i32,     1 },
3199     { ISD::UADDO,      MVT::i16,     1 },
3200     { ISD::UADDO,      MVT::i8,      1 },
3201     { ISD::UMULO,      MVT::i32,     2 }, // mul + seto
3202     { ISD::UMULO,      MVT::i16,     2 },
3203     { ISD::UMULO,      MVT::i8,      2 },
3204   };
3205 
3206   Type *RetTy = ICA.getReturnType();
3207   Type *OpTy = RetTy;
3208   Intrinsic::ID IID = ICA.getID();
3209   unsigned ISD = ISD::DELETED_NODE;
3210   switch (IID) {
3211   default:
3212     break;
3213   case Intrinsic::abs:
3214     ISD = ISD::ABS;
3215     break;
3216   case Intrinsic::bitreverse:
3217     ISD = ISD::BITREVERSE;
3218     break;
3219   case Intrinsic::bswap:
3220     ISD = ISD::BSWAP;
3221     break;
3222   case Intrinsic::ctlz:
3223     ISD = ISD::CTLZ;
3224     break;
3225   case Intrinsic::ctpop:
3226     ISD = ISD::CTPOP;
3227     break;
3228   case Intrinsic::cttz:
3229     ISD = ISD::CTTZ;
3230     break;
3231   case Intrinsic::maxnum:
3232   case Intrinsic::minnum:
3233     // FMINNUM has same costs so don't duplicate.
3234     ISD = ISD::FMAXNUM;
3235     break;
3236   case Intrinsic::sadd_sat:
3237     ISD = ISD::SADDSAT;
3238     break;
3239   case Intrinsic::smax:
3240     ISD = ISD::SMAX;
3241     break;
3242   case Intrinsic::smin:
3243     ISD = ISD::SMIN;
3244     break;
3245   case Intrinsic::ssub_sat:
3246     ISD = ISD::SSUBSAT;
3247     break;
3248   case Intrinsic::uadd_sat:
3249     ISD = ISD::UADDSAT;
3250     break;
3251   case Intrinsic::umax:
3252     ISD = ISD::UMAX;
3253     break;
3254   case Intrinsic::umin:
3255     ISD = ISD::UMIN;
3256     break;
3257   case Intrinsic::usub_sat:
3258     ISD = ISD::USUBSAT;
3259     break;
3260   case Intrinsic::sqrt:
3261     ISD = ISD::FSQRT;
3262     break;
3263   case Intrinsic::sadd_with_overflow:
3264   case Intrinsic::ssub_with_overflow:
3265     // SSUBO has same costs so don't duplicate.
3266     ISD = ISD::SADDO;
3267     OpTy = RetTy->getContainedType(0);
3268     break;
3269   case Intrinsic::uadd_with_overflow:
3270   case Intrinsic::usub_with_overflow:
3271     // USUBO has same costs so don't duplicate.
3272     ISD = ISD::UADDO;
3273     OpTy = RetTy->getContainedType(0);
3274     break;
3275   case Intrinsic::umul_with_overflow:
3276   case Intrinsic::smul_with_overflow:
3277     // SMULO has same costs so don't duplicate.
3278     ISD = ISD::UMULO;
3279     OpTy = RetTy->getContainedType(0);
3280     break;
3281   }
3282 
3283   if (ISD != ISD::DELETED_NODE) {
3284     // Legalize the type.
3285     std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, OpTy);
3286     MVT MTy = LT.second;
3287 
3288     // Attempt to lookup cost.
3289     if (ISD == ISD::BITREVERSE && ST->hasGFNI() && ST->hasSSSE3() &&
3290         MTy.isVector()) {
3291       // With PSHUFB the code is very similar for all types. If we have integer
3292       // byte operations, we just need a GF2P8AFFINEQB for vXi8. For other types
3293       // we also need a PSHUFB.
3294       unsigned Cost = MTy.getVectorElementType() == MVT::i8 ? 1 : 2;
3295 
3296       // Without byte operations, we need twice as many GF2P8AFFINEQB and PSHUFB
3297       // instructions. We also need an extract and an insert.
3298       if (!(MTy.is128BitVector() || (ST->hasAVX2() && MTy.is256BitVector()) ||
3299             (ST->hasBWI() && MTy.is512BitVector())))
3300         Cost = Cost * 2 + 2;
3301 
3302       return LT.first * Cost;
3303     }
3304 
3305     auto adjustTableCost = [](const CostTblEntry &Entry,
3306                               InstructionCost LegalizationCost,
3307                               FastMathFlags FMF) {
3308       // If there are no NANs to deal with, then these are reduced to a
3309       // single MIN** or MAX** instruction instead of the MIN/CMP/SELECT that we
3310       // assume is used in the non-fast case.
3311       if (Entry.ISD == ISD::FMAXNUM || Entry.ISD == ISD::FMINNUM) {
3312         if (FMF.noNaNs())
3313           return LegalizationCost * 1;
3314       }
3315       return LegalizationCost * (int)Entry.Cost;
3316     };
3317 
3318     if (ST->useGLMDivSqrtCosts())
3319       if (const auto *Entry = CostTableLookup(GLMCostTbl, ISD, MTy))
3320         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3321 
3322     if (ST->useSLMArithCosts())
3323       if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
3324         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3325 
3326     if (ST->hasBITALG())
3327       if (const auto *Entry = CostTableLookup(AVX512BITALGCostTbl, ISD, MTy))
3328         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3329 
3330     if (ST->hasVPOPCNTDQ())
3331       if (const auto *Entry = CostTableLookup(AVX512VPOPCNTDQCostTbl, ISD, MTy))
3332         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3333 
3334     if (ST->hasCDI())
3335       if (const auto *Entry = CostTableLookup(AVX512CDCostTbl, ISD, MTy))
3336         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3337 
3338     if (ST->hasBWI())
3339       if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
3340         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3341 
3342     if (ST->hasAVX512())
3343       if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
3344         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3345 
3346     if (ST->hasXOP())
3347       if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
3348         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3349 
3350     if (ST->hasAVX2())
3351       if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
3352         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3353 
3354     if (ST->hasAVX())
3355       if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
3356         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3357 
3358     if (ST->hasSSE42())
3359       if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
3360         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3361 
3362     if (ST->hasSSE41())
3363       if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
3364         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3365 
3366     if (ST->hasSSSE3())
3367       if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy))
3368         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3369 
3370     if (ST->hasSSE2())
3371       if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
3372         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3373 
3374     if (ST->hasSSE1())
3375       if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
3376         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3377 
3378     if (ST->hasBMI()) {
3379       if (ST->is64Bit())
3380         if (const auto *Entry = CostTableLookup(BMI64CostTbl, ISD, MTy))
3381           return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3382 
3383       if (const auto *Entry = CostTableLookup(BMI32CostTbl, ISD, MTy))
3384         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3385     }
3386 
3387     if (ST->hasLZCNT()) {
3388       if (ST->is64Bit())
3389         if (const auto *Entry = CostTableLookup(LZCNT64CostTbl, ISD, MTy))
3390           return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3391 
3392       if (const auto *Entry = CostTableLookup(LZCNT32CostTbl, ISD, MTy))
3393         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3394     }
3395 
3396     if (ST->hasPOPCNT()) {
3397       if (ST->is64Bit())
3398         if (const auto *Entry = CostTableLookup(POPCNT64CostTbl, ISD, MTy))
3399           return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3400 
3401       if (const auto *Entry = CostTableLookup(POPCNT32CostTbl, ISD, MTy))
3402         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3403     }
3404 
3405     if (ISD == ISD::BSWAP && ST->hasMOVBE() && ST->hasFastMOVBE()) {
3406       if (const Instruction *II = ICA.getInst()) {
3407         if (II->hasOneUse() && isa<StoreInst>(II->user_back()))
3408           return TTI::TCC_Free;
3409         if (auto *LI = dyn_cast<LoadInst>(II->getOperand(0))) {
3410           if (LI->hasOneUse())
3411             return TTI::TCC_Free;
3412         }
3413       }
3414     }
3415 
3416     if (ST->is64Bit())
3417       if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
3418         return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3419 
3420     if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
3421       return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3422   }
3423 
3424   return BaseT::getIntrinsicInstrCost(ICA, CostKind);
3425 }
3426 
3427 InstructionCost
3428 X86TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
3429                                   TTI::TargetCostKind CostKind) {
3430   if (ICA.isTypeBasedOnly())
3431     return getTypeBasedIntrinsicInstrCost(ICA, CostKind);
3432 
3433   static const CostTblEntry AVX512BWCostTbl[] = {
3434     { ISD::ROTL,       MVT::v32i16,  2 },
3435     { ISD::ROTL,       MVT::v16i16,  2 },
3436     { ISD::ROTL,       MVT::v8i16,   2 },
3437     { ISD::ROTL,       MVT::v64i8,   5 },
3438     { ISD::ROTL,       MVT::v32i8,   5 },
3439     { ISD::ROTL,       MVT::v16i8,   5 },
3440     { ISD::ROTR,       MVT::v32i16,  2 },
3441     { ISD::ROTR,       MVT::v16i16,  2 },
3442     { ISD::ROTR,       MVT::v8i16,   2 },
3443     { ISD::ROTR,       MVT::v64i8,   5 },
3444     { ISD::ROTR,       MVT::v32i8,   5 },
3445     { ISD::ROTR,       MVT::v16i8,   5 }
3446   };
3447   static const CostTblEntry AVX512CostTbl[] = {
3448     { ISD::ROTL,       MVT::v8i64,   1 },
3449     { ISD::ROTL,       MVT::v4i64,   1 },
3450     { ISD::ROTL,       MVT::v2i64,   1 },
3451     { ISD::ROTL,       MVT::v16i32,  1 },
3452     { ISD::ROTL,       MVT::v8i32,   1 },
3453     { ISD::ROTL,       MVT::v4i32,   1 },
3454     { ISD::ROTR,       MVT::v8i64,   1 },
3455     { ISD::ROTR,       MVT::v4i64,   1 },
3456     { ISD::ROTR,       MVT::v2i64,   1 },
3457     { ISD::ROTR,       MVT::v16i32,  1 },
3458     { ISD::ROTR,       MVT::v8i32,   1 },
3459     { ISD::ROTR,       MVT::v4i32,   1 }
3460   };
3461   // XOP: ROTL = VPROT(X,Y), ROTR = VPROT(X,SUB(0,Y))
3462   static const CostTblEntry XOPCostTbl[] = {
3463     { ISD::ROTL,       MVT::v4i64,   4 },
3464     { ISD::ROTL,       MVT::v8i32,   4 },
3465     { ISD::ROTL,       MVT::v16i16,  4 },
3466     { ISD::ROTL,       MVT::v32i8,   4 },
3467     { ISD::ROTL,       MVT::v2i64,   1 },
3468     { ISD::ROTL,       MVT::v4i32,   1 },
3469     { ISD::ROTL,       MVT::v8i16,   1 },
3470     { ISD::ROTL,       MVT::v16i8,   1 },
3471     { ISD::ROTR,       MVT::v4i64,   6 },
3472     { ISD::ROTR,       MVT::v8i32,   6 },
3473     { ISD::ROTR,       MVT::v16i16,  6 },
3474     { ISD::ROTR,       MVT::v32i8,   6 },
3475     { ISD::ROTR,       MVT::v2i64,   2 },
3476     { ISD::ROTR,       MVT::v4i32,   2 },
3477     { ISD::ROTR,       MVT::v8i16,   2 },
3478     { ISD::ROTR,       MVT::v16i8,   2 }
3479   };
3480   static const CostTblEntry X64CostTbl[] = { // 64-bit targets
3481     { ISD::ROTL,       MVT::i64,     1 },
3482     { ISD::ROTR,       MVT::i64,     1 },
3483     { ISD::FSHL,       MVT::i64,     4 }
3484   };
3485   static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
3486     { ISD::ROTL,       MVT::i32,     1 },
3487     { ISD::ROTL,       MVT::i16,     1 },
3488     { ISD::ROTL,       MVT::i8,      1 },
3489     { ISD::ROTR,       MVT::i32,     1 },
3490     { ISD::ROTR,       MVT::i16,     1 },
3491     { ISD::ROTR,       MVT::i8,      1 },
3492     { ISD::FSHL,       MVT::i32,     4 },
3493     { ISD::FSHL,       MVT::i16,     4 },
3494     { ISD::FSHL,       MVT::i8,      4 }
3495   };
3496 
3497   Intrinsic::ID IID = ICA.getID();
3498   Type *RetTy = ICA.getReturnType();
3499   const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
3500   unsigned ISD = ISD::DELETED_NODE;
3501   switch (IID) {
3502   default:
3503     break;
3504   case Intrinsic::fshl:
3505     ISD = ISD::FSHL;
3506     if (Args[0] == Args[1])
3507       ISD = ISD::ROTL;
3508     break;
3509   case Intrinsic::fshr:
3510     // FSHR has same costs so don't duplicate.
3511     ISD = ISD::FSHL;
3512     if (Args[0] == Args[1])
3513       ISD = ISD::ROTR;
3514     break;
3515   }
3516 
3517   if (ISD != ISD::DELETED_NODE) {
3518     // Legalize the type.
3519     std::pair<InstructionCost, MVT> LT =
3520         TLI->getTypeLegalizationCost(DL, RetTy);
3521     MVT MTy = LT.second;
3522 
3523     // Attempt to lookup cost.
3524     if (ST->hasBWI())
3525       if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
3526         return LT.first * Entry->Cost;
3527 
3528     if (ST->hasAVX512())
3529       if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
3530         return LT.first * Entry->Cost;
3531 
3532     if (ST->hasXOP())
3533       if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
3534         return LT.first * Entry->Cost;
3535 
3536     if (ST->is64Bit())
3537       if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
3538         return LT.first * Entry->Cost;
3539 
3540     if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
3541       return LT.first * Entry->Cost;
3542   }
3543 
3544   return BaseT::getIntrinsicInstrCost(ICA, CostKind);
3545 }
3546 
3547 InstructionCost X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
3548                                                unsigned Index) {
3549   static const CostTblEntry SLMCostTbl[] = {
3550      { ISD::EXTRACT_VECTOR_ELT,       MVT::i8,      4 },
3551      { ISD::EXTRACT_VECTOR_ELT,       MVT::i16,     4 },
3552      { ISD::EXTRACT_VECTOR_ELT,       MVT::i32,     4 },
3553      { ISD::EXTRACT_VECTOR_ELT,       MVT::i64,     7 }
3554    };
3555 
3556   assert(Val->isVectorTy() && "This must be a vector type");
3557   Type *ScalarType = Val->getScalarType();
3558   int RegisterFileMoveCost = 0;
3559 
3560   // Non-immediate extraction/insertion can be handled as a sequence of
3561   // aliased loads+stores via the stack.
3562   if (Index == -1U && (Opcode == Instruction::ExtractElement ||
3563                        Opcode == Instruction::InsertElement)) {
3564     // TODO: On some SSE41+ targets, we expand to cmp+splat+select patterns:
3565     // inselt N0, N1, N2 --> select (SplatN2 == {0,1,2...}) ? SplatN1 : N0.
3566 
3567     // TODO: Move this to BasicTTIImpl.h? We'd need better gep + index handling.
3568     assert(isa<FixedVectorType>(Val) && "Fixed vector type expected");
3569     Align VecAlign = DL.getPrefTypeAlign(Val);
3570     Align SclAlign = DL.getPrefTypeAlign(ScalarType);
3571 
3572     // Extract - store vector to stack, load scalar.
3573     if (Opcode == Instruction::ExtractElement) {
3574       return getMemoryOpCost(Instruction::Store, Val, VecAlign, 0,
3575                              TTI::TargetCostKind::TCK_RecipThroughput) +
3576              getMemoryOpCost(Instruction::Load, ScalarType, SclAlign, 0,
3577                              TTI::TargetCostKind::TCK_RecipThroughput);
3578     }
3579     // Insert - store vector to stack, store scalar, load vector.
3580     if (Opcode == Instruction::InsertElement) {
3581       return getMemoryOpCost(Instruction::Store, Val, VecAlign, 0,
3582                              TTI::TargetCostKind::TCK_RecipThroughput) +
3583              getMemoryOpCost(Instruction::Store, ScalarType, SclAlign, 0,
3584                              TTI::TargetCostKind::TCK_RecipThroughput) +
3585              getMemoryOpCost(Instruction::Load, Val, VecAlign, 0,
3586                              TTI::TargetCostKind::TCK_RecipThroughput);
3587     }
3588   }
3589 
3590   if (Index != -1U && (Opcode == Instruction::ExtractElement ||
3591                        Opcode == Instruction::InsertElement)) {
3592     // Legalize the type.
3593     std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);
3594 
3595     // This type is legalized to a scalar type.
3596     if (!LT.second.isVector())
3597       return 0;
3598 
3599     // The type may be split. Normalize the index to the new type.
3600     unsigned SizeInBits = LT.second.getSizeInBits();
3601     unsigned NumElts = LT.second.getVectorNumElements();
3602     unsigned SubNumElts = NumElts;
3603     Index = Index % NumElts;
3604 
3605     // For >128-bit vectors, we need to extract higher 128-bit subvectors.
3606     // For inserts, we also need to insert the subvector back.
3607     if (SizeInBits > 128) {
3608       assert((SizeInBits % 128) == 0 && "Illegal vector");
3609       unsigned NumSubVecs = SizeInBits / 128;
3610       SubNumElts = NumElts / NumSubVecs;
3611       if (SubNumElts <= Index) {
3612         RegisterFileMoveCost += (Opcode == Instruction::InsertElement ? 2 : 1);
3613         Index %= SubNumElts;
3614       }
3615     }
3616 
3617     if (Index == 0) {
3618       // Floating point scalars are already located in index #0.
3619       // Many insertions to #0 can fold away for scalar fp-ops, so let's assume
3620       // true for all.
3621       if (ScalarType->isFloatingPointTy())
3622         return RegisterFileMoveCost;
3623 
3624       // Assume movd/movq XMM -> GPR is relatively cheap on all targets.
3625       if (ScalarType->isIntegerTy() && Opcode == Instruction::ExtractElement)
3626         return 1 + RegisterFileMoveCost;
3627     }
3628 
3629     int ISD = TLI->InstructionOpcodeToISD(Opcode);
3630     assert(ISD && "Unexpected vector opcode");
3631     MVT MScalarTy = LT.second.getScalarType();
3632     if (ST->useSLMArithCosts())
3633       if (auto *Entry = CostTableLookup(SLMCostTbl, ISD, MScalarTy))
3634         return Entry->Cost + RegisterFileMoveCost;
3635 
3636     // Assume pinsr/pextr XMM <-> GPR is relatively cheap on all targets.
3637     if ((MScalarTy == MVT::i16 && ST->hasSSE2()) ||
3638         (MScalarTy.isInteger() && ST->hasSSE41()))
3639       return 1 + RegisterFileMoveCost;
3640 
3641     // Assume insertps is relatively cheap on all targets.
3642     if (MScalarTy == MVT::f32 && ST->hasSSE41() &&
3643         Opcode == Instruction::InsertElement)
3644       return 1 + RegisterFileMoveCost;
3645 
3646     // For extractions we just need to shuffle the element to index 0, which
3647     // should be very cheap (assume cost = 1). For insertions we need to shuffle
3648     // the elements to its destination. In both cases we must handle the
3649     // subvector move(s).
3650     // If the vector type is already less than 128-bits then don't reduce it.
3651     // TODO: Under what circumstances should we shuffle using the full width?
3652     InstructionCost ShuffleCost = 1;
3653     if (Opcode == Instruction::InsertElement) {
3654       auto *SubTy = cast<VectorType>(Val);
3655       EVT VT = TLI->getValueType(DL, Val);
3656       if (VT.getScalarType() != MScalarTy || VT.getSizeInBits() >= 128)
3657         SubTy = FixedVectorType::get(ScalarType, SubNumElts);
3658       ShuffleCost =
3659           getShuffleCost(TTI::SK_PermuteTwoSrc, SubTy, None, 0, SubTy);
3660     }
3661     int IntOrFpCost = ScalarType->isFloatingPointTy() ? 0 : 1;
3662     return ShuffleCost + IntOrFpCost + RegisterFileMoveCost;
3663   }
3664 
3665   // Add to the base cost if we know that the extracted element of a vector is
3666   // destined to be moved to and used in the integer register file.
3667   if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy())
3668     RegisterFileMoveCost += 1;
3669 
3670   return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost;
3671 }
3672 
3673 InstructionCost X86TTIImpl::getScalarizationOverhead(VectorType *Ty,
3674                                                      const APInt &DemandedElts,
3675                                                      bool Insert,
3676                                                      bool Extract) {
3677   InstructionCost Cost = 0;
3678 
3679   // For insertions, a ISD::BUILD_VECTOR style vector initialization can be much
3680   // cheaper than an accumulation of ISD::INSERT_VECTOR_ELT.
3681   if (Insert) {
3682     std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
3683     MVT MScalarTy = LT.second.getScalarType();
3684     unsigned SizeInBits = LT.second.getSizeInBits();
3685 
3686     if ((MScalarTy == MVT::i16 && ST->hasSSE2()) ||
3687         (MScalarTy.isInteger() && ST->hasSSE41()) ||
3688         (MScalarTy == MVT::f32 && ST->hasSSE41())) {
3689       // For types we can insert directly, insertion into 128-bit sub vectors is
3690       // cheap, followed by a cheap chain of concatenations.
3691       if (SizeInBits <= 128) {
3692         Cost +=
3693             BaseT::getScalarizationOverhead(Ty, DemandedElts, Insert, false);
3694       } else {
3695         // In each 128-lane, if at least one index is demanded but not all
3696         // indices are demanded and this 128-lane is not the first 128-lane of
3697         // the legalized-vector, then this 128-lane needs a extracti128; If in
3698         // each 128-lane, there is at least one demanded index, this 128-lane
3699         // needs a inserti128.
3700 
3701         // The following cases will help you build a better understanding:
3702         // Assume we insert several elements into a v8i32 vector in avx2,
3703         // Case#1: inserting into 1th index needs vpinsrd + inserti128.
3704         // Case#2: inserting into 5th index needs extracti128 + vpinsrd +
3705         // inserti128.
3706         // Case#3: inserting into 4,5,6,7 index needs 4*vpinsrd + inserti128.
3707         const int CostValue = *LT.first.getValue();
3708         assert(CostValue >= 0 && "Negative cost!");
3709         unsigned Num128Lanes = SizeInBits / 128 * CostValue;
3710         unsigned NumElts = LT.second.getVectorNumElements() * CostValue;
3711         APInt WidenedDemandedElts = DemandedElts.zextOrSelf(NumElts);
3712         unsigned Scale = NumElts / Num128Lanes;
3713         // We iterate each 128-lane, and check if we need a
3714         // extracti128/inserti128 for this 128-lane.
3715         for (unsigned I = 0; I < NumElts; I += Scale) {
3716           APInt Mask = WidenedDemandedElts.getBitsSet(NumElts, I, I + Scale);
3717           APInt MaskedDE = Mask & WidenedDemandedElts;
3718           unsigned Population = MaskedDE.countPopulation();
3719           Cost += (Population > 0 && Population != Scale &&
3720                    I % LT.second.getVectorNumElements() != 0);
3721           Cost += Population > 0;
3722         }
3723         Cost += DemandedElts.countPopulation();
3724 
3725         // For vXf32 cases, insertion into the 0'th index in each v4f32
3726         // 128-bit vector is free.
3727         // NOTE: This assumes legalization widens vXf32 vectors.
3728         if (MScalarTy == MVT::f32)
3729           for (unsigned i = 0, e = cast<FixedVectorType>(Ty)->getNumElements();
3730                i < e; i += 4)
3731             if (DemandedElts[i])
3732               Cost--;
3733       }
3734     } else if (LT.second.isVector()) {
3735       // Without fast insertion, we need to use MOVD/MOVQ to pass each demanded
3736       // integer element as a SCALAR_TO_VECTOR, then we build the vector as a
3737       // series of UNPCK followed by CONCAT_VECTORS - all of these can be
3738       // considered cheap.
3739       if (Ty->isIntOrIntVectorTy())
3740         Cost += DemandedElts.countPopulation();
3741 
3742       // Get the smaller of the legalized or original pow2-extended number of
3743       // vector elements, which represents the number of unpacks we'll end up
3744       // performing.
3745       unsigned NumElts = LT.second.getVectorNumElements();
3746       unsigned Pow2Elts =
3747           PowerOf2Ceil(cast<FixedVectorType>(Ty)->getNumElements());
3748       Cost += (std::min<unsigned>(NumElts, Pow2Elts) - 1) * LT.first;
3749     }
3750   }
3751 
3752   // TODO: Use default extraction for now, but we should investigate extending this
3753   // to handle repeated subvector extraction.
3754   if (Extract)
3755     Cost += BaseT::getScalarizationOverhead(Ty, DemandedElts, false, Extract);
3756 
3757   return Cost;
3758 }
3759 
3760 InstructionCost
3761 X86TTIImpl::getReplicationShuffleCost(Type *EltTy, int ReplicationFactor,
3762                                       int VF, const APInt &DemandedDstElts,
3763                                       TTI::TargetCostKind CostKind) {
3764   const unsigned EltTyBits = DL.getTypeSizeInBits(EltTy);
3765   // We don't differentiate element types here, only element bit width.
3766   EltTy = IntegerType::getIntNTy(EltTy->getContext(), EltTyBits);
3767 
3768   auto bailout = [&]() {
3769     return BaseT::getReplicationShuffleCost(EltTy, ReplicationFactor, VF,
3770                                             DemandedDstElts, CostKind);
3771   };
3772 
3773   // For now, only deal with AVX512 cases.
3774   if (!ST->hasAVX512())
3775     return bailout();
3776 
3777   // Do we have a native shuffle for this element type, or should we promote?
3778   unsigned PromEltTyBits = EltTyBits;
3779   switch (EltTyBits) {
3780   case 32:
3781   case 64:
3782     break; // AVX512F.
3783   case 16:
3784     if (!ST->hasBWI())
3785       PromEltTyBits = 32; // promote to i32, AVX512F.
3786     break;                // AVX512BW
3787   case 8:
3788     if (!ST->hasVBMI())
3789       PromEltTyBits = 32; // promote to i32, AVX512F.
3790     break;                // AVX512VBMI
3791   case 1:
3792     // There is no support for shuffling i1 elements. We *must* promote.
3793     if (ST->hasBWI()) {
3794       if (ST->hasVBMI())
3795         PromEltTyBits = 8; // promote to i8, AVX512VBMI.
3796       else
3797         PromEltTyBits = 16; // promote to i16, AVX512BW.
3798       break;
3799     }
3800     if (ST->hasDQI()) {
3801       PromEltTyBits = 32; // promote to i32, AVX512F.
3802       break;
3803     }
3804     return bailout();
3805   default:
3806     return bailout();
3807   }
3808   auto *PromEltTy = IntegerType::getIntNTy(EltTy->getContext(), PromEltTyBits);
3809 
3810   auto *SrcVecTy = FixedVectorType::get(EltTy, VF);
3811   auto *PromSrcVecTy = FixedVectorType::get(PromEltTy, VF);
3812 
3813   int NumDstElements = VF * ReplicationFactor;
3814   auto *PromDstVecTy = FixedVectorType::get(PromEltTy, NumDstElements);
3815   auto *DstVecTy = FixedVectorType::get(EltTy, NumDstElements);
3816 
3817   // Legalize the types.
3818   MVT LegalSrcVecTy = TLI->getTypeLegalizationCost(DL, SrcVecTy).second;
3819   MVT LegalPromSrcVecTy = TLI->getTypeLegalizationCost(DL, PromSrcVecTy).second;
3820   MVT LegalPromDstVecTy = TLI->getTypeLegalizationCost(DL, PromDstVecTy).second;
3821   MVT LegalDstVecTy = TLI->getTypeLegalizationCost(DL, DstVecTy).second;
3822   // They should have legalized into vector types.
3823   if (!LegalSrcVecTy.isVector() || !LegalPromSrcVecTy.isVector() ||
3824       !LegalPromDstVecTy.isVector() || !LegalDstVecTy.isVector())
3825     return bailout();
3826 
3827   if (PromEltTyBits != EltTyBits) {
3828     // If we have to perform the shuffle with wider elt type than our data type,
3829     // then we will first need to anyext (we don't care about the new bits)
3830     // the source elements, and then truncate Dst elements.
3831     InstructionCost PromotionCost;
3832     PromotionCost += getCastInstrCost(
3833         Instruction::SExt, /*Dst=*/PromSrcVecTy, /*Src=*/SrcVecTy,
3834         TargetTransformInfo::CastContextHint::None, CostKind);
3835     PromotionCost +=
3836         getCastInstrCost(Instruction::Trunc, /*Dst=*/DstVecTy,
3837                          /*Src=*/PromDstVecTy,
3838                          TargetTransformInfo::CastContextHint::None, CostKind);
3839     return PromotionCost + getReplicationShuffleCost(PromEltTy,
3840                                                      ReplicationFactor, VF,
3841                                                      DemandedDstElts, CostKind);
3842   }
3843 
3844   assert(LegalSrcVecTy.getScalarSizeInBits() == EltTyBits &&
3845          LegalSrcVecTy.getScalarType() == LegalDstVecTy.getScalarType() &&
3846          "We expect that the legalization doesn't affect the element width, "
3847          "doesn't coalesce/split elements.");
3848 
3849   unsigned NumEltsPerDstVec = LegalDstVecTy.getVectorNumElements();
3850   unsigned NumDstVectors =
3851       divideCeil(DstVecTy->getNumElements(), NumEltsPerDstVec);
3852 
3853   auto *SingleDstVecTy = FixedVectorType::get(EltTy, NumEltsPerDstVec);
3854 
3855   // Not all the produced Dst elements may be demanded. In our case,
3856   // given that a single Dst vector is formed by a single shuffle,
3857   // if all elements that will form a single Dst vector aren't demanded,
3858   // then we won't need to do that shuffle, so adjust the cost accordingly.
3859   APInt DemandedDstVectors = APIntOps::ScaleBitMask(
3860       DemandedDstElts.zextOrSelf(NumDstVectors * NumEltsPerDstVec),
3861       NumDstVectors);
3862   unsigned NumDstVectorsDemanded = DemandedDstVectors.countPopulation();
3863 
3864   InstructionCost SingleShuffleCost =
3865       getShuffleCost(TTI::SK_PermuteSingleSrc, SingleDstVecTy,
3866                      /*Mask=*/None, /*Index=*/0, /*SubTp=*/nullptr);
3867   return NumDstVectorsDemanded * SingleShuffleCost;
3868 }
3869 
3870 InstructionCost X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
3871                                             MaybeAlign Alignment,
3872                                             unsigned AddressSpace,
3873                                             TTI::TargetCostKind CostKind,
3874                                             const Instruction *I) {
3875   // TODO: Handle other cost kinds.
3876   if (CostKind != TTI::TCK_RecipThroughput) {
3877     if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
3878       // Store instruction with index and scale costs 2 Uops.
3879       // Check the preceding GEP to identify non-const indices.
3880       if (auto *GEP = dyn_cast<GetElementPtrInst>(SI->getPointerOperand())) {
3881         if (!all_of(GEP->indices(), [](Value *V) { return isa<Constant>(V); }))
3882           return TTI::TCC_Basic * 2;
3883       }
3884     }
3885     return TTI::TCC_Basic;
3886   }
3887 
3888   assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
3889          "Invalid Opcode");
3890   // Type legalization can't handle structs
3891   if (TLI->getValueType(DL, Src, true) == MVT::Other)
3892     return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
3893                                   CostKind);
3894 
3895   // Legalize the type.
3896   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
3897 
3898   auto *VTy = dyn_cast<FixedVectorType>(Src);
3899 
3900   // Handle the simple case of non-vectors.
3901   // NOTE: this assumes that legalization never creates vector from scalars!
3902   if (!VTy || !LT.second.isVector())
3903     // Each load/store unit costs 1.
3904     return LT.first * 1;
3905 
3906   bool IsLoad = Opcode == Instruction::Load;
3907 
3908   Type *EltTy = VTy->getElementType();
3909 
3910   const int EltTyBits = DL.getTypeSizeInBits(EltTy);
3911 
3912   InstructionCost Cost = 0;
3913 
3914   // Source of truth: how many elements were there in the original IR vector?
3915   const unsigned SrcNumElt = VTy->getNumElements();
3916 
3917   // How far have we gotten?
3918   int NumEltRemaining = SrcNumElt;
3919   // Note that we intentionally capture by-reference, NumEltRemaining changes.
3920   auto NumEltDone = [&]() { return SrcNumElt - NumEltRemaining; };
3921 
3922   const int MaxLegalOpSizeBytes = divideCeil(LT.second.getSizeInBits(), 8);
3923 
3924   // Note that even if we can store 64 bits of an XMM, we still operate on XMM.
3925   const unsigned XMMBits = 128;
3926   if (XMMBits % EltTyBits != 0)
3927     // Vector size must be a multiple of the element size. I.e. no padding.
3928     return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
3929                                   CostKind);
3930   const int NumEltPerXMM = XMMBits / EltTyBits;
3931 
3932   auto *XMMVecTy = FixedVectorType::get(EltTy, NumEltPerXMM);
3933 
3934   for (int CurrOpSizeBytes = MaxLegalOpSizeBytes, SubVecEltsLeft = 0;
3935        NumEltRemaining > 0; CurrOpSizeBytes /= 2) {
3936     // How many elements would a single op deal with at once?
3937     if ((8 * CurrOpSizeBytes) % EltTyBits != 0)
3938       // Vector size must be a multiple of the element size. I.e. no padding.
3939       return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
3940                                     CostKind);
3941     int CurrNumEltPerOp = (8 * CurrOpSizeBytes) / EltTyBits;
3942 
3943     assert(CurrOpSizeBytes > 0 && CurrNumEltPerOp > 0 && "How'd we get here?");
3944     assert((((NumEltRemaining * EltTyBits) < (2 * 8 * CurrOpSizeBytes)) ||
3945             (CurrOpSizeBytes == MaxLegalOpSizeBytes)) &&
3946            "Unless we haven't halved the op size yet, "
3947            "we have less than two op's sized units of work left.");
3948 
3949     auto *CurrVecTy = CurrNumEltPerOp > NumEltPerXMM
3950                           ? FixedVectorType::get(EltTy, CurrNumEltPerOp)
3951                           : XMMVecTy;
3952 
3953     assert(CurrVecTy->getNumElements() % CurrNumEltPerOp == 0 &&
3954            "After halving sizes, the vector elt count is no longer a multiple "
3955            "of number of elements per operation?");
3956     auto *CoalescedVecTy =
3957         CurrNumEltPerOp == 1
3958             ? CurrVecTy
3959             : FixedVectorType::get(
3960                   IntegerType::get(Src->getContext(),
3961                                    EltTyBits * CurrNumEltPerOp),
3962                   CurrVecTy->getNumElements() / CurrNumEltPerOp);
3963     assert(DL.getTypeSizeInBits(CoalescedVecTy) ==
3964                DL.getTypeSizeInBits(CurrVecTy) &&
3965            "coalesciing elements doesn't change vector width.");
3966 
3967     while (NumEltRemaining > 0) {
3968       assert(SubVecEltsLeft >= 0 && "Subreg element count overconsumtion?");
3969 
3970       // Can we use this vector size, as per the remaining element count?
3971       // Iff the vector is naturally aligned, we can do a wide load regardless.
3972       if (NumEltRemaining < CurrNumEltPerOp &&
3973           (!IsLoad || Alignment.valueOrOne() < CurrOpSizeBytes) &&
3974           CurrOpSizeBytes != 1)
3975         break; // Try smalled vector size.
3976 
3977       bool Is0thSubVec = (NumEltDone() % LT.second.getVectorNumElements()) == 0;
3978 
3979       // If we have fully processed the previous reg, we need to replenish it.
3980       if (SubVecEltsLeft == 0) {
3981         SubVecEltsLeft += CurrVecTy->getNumElements();
3982         // And that's free only for the 0'th subvector of a legalized vector.
3983         if (!Is0thSubVec)
3984           Cost += getShuffleCost(IsLoad ? TTI::ShuffleKind::SK_InsertSubvector
3985                                         : TTI::ShuffleKind::SK_ExtractSubvector,
3986                                  VTy, None, NumEltDone(), CurrVecTy);
3987       }
3988 
3989       // While we can directly load/store ZMM, YMM, and 64-bit halves of XMM,
3990       // for smaller widths (32/16/8) we have to insert/extract them separately.
3991       // Again, it's free for the 0'th subreg (if op is 32/64 bit wide,
3992       // but let's pretend that it is also true for 16/8 bit wide ops...)
3993       if (CurrOpSizeBytes <= 32 / 8 && !Is0thSubVec) {
3994         int NumEltDoneInCurrXMM = NumEltDone() % NumEltPerXMM;
3995         assert(NumEltDoneInCurrXMM % CurrNumEltPerOp == 0 && "");
3996         int CoalescedVecEltIdx = NumEltDoneInCurrXMM / CurrNumEltPerOp;
3997         APInt DemandedElts =
3998             APInt::getBitsSet(CoalescedVecTy->getNumElements(),
3999                               CoalescedVecEltIdx, CoalescedVecEltIdx + 1);
4000         assert(DemandedElts.countPopulation() == 1 && "Inserting single value");
4001         Cost += getScalarizationOverhead(CoalescedVecTy, DemandedElts, IsLoad,
4002                                          !IsLoad);
4003       }
4004 
4005       // This isn't exactly right. We're using slow unaligned 32-byte accesses
4006       // as a proxy for a double-pumped AVX memory interface such as on
4007       // Sandybridge.
4008       if (CurrOpSizeBytes == 32 && ST->isUnalignedMem32Slow())
4009         Cost += 2;
4010       else
4011         Cost += 1;
4012 
4013       SubVecEltsLeft -= CurrNumEltPerOp;
4014       NumEltRemaining -= CurrNumEltPerOp;
4015       Alignment = commonAlignment(Alignment.valueOrOne(), CurrOpSizeBytes);
4016     }
4017   }
4018 
4019   assert(NumEltRemaining <= 0 && "Should have processed all the elements.");
4020 
4021   return Cost;
4022 }
4023 
4024 InstructionCost
4025 X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy, Align Alignment,
4026                                   unsigned AddressSpace,
4027                                   TTI::TargetCostKind CostKind) {
4028   bool IsLoad = (Instruction::Load == Opcode);
4029   bool IsStore = (Instruction::Store == Opcode);
4030 
4031   auto *SrcVTy = dyn_cast<FixedVectorType>(SrcTy);
4032   if (!SrcVTy)
4033     // To calculate scalar take the regular cost, without mask
4034     return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace, CostKind);
4035 
4036   unsigned NumElem = SrcVTy->getNumElements();
4037   auto *MaskTy =
4038       FixedVectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem);
4039   if ((IsLoad && !isLegalMaskedLoad(SrcVTy, Alignment)) ||
4040       (IsStore && !isLegalMaskedStore(SrcVTy, Alignment))) {
4041     // Scalarization
4042     APInt DemandedElts = APInt::getAllOnes(NumElem);
4043     InstructionCost MaskSplitCost =
4044         getScalarizationOverhead(MaskTy, DemandedElts, false, true);
4045     InstructionCost ScalarCompareCost = getCmpSelInstrCost(
4046         Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr,
4047         CmpInst::BAD_ICMP_PREDICATE, CostKind);
4048     InstructionCost BranchCost = getCFInstrCost(Instruction::Br, CostKind);
4049     InstructionCost MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);
4050     InstructionCost ValueSplitCost =
4051         getScalarizationOverhead(SrcVTy, DemandedElts, IsLoad, IsStore);
4052     InstructionCost MemopCost =
4053         NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
4054                                          Alignment, AddressSpace, CostKind);
4055     return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
4056   }
4057 
4058   // Legalize the type.
4059   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
4060   auto VT = TLI->getValueType(DL, SrcVTy);
4061   InstructionCost Cost = 0;
4062   if (VT.isSimple() && LT.second != VT.getSimpleVT() &&
4063       LT.second.getVectorNumElements() == NumElem)
4064     // Promotion requires extend/truncate for data and a shuffle for mask.
4065     Cost += getShuffleCost(TTI::SK_PermuteTwoSrc, SrcVTy, None, 0, nullptr) +
4066             getShuffleCost(TTI::SK_PermuteTwoSrc, MaskTy, None, 0, nullptr);
4067 
4068   else if (LT.first * LT.second.getVectorNumElements() > NumElem) {
4069     auto *NewMaskTy = FixedVectorType::get(MaskTy->getElementType(),
4070                                            LT.second.getVectorNumElements());
4071     // Expanding requires fill mask with zeroes
4072     Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, None, 0, MaskTy);
4073   }
4074 
4075   // Pre-AVX512 - each maskmov load costs 2 + store costs ~8.
4076   if (!ST->hasAVX512())
4077     return Cost + LT.first * (IsLoad ? 2 : 8);
4078 
4079   // AVX-512 masked load/store is cheapper
4080   return Cost + LT.first;
4081 }
4082 
4083 InstructionCost X86TTIImpl::getAddressComputationCost(Type *Ty,
4084                                                       ScalarEvolution *SE,
4085                                                       const SCEV *Ptr) {
4086   // Address computations in vectorized code with non-consecutive addresses will
4087   // likely result in more instructions compared to scalar code where the
4088   // computation can more often be merged into the index mode. The resulting
4089   // extra micro-ops can significantly decrease throughput.
4090   const unsigned NumVectorInstToHideOverhead = 10;
4091 
4092   // Cost modeling of Strided Access Computation is hidden by the indexing
4093   // modes of X86 regardless of the stride value. We dont believe that there
4094   // is a difference between constant strided access in gerenal and constant
4095   // strided value which is less than or equal to 64.
4096   // Even in the case of (loop invariant) stride whose value is not known at
4097   // compile time, the address computation will not incur more than one extra
4098   // ADD instruction.
4099   if (Ty->isVectorTy() && SE && !ST->hasAVX2()) {
4100     // TODO: AVX2 is the current cut-off because we don't have correct
4101     //       interleaving costs for prior ISA's.
4102     if (!BaseT::isStridedAccess(Ptr))
4103       return NumVectorInstToHideOverhead;
4104     if (!BaseT::getConstantStrideStep(SE, Ptr))
4105       return 1;
4106   }
4107 
4108   return BaseT::getAddressComputationCost(Ty, SE, Ptr);
4109 }
4110 
4111 InstructionCost
4112 X86TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
4113                                        Optional<FastMathFlags> FMF,
4114                                        TTI::TargetCostKind CostKind) {
4115   if (TTI::requiresOrderedReduction(FMF))
4116     return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
4117 
4118   // We use the Intel Architecture Code Analyzer(IACA) to measure the throughput
4119   // and make it as the cost.
4120 
4121   static const CostTblEntry SLMCostTblNoPairWise[] = {
4122     { ISD::FADD,  MVT::v2f64,   3 },
4123     { ISD::ADD,   MVT::v2i64,   5 },
4124   };
4125 
4126   static const CostTblEntry SSE2CostTblNoPairWise[] = {
4127     { ISD::FADD,  MVT::v2f64,   2 },
4128     { ISD::FADD,  MVT::v2f32,   2 },
4129     { ISD::FADD,  MVT::v4f32,   4 },
4130     { ISD::ADD,   MVT::v2i64,   2 },      // The data reported by the IACA tool is "1.6".
4131     { ISD::ADD,   MVT::v2i32,   2 }, // FIXME: chosen to be less than v4i32
4132     { ISD::ADD,   MVT::v4i32,   3 },      // The data reported by the IACA tool is "3.3".
4133     { ISD::ADD,   MVT::v2i16,   2 },      // The data reported by the IACA tool is "4.3".
4134     { ISD::ADD,   MVT::v4i16,   3 },      // The data reported by the IACA tool is "4.3".
4135     { ISD::ADD,   MVT::v8i16,   4 },      // The data reported by the IACA tool is "4.3".
4136     { ISD::ADD,   MVT::v2i8,    2 },
4137     { ISD::ADD,   MVT::v4i8,    2 },
4138     { ISD::ADD,   MVT::v8i8,    2 },
4139     { ISD::ADD,   MVT::v16i8,   3 },
4140   };
4141 
4142   static const CostTblEntry AVX1CostTblNoPairWise[] = {
4143     { ISD::FADD,  MVT::v4f64,   3 },
4144     { ISD::FADD,  MVT::v4f32,   3 },
4145     { ISD::FADD,  MVT::v8f32,   4 },
4146     { ISD::ADD,   MVT::v2i64,   1 },      // The data reported by the IACA tool is "1.5".
4147     { ISD::ADD,   MVT::v4i64,   3 },
4148     { ISD::ADD,   MVT::v8i32,   5 },
4149     { ISD::ADD,   MVT::v16i16,  5 },
4150     { ISD::ADD,   MVT::v32i8,   4 },
4151   };
4152 
4153   int ISD = TLI->InstructionOpcodeToISD(Opcode);
4154   assert(ISD && "Invalid opcode");
4155 
4156   // Before legalizing the type, give a chance to look up illegal narrow types
4157   // in the table.
4158   // FIXME: Is there a better way to do this?
4159   EVT VT = TLI->getValueType(DL, ValTy);
4160   if (VT.isSimple()) {
4161     MVT MTy = VT.getSimpleVT();
4162     if (ST->useSLMArithCosts())
4163       if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy))
4164         return Entry->Cost;
4165 
4166     if (ST->hasAVX())
4167       if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
4168         return Entry->Cost;
4169 
4170     if (ST->hasSSE2())
4171       if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
4172         return Entry->Cost;
4173   }
4174 
4175   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
4176 
4177   MVT MTy = LT.second;
4178 
4179   auto *ValVTy = cast<FixedVectorType>(ValTy);
4180 
4181   // Special case: vXi8 mul reductions are performed as vXi16.
4182   if (ISD == ISD::MUL && MTy.getScalarType() == MVT::i8) {
4183     auto *WideSclTy = IntegerType::get(ValVTy->getContext(), 16);
4184     auto *WideVecTy = FixedVectorType::get(WideSclTy, ValVTy->getNumElements());
4185     return getCastInstrCost(Instruction::ZExt, WideVecTy, ValTy,
4186                             TargetTransformInfo::CastContextHint::None,
4187                             CostKind) +
4188            getArithmeticReductionCost(Opcode, WideVecTy, FMF, CostKind);
4189   }
4190 
4191   InstructionCost ArithmeticCost = 0;
4192   if (LT.first != 1 && MTy.isVector() &&
4193       MTy.getVectorNumElements() < ValVTy->getNumElements()) {
4194     // Type needs to be split. We need LT.first - 1 arithmetic ops.
4195     auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(),
4196                                             MTy.getVectorNumElements());
4197     ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind);
4198     ArithmeticCost *= LT.first - 1;
4199   }
4200 
4201   if (ST->useSLMArithCosts())
4202     if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy))
4203       return ArithmeticCost + Entry->Cost;
4204 
4205   if (ST->hasAVX())
4206     if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
4207       return ArithmeticCost + Entry->Cost;
4208 
4209   if (ST->hasSSE2())
4210     if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
4211       return ArithmeticCost + Entry->Cost;
4212 
4213   // FIXME: These assume a naive kshift+binop lowering, which is probably
4214   // conservative in most cases.
4215   static const CostTblEntry AVX512BoolReduction[] = {
4216     { ISD::AND,  MVT::v2i1,   3 },
4217     { ISD::AND,  MVT::v4i1,   5 },
4218     { ISD::AND,  MVT::v8i1,   7 },
4219     { ISD::AND,  MVT::v16i1,  9 },
4220     { ISD::AND,  MVT::v32i1, 11 },
4221     { ISD::AND,  MVT::v64i1, 13 },
4222     { ISD::OR,   MVT::v2i1,   3 },
4223     { ISD::OR,   MVT::v4i1,   5 },
4224     { ISD::OR,   MVT::v8i1,   7 },
4225     { ISD::OR,   MVT::v16i1,  9 },
4226     { ISD::OR,   MVT::v32i1, 11 },
4227     { ISD::OR,   MVT::v64i1, 13 },
4228   };
4229 
4230   static const CostTblEntry AVX2BoolReduction[] = {
4231     { ISD::AND,  MVT::v16i16,  2 }, // vpmovmskb + cmp
4232     { ISD::AND,  MVT::v32i8,   2 }, // vpmovmskb + cmp
4233     { ISD::OR,   MVT::v16i16,  2 }, // vpmovmskb + cmp
4234     { ISD::OR,   MVT::v32i8,   2 }, // vpmovmskb + cmp
4235   };
4236 
4237   static const CostTblEntry AVX1BoolReduction[] = {
4238     { ISD::AND,  MVT::v4i64,   2 }, // vmovmskpd + cmp
4239     { ISD::AND,  MVT::v8i32,   2 }, // vmovmskps + cmp
4240     { ISD::AND,  MVT::v16i16,  4 }, // vextractf128 + vpand + vpmovmskb + cmp
4241     { ISD::AND,  MVT::v32i8,   4 }, // vextractf128 + vpand + vpmovmskb + cmp
4242     { ISD::OR,   MVT::v4i64,   2 }, // vmovmskpd + cmp
4243     { ISD::OR,   MVT::v8i32,   2 }, // vmovmskps + cmp
4244     { ISD::OR,   MVT::v16i16,  4 }, // vextractf128 + vpor + vpmovmskb + cmp
4245     { ISD::OR,   MVT::v32i8,   4 }, // vextractf128 + vpor + vpmovmskb + cmp
4246   };
4247 
4248   static const CostTblEntry SSE2BoolReduction[] = {
4249     { ISD::AND,  MVT::v2i64,   2 }, // movmskpd + cmp
4250     { ISD::AND,  MVT::v4i32,   2 }, // movmskps + cmp
4251     { ISD::AND,  MVT::v8i16,   2 }, // pmovmskb + cmp
4252     { ISD::AND,  MVT::v16i8,   2 }, // pmovmskb + cmp
4253     { ISD::OR,   MVT::v2i64,   2 }, // movmskpd + cmp
4254     { ISD::OR,   MVT::v4i32,   2 }, // movmskps + cmp
4255     { ISD::OR,   MVT::v8i16,   2 }, // pmovmskb + cmp
4256     { ISD::OR,   MVT::v16i8,   2 }, // pmovmskb + cmp
4257   };
4258 
4259   // Handle bool allof/anyof patterns.
4260   if (ValVTy->getElementType()->isIntegerTy(1)) {
4261     InstructionCost ArithmeticCost = 0;
4262     if (LT.first != 1 && MTy.isVector() &&
4263         MTy.getVectorNumElements() < ValVTy->getNumElements()) {
4264       // Type needs to be split. We need LT.first - 1 arithmetic ops.
4265       auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(),
4266                                               MTy.getVectorNumElements());
4267       ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind);
4268       ArithmeticCost *= LT.first - 1;
4269     }
4270 
4271     if (ST->hasAVX512())
4272       if (const auto *Entry = CostTableLookup(AVX512BoolReduction, ISD, MTy))
4273         return ArithmeticCost + Entry->Cost;
4274     if (ST->hasAVX2())
4275       if (const auto *Entry = CostTableLookup(AVX2BoolReduction, ISD, MTy))
4276         return ArithmeticCost + Entry->Cost;
4277     if (ST->hasAVX())
4278       if (const auto *Entry = CostTableLookup(AVX1BoolReduction, ISD, MTy))
4279         return ArithmeticCost + Entry->Cost;
4280     if (ST->hasSSE2())
4281       if (const auto *Entry = CostTableLookup(SSE2BoolReduction, ISD, MTy))
4282         return ArithmeticCost + Entry->Cost;
4283 
4284     return BaseT::getArithmeticReductionCost(Opcode, ValVTy, FMF, CostKind);
4285   }
4286 
4287   unsigned NumVecElts = ValVTy->getNumElements();
4288   unsigned ScalarSize = ValVTy->getScalarSizeInBits();
4289 
4290   // Special case power of 2 reductions where the scalar type isn't changed
4291   // by type legalization.
4292   if (!isPowerOf2_32(NumVecElts) || ScalarSize != MTy.getScalarSizeInBits())
4293     return BaseT::getArithmeticReductionCost(Opcode, ValVTy, FMF, CostKind);
4294 
4295   InstructionCost ReductionCost = 0;
4296 
4297   auto *Ty = ValVTy;
4298   if (LT.first != 1 && MTy.isVector() &&
4299       MTy.getVectorNumElements() < ValVTy->getNumElements()) {
4300     // Type needs to be split. We need LT.first - 1 arithmetic ops.
4301     Ty = FixedVectorType::get(ValVTy->getElementType(),
4302                               MTy.getVectorNumElements());
4303     ReductionCost = getArithmeticInstrCost(Opcode, Ty, CostKind);
4304     ReductionCost *= LT.first - 1;
4305     NumVecElts = MTy.getVectorNumElements();
4306   }
4307 
4308   // Now handle reduction with the legal type, taking into account size changes
4309   // at each level.
4310   while (NumVecElts > 1) {
4311     // Determine the size of the remaining vector we need to reduce.
4312     unsigned Size = NumVecElts * ScalarSize;
4313     NumVecElts /= 2;
4314     // If we're reducing from 256/512 bits, use an extract_subvector.
4315     if (Size > 128) {
4316       auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts);
4317       ReductionCost +=
4318           getShuffleCost(TTI::SK_ExtractSubvector, Ty, None, NumVecElts, SubTy);
4319       Ty = SubTy;
4320     } else if (Size == 128) {
4321       // Reducing from 128 bits is a permute of v2f64/v2i64.
4322       FixedVectorType *ShufTy;
4323       if (ValVTy->isFloatingPointTy())
4324         ShufTy =
4325             FixedVectorType::get(Type::getDoubleTy(ValVTy->getContext()), 2);
4326       else
4327         ShufTy =
4328             FixedVectorType::get(Type::getInt64Ty(ValVTy->getContext()), 2);
4329       ReductionCost +=
4330           getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
4331     } else if (Size == 64) {
4332       // Reducing from 64 bits is a shuffle of v4f32/v4i32.
4333       FixedVectorType *ShufTy;
4334       if (ValVTy->isFloatingPointTy())
4335         ShufTy =
4336             FixedVectorType::get(Type::getFloatTy(ValVTy->getContext()), 4);
4337       else
4338         ShufTy =
4339             FixedVectorType::get(Type::getInt32Ty(ValVTy->getContext()), 4);
4340       ReductionCost +=
4341           getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
4342     } else {
4343       // Reducing from smaller size is a shift by immediate.
4344       auto *ShiftTy = FixedVectorType::get(
4345           Type::getIntNTy(ValVTy->getContext(), Size), 128 / Size);
4346       ReductionCost += getArithmeticInstrCost(
4347           Instruction::LShr, ShiftTy, CostKind,
4348           TargetTransformInfo::OK_AnyValue,
4349           TargetTransformInfo::OK_UniformConstantValue,
4350           TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
4351     }
4352 
4353     // Add the arithmetic op for this level.
4354     ReductionCost += getArithmeticInstrCost(Opcode, Ty, CostKind);
4355   }
4356 
4357   // Add the final extract element to the cost.
4358   return ReductionCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
4359 }
4360 
4361 InstructionCost X86TTIImpl::getMinMaxCost(Type *Ty, Type *CondTy,
4362                                           bool IsUnsigned) {
4363   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
4364 
4365   MVT MTy = LT.second;
4366 
4367   int ISD;
4368   if (Ty->isIntOrIntVectorTy()) {
4369     ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
4370   } else {
4371     assert(Ty->isFPOrFPVectorTy() &&
4372            "Expected float point or integer vector type.");
4373     ISD = ISD::FMINNUM;
4374   }
4375 
4376   static const CostTblEntry SSE1CostTbl[] = {
4377     {ISD::FMINNUM, MVT::v4f32, 1},
4378   };
4379 
4380   static const CostTblEntry SSE2CostTbl[] = {
4381     {ISD::FMINNUM, MVT::v2f64, 1},
4382     {ISD::SMIN,    MVT::v8i16, 1},
4383     {ISD::UMIN,    MVT::v16i8, 1},
4384   };
4385 
4386   static const CostTblEntry SSE41CostTbl[] = {
4387     {ISD::SMIN,    MVT::v4i32, 1},
4388     {ISD::UMIN,    MVT::v4i32, 1},
4389     {ISD::UMIN,    MVT::v8i16, 1},
4390     {ISD::SMIN,    MVT::v16i8, 1},
4391   };
4392 
4393   static const CostTblEntry SSE42CostTbl[] = {
4394     {ISD::UMIN,    MVT::v2i64, 3}, // xor+pcmpgtq+blendvpd
4395   };
4396 
4397   static const CostTblEntry AVX1CostTbl[] = {
4398     {ISD::FMINNUM, MVT::v8f32,  1},
4399     {ISD::FMINNUM, MVT::v4f64,  1},
4400     {ISD::SMIN,    MVT::v8i32,  3},
4401     {ISD::UMIN,    MVT::v8i32,  3},
4402     {ISD::SMIN,    MVT::v16i16, 3},
4403     {ISD::UMIN,    MVT::v16i16, 3},
4404     {ISD::SMIN,    MVT::v32i8,  3},
4405     {ISD::UMIN,    MVT::v32i8,  3},
4406   };
4407 
4408   static const CostTblEntry AVX2CostTbl[] = {
4409     {ISD::SMIN,    MVT::v8i32,  1},
4410     {ISD::UMIN,    MVT::v8i32,  1},
4411     {ISD::SMIN,    MVT::v16i16, 1},
4412     {ISD::UMIN,    MVT::v16i16, 1},
4413     {ISD::SMIN,    MVT::v32i8,  1},
4414     {ISD::UMIN,    MVT::v32i8,  1},
4415   };
4416 
4417   static const CostTblEntry AVX512CostTbl[] = {
4418     {ISD::FMINNUM, MVT::v16f32, 1},
4419     {ISD::FMINNUM, MVT::v8f64,  1},
4420     {ISD::SMIN,    MVT::v2i64,  1},
4421     {ISD::UMIN,    MVT::v2i64,  1},
4422     {ISD::SMIN,    MVT::v4i64,  1},
4423     {ISD::UMIN,    MVT::v4i64,  1},
4424     {ISD::SMIN,    MVT::v8i64,  1},
4425     {ISD::UMIN,    MVT::v8i64,  1},
4426     {ISD::SMIN,    MVT::v16i32, 1},
4427     {ISD::UMIN,    MVT::v16i32, 1},
4428   };
4429 
4430   static const CostTblEntry AVX512BWCostTbl[] = {
4431     {ISD::SMIN,    MVT::v32i16, 1},
4432     {ISD::UMIN,    MVT::v32i16, 1},
4433     {ISD::SMIN,    MVT::v64i8,  1},
4434     {ISD::UMIN,    MVT::v64i8,  1},
4435   };
4436 
4437   // If we have a native MIN/MAX instruction for this type, use it.
4438   if (ST->hasBWI())
4439     if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
4440       return LT.first * Entry->Cost;
4441 
4442   if (ST->hasAVX512())
4443     if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
4444       return LT.first * Entry->Cost;
4445 
4446   if (ST->hasAVX2())
4447     if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
4448       return LT.first * Entry->Cost;
4449 
4450   if (ST->hasAVX())
4451     if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
4452       return LT.first * Entry->Cost;
4453 
4454   if (ST->hasSSE42())
4455     if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
4456       return LT.first * Entry->Cost;
4457 
4458   if (ST->hasSSE41())
4459     if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
4460       return LT.first * Entry->Cost;
4461 
4462   if (ST->hasSSE2())
4463     if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
4464       return LT.first * Entry->Cost;
4465 
4466   if (ST->hasSSE1())
4467     if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
4468       return LT.first * Entry->Cost;
4469 
4470   unsigned CmpOpcode;
4471   if (Ty->isFPOrFPVectorTy()) {
4472     CmpOpcode = Instruction::FCmp;
4473   } else {
4474     assert(Ty->isIntOrIntVectorTy() &&
4475            "expecting floating point or integer type for min/max reduction");
4476     CmpOpcode = Instruction::ICmp;
4477   }
4478 
4479   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
4480   // Otherwise fall back to cmp+select.
4481   InstructionCost Result =
4482       getCmpSelInstrCost(CmpOpcode, Ty, CondTy, CmpInst::BAD_ICMP_PREDICATE,
4483                          CostKind) +
4484       getCmpSelInstrCost(Instruction::Select, Ty, CondTy,
4485                          CmpInst::BAD_ICMP_PREDICATE, CostKind);
4486   return Result;
4487 }
4488 
4489 InstructionCost
4490 X86TTIImpl::getMinMaxReductionCost(VectorType *ValTy, VectorType *CondTy,
4491                                    bool IsUnsigned,
4492                                    TTI::TargetCostKind CostKind) {
4493   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
4494 
4495   MVT MTy = LT.second;
4496 
4497   int ISD;
4498   if (ValTy->isIntOrIntVectorTy()) {
4499     ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
4500   } else {
4501     assert(ValTy->isFPOrFPVectorTy() &&
4502            "Expected float point or integer vector type.");
4503     ISD = ISD::FMINNUM;
4504   }
4505 
4506   // We use the Intel Architecture Code Analyzer(IACA) to measure the throughput
4507   // and make it as the cost.
4508 
4509   static const CostTblEntry SSE2CostTblNoPairWise[] = {
4510       {ISD::UMIN, MVT::v2i16, 5}, // need pxors to use pminsw/pmaxsw
4511       {ISD::UMIN, MVT::v4i16, 7}, // need pxors to use pminsw/pmaxsw
4512       {ISD::UMIN, MVT::v8i16, 9}, // need pxors to use pminsw/pmaxsw
4513   };
4514 
4515   static const CostTblEntry SSE41CostTblNoPairWise[] = {
4516       {ISD::SMIN, MVT::v2i16, 3}, // same as sse2
4517       {ISD::SMIN, MVT::v4i16, 5}, // same as sse2
4518       {ISD::UMIN, MVT::v2i16, 5}, // same as sse2
4519       {ISD::UMIN, MVT::v4i16, 7}, // same as sse2
4520       {ISD::SMIN, MVT::v8i16, 4}, // phminposuw+xor
4521       {ISD::UMIN, MVT::v8i16, 4}, // FIXME: umin is cheaper than umax
4522       {ISD::SMIN, MVT::v2i8,  3}, // pminsb
4523       {ISD::SMIN, MVT::v4i8,  5}, // pminsb
4524       {ISD::SMIN, MVT::v8i8,  7}, // pminsb
4525       {ISD::SMIN, MVT::v16i8, 6},
4526       {ISD::UMIN, MVT::v2i8,  3}, // same as sse2
4527       {ISD::UMIN, MVT::v4i8,  5}, // same as sse2
4528       {ISD::UMIN, MVT::v8i8,  7}, // same as sse2
4529       {ISD::UMIN, MVT::v16i8, 6}, // FIXME: umin is cheaper than umax
4530   };
4531 
4532   static const CostTblEntry AVX1CostTblNoPairWise[] = {
4533       {ISD::SMIN, MVT::v16i16, 6},
4534       {ISD::UMIN, MVT::v16i16, 6}, // FIXME: umin is cheaper than umax
4535       {ISD::SMIN, MVT::v32i8, 8},
4536       {ISD::UMIN, MVT::v32i8, 8},
4537   };
4538 
4539   static const CostTblEntry AVX512BWCostTblNoPairWise[] = {
4540       {ISD::SMIN, MVT::v32i16, 8},
4541       {ISD::UMIN, MVT::v32i16, 8}, // FIXME: umin is cheaper than umax
4542       {ISD::SMIN, MVT::v64i8, 10},
4543       {ISD::UMIN, MVT::v64i8, 10},
4544   };
4545 
4546   // Before legalizing the type, give a chance to look up illegal narrow types
4547   // in the table.
4548   // FIXME: Is there a better way to do this?
4549   EVT VT = TLI->getValueType(DL, ValTy);
4550   if (VT.isSimple()) {
4551     MVT MTy = VT.getSimpleVT();
4552     if (ST->hasBWI())
4553       if (const auto *Entry = CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy))
4554         return Entry->Cost;
4555 
4556     if (ST->hasAVX())
4557       if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
4558         return Entry->Cost;
4559 
4560     if (ST->hasSSE41())
4561       if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy))
4562         return Entry->Cost;
4563 
4564     if (ST->hasSSE2())
4565       if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
4566         return Entry->Cost;
4567   }
4568 
4569   auto *ValVTy = cast<FixedVectorType>(ValTy);
4570   unsigned NumVecElts = ValVTy->getNumElements();
4571 
4572   auto *Ty = ValVTy;
4573   InstructionCost MinMaxCost = 0;
4574   if (LT.first != 1 && MTy.isVector() &&
4575       MTy.getVectorNumElements() < ValVTy->getNumElements()) {
4576     // Type needs to be split. We need LT.first - 1 operations ops.
4577     Ty = FixedVectorType::get(ValVTy->getElementType(),
4578                               MTy.getVectorNumElements());
4579     auto *SubCondTy = FixedVectorType::get(CondTy->getElementType(),
4580                                            MTy.getVectorNumElements());
4581     MinMaxCost = getMinMaxCost(Ty, SubCondTy, IsUnsigned);
4582     MinMaxCost *= LT.first - 1;
4583     NumVecElts = MTy.getVectorNumElements();
4584   }
4585 
4586   if (ST->hasBWI())
4587     if (const auto *Entry = CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy))
4588       return MinMaxCost + Entry->Cost;
4589 
4590   if (ST->hasAVX())
4591     if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
4592       return MinMaxCost + Entry->Cost;
4593 
4594   if (ST->hasSSE41())
4595     if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy))
4596       return MinMaxCost + Entry->Cost;
4597 
4598   if (ST->hasSSE2())
4599     if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
4600       return MinMaxCost + Entry->Cost;
4601 
4602   unsigned ScalarSize = ValTy->getScalarSizeInBits();
4603 
4604   // Special case power of 2 reductions where the scalar type isn't changed
4605   // by type legalization.
4606   if (!isPowerOf2_32(ValVTy->getNumElements()) ||
4607       ScalarSize != MTy.getScalarSizeInBits())
4608     return BaseT::getMinMaxReductionCost(ValTy, CondTy, IsUnsigned, CostKind);
4609 
4610   // Now handle reduction with the legal type, taking into account size changes
4611   // at each level.
4612   while (NumVecElts > 1) {
4613     // Determine the size of the remaining vector we need to reduce.
4614     unsigned Size = NumVecElts * ScalarSize;
4615     NumVecElts /= 2;
4616     // If we're reducing from 256/512 bits, use an extract_subvector.
4617     if (Size > 128) {
4618       auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts);
4619       MinMaxCost +=
4620           getShuffleCost(TTI::SK_ExtractSubvector, Ty, None, NumVecElts, SubTy);
4621       Ty = SubTy;
4622     } else if (Size == 128) {
4623       // Reducing from 128 bits is a permute of v2f64/v2i64.
4624       VectorType *ShufTy;
4625       if (ValTy->isFloatingPointTy())
4626         ShufTy =
4627             FixedVectorType::get(Type::getDoubleTy(ValTy->getContext()), 2);
4628       else
4629         ShufTy = FixedVectorType::get(Type::getInt64Ty(ValTy->getContext()), 2);
4630       MinMaxCost +=
4631           getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
4632     } else if (Size == 64) {
4633       // Reducing from 64 bits is a shuffle of v4f32/v4i32.
4634       FixedVectorType *ShufTy;
4635       if (ValTy->isFloatingPointTy())
4636         ShufTy = FixedVectorType::get(Type::getFloatTy(ValTy->getContext()), 4);
4637       else
4638         ShufTy = FixedVectorType::get(Type::getInt32Ty(ValTy->getContext()), 4);
4639       MinMaxCost +=
4640           getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
4641     } else {
4642       // Reducing from smaller size is a shift by immediate.
4643       auto *ShiftTy = FixedVectorType::get(
4644           Type::getIntNTy(ValTy->getContext(), Size), 128 / Size);
4645       MinMaxCost += getArithmeticInstrCost(
4646           Instruction::LShr, ShiftTy, TTI::TCK_RecipThroughput,
4647           TargetTransformInfo::OK_AnyValue,
4648           TargetTransformInfo::OK_UniformConstantValue,
4649           TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
4650     }
4651 
4652     // Add the arithmetic op for this level.
4653     auto *SubCondTy =
4654         FixedVectorType::get(CondTy->getElementType(), Ty->getNumElements());
4655     MinMaxCost += getMinMaxCost(Ty, SubCondTy, IsUnsigned);
4656   }
4657 
4658   // Add the final extract element to the cost.
4659   return MinMaxCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
4660 }
4661 
4662 /// Calculate the cost of materializing a 64-bit value. This helper
4663 /// method might only calculate a fraction of a larger immediate. Therefore it
4664 /// is valid to return a cost of ZERO.
4665 InstructionCost X86TTIImpl::getIntImmCost(int64_t Val) {
4666   if (Val == 0)
4667     return TTI::TCC_Free;
4668 
4669   if (isInt<32>(Val))
4670     return TTI::TCC_Basic;
4671 
4672   return 2 * TTI::TCC_Basic;
4673 }
4674 
4675 InstructionCost X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
4676                                           TTI::TargetCostKind CostKind) {
4677   assert(Ty->isIntegerTy());
4678 
4679   unsigned BitSize = Ty->getPrimitiveSizeInBits();
4680   if (BitSize == 0)
4681     return ~0U;
4682 
4683   // Never hoist constants larger than 128bit, because this might lead to
4684   // incorrect code generation or assertions in codegen.
4685   // Fixme: Create a cost model for types larger than i128 once the codegen
4686   // issues have been fixed.
4687   if (BitSize > 128)
4688     return TTI::TCC_Free;
4689 
4690   if (Imm == 0)
4691     return TTI::TCC_Free;
4692 
4693   // Sign-extend all constants to a multiple of 64-bit.
4694   APInt ImmVal = Imm;
4695   if (BitSize % 64 != 0)
4696     ImmVal = Imm.sext(alignTo(BitSize, 64));
4697 
4698   // Split the constant into 64-bit chunks and calculate the cost for each
4699   // chunk.
4700   InstructionCost Cost = 0;
4701   for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
4702     APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
4703     int64_t Val = Tmp.getSExtValue();
4704     Cost += getIntImmCost(Val);
4705   }
4706   // We need at least one instruction to materialize the constant.
4707   return std::max<InstructionCost>(1, Cost);
4708 }
4709 
4710 InstructionCost X86TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
4711                                               const APInt &Imm, Type *Ty,
4712                                               TTI::TargetCostKind CostKind,
4713                                               Instruction *Inst) {
4714   assert(Ty->isIntegerTy());
4715 
4716   unsigned BitSize = Ty->getPrimitiveSizeInBits();
4717   // There is no cost model for constants with a bit size of 0. Return TCC_Free
4718   // here, so that constant hoisting will ignore this constant.
4719   if (BitSize == 0)
4720     return TTI::TCC_Free;
4721 
4722   unsigned ImmIdx = ~0U;
4723   switch (Opcode) {
4724   default:
4725     return TTI::TCC_Free;
4726   case Instruction::GetElementPtr:
4727     // Always hoist the base address of a GetElementPtr. This prevents the
4728     // creation of new constants for every base constant that gets constant
4729     // folded with the offset.
4730     if (Idx == 0)
4731       return 2 * TTI::TCC_Basic;
4732     return TTI::TCC_Free;
4733   case Instruction::Store:
4734     ImmIdx = 0;
4735     break;
4736   case Instruction::ICmp:
4737     // This is an imperfect hack to prevent constant hoisting of
4738     // compares that might be trying to check if a 64-bit value fits in
4739     // 32-bits. The backend can optimize these cases using a right shift by 32.
4740     // Ideally we would check the compare predicate here. There also other
4741     // similar immediates the backend can use shifts for.
4742     if (Idx == 1 && Imm.getBitWidth() == 64) {
4743       uint64_t ImmVal = Imm.getZExtValue();
4744       if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff)
4745         return TTI::TCC_Free;
4746     }
4747     ImmIdx = 1;
4748     break;
4749   case Instruction::And:
4750     // We support 64-bit ANDs with immediates with 32-bits of leading zeroes
4751     // by using a 32-bit operation with implicit zero extension. Detect such
4752     // immediates here as the normal path expects bit 31 to be sign extended.
4753     if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
4754       return TTI::TCC_Free;
4755     ImmIdx = 1;
4756     break;
4757   case Instruction::Add:
4758   case Instruction::Sub:
4759     // For add/sub, we can use the opposite instruction for INT32_MIN.
4760     if (Idx == 1 && Imm.getBitWidth() == 64 && Imm.getZExtValue() == 0x80000000)
4761       return TTI::TCC_Free;
4762     ImmIdx = 1;
4763     break;
4764   case Instruction::UDiv:
4765   case Instruction::SDiv:
4766   case Instruction::URem:
4767   case Instruction::SRem:
4768     // Division by constant is typically expanded later into a different
4769     // instruction sequence. This completely changes the constants.
4770     // Report them as "free" to stop ConstantHoist from marking them as opaque.
4771     return TTI::TCC_Free;
4772   case Instruction::Mul:
4773   case Instruction::Or:
4774   case Instruction::Xor:
4775     ImmIdx = 1;
4776     break;
4777   // Always return TCC_Free for the shift value of a shift instruction.
4778   case Instruction::Shl:
4779   case Instruction::LShr:
4780   case Instruction::AShr:
4781     if (Idx == 1)
4782       return TTI::TCC_Free;
4783     break;
4784   case Instruction::Trunc:
4785   case Instruction::ZExt:
4786   case Instruction::SExt:
4787   case Instruction::IntToPtr:
4788   case Instruction::PtrToInt:
4789   case Instruction::BitCast:
4790   case Instruction::PHI:
4791   case Instruction::Call:
4792   case Instruction::Select:
4793   case Instruction::Ret:
4794   case Instruction::Load:
4795     break;
4796   }
4797 
4798   if (Idx == ImmIdx) {
4799     int NumConstants = divideCeil(BitSize, 64);
4800     InstructionCost Cost = X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
4801     return (Cost <= NumConstants * TTI::TCC_Basic)
4802                ? static_cast<int>(TTI::TCC_Free)
4803                : Cost;
4804   }
4805 
4806   return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
4807 }
4808 
4809 InstructionCost X86TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
4810                                                 const APInt &Imm, Type *Ty,
4811                                                 TTI::TargetCostKind CostKind) {
4812   assert(Ty->isIntegerTy());
4813 
4814   unsigned BitSize = Ty->getPrimitiveSizeInBits();
4815   // There is no cost model for constants with a bit size of 0. Return TCC_Free
4816   // here, so that constant hoisting will ignore this constant.
4817   if (BitSize == 0)
4818     return TTI::TCC_Free;
4819 
4820   switch (IID) {
4821   default:
4822     return TTI::TCC_Free;
4823   case Intrinsic::sadd_with_overflow:
4824   case Intrinsic::uadd_with_overflow:
4825   case Intrinsic::ssub_with_overflow:
4826   case Intrinsic::usub_with_overflow:
4827   case Intrinsic::smul_with_overflow:
4828   case Intrinsic::umul_with_overflow:
4829     if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
4830       return TTI::TCC_Free;
4831     break;
4832   case Intrinsic::experimental_stackmap:
4833     if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
4834       return TTI::TCC_Free;
4835     break;
4836   case Intrinsic::experimental_patchpoint_void:
4837   case Intrinsic::experimental_patchpoint_i64:
4838     if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
4839       return TTI::TCC_Free;
4840     break;
4841   }
4842   return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
4843 }
4844 
4845 InstructionCost X86TTIImpl::getCFInstrCost(unsigned Opcode,
4846                                            TTI::TargetCostKind CostKind,
4847                                            const Instruction *I) {
4848   if (CostKind != TTI::TCK_RecipThroughput)
4849     return Opcode == Instruction::PHI ? 0 : 1;
4850   // Branches are assumed to be predicted.
4851   return 0;
4852 }
4853 
4854 int X86TTIImpl::getGatherOverhead() const {
4855   // Some CPUs have more overhead for gather. The specified overhead is relative
4856   // to the Load operation. "2" is the number provided by Intel architects. This
4857   // parameter is used for cost estimation of Gather Op and comparison with
4858   // other alternatives.
4859   // TODO: Remove the explicit hasAVX512()?, That would mean we would only
4860   // enable gather with a -march.
4861   if (ST->hasAVX512() || (ST->hasAVX2() && ST->hasFastGather()))
4862     return 2;
4863 
4864   return 1024;
4865 }
4866 
4867 int X86TTIImpl::getScatterOverhead() const {
4868   if (ST->hasAVX512())
4869     return 2;
4870 
4871   return 1024;
4872 }
4873 
4874 // Return an average cost of Gather / Scatter instruction, maybe improved later.
4875 // FIXME: Add TargetCostKind support.
4876 InstructionCost X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy,
4877                                             const Value *Ptr, Align Alignment,
4878                                             unsigned AddressSpace) {
4879 
4880   assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost");
4881   unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();
4882 
4883   // Try to reduce index size from 64 bit (default for GEP)
4884   // to 32. It is essential for VF 16. If the index can't be reduced to 32, the
4885   // operation will use 16 x 64 indices which do not fit in a zmm and needs
4886   // to split. Also check that the base pointer is the same for all lanes,
4887   // and that there's at most one variable index.
4888   auto getIndexSizeInBits = [](const Value *Ptr, const DataLayout &DL) {
4889     unsigned IndexSize = DL.getPointerSizeInBits();
4890     const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
4891     if (IndexSize < 64 || !GEP)
4892       return IndexSize;
4893 
4894     unsigned NumOfVarIndices = 0;
4895     const Value *Ptrs = GEP->getPointerOperand();
4896     if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs))
4897       return IndexSize;
4898     for (unsigned i = 1; i < GEP->getNumOperands(); ++i) {
4899       if (isa<Constant>(GEP->getOperand(i)))
4900         continue;
4901       Type *IndxTy = GEP->getOperand(i)->getType();
4902       if (auto *IndexVTy = dyn_cast<VectorType>(IndxTy))
4903         IndxTy = IndexVTy->getElementType();
4904       if ((IndxTy->getPrimitiveSizeInBits() == 64 &&
4905           !isa<SExtInst>(GEP->getOperand(i))) ||
4906          ++NumOfVarIndices > 1)
4907         return IndexSize; // 64
4908     }
4909     return (unsigned)32;
4910   };
4911 
4912   // Trying to reduce IndexSize to 32 bits for vector 16.
4913   // By default the IndexSize is equal to pointer size.
4914   unsigned IndexSize = (ST->hasAVX512() && VF >= 16)
4915                            ? getIndexSizeInBits(Ptr, DL)
4916                            : DL.getPointerSizeInBits();
4917 
4918   auto *IndexVTy = FixedVectorType::get(
4919       IntegerType::get(SrcVTy->getContext(), IndexSize), VF);
4920   std::pair<InstructionCost, MVT> IdxsLT =
4921       TLI->getTypeLegalizationCost(DL, IndexVTy);
4922   std::pair<InstructionCost, MVT> SrcLT =
4923       TLI->getTypeLegalizationCost(DL, SrcVTy);
4924   InstructionCost::CostType SplitFactor =
4925       *std::max(IdxsLT.first, SrcLT.first).getValue();
4926   if (SplitFactor > 1) {
4927     // Handle splitting of vector of pointers
4928     auto *SplitSrcTy =
4929         FixedVectorType::get(SrcVTy->getScalarType(), VF / SplitFactor);
4930     return SplitFactor * getGSVectorCost(Opcode, SplitSrcTy, Ptr, Alignment,
4931                                          AddressSpace);
4932   }
4933 
4934   // The gather / scatter cost is given by Intel architects. It is a rough
4935   // number since we are looking at one instruction in a time.
4936   const int GSOverhead = (Opcode == Instruction::Load)
4937                              ? getGatherOverhead()
4938                              : getScatterOverhead();
4939   return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
4940                                            MaybeAlign(Alignment), AddressSpace,
4941                                            TTI::TCK_RecipThroughput);
4942 }
4943 
4944 /// Return the cost of full scalarization of gather / scatter operation.
4945 ///
4946 /// Opcode - Load or Store instruction.
4947 /// SrcVTy - The type of the data vector that should be gathered or scattered.
4948 /// VariableMask - The mask is non-constant at compile time.
4949 /// Alignment - Alignment for one element.
4950 /// AddressSpace - pointer[s] address space.
4951 ///
4952 /// FIXME: Add TargetCostKind support.
4953 InstructionCost X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy,
4954                                             bool VariableMask, Align Alignment,
4955                                             unsigned AddressSpace) {
4956   Type *ScalarTy = SrcVTy->getScalarType();
4957   unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();
4958   APInt DemandedElts = APInt::getAllOnes(VF);
4959   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
4960 
4961   InstructionCost MaskUnpackCost = 0;
4962   if (VariableMask) {
4963     auto *MaskTy =
4964         FixedVectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF);
4965     MaskUnpackCost = getScalarizationOverhead(
4966         MaskTy, DemandedElts, /*Insert=*/false, /*Extract=*/true);
4967     InstructionCost ScalarCompareCost = getCmpSelInstrCost(
4968         Instruction::ICmp, Type::getInt1Ty(SrcVTy->getContext()), nullptr,
4969         CmpInst::BAD_ICMP_PREDICATE, CostKind);
4970     InstructionCost BranchCost = getCFInstrCost(Instruction::Br, CostKind);
4971     MaskUnpackCost += VF * (BranchCost + ScalarCompareCost);
4972   }
4973 
4974   InstructionCost AddressUnpackCost = getScalarizationOverhead(
4975       FixedVectorType::get(ScalarTy->getPointerTo(), VF), DemandedElts,
4976       /*Insert=*/false, /*Extract=*/true);
4977 
4978   // The cost of the scalar loads/stores.
4979   InstructionCost MemoryOpCost =
4980       VF * getMemoryOpCost(Opcode, ScalarTy, MaybeAlign(Alignment),
4981                            AddressSpace, CostKind);
4982 
4983   // The cost of forming the vector from loaded scalars/
4984   // scalarizing the vector to perform scalar stores.
4985   InstructionCost InsertExtractCost =
4986       getScalarizationOverhead(cast<FixedVectorType>(SrcVTy), DemandedElts,
4987                                /*Insert=*/Opcode == Instruction::Load,
4988                                /*Extract=*/Opcode == Instruction::Store);
4989 
4990   return AddressUnpackCost + MemoryOpCost + MaskUnpackCost + InsertExtractCost;
4991 }
4992 
4993 /// Calculate the cost of Gather / Scatter operation
4994 InstructionCost X86TTIImpl::getGatherScatterOpCost(
4995     unsigned Opcode, Type *SrcVTy, const Value *Ptr, bool VariableMask,
4996     Align Alignment, TTI::TargetCostKind CostKind,
4997     const Instruction *I = nullptr) {
4998   if (CostKind != TTI::TCK_RecipThroughput) {
4999     if ((Opcode == Instruction::Load &&
5000          isLegalMaskedGather(SrcVTy, Align(Alignment)) &&
5001          !forceScalarizeMaskedGather(cast<VectorType>(SrcVTy),
5002                                      Align(Alignment))) ||
5003         (Opcode == Instruction::Store &&
5004          isLegalMaskedScatter(SrcVTy, Align(Alignment)) &&
5005          !forceScalarizeMaskedScatter(cast<VectorType>(SrcVTy),
5006                                       Align(Alignment))))
5007       return 1;
5008     return BaseT::getGatherScatterOpCost(Opcode, SrcVTy, Ptr, VariableMask,
5009                                          Alignment, CostKind, I);
5010   }
5011 
5012   assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter");
5013   PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
5014   if (!PtrTy && Ptr->getType()->isVectorTy())
5015     PtrTy = dyn_cast<PointerType>(
5016         cast<VectorType>(Ptr->getType())->getElementType());
5017   assert(PtrTy && "Unexpected type for Ptr argument");
5018   unsigned AddressSpace = PtrTy->getAddressSpace();
5019 
5020   if ((Opcode == Instruction::Load &&
5021        (!isLegalMaskedGather(SrcVTy, Align(Alignment)) ||
5022         forceScalarizeMaskedGather(cast<VectorType>(SrcVTy),
5023                                    Align(Alignment)))) ||
5024       (Opcode == Instruction::Store &&
5025        (!isLegalMaskedScatter(SrcVTy, Align(Alignment)) ||
5026         forceScalarizeMaskedScatter(cast<VectorType>(SrcVTy),
5027                                     Align(Alignment)))))
5028     return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment,
5029                            AddressSpace);
5030 
5031   return getGSVectorCost(Opcode, SrcVTy, Ptr, Alignment, AddressSpace);
5032 }
5033 
5034 bool X86TTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1,
5035                                TargetTransformInfo::LSRCost &C2) {
5036     // X86 specific here are "instruction number 1st priority".
5037     return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost,
5038                     C1.NumIVMuls, C1.NumBaseAdds,
5039                     C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
5040            std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost,
5041                     C2.NumIVMuls, C2.NumBaseAdds,
5042                     C2.ScaleCost, C2.ImmCost, C2.SetupCost);
5043 }
5044 
5045 bool X86TTIImpl::canMacroFuseCmp() {
5046   return ST->hasMacroFusion() || ST->hasBranchFusion();
5047 }
5048 
5049 bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) {
5050   if (!ST->hasAVX())
5051     return false;
5052 
5053   // The backend can't handle a single element vector.
5054   if (isa<VectorType>(DataTy) &&
5055       cast<FixedVectorType>(DataTy)->getNumElements() == 1)
5056     return false;
5057   Type *ScalarTy = DataTy->getScalarType();
5058 
5059   if (ScalarTy->isPointerTy())
5060     return true;
5061 
5062   if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
5063     return true;
5064 
5065   if (ScalarTy->isHalfTy() && ST->hasBWI() && ST->hasFP16())
5066     return true;
5067 
5068   if (!ScalarTy->isIntegerTy())
5069     return false;
5070 
5071   unsigned IntWidth = ScalarTy->getIntegerBitWidth();
5072   return IntWidth == 32 || IntWidth == 64 ||
5073          ((IntWidth == 8 || IntWidth == 16) && ST->hasBWI());
5074 }
5075 
5076 bool X86TTIImpl::isLegalMaskedStore(Type *DataType, Align Alignment) {
5077   return isLegalMaskedLoad(DataType, Alignment);
5078 }
5079 
5080 bool X86TTIImpl::isLegalNTLoad(Type *DataType, Align Alignment) {
5081   unsigned DataSize = DL.getTypeStoreSize(DataType);
5082   // The only supported nontemporal loads are for aligned vectors of 16 or 32
5083   // bytes.  Note that 32-byte nontemporal vector loads are supported by AVX2
5084   // (the equivalent stores only require AVX).
5085   if (Alignment >= DataSize && (DataSize == 16 || DataSize == 32))
5086     return DataSize == 16 ?  ST->hasSSE1() : ST->hasAVX2();
5087 
5088   return false;
5089 }
5090 
5091 bool X86TTIImpl::isLegalNTStore(Type *DataType, Align Alignment) {
5092   unsigned DataSize = DL.getTypeStoreSize(DataType);
5093 
5094   // SSE4A supports nontemporal stores of float and double at arbitrary
5095   // alignment.
5096   if (ST->hasSSE4A() && (DataType->isFloatTy() || DataType->isDoubleTy()))
5097     return true;
5098 
5099   // Besides the SSE4A subtarget exception above, only aligned stores are
5100   // available nontemporaly on any other subtarget.  And only stores with a size
5101   // of 4..32 bytes (powers of 2, only) are permitted.
5102   if (Alignment < DataSize || DataSize < 4 || DataSize > 32 ||
5103       !isPowerOf2_32(DataSize))
5104     return false;
5105 
5106   // 32-byte vector nontemporal stores are supported by AVX (the equivalent
5107   // loads require AVX2).
5108   if (DataSize == 32)
5109     return ST->hasAVX();
5110   if (DataSize == 16)
5111     return ST->hasSSE1();
5112   return true;
5113 }
5114 
5115 bool X86TTIImpl::isLegalMaskedExpandLoad(Type *DataTy) {
5116   if (!isa<VectorType>(DataTy))
5117     return false;
5118 
5119   if (!ST->hasAVX512())
5120     return false;
5121 
5122   // The backend can't handle a single element vector.
5123   if (cast<FixedVectorType>(DataTy)->getNumElements() == 1)
5124     return false;
5125 
5126   Type *ScalarTy = cast<VectorType>(DataTy)->getElementType();
5127 
5128   if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
5129     return true;
5130 
5131   if (!ScalarTy->isIntegerTy())
5132     return false;
5133 
5134   unsigned IntWidth = ScalarTy->getIntegerBitWidth();
5135   return IntWidth == 32 || IntWidth == 64 ||
5136          ((IntWidth == 8 || IntWidth == 16) && ST->hasVBMI2());
5137 }
5138 
5139 bool X86TTIImpl::isLegalMaskedCompressStore(Type *DataTy) {
5140   return isLegalMaskedExpandLoad(DataTy);
5141 }
5142 
5143 bool X86TTIImpl::supportsGather() const {
5144   // Some CPUs have better gather performance than others.
5145   // TODO: Remove the explicit ST->hasAVX512()?, That would mean we would only
5146   // enable gather with a -march.
5147   return ST->hasAVX512() || (ST->hasFastGather() && ST->hasAVX2());
5148 }
5149 
5150 bool X86TTIImpl::forceScalarizeMaskedGather(VectorType *VTy, Align Alignment) {
5151   // Gather / Scatter for vector 2 is not profitable on KNL / SKX
5152   // Vector-4 of gather/scatter instruction does not exist on KNL. We can extend
5153   // it to 8 elements, but zeroing upper bits of the mask vector will add more
5154   // instructions. Right now we give the scalar cost of vector-4 for KNL. TODO:
5155   // Check, maybe the gather/scatter instruction is better in the VariableMask
5156   // case.
5157   unsigned NumElts = cast<FixedVectorType>(VTy)->getNumElements();
5158   return NumElts == 1 ||
5159          (ST->hasAVX512() && (NumElts == 2 || (NumElts == 4 && !ST->hasVLX())));
5160 }
5161 
5162 bool X86TTIImpl::isLegalMaskedGather(Type *DataTy, Align Alignment) {
5163   if (!supportsGather())
5164     return false;
5165   Type *ScalarTy = DataTy->getScalarType();
5166   if (ScalarTy->isPointerTy())
5167     return true;
5168 
5169   if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
5170     return true;
5171 
5172   if (!ScalarTy->isIntegerTy())
5173     return false;
5174 
5175   unsigned IntWidth = ScalarTy->getIntegerBitWidth();
5176   return IntWidth == 32 || IntWidth == 64;
5177 }
5178 
5179 bool X86TTIImpl::isLegalMaskedScatter(Type *DataType, Align Alignment) {
5180   // AVX2 doesn't support scatter
5181   if (!ST->hasAVX512())
5182     return false;
5183   return isLegalMaskedGather(DataType, Alignment);
5184 }
5185 
5186 bool X86TTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
5187   EVT VT = TLI->getValueType(DL, DataType);
5188   return TLI->isOperationLegal(IsSigned ? ISD::SDIVREM : ISD::UDIVREM, VT);
5189 }
5190 
5191 bool X86TTIImpl::isFCmpOrdCheaperThanFCmpZero(Type *Ty) {
5192   return false;
5193 }
5194 
5195 bool X86TTIImpl::areInlineCompatible(const Function *Caller,
5196                                      const Function *Callee) const {
5197   const TargetMachine &TM = getTLI()->getTargetMachine();
5198 
5199   // Work this as a subsetting of subtarget features.
5200   const FeatureBitset &CallerBits =
5201       TM.getSubtargetImpl(*Caller)->getFeatureBits();
5202   const FeatureBitset &CalleeBits =
5203       TM.getSubtargetImpl(*Callee)->getFeatureBits();
5204 
5205   // Check whether features are the same (apart from the ignore list).
5206   FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
5207   FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
5208   if (RealCallerBits == RealCalleeBits)
5209     return true;
5210 
5211   // If the features are a subset, we need to additionally check for calls
5212   // that may become ABI-incompatible as a result of inlining.
5213   if ((RealCallerBits & RealCalleeBits) != RealCalleeBits)
5214     return false;
5215 
5216   for (const Instruction &I : instructions(Callee)) {
5217     if (const auto *CB = dyn_cast<CallBase>(&I)) {
5218       SmallVector<Type *, 8> Types;
5219       for (Value *Arg : CB->args())
5220         Types.push_back(Arg->getType());
5221       if (!CB->getType()->isVoidTy())
5222         Types.push_back(CB->getType());
5223 
5224       // Simple types are always ABI compatible.
5225       auto IsSimpleTy = [](Type *Ty) {
5226         return !Ty->isVectorTy() && !Ty->isAggregateType();
5227       };
5228       if (all_of(Types, IsSimpleTy))
5229         continue;
5230 
5231       if (Function *NestedCallee = CB->getCalledFunction()) {
5232         // Assume that intrinsics are always ABI compatible.
5233         if (NestedCallee->isIntrinsic())
5234           continue;
5235 
5236         // Do a precise compatibility check.
5237         if (!areTypesABICompatible(Caller, NestedCallee, Types))
5238           return false;
5239       } else {
5240         // We don't know the target features of the callee,
5241         // assume it is incompatible.
5242         return false;
5243       }
5244     }
5245   }
5246   return true;
5247 }
5248 
5249 bool X86TTIImpl::areTypesABICompatible(const Function *Caller,
5250                                        const Function *Callee,
5251                                        const ArrayRef<Type *> &Types) const {
5252   if (!BaseT::areTypesABICompatible(Caller, Callee, Types))
5253     return false;
5254 
5255   // If we get here, we know the target features match. If one function
5256   // considers 512-bit vectors legal and the other does not, consider them
5257   // incompatible.
5258   const TargetMachine &TM = getTLI()->getTargetMachine();
5259 
5260   if (TM.getSubtarget<X86Subtarget>(*Caller).useAVX512Regs() ==
5261       TM.getSubtarget<X86Subtarget>(*Callee).useAVX512Regs())
5262     return true;
5263 
5264   // Consider the arguments compatible if they aren't vectors or aggregates.
5265   // FIXME: Look at the size of vectors.
5266   // FIXME: Look at the element types of aggregates to see if there are vectors.
5267   return llvm::none_of(Types,
5268       [](Type *T) { return T->isVectorTy() || T->isAggregateType(); });
5269 }
5270 
5271 X86TTIImpl::TTI::MemCmpExpansionOptions
5272 X86TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
5273   TTI::MemCmpExpansionOptions Options;
5274   Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
5275   Options.NumLoadsPerBlock = 2;
5276   // All GPR and vector loads can be unaligned.
5277   Options.AllowOverlappingLoads = true;
5278   if (IsZeroCmp) {
5279     // Only enable vector loads for equality comparison. Right now the vector
5280     // version is not as fast for three way compare (see #33329).
5281     const unsigned PreferredWidth = ST->getPreferVectorWidth();
5282     if (PreferredWidth >= 512 && ST->hasAVX512()) Options.LoadSizes.push_back(64);
5283     if (PreferredWidth >= 256 && ST->hasAVX()) Options.LoadSizes.push_back(32);
5284     if (PreferredWidth >= 128 && ST->hasSSE2()) Options.LoadSizes.push_back(16);
5285   }
5286   if (ST->is64Bit()) {
5287     Options.LoadSizes.push_back(8);
5288   }
5289   Options.LoadSizes.push_back(4);
5290   Options.LoadSizes.push_back(2);
5291   Options.LoadSizes.push_back(1);
5292   return Options;
5293 }
5294 
5295 bool X86TTIImpl::prefersVectorizedAddressing() const {
5296   return supportsGather();
5297 }
5298 
5299 bool X86TTIImpl::supportsEfficientVectorElementLoadStore() const {
5300   return false;
5301 }
5302 
5303 bool X86TTIImpl::enableInterleavedAccessVectorization() {
5304   // TODO: We expect this to be beneficial regardless of arch,
5305   // but there are currently some unexplained performance artifacts on Atom.
5306   // As a temporary solution, disable on Atom.
5307   return !(ST->isAtom());
5308 }
5309 
5310 // Get estimation for interleaved load/store operations and strided load.
5311 // \p Indices contains indices for strided load.
5312 // \p Factor - the factor of interleaving.
5313 // AVX-512 provides 3-src shuffles that significantly reduces the cost.
5314 InstructionCost X86TTIImpl::getInterleavedMemoryOpCostAVX512(
5315     unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
5316     ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
5317     TTI::TargetCostKind CostKind, bool UseMaskForCond, bool UseMaskForGaps) {
5318   // VecTy for interleave memop is <VF*Factor x Elt>.
5319   // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
5320   // VecTy = <12 x i32>.
5321 
5322   // Calculate the number of memory operations (NumOfMemOps), required
5323   // for load/store the VecTy.
5324   MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
5325   unsigned VecTySize = DL.getTypeStoreSize(VecTy);
5326   unsigned LegalVTSize = LegalVT.getStoreSize();
5327   unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;
5328 
5329   // Get the cost of one memory operation.
5330   auto *SingleMemOpTy = FixedVectorType::get(VecTy->getElementType(),
5331                                              LegalVT.getVectorNumElements());
5332   InstructionCost MemOpCost;
5333   bool UseMaskedMemOp = UseMaskForCond || UseMaskForGaps;
5334   if (UseMaskedMemOp)
5335     MemOpCost = getMaskedMemoryOpCost(Opcode, SingleMemOpTy, Alignment,
5336                                       AddressSpace, CostKind);
5337   else
5338     MemOpCost = getMemoryOpCost(Opcode, SingleMemOpTy, MaybeAlign(Alignment),
5339                                 AddressSpace, CostKind);
5340 
5341   unsigned VF = VecTy->getNumElements() / Factor;
5342   MVT VT = MVT::getVectorVT(MVT::getVT(VecTy->getScalarType()), VF);
5343 
5344   InstructionCost MaskCost;
5345   if (UseMaskedMemOp) {
5346     APInt DemandedLoadStoreElts = APInt::getZero(VecTy->getNumElements());
5347     for (unsigned Index : Indices) {
5348       assert(Index < Factor && "Invalid index for interleaved memory op");
5349       for (unsigned Elm = 0; Elm < VF; Elm++)
5350         DemandedLoadStoreElts.setBit(Index + Elm * Factor);
5351     }
5352 
5353     Type *I1Type = Type::getInt1Ty(VecTy->getContext());
5354 
5355     MaskCost = getReplicationShuffleCost(
5356         I1Type, Factor, VF,
5357         UseMaskForGaps ? DemandedLoadStoreElts
5358                        : APInt::getAllOnes(VecTy->getNumElements()),
5359         CostKind);
5360 
5361     // The Gaps mask is invariant and created outside the loop, therefore the
5362     // cost of creating it is not accounted for here. However if we have both
5363     // a MaskForGaps and some other mask that guards the execution of the
5364     // memory access, we need to account for the cost of And-ing the two masks
5365     // inside the loop.
5366     if (UseMaskForGaps) {
5367       auto *MaskVT = FixedVectorType::get(I1Type, VecTy->getNumElements());
5368       MaskCost += getArithmeticInstrCost(BinaryOperator::And, MaskVT, CostKind);
5369     }
5370   }
5371 
5372   if (Opcode == Instruction::Load) {
5373     // The tables (AVX512InterleavedLoadTbl and AVX512InterleavedStoreTbl)
5374     // contain the cost of the optimized shuffle sequence that the
5375     // X86InterleavedAccess pass will generate.
5376     // The cost of loads and stores are computed separately from the table.
5377 
5378     // X86InterleavedAccess support only the following interleaved-access group.
5379     static const CostTblEntry AVX512InterleavedLoadTbl[] = {
5380         {3, MVT::v16i8, 12}, //(load 48i8 and) deinterleave into 3 x 16i8
5381         {3, MVT::v32i8, 14}, //(load 96i8 and) deinterleave into 3 x 32i8
5382         {3, MVT::v64i8, 22}, //(load 96i8 and) deinterleave into 3 x 32i8
5383     };
5384 
5385     if (const auto *Entry =
5386             CostTableLookup(AVX512InterleavedLoadTbl, Factor, VT))
5387       return MaskCost + NumOfMemOps * MemOpCost + Entry->Cost;
5388     //If an entry does not exist, fallback to the default implementation.
5389 
5390     // Kind of shuffle depends on number of loaded values.
5391     // If we load the entire data in one register, we can use a 1-src shuffle.
5392     // Otherwise, we'll merge 2 sources in each operation.
5393     TTI::ShuffleKind ShuffleKind =
5394         (NumOfMemOps > 1) ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc;
5395 
5396     InstructionCost ShuffleCost =
5397         getShuffleCost(ShuffleKind, SingleMemOpTy, None, 0, nullptr);
5398 
5399     unsigned NumOfLoadsInInterleaveGrp =
5400         Indices.size() ? Indices.size() : Factor;
5401     auto *ResultTy = FixedVectorType::get(VecTy->getElementType(),
5402                                           VecTy->getNumElements() / Factor);
5403     InstructionCost NumOfResults =
5404         getTLI()->getTypeLegalizationCost(DL, ResultTy).first *
5405         NumOfLoadsInInterleaveGrp;
5406 
5407     // About a half of the loads may be folded in shuffles when we have only
5408     // one result. If we have more than one result, or the loads are masked,
5409     // we do not fold loads at all.
5410     unsigned NumOfUnfoldedLoads =
5411         UseMaskedMemOp || NumOfResults > 1 ? NumOfMemOps : NumOfMemOps / 2;
5412 
5413     // Get a number of shuffle operations per result.
5414     unsigned NumOfShufflesPerResult =
5415         std::max((unsigned)1, (unsigned)(NumOfMemOps - 1));
5416 
5417     // The SK_MergeTwoSrc shuffle clobbers one of src operands.
5418     // When we have more than one destination, we need additional instructions
5419     // to keep sources.
5420     InstructionCost NumOfMoves = 0;
5421     if (NumOfResults > 1 && ShuffleKind == TTI::SK_PermuteTwoSrc)
5422       NumOfMoves = NumOfResults * NumOfShufflesPerResult / 2;
5423 
5424     InstructionCost Cost = NumOfResults * NumOfShufflesPerResult * ShuffleCost +
5425                            MaskCost + NumOfUnfoldedLoads * MemOpCost +
5426                            NumOfMoves;
5427 
5428     return Cost;
5429   }
5430 
5431   // Store.
5432   assert(Opcode == Instruction::Store &&
5433          "Expected Store Instruction at this  point");
5434   // X86InterleavedAccess support only the following interleaved-access group.
5435   static const CostTblEntry AVX512InterleavedStoreTbl[] = {
5436       {3, MVT::v16i8, 12}, // interleave 3 x 16i8 into 48i8 (and store)
5437       {3, MVT::v32i8, 14}, // interleave 3 x 32i8 into 96i8 (and store)
5438       {3, MVT::v64i8, 26}, // interleave 3 x 64i8 into 96i8 (and store)
5439 
5440       {4, MVT::v8i8, 10},  // interleave 4 x 8i8  into 32i8  (and store)
5441       {4, MVT::v16i8, 11}, // interleave 4 x 16i8 into 64i8  (and store)
5442       {4, MVT::v32i8, 14}, // interleave 4 x 32i8 into 128i8 (and store)
5443       {4, MVT::v64i8, 24}  // interleave 4 x 32i8 into 256i8 (and store)
5444   };
5445 
5446   if (const auto *Entry =
5447           CostTableLookup(AVX512InterleavedStoreTbl, Factor, VT))
5448     return MaskCost + NumOfMemOps * MemOpCost + Entry->Cost;
5449   //If an entry does not exist, fallback to the default implementation.
5450 
5451   // There is no strided stores meanwhile. And store can't be folded in
5452   // shuffle.
5453   unsigned NumOfSources = Factor; // The number of values to be merged.
5454   InstructionCost ShuffleCost =
5455       getShuffleCost(TTI::SK_PermuteTwoSrc, SingleMemOpTy, None, 0, nullptr);
5456   unsigned NumOfShufflesPerStore = NumOfSources - 1;
5457 
5458   // The SK_MergeTwoSrc shuffle clobbers one of src operands.
5459   // We need additional instructions to keep sources.
5460   unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2;
5461   InstructionCost Cost =
5462       MaskCost +
5463       NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) +
5464       NumOfMoves;
5465   return Cost;
5466 }
5467 
5468 InstructionCost X86TTIImpl::getInterleavedMemoryOpCost(
5469     unsigned Opcode, Type *BaseTy, unsigned Factor, ArrayRef<unsigned> Indices,
5470     Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
5471     bool UseMaskForCond, bool UseMaskForGaps) {
5472   auto *VecTy = cast<FixedVectorType>(BaseTy);
5473 
5474   auto isSupportedOnAVX512 = [&](Type *VecTy, bool HasBW) {
5475     Type *EltTy = cast<VectorType>(VecTy)->getElementType();
5476     if (EltTy->isFloatTy() || EltTy->isDoubleTy() || EltTy->isIntegerTy(64) ||
5477         EltTy->isIntegerTy(32) || EltTy->isPointerTy())
5478       return true;
5479     if (EltTy->isIntegerTy(16) || EltTy->isIntegerTy(8) ||
5480         (!ST->useSoftFloat() && ST->hasFP16() && EltTy->isHalfTy()))
5481       return HasBW;
5482     return false;
5483   };
5484   if (ST->hasAVX512() && isSupportedOnAVX512(VecTy, ST->hasBWI()))
5485     return getInterleavedMemoryOpCostAVX512(
5486         Opcode, VecTy, Factor, Indices, Alignment,
5487         AddressSpace, CostKind, UseMaskForCond, UseMaskForGaps);
5488 
5489   if (UseMaskForCond || UseMaskForGaps)
5490     return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
5491                                              Alignment, AddressSpace, CostKind,
5492                                              UseMaskForCond, UseMaskForGaps);
5493 
5494   // Get estimation for interleaved load/store operations for SSE-AVX2.
5495   // As opposed to AVX-512, SSE-AVX2 do not have generic shuffles that allow
5496   // computing the cost using a generic formula as a function of generic
5497   // shuffles. We therefore use a lookup table instead, filled according to
5498   // the instruction sequences that codegen currently generates.
5499 
5500   // VecTy for interleave memop is <VF*Factor x Elt>.
5501   // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
5502   // VecTy = <12 x i32>.
5503   MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
5504 
5505   // This function can be called with VecTy=<6xi128>, Factor=3, in which case
5506   // the VF=2, while v2i128 is an unsupported MVT vector type
5507   // (see MachineValueType.h::getVectorVT()).
5508   if (!LegalVT.isVector())
5509     return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
5510                                              Alignment, AddressSpace, CostKind);
5511 
5512   unsigned VF = VecTy->getNumElements() / Factor;
5513   Type *ScalarTy = VecTy->getElementType();
5514   // Deduplicate entries, model floats/pointers as appropriately-sized integers.
5515   if (!ScalarTy->isIntegerTy())
5516     ScalarTy =
5517         Type::getIntNTy(ScalarTy->getContext(), DL.getTypeSizeInBits(ScalarTy));
5518 
5519   // Get the cost of all the memory operations.
5520   // FIXME: discount dead loads.
5521   InstructionCost MemOpCosts = getMemoryOpCost(
5522       Opcode, VecTy, MaybeAlign(Alignment), AddressSpace, CostKind);
5523 
5524   auto *VT = FixedVectorType::get(ScalarTy, VF);
5525   EVT ETy = TLI->getValueType(DL, VT);
5526   if (!ETy.isSimple())
5527     return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
5528                                              Alignment, AddressSpace, CostKind);
5529 
5530   // TODO: Complete for other data-types and strides.
5531   // Each combination of Stride, element bit width and VF results in a different
5532   // sequence; The cost tables are therefore accessed with:
5533   // Factor (stride) and VectorType=VFxiN.
5534   // The Cost accounts only for the shuffle sequence;
5535   // The cost of the loads/stores is accounted for separately.
5536   //
5537   static const CostTblEntry AVX2InterleavedLoadTbl[] = {
5538       {2, MVT::v2i8, 2},  // (load 4i8 and) deinterleave into 2 x 2i8
5539       {2, MVT::v4i8, 2},  // (load 8i8 and) deinterleave into 2 x 4i8
5540       {2, MVT::v8i8, 2},  // (load 16i8 and) deinterleave into 2 x 8i8
5541       {2, MVT::v16i8, 4}, // (load 32i8 and) deinterleave into 2 x 16i8
5542       {2, MVT::v32i8, 6}, // (load 64i8 and) deinterleave into 2 x 32i8
5543 
5544       {2, MVT::v8i16, 6},   // (load 16i16 and) deinterleave into 2 x 8i16
5545       {2, MVT::v16i16, 9},  // (load 32i16 and) deinterleave into 2 x 16i16
5546       {2, MVT::v32i16, 18}, // (load 64i16 and) deinterleave into 2 x 32i16
5547 
5548       {2, MVT::v8i32, 4},   // (load 16i32 and) deinterleave into 2 x 8i32
5549       {2, MVT::v16i32, 8},  // (load 32i32 and) deinterleave into 2 x 16i32
5550       {2, MVT::v32i32, 16}, // (load 64i32 and) deinterleave into 2 x 32i32
5551 
5552       {2, MVT::v4i64, 4},   // (load 8i64 and) deinterleave into 2 x 4i64
5553       {2, MVT::v8i64, 8},   // (load 16i64 and) deinterleave into 2 x 8i64
5554       {2, MVT::v16i64, 16}, // (load 32i64 and) deinterleave into 2 x 16i64
5555       {2, MVT::v32i64, 32}, // (load 64i64 and) deinterleave into 2 x 32i64
5556 
5557       {3, MVT::v2i8, 3},   // (load 6i8 and) deinterleave into 3 x 2i8
5558       {3, MVT::v4i8, 3},   // (load 12i8 and) deinterleave into 3 x 4i8
5559       {3, MVT::v8i8, 6},   // (load 24i8 and) deinterleave into 3 x 8i8
5560       {3, MVT::v16i8, 11}, // (load 48i8 and) deinterleave into 3 x 16i8
5561       {3, MVT::v32i8, 14}, // (load 96i8 and) deinterleave into 3 x 32i8
5562 
5563       {3, MVT::v2i16, 5},   // (load 6i16 and) deinterleave into 3 x 2i16
5564       {3, MVT::v4i16, 7},   // (load 12i16 and) deinterleave into 3 x 4i16
5565       {3, MVT::v8i16, 9},   // (load 24i16 and) deinterleave into 3 x 8i16
5566       {3, MVT::v16i16, 28}, // (load 48i16 and) deinterleave into 3 x 16i16
5567       {3, MVT::v32i16, 56}, // (load 96i16 and) deinterleave into 3 x 32i16
5568 
5569       {3, MVT::v2i32, 3},   // (load 6i32 and) deinterleave into 3 x 2i32
5570       {3, MVT::v4i32, 3},   // (load 12i32 and) deinterleave into 3 x 4i32
5571       {3, MVT::v8i32, 7},   // (load 24i32 and) deinterleave into 3 x 8i32
5572       {3, MVT::v16i32, 14}, // (load 48i32 and) deinterleave into 3 x 16i32
5573       {3, MVT::v32i32, 32}, // (load 96i32 and) deinterleave into 3 x 32i32
5574 
5575       {3, MVT::v2i64, 1},   // (load 6i64 and) deinterleave into 3 x 2i64
5576       {3, MVT::v4i64, 5},   // (load 12i64 and) deinterleave into 3 x 4i64
5577       {3, MVT::v8i64, 10},  // (load 24i64 and) deinterleave into 3 x 8i64
5578       {3, MVT::v16i64, 20}, // (load 48i64 and) deinterleave into 3 x 16i64
5579 
5580       {4, MVT::v2i8, 4},   // (load 8i8 and) deinterleave into 4 x 2i8
5581       {4, MVT::v4i8, 4},   // (load 16i8 and) deinterleave into 4 x 4i8
5582       {4, MVT::v8i8, 12},  // (load 32i8 and) deinterleave into 4 x 8i8
5583       {4, MVT::v16i8, 24}, // (load 64i8 and) deinterleave into 4 x 16i8
5584       {4, MVT::v32i8, 56}, // (load 128i8 and) deinterleave into 4 x 32i8
5585 
5586       {4, MVT::v2i16, 6},    // (load 8i16 and) deinterleave into 4 x 2i16
5587       {4, MVT::v4i16, 17},   // (load 16i16 and) deinterleave into 4 x 4i16
5588       {4, MVT::v8i16, 33},   // (load 32i16 and) deinterleave into 4 x 8i16
5589       {4, MVT::v16i16, 75},  // (load 64i16 and) deinterleave into 4 x 16i16
5590       {4, MVT::v32i16, 150}, // (load 128i16 and) deinterleave into 4 x 32i16
5591 
5592       {4, MVT::v2i32, 4},   // (load 8i32 and) deinterleave into 4 x 2i32
5593       {4, MVT::v4i32, 8},   // (load 16i32 and) deinterleave into 4 x 4i32
5594       {4, MVT::v8i32, 16},  // (load 32i32 and) deinterleave into 4 x 8i32
5595       {4, MVT::v16i32, 32}, // (load 64i32 and) deinterleave into 4 x 16i32
5596       {4, MVT::v32i32, 68}, // (load 128i32 and) deinterleave into 4 x 32i32
5597 
5598       {4, MVT::v2i64, 6},  // (load 8i64 and) deinterleave into 4 x 2i64
5599       {4, MVT::v4i64, 8},  // (load 16i64 and) deinterleave into 4 x 4i64
5600       {4, MVT::v8i64, 20}, // (load 32i64 and) deinterleave into 4 x 8i64
5601       {4, MVT::v16i64, 40}, // (load 64i64 and) deinterleave into 4 x 16i64
5602 
5603       {6, MVT::v2i8, 6},   // (load 12i8 and) deinterleave into 6 x 2i8
5604       {6, MVT::v4i8, 14},  // (load 24i8 and) deinterleave into 6 x 4i8
5605       {6, MVT::v8i8, 18},  // (load 48i8 and) deinterleave into 6 x 8i8
5606       {6, MVT::v16i8, 43}, // (load 96i8 and) deinterleave into 6 x 16i8
5607       {6, MVT::v32i8, 82}, // (load 192i8 and) deinterleave into 6 x 32i8
5608 
5609       {6, MVT::v2i16, 13},   // (load 12i16 and) deinterleave into 6 x 2i16
5610       {6, MVT::v4i16, 9},    // (load 24i16 and) deinterleave into 6 x 4i16
5611       {6, MVT::v8i16, 39},   // (load 48i16 and) deinterleave into 6 x 8i16
5612       {6, MVT::v16i16, 106}, // (load 96i16 and) deinterleave into 6 x 16i16
5613       {6, MVT::v32i16, 212}, // (load 192i16 and) deinterleave into 6 x 32i16
5614 
5615       {6, MVT::v2i32, 6},   // (load 12i32 and) deinterleave into 6 x 2i32
5616       {6, MVT::v4i32, 15},  // (load 24i32 and) deinterleave into 6 x 4i32
5617       {6, MVT::v8i32, 31},  // (load 48i32 and) deinterleave into 6 x 8i32
5618       {6, MVT::v16i32, 64}, // (load 96i32 and) deinterleave into 6 x 16i32
5619 
5620       {6, MVT::v2i64, 6},  // (load 12i64 and) deinterleave into 6 x 2i64
5621       {6, MVT::v4i64, 18}, // (load 24i64 and) deinterleave into 6 x 4i64
5622       {6, MVT::v8i64, 36}, // (load 48i64 and) deinterleave into 6 x 8i64
5623 
5624       {8, MVT::v8i32, 40} // (load 64i32 and) deinterleave into 8 x 8i32
5625   };
5626 
5627   static const CostTblEntry SSSE3InterleavedLoadTbl[] = {
5628       {2, MVT::v4i16, 2},   // (load 8i16 and) deinterleave into 2 x 4i16
5629   };
5630 
5631   static const CostTblEntry SSE2InterleavedLoadTbl[] = {
5632       {2, MVT::v2i16, 2},   // (load 4i16 and) deinterleave into 2 x 2i16
5633       {2, MVT::v4i16, 7},   // (load 8i16 and) deinterleave into 2 x 4i16
5634 
5635       {2, MVT::v2i32, 2},   // (load 4i32 and) deinterleave into 2 x 2i32
5636       {2, MVT::v4i32, 2},   // (load 8i32 and) deinterleave into 2 x 4i32
5637 
5638       {2, MVT::v2i64, 2},   // (load 4i64 and) deinterleave into 2 x 2i64
5639   };
5640 
5641   static const CostTblEntry AVX2InterleavedStoreTbl[] = {
5642       {2, MVT::v16i8, 3}, // interleave 2 x 16i8 into 32i8 (and store)
5643       {2, MVT::v32i8, 4}, // interleave 2 x 32i8 into 64i8 (and store)
5644 
5645       {2, MVT::v8i16, 3},  // interleave 2 x 8i16 into 16i16 (and store)
5646       {2, MVT::v16i16, 4}, // interleave 2 x 16i16 into 32i16 (and store)
5647       {2, MVT::v32i16, 8}, // interleave 2 x 32i16 into 64i16 (and store)
5648 
5649       {2, MVT::v4i32, 2},   // interleave 2 x 4i32 into 8i32 (and store)
5650       {2, MVT::v8i32, 4},   // interleave 2 x 8i32 into 16i32 (and store)
5651       {2, MVT::v16i32, 8},  // interleave 2 x 16i32 into 32i32 (and store)
5652       {2, MVT::v32i32, 16}, // interleave 2 x 32i32 into 64i32 (and store)
5653 
5654       {2, MVT::v2i64, 2},   // interleave 2 x 2i64 into 4i64 (and store)
5655       {2, MVT::v4i64, 4},   // interleave 2 x 4i64 into 8i64 (and store)
5656       {2, MVT::v8i64, 8},   // interleave 2 x 8i64 into 16i64 (and store)
5657       {2, MVT::v16i64, 16}, // interleave 2 x 16i64 into 32i64 (and store)
5658       {2, MVT::v32i64, 32}, // interleave 2 x 32i64 into 64i64 (and store)
5659 
5660       {3, MVT::v2i8, 4},   // interleave 3 x 2i8 into 6i8 (and store)
5661       {3, MVT::v4i8, 4},   // interleave 3 x 4i8 into 12i8 (and store)
5662       {3, MVT::v8i8, 6},   // interleave 3 x 8i8 into 24i8 (and store)
5663       {3, MVT::v16i8, 11}, // interleave 3 x 16i8 into 48i8 (and store)
5664       {3, MVT::v32i8, 13}, // interleave 3 x 32i8 into 96i8 (and store)
5665 
5666       {3, MVT::v2i16, 4},   // interleave 3 x 2i16 into 6i16 (and store)
5667       {3, MVT::v4i16, 6},   // interleave 3 x 4i16 into 12i16 (and store)
5668       {3, MVT::v8i16, 12},  // interleave 3 x 8i16 into 24i16 (and store)
5669       {3, MVT::v16i16, 27}, // interleave 3 x 16i16 into 48i16 (and store)
5670       {3, MVT::v32i16, 54}, // interleave 3 x 32i16 into 96i16 (and store)
5671 
5672       {3, MVT::v2i32, 4},   // interleave 3 x 2i32 into 6i32 (and store)
5673       {3, MVT::v4i32, 5},   // interleave 3 x 4i32 into 12i32 (and store)
5674       {3, MVT::v8i32, 11},  // interleave 3 x 8i32 into 24i32 (and store)
5675       {3, MVT::v16i32, 22}, // interleave 3 x 16i32 into 48i32 (and store)
5676       {3, MVT::v32i32, 48}, // interleave 3 x 32i32 into 96i32 (and store)
5677 
5678       {3, MVT::v2i64, 4},   // interleave 3 x 2i64 into 6i64 (and store)
5679       {3, MVT::v4i64, 6},   // interleave 3 x 4i64 into 12i64 (and store)
5680       {3, MVT::v8i64, 12},  // interleave 3 x 8i64 into 24i64 (and store)
5681       {3, MVT::v16i64, 24}, // interleave 3 x 16i64 into 48i64 (and store)
5682 
5683       {4, MVT::v2i8, 4},   // interleave 4 x 2i8 into 8i8 (and store)
5684       {4, MVT::v4i8, 4},   // interleave 4 x 4i8 into 16i8 (and store)
5685       {4, MVT::v8i8, 4},   // interleave 4 x 8i8 into 32i8 (and store)
5686       {4, MVT::v16i8, 8},  // interleave 4 x 16i8 into 64i8 (and store)
5687       {4, MVT::v32i8, 12}, // interleave 4 x 32i8 into 128i8 (and store)
5688 
5689       {4, MVT::v2i16, 2},   // interleave 4 x 2i16 into 8i16 (and store)
5690       {4, MVT::v4i16, 6},   // interleave 4 x 4i16 into 16i16 (and store)
5691       {4, MVT::v8i16, 10},  // interleave 4 x 8i16 into 32i16 (and store)
5692       {4, MVT::v16i16, 32}, // interleave 4 x 16i16 into 64i16 (and store)
5693       {4, MVT::v32i16, 64}, // interleave 4 x 32i16 into 128i16 (and store)
5694 
5695       {4, MVT::v2i32, 5},   // interleave 4 x 2i32 into 8i32 (and store)
5696       {4, MVT::v4i32, 6},   // interleave 4 x 4i32 into 16i32 (and store)
5697       {4, MVT::v8i32, 16},  // interleave 4 x 8i32 into 32i32 (and store)
5698       {4, MVT::v16i32, 32}, // interleave 4 x 16i32 into 64i32 (and store)
5699       {4, MVT::v32i32, 64}, // interleave 4 x 32i32 into 128i32 (and store)
5700 
5701       {4, MVT::v2i64, 6},  // interleave 4 x 2i64 into 8i64 (and store)
5702       {4, MVT::v4i64, 8},  // interleave 4 x 4i64 into 16i64 (and store)
5703       {4, MVT::v8i64, 20}, // interleave 4 x 8i64 into 32i64 (and store)
5704       {4, MVT::v16i64, 40}, // interleave 4 x 16i64 into 64i64 (and store)
5705 
5706       {6, MVT::v2i8, 7},   // interleave 6 x 2i8 into 12i8 (and store)
5707       {6, MVT::v4i8, 9},   // interleave 6 x 4i8 into 24i8 (and store)
5708       {6, MVT::v8i8, 16},  // interleave 6 x 8i8 into 48i8 (and store)
5709       {6, MVT::v16i8, 27}, // interleave 6 x 16i8 into 96i8 (and store)
5710       {6, MVT::v32i8, 90}, // interleave 6 x 32i8 into 192i8 (and store)
5711 
5712       {6, MVT::v2i16, 10},  // interleave 6 x 2i16 into 12i16 (and store)
5713       {6, MVT::v4i16, 15},  // interleave 6 x 4i16 into 24i16 (and store)
5714       {6, MVT::v8i16, 21},  // interleave 6 x 8i16 into 48i16 (and store)
5715       {6, MVT::v16i16, 58}, // interleave 6 x 16i16 into 96i16 (and store)
5716       {6, MVT::v32i16, 90}, // interleave 6 x 32i16 into 192i16 (and store)
5717 
5718       {6, MVT::v2i32, 9},   // interleave 6 x 2i32 into 12i32 (and store)
5719       {6, MVT::v4i32, 12},  // interleave 6 x 4i32 into 24i32 (and store)
5720       {6, MVT::v8i32, 33},  // interleave 6 x 8i32 into 48i32 (and store)
5721       {6, MVT::v16i32, 66}, // interleave 6 x 16i32 into 96i32 (and store)
5722 
5723       {6, MVT::v2i64, 8},  // interleave 6 x 2i64 into 12i64 (and store)
5724       {6, MVT::v4i64, 15}, // interleave 6 x 4i64 into 24i64 (and store)
5725       {6, MVT::v8i64, 30}, // interleave 6 x 8i64 into 48i64 (and store)
5726   };
5727 
5728   static const CostTblEntry SSE2InterleavedStoreTbl[] = {
5729       {2, MVT::v2i8, 1},   // interleave 2 x 2i8 into 4i8 (and store)
5730       {2, MVT::v4i8, 1},   // interleave 2 x 4i8 into 8i8 (and store)
5731       {2, MVT::v8i8, 1},   // interleave 2 x 8i8 into 16i8 (and store)
5732 
5733       {2, MVT::v2i16, 1},  // interleave 2 x 2i16 into 4i16 (and store)
5734       {2, MVT::v4i16, 1},  // interleave 2 x 4i16 into 8i16 (and store)
5735 
5736       {2, MVT::v2i32, 1},  // interleave 2 x 2i32 into 4i32 (and store)
5737   };
5738 
5739   if (Opcode == Instruction::Load) {
5740     auto GetDiscountedCost = [Factor, NumMembers = Indices.size(),
5741                               MemOpCosts](const CostTblEntry *Entry) {
5742       // NOTE: this is just an approximation!
5743       //       It can over/under -estimate the cost!
5744       return MemOpCosts + divideCeil(NumMembers * Entry->Cost, Factor);
5745     };
5746 
5747     if (ST->hasAVX2())
5748       if (const auto *Entry = CostTableLookup(AVX2InterleavedLoadTbl, Factor,
5749                                               ETy.getSimpleVT()))
5750         return GetDiscountedCost(Entry);
5751 
5752     if (ST->hasSSSE3())
5753       if (const auto *Entry = CostTableLookup(SSSE3InterleavedLoadTbl, Factor,
5754                                               ETy.getSimpleVT()))
5755         return GetDiscountedCost(Entry);
5756 
5757     if (ST->hasSSE2())
5758       if (const auto *Entry = CostTableLookup(SSE2InterleavedLoadTbl, Factor,
5759                                               ETy.getSimpleVT()))
5760         return GetDiscountedCost(Entry);
5761   } else {
5762     assert(Opcode == Instruction::Store &&
5763            "Expected Store Instruction at this point");
5764     assert((!Indices.size() || Indices.size() == Factor) &&
5765            "Interleaved store only supports fully-interleaved groups.");
5766     if (ST->hasAVX2())
5767       if (const auto *Entry = CostTableLookup(AVX2InterleavedStoreTbl, Factor,
5768                                               ETy.getSimpleVT()))
5769         return MemOpCosts + Entry->Cost;
5770 
5771     if (ST->hasSSE2())
5772       if (const auto *Entry = CostTableLookup(SSE2InterleavedStoreTbl, Factor,
5773                                               ETy.getSimpleVT()))
5774         return MemOpCosts + Entry->Cost;
5775   }
5776 
5777   return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
5778                                            Alignment, AddressSpace, CostKind,
5779                                            UseMaskForCond, UseMaskForGaps);
5780 }
5781