//===-- AArch64TargetTransformInfo.cpp - AArch64 specific TTI -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AArch64TargetTransformInfo.h"
#include "AArch64ExpandImm.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include <algorithm>
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "aarch64tti"

static cl::opt<bool> EnableFalkorHWPFUnrollFix("enable-falkor-hwpf-unroll-fix",
                                               cl::init(true), cl::Hidden);

bool AArch64TTIImpl::areInlineCompatible(const Function *Caller,
                                         const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();

  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // Inline a callee if its target-features are a subset of the caller's
  // target-features.
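  // For example, a callee compiled with +sve cannot be inlined into a caller
  // built without +sve, since the subset check below would fail.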
  return (CallerBits & CalleeBits) == CalleeBits;
}

/// Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
int AArch64TTIImpl::getIntImmCost(int64_t Val) {
  // Check if the immediate can be encoded within an instruction.
  if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, 64))
    return 0;

  if (Val < 0)
    Val = ~Val;

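  // For example, 0x1234 takes a single MOVZ (cost 1), while a 48-bit pattern
  // such as 0x123456789ABC takes a MOVZ plus two MOVKs (cost 3).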
  // Calculate how many moves we will need to materialize this constant.
  SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn;
  AArch64_IMM::expandMOVImm(Val, 64, Insn);
  return Insn.size();
}

/// Calculate the cost of materializing the given constant.
int AArch64TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
                                  TTI::TargetCostKind CostKind) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Sign-extend all constants to a multiple of 64-bit.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);
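  // For example, an i96 immediate is sign-extended to 128 bits and then
  // costed below as two 64-bit chunks.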

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1, Cost);
}

int AArch64TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                      const APInt &Imm, Type *Ty,
                                      TTI::TargetCostKind CostKind,
                                      Instruction *Inst) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

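  // An immediate cheap enough to rematerialize (at most one instruction per
  // 64-bit chunk) is reported as free so constant hoisting leaves it in place.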
  if (Idx == ImmIdx) {
    int NumConstants = (BitSize + 63) / 64;
    int Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<int>(TTI::TCC_Free)
               : Cost;
  }
  return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
}

int AArch64TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                        const APInt &Imm, Type *Ty,
                                        TTI::TargetCostKind CostKind) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  // Most (all?) AArch64 intrinsics do not support folding immediates into the
  // selected instruction, so we compute the materialization cost for the
  // immediate directly.
  if (IID >= Intrinsic::aarch64_addg && IID <= Intrinsic::aarch64_udiv)
    return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if (Idx == 1) {
      int NumConstants = (BitSize + 63) / 64;
      int Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
      return (Cost <= NumConstants * TTI::TCC_Basic)
                 ? static_cast<int>(TTI::TCC_Free)
                 : Cost;
    }
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_gc_statepoint:
    if ((Idx < 5) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
}

TargetTransformInfo::PopcntSupportKind
AArch64TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (TyWidth == 32 || TyWidth == 64)
    return TTI::PSK_FastHardware;
  // TODO: AArch64TargetLowering::LowerCTPOP() supports 128bit popcount.
  return TTI::PSK_Software;
}

InstructionCost
AArch64TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                      TTI::TargetCostKind CostKind) {
  auto *RetTy = ICA.getReturnType();
  switch (ICA.getID()) {
  case Intrinsic::umin:
  case Intrinsic::umax: {
    auto LT = TLI->getTypeLegalizationCost(DL, RetTy);
    // umin(x,y) -> sub(x,usubsat(x,y))
    // umax(x,y) -> add(x,usubsat(y,x))
    if (LT.second == MVT::v2i64)
      return LT.first * 2;
    LLVM_FALLTHROUGH;
  }
  case Intrinsic::smin:
  case Intrinsic::smax: {
    static const auto ValidMinMaxTys = {MVT::v8i8,  MVT::v16i8, MVT::v4i16,
                                        MVT::v8i16, MVT::v2i32, MVT::v4i32};
    auto LT = TLI->getTypeLegalizationCost(DL, RetTy);
    if (any_of(ValidMinMaxTys, [&LT](MVT M) { return M == LT.second; }))
      return LT.first;
    break;
  }
  case Intrinsic::sadd_sat:
  case Intrinsic::ssub_sat:
  case Intrinsic::uadd_sat:
  case Intrinsic::usub_sat: {
    static const auto ValidSatTys = {MVT::v8i8,  MVT::v16i8, MVT::v4i16,
                                     MVT::v8i16, MVT::v2i32, MVT::v4i32,
                                     MVT::v2i64};
    auto LT = TLI->getTypeLegalizationCost(DL, RetTy);
    // This is a base cost of 1 for the vadd, plus 3 extract shifts if we
    // need to extend the type, as it uses shr(qadd(shl, shl)).
    unsigned Instrs =
        LT.second.getScalarSizeInBits() == RetTy->getScalarSizeInBits() ? 1 : 4;
    if (any_of(ValidSatTys, [&LT](MVT M) { return M == LT.second; }))
      return LT.first * Instrs;
    break;
  }
  case Intrinsic::abs: {
    static const auto ValidAbsTys = {MVT::v8i8,  MVT::v16i8, MVT::v4i16,
                                     MVT::v8i16, MVT::v2i32, MVT::v4i32,
                                     MVT::v2i64};
    auto LT = TLI->getTypeLegalizationCost(DL, RetTy);
    if (any_of(ValidAbsTys, [&LT](MVT M) { return M == LT.second; }))
      return LT.first;
    break;
  }
  case Intrinsic::experimental_stepvector: {
    unsigned Cost = 1; // Cost of the `index' instruction
    auto LT = TLI->getTypeLegalizationCost(DL, RetTy);
    // Legalisation of illegal vectors involves an `index' instruction plus
    // (LT.first - 1) vector adds.
    if (LT.first > 1) {
      Type *LegalVTy = EVT(LT.second).getTypeForEVT(RetTy->getContext());
      unsigned AddCost =
          getArithmeticInstrCost(Instruction::Add, LegalVTy, CostKind);
      Cost += AddCost * (LT.first - 1);
    }
    return Cost;
  }
  default:
    break;
  }
  return BaseT::getIntrinsicInstrCost(ICA, CostKind);
}

bool AArch64TTIImpl::isWideningInstruction(Type *DstTy, unsigned Opcode,
                                           ArrayRef<const Value *> Args) {

  // A helper that returns a vector type from the given type. The number of
  // elements in type Ty determines the vector width.
  auto toVectorTy = [&](Type *ArgTy) {
    return VectorType::get(ArgTy->getScalarType(),
                           cast<VectorType>(DstTy)->getElementCount());
  };

  // Exit early if DstTy is not a vector type whose elements are at least
  // 16-bits wide.
  if (!DstTy->isVectorTy() || DstTy->getScalarSizeInBits() < 16)
    return false;

  // Determine if the operation has a widening variant. We consider both the
  // "long" (e.g., usubl) and "wide" (e.g., usubw) versions of the
  // instructions.
  //
  // TODO: Add additional widening operations (e.g., mul, shl, etc.) once we
  //       verify that their extending operands are eliminated during code
  //       generation.
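  // For example (NEON), the extend below folds into the add:
  //   %e = zext <8 x i8> %y to <8 x i16>
  //   %a = add <8 x i16> %x, %e   ; selected as uaddw.8h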
  switch (Opcode) {
  case Instruction::Add: // UADDL(2), SADDL(2), UADDW(2), SADDW(2).
  case Instruction::Sub: // USUBL(2), SSUBL(2), USUBW(2), SSUBW(2).
    break;
  default:
    return false;
  }

  // To be a widening instruction (either the "wide" or "long" version), the
  // second operand must be a sign- or zero-extend with a single user. We only
  // consider extends having a single user because they may otherwise not be
  // eliminated.
  if (Args.size() != 2 ||
      (!isa<SExtInst>(Args[1]) && !isa<ZExtInst>(Args[1])) ||
      !Args[1]->hasOneUse())
    return false;
  auto *Extend = cast<CastInst>(Args[1]);

  // Legalize the destination type and ensure it can be used in a widening
  // operation.
  auto DstTyL = TLI->getTypeLegalizationCost(DL, DstTy);
  unsigned DstElTySize = DstTyL.second.getScalarSizeInBits();
  if (!DstTyL.second.isVector() || DstElTySize != DstTy->getScalarSizeInBits())
    return false;

  // Legalize the source type and ensure it can be used in a widening
  // operation.
  auto *SrcTy = toVectorTy(Extend->getSrcTy());
  auto SrcTyL = TLI->getTypeLegalizationCost(DL, SrcTy);
  unsigned SrcElTySize = SrcTyL.second.getScalarSizeInBits();
  if (!SrcTyL.second.isVector() || SrcElTySize != SrcTy->getScalarSizeInBits())
    return false;

  // Get the total number of vector elements in the legalized types.
  unsigned NumDstEls = DstTyL.first * DstTyL.second.getVectorMinNumElements();
  unsigned NumSrcEls = SrcTyL.first * SrcTyL.second.getVectorMinNumElements();

  // Return true if the legalized types have the same number of vector elements
  // and the destination element type size is twice that of the source type.
  return NumDstEls == NumSrcEls && 2 * SrcElTySize == DstElTySize;
}

int AArch64TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                     TTI::CastContextHint CCH,
                                     TTI::TargetCostKind CostKind,
                                     const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // If the cast is observable, and it is used by a widening instruction (e.g.,
  // uaddl, saddw, etc.), it may be free.
  if (I && I->hasOneUse()) {
    auto *SingleUser = cast<Instruction>(*I->user_begin());
    SmallVector<const Value *, 4> Operands(SingleUser->operand_values());
    if (isWideningInstruction(Dst, SingleUser->getOpcode(), Operands)) {
      // If the cast is the second operand, it is free. We will generate either
      // a "wide" or "long" version of the widening instruction.
      if (I == SingleUser->getOperand(1))
        return 0;
      // If the cast is not the second operand, it will be free if it looks the
      // same as the second operand. In this case, we will generate a "long"
      // version of the widening instruction.
      if (auto *Cast = dyn_cast<CastInst>(SingleUser->getOperand(1)))
        if (I->getOpcode() == unsigned(Cast->getOpcode()) &&
            cast<CastInst>(I)->getSrcTy() == Cast->getSrcTy())
          return 0;
    }
  }

  // TODO: Allow non-throughput costs that aren't binary.
  auto AdjustCost = [&CostKind](int Cost) {
    if (CostKind != TTI::TCK_RecipThroughput)
      return Cost == 0 ? 0 : 1;
    return Cost;
  };

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return AdjustCost(
        BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));

  static const TypeConversionCostTblEntry
  ConversionTbl[] = {
    { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32,  1 },
    { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64,  0 },
    { ISD::TRUNCATE, MVT::v8i8,  MVT::v8i32,  3 },
    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 6 },

    // Truncations on nxvmiN
    { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i16, 1 },
    { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i32, 1 },
    { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i64, 1 },
    { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i16, 1 },
    { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i32, 1 },
    { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i64, 2 },
    { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i16, 1 },
    { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i32, 3 },
    { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i64, 5 },
    { ISD::TRUNCATE, MVT::nxv2i16, MVT::nxv2i32, 1 },
    { ISD::TRUNCATE, MVT::nxv2i32, MVT::nxv2i64, 1 },
    { ISD::TRUNCATE, MVT::nxv4i16, MVT::nxv4i32, 1 },
    { ISD::TRUNCATE, MVT::nxv4i32, MVT::nxv4i64, 2 },
    { ISD::TRUNCATE, MVT::nxv8i16, MVT::nxv8i32, 3 },
    { ISD::TRUNCATE, MVT::nxv8i32, MVT::nxv8i64, 6 },

    // The number of shll instructions for the extension.
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32, 2 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16, 2 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16, 2 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i8,  7 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i8,  7 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i16, 6 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i16, 6 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },

    // LowerVectorINT_TO_FP:
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },

    // Complex: to v2f32
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },

    // Complex: to v4f32
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8,  4 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8,  3 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },

    // Complex: to v8f32
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8,  10 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8,  10 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },

    // Complex: to v16f32
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },

    // Complex: to v2f64
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },

    // LowerVectorFP_TO_INT
    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f32, 1 },
    { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },

    // Complex, from v2f32: legal type is v2i32 (no cost) or v2i64 (1 ext).
    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 2 },
    { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 1 },
    { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 2 },
    { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f32, 1 },

    // Complex, from v4f32: legal type is v4i16, 1 narrowing => ~2
    { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_SINT, MVT::v4i8,  MVT::v4f32, 2 },
    { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_UINT, MVT::v4i8,  MVT::v4f32, 2 },

    // Lowering scalable
    { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f32, 1 },
    { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f32, 1 },
    { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f64, 1 },
    { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f32, 1 },
    { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f32, 1 },
    { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f64, 1 },

    // Complex, from nxv2f32: legal type is nxv2i32 (no cost) or nxv2i64 (1 ext)
    { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f32, 2 },
    { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f32, 1 },
    { ISD::FP_TO_SINT, MVT::nxv2i8,  MVT::nxv2f32, 1 },
    { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f32, 2 },
    { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f32, 1 },
    { ISD::FP_TO_UINT, MVT::nxv2i8,  MVT::nxv2f32, 1 },

    // Complex, from v2f64: legal type is v2i32, 1 narrowing => ~2.
    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f64, 2 },

    // Complex, from nxv2f64: legal type is nxv2i32, 1 narrowing => ~2.
    { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f64, 2 },
    { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f64, 2 },
    { ISD::FP_TO_SINT, MVT::nxv2i8,  MVT::nxv2f64, 2 },
    { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f64, 2 },
    { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f64, 2 },
    { ISD::FP_TO_UINT, MVT::nxv2i8,  MVT::nxv2f64, 2 },

    // Complex, from nxv4f32: legal type is nxv4i16, 1 narrowing => ~2
    { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f32, 2 },
    { ISD::FP_TO_SINT, MVT::nxv4i8,  MVT::nxv4f32, 2 },
    { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f32, 2 },
    { ISD::FP_TO_UINT, MVT::nxv4i8,  MVT::nxv4f32, 2 },

    // Complex, from nxv8f64: legal type is nxv8i32, 1 narrowing => ~2.
    { ISD::FP_TO_SINT, MVT::nxv8i32, MVT::nxv8f64, 2 },
    { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f64, 2 },
    { ISD::FP_TO_SINT, MVT::nxv8i8,  MVT::nxv8f64, 2 },
    { ISD::FP_TO_UINT, MVT::nxv8i32, MVT::nxv8f64, 2 },
    { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f64, 2 },
    { ISD::FP_TO_UINT, MVT::nxv8i8,  MVT::nxv8f64, 2 },

    // Complex, from nxv4f64: legal type is nxv4i32, 1 narrowing => ~2.
    { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f64, 2 },
    { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f64, 2 },
    { ISD::FP_TO_SINT, MVT::nxv4i8,  MVT::nxv4f64, 2 },
    { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f64, 2 },
    { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f64, 2 },
    { ISD::FP_TO_UINT, MVT::nxv4i8,  MVT::nxv4f64, 2 },

    // Complex, from nxv8f32: legal type is nxv8i32 (no cost) or nxv8i64 (1 ext).
    { ISD::FP_TO_SINT, MVT::nxv8i64, MVT::nxv8f32, 2 },
    { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f32, 3 },
    { ISD::FP_TO_SINT, MVT::nxv8i8,  MVT::nxv8f32, 1 },
    { ISD::FP_TO_UINT, MVT::nxv8i64, MVT::nxv8f32, 2 },
    { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f32, 1 },
    { ISD::FP_TO_UINT, MVT::nxv8i8,  MVT::nxv8f32, 1 },

    // Truncate from nxvmf32 to nxvmf16.
    { ISD::FP_ROUND, MVT::nxv2f16, MVT::nxv2f32, 1 },
    { ISD::FP_ROUND, MVT::nxv4f16, MVT::nxv4f32, 1 },
    { ISD::FP_ROUND, MVT::nxv8f16, MVT::nxv8f32, 3 },

    // Truncate from nxvmf64 to nxvmf16.
    { ISD::FP_ROUND, MVT::nxv2f16, MVT::nxv2f64, 1 },
    { ISD::FP_ROUND, MVT::nxv4f16, MVT::nxv4f64, 3 },
    { ISD::FP_ROUND, MVT::nxv8f16, MVT::nxv8f64, 7 },

    // Truncate from nxvmf64 to nxvmf32.
    { ISD::FP_ROUND, MVT::nxv2f32, MVT::nxv2f64, 1 },
    { ISD::FP_ROUND, MVT::nxv4f32, MVT::nxv4f64, 3 },
    { ISD::FP_ROUND, MVT::nxv8f32, MVT::nxv8f64, 6 },

    // Extend from nxvmf16 to nxvmf32.
    { ISD::FP_EXTEND, MVT::nxv2f32, MVT::nxv2f16, 1},
    { ISD::FP_EXTEND, MVT::nxv4f32, MVT::nxv4f16, 1},
    { ISD::FP_EXTEND, MVT::nxv8f32, MVT::nxv8f16, 2},

    // Extend from nxvmf16 to nxvmf64.
    { ISD::FP_EXTEND, MVT::nxv2f64, MVT::nxv2f16, 1},
    { ISD::FP_EXTEND, MVT::nxv4f64, MVT::nxv4f16, 2},
    { ISD::FP_EXTEND, MVT::nxv8f64, MVT::nxv8f16, 4},

    // Extend from nxvmf32 to nxvmf64.
    { ISD::FP_EXTEND, MVT::nxv2f64, MVT::nxv2f32, 1},
    { ISD::FP_EXTEND, MVT::nxv4f64, MVT::nxv4f32, 2},
    { ISD::FP_EXTEND, MVT::nxv8f64, MVT::nxv8f32, 6},
  };

  if (const auto *Entry = ConvertCostTableLookup(ConversionTbl, ISD,
                                                 DstTy.getSimpleVT(),
                                                 SrcTy.getSimpleVT()))
    return AdjustCost(Entry->Cost);

  return AdjustCost(
      BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
}

int AArch64TTIImpl::getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                             VectorType *VecTy,
                                             unsigned Index) {

  // Make sure we were given a valid extend opcode.
  assert((Opcode == Instruction::SExt || Opcode == Instruction::ZExt) &&
         "Invalid opcode");

  // We are extending an element we extract from a vector, so the source type
  // of the extend is the element type of the vector.
  auto *Src = VecTy->getElementType();

  // Sign- and zero-extends are for integer types only.
  assert(isa<IntegerType>(Dst) && isa<IntegerType>(Src) && "Invalid type");

  // Get the cost for the extract. We compute the cost (if any) for the extend
  // below.
  auto Cost = getVectorInstrCost(Instruction::ExtractElement, VecTy, Index);

  // Legalize the types.
  auto VecLT = TLI->getTypeLegalizationCost(DL, VecTy);
  auto DstVT = TLI->getValueType(DL, Dst);
  auto SrcVT = TLI->getValueType(DL, Src);
  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;

  // If the resulting type is still a vector and the destination type is legal,
  // we may get the extension for free. If not, get the default cost for the
  // extend.
  if (!VecLT.second.isVector() || !TLI->isTypeLegal(DstVT))
    return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None,
                                   CostKind);

  // The destination type should be larger than the element type. If not, get
  // the default cost for the extend.
  if (DstVT.getFixedSizeInBits() < SrcVT.getFixedSizeInBits())
    return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None,
                                   CostKind);

  switch (Opcode) {
  default:
    llvm_unreachable("Opcode should be either SExt or ZExt");

  // For sign-extends, we only need a smov, which performs the extension
  // automatically.
  case Instruction::SExt:
    return Cost;

  // For zero-extends, the extend is performed automatically by a umov unless
  // the destination type is i64 and the element type is i8 or i16.
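  // For example, 'umov w0, v0.s[1]' zero-fills the upper 32 bits of x0, so a
  // zext of an i32 lane to i64 needs no extra instruction.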
  case Instruction::ZExt:
    if (DstVT.getSizeInBits() != 64u || SrcVT.getSizeInBits() == 32u)
      return Cost;
  }

  // If we are unable to perform the extend for free, get the default cost.
  return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None,
                                 CostKind);
}

unsigned AArch64TTIImpl::getCFInstrCost(unsigned Opcode,
                                        TTI::TargetCostKind CostKind) {
  if (CostKind != TTI::TCK_RecipThroughput)
    return Opcode == Instruction::PHI ? 0 : 1;
  assert(CostKind == TTI::TCK_RecipThroughput && "unexpected CostKind");
  // Branches are assumed to be predicted.
  return 0;
}

int AArch64TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                       unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Index != -1U) {
    // Legalize the type.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // The element at index zero is already inside the vector.
    if (Index == 0)
      return 0;
  }

  // All other insert/extracts cost this much.
  return ST->getVectorInsertExtractBaseCost();
}

int AArch64TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueKind Opd1Info,
    TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
    const Instruction *CxtI) {
  // TODO: Handle more cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
                                         Opd2Info, Opd1PropInfo,
                                         Opd2PropInfo, Args, CxtI);

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  // If the instruction is a widening instruction (e.g., uaddl, saddw, etc.),
  // add in the widening overhead specified by the sub-target. Since the
  // extends feeding widening instructions are performed automatically, they
  // aren't present in the generated code and have a zero cost. By adding a
  // widening overhead here, we attach the total cost of the combined operation
  // to the widening instruction.
  int Cost = 0;
  if (isWideningInstruction(Ty, Opcode, Args))
    Cost += ST->getWideningBaseCost();

  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  switch (ISD) {
  default:
    return Cost + BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
                                                Opd2Info,
                                                Opd1PropInfo, Opd2PropInfo);
  case ISD::SDIV:
    if (Opd2Info == TargetTransformInfo::OK_UniformConstantValue &&
        Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
      // On AArch64, scalar signed division by a power-of-two constant is
      // normally expanded to the sequence ADD + CMP + SELECT + SRA. The
      // OperandValue properties may not be the same as those of the previous
      // operation; conservatively assume OP_None.
      Cost += getArithmeticInstrCost(Instruction::Add, Ty, CostKind,
                                     Opd1Info, Opd2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind,
                                     Opd1Info, Opd2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      Cost += getArithmeticInstrCost(Instruction::Select, Ty, CostKind,
                                     Opd1Info, Opd2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      Cost += getArithmeticInstrCost(Instruction::AShr, Ty, CostKind,
                                     Opd1Info, Opd2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      return Cost;
    }
    LLVM_FALLTHROUGH;
  case ISD::UDIV:
    if (Opd2Info == TargetTransformInfo::OK_UniformConstantValue) {
      auto VT = TLI->getValueType(DL, Ty);
      if (TLI->isOperationLegalOrCustom(ISD::MULHU, VT)) {
        // Vector signed division by a constant is expanded to the sequence
        // MULHS + ADD/SUB + SRA + SRL + ADD, and unsigned division to
        // MULHU + SUB + SRL + ADD + SRL.
        int MulCost = getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
                                             Opd1Info, Opd2Info,
                                             TargetTransformInfo::OP_None,
                                             TargetTransformInfo::OP_None);
        int AddCost = getArithmeticInstrCost(Instruction::Add, Ty, CostKind,
                                             Opd1Info, Opd2Info,
                                             TargetTransformInfo::OP_None,
                                             TargetTransformInfo::OP_None);
        int ShrCost = getArithmeticInstrCost(Instruction::AShr, Ty, CostKind,
                                             Opd1Info, Opd2Info,
                                             TargetTransformInfo::OP_None,
                                             TargetTransformInfo::OP_None);
        return MulCost * 2 + AddCost * 2 + ShrCost * 2 + 1;
      }
    }

    Cost += BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
                                          Opd2Info,
                                          Opd1PropInfo, Opd2PropInfo);
    if (Ty->isVectorTy()) {
      // On AArch64, vector divisions are not supported natively and are
      // expanded into scalar divisions of each pair of elements.
      Cost += getArithmeticInstrCost(Instruction::ExtractElement, Ty, CostKind,
                                     Opd1Info, Opd2Info, Opd1PropInfo,
                                     Opd2PropInfo);
      Cost += getArithmeticInstrCost(Instruction::InsertElement, Ty, CostKind,
                                     Opd1Info, Opd2Info, Opd1PropInfo,
                                     Opd2PropInfo);
      // TODO: if one of the arguments is scalar, then it's not necessary to
      // double the cost of handling the vector elements.
      Cost += Cost;
    }
    return Cost;

  case ISD::MUL:
    if (LT.second != MVT::v2i64)
      return (Cost + 1) * LT.first;
    // Since we do not have a MUL.2d instruction, a mul <2 x i64> is expensive
    // as elements are extracted from the vectors and the muls scalarized.
    // As getScalarizationOverhead is a bit too pessimistic, we estimate the
    // cost for a i64 vector directly here, which is:
    // - four i64 extracts,
    // - two i64 inserts, and
    // - two muls.
    // So, for a v2i64 with LT.first = 1 the cost is 8, and for a v4i64 with
    // LT.first = 2 the cost is 16.
    return LT.first * 8;
  case ISD::ADD:
  case ISD::XOR:
  case ISD::OR:
  case ISD::AND:
    // These nodes are marked as 'custom' for combining purposes only.
    // We know that they are legal. See LowerAdd in ISelLowering.
    return (Cost + 1) * LT.first;

  case ISD::FADD:
    // These nodes are marked as 'custom' just to lower them to SVE.
    // We know said lowering will incur no additional cost.
    if (isa<FixedVectorType>(Ty) && !Ty->getScalarType()->isFP128Ty())
      return (Cost + 2) * LT.first;

    return Cost + BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
                                                Opd2Info,
                                                Opd1PropInfo, Opd2PropInfo);
  }
}

int AArch64TTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
                                              const SCEV *Ptr) {
  // Address computations in vectorized code with non-consecutive addresses will
  // likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;
  int MaxMergeDistance = 64;

  if (Ty->isVectorTy() && SE &&
      !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
    return NumVectorInstToHideOverhead;

  // In many cases the address computation is not merged into the instruction
  // addressing mode.
  return 1;
}

int AArch64TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                       Type *CondTy, CmpInst::Predicate VecPred,
                                       TTI::TargetCostKind CostKind,
                                       const Instruction *I) {
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
                                     I);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  // We don't lower some vector selects well when they are wider than the
  // register width.
  if (isa<FixedVectorType>(ValTy) && ISD == ISD::SELECT) {
    // We would need this many instructions to hide the scalarization happening.
    const int AmortizationCost = 20;

    // If VecPred is not set, check if we can get a predicate from the context
    // instruction, if its type matches the requested ValTy.
    if (VecPred == CmpInst::BAD_ICMP_PREDICATE && I && I->getType() == ValTy) {
      CmpInst::Predicate CurrentPred;
      if (match(I, m_Select(m_Cmp(CurrentPred, m_Value(), m_Value()), m_Value(),
                            m_Value())))
        VecPred = CurrentPred;
    }
    // Check if we have a compare/select chain that can be lowered using CMxx &
    // BFI pair.
    if (CmpInst::isIntPredicate(VecPred)) {
      static const auto ValidMinMaxTys = {MVT::v8i8,  MVT::v16i8, MVT::v4i16,
                                          MVT::v8i16, MVT::v2i32, MVT::v4i32,
                                          MVT::v2i64};
      auto LT = TLI->getTypeLegalizationCost(DL, ValTy);
      if (any_of(ValidMinMaxTys, [&LT](MVT M) { return M == LT.second; }))
        return LT.first;
    }

    static const TypeConversionCostTblEntry
    VectorSelectTbl[] = {
      { ISD::SELECT, MVT::v16i1, MVT::v16i16, 16 },
      { ISD::SELECT, MVT::v8i1, MVT::v8i32, 8 },
      { ISD::SELECT, MVT::v16i1, MVT::v16i32, 16 },
      { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4 * AmortizationCost },
      { ISD::SELECT, MVT::v8i1, MVT::v8i64, 8 * AmortizationCost },
      { ISD::SELECT, MVT::v16i1, MVT::v16i64, 16 * AmortizationCost }
    };

    EVT SelCondTy = TLI->getValueType(DL, CondTy);
    EVT SelValTy = TLI->getValueType(DL, ValTy);
    if (SelCondTy.isSimple() && SelValTy.isSimple()) {
      if (const auto *Entry = ConvertCostTableLookup(VectorSelectTbl, ISD,
                                                     SelCondTy.getSimpleVT(),
                                                     SelValTy.getSimpleVT()))
        return Entry->Cost;
    }
  }
  // The base case handles scalable vectors fine for now, since it treats the
  // cost as 1 * legalization cost.
  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
}

AArch64TTIImpl::TTI::MemCmpExpansionOptions
AArch64TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
  TTI::MemCmpExpansionOptions Options;
  if (ST->requiresStrictAlign()) {
    // TODO: Add cost modeling for strict align. Misaligned loads expand to
    // a bunch of instructions when strict align is enabled.
    return Options;
  }
  Options.AllowOverlappingLoads = true;
  Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
  Options.NumLoadsPerBlock = Options.MaxNumLoads;
  // TODO: Though vector loads usually perform well on AArch64, in some targets
  // they may wake up the FP unit, which raises the power consumption. Perhaps
  // they could be used with no holds barred (-O3).
  Options.LoadSizes = {8, 4, 2, 1};
  return Options;
}

unsigned AArch64TTIImpl::getGatherScatterOpCost(
    unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
    Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) {

  if (!isa<ScalableVectorType>(DataTy))
    return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                         Alignment, CostKind, I);
  auto *VT = cast<VectorType>(DataTy);
  auto LT = TLI->getTypeLegalizationCost(DL, DataTy);
  ElementCount LegalVF = LT.second.getVectorElementCount();
  Optional<unsigned> MaxNumVScale = getMaxVScale();
  assert(MaxNumVScale && "Expected valid max vscale value");

  unsigned MemOpCost =
      getMemoryOpCost(Opcode, VT->getElementType(), Alignment, 0, CostKind, I);
  unsigned MaxNumElementsPerGather =
      MaxNumVScale.getValue() * LegalVF.getKnownMinValue();
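  // Conservatively model each lane of the gather/scatter as a separate scalar
  // memory access, assuming the largest possible vscale.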
  return LT.first * MaxNumElementsPerGather * MemOpCost;
}

bool AArch64TTIImpl::useNeonVector(const Type *Ty) const {
  return isa<FixedVectorType>(Ty) && !ST->useSVEForFixedLengthVectors();
}

int AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Ty,
                                    MaybeAlign Alignment, unsigned AddressSpace,
                                    TTI::TargetCostKind CostKind,
                                    const Instruction *I) {
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return 1;

  // Type legalization can't handle structs
  if (TLI->getValueType(DL, Ty, true) == MVT::Other)
    return BaseT::getMemoryOpCost(Opcode, Ty, Alignment, AddressSpace,
                                  CostKind);

  auto LT = TLI->getTypeLegalizationCost(DL, Ty);

  if (ST->isMisaligned128StoreSlow() && Opcode == Instruction::Store &&
      LT.second.is128BitVector() && (!Alignment || *Alignment < Align(16))) {
    // Unaligned stores are extremely inefficient. We don't split all unaligned
    // 128-bit stores because of the negative impact that doing so has shown in
    // practice on inlined block copy code.
    // We make such stores expensive so that we will only vectorize if there
    // are 6 other instructions getting vectorized.
    const int AmortizationCost = 6;

    return LT.first * 2 * AmortizationCost;
  }

  if (useNeonVector(Ty) &&
      cast<VectorType>(Ty)->getElementType()->isIntegerTy(8)) {
    unsigned ProfitableNumElements;
    if (Opcode == Instruction::Store)
      // We use a custom trunc store lowering so v.4b should be profitable.
      ProfitableNumElements = 4;
    else
      // We scalarize the loads because there is no v.4b register and we would
      // have to promote the elements to v.2.
      ProfitableNumElements = 8;

    if (cast<FixedVectorType>(Ty)->getNumElements() < ProfitableNumElements) {
      unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
      unsigned NumVectorizableInstsToAmortize = NumVecElts * 2;
      // We generate 2 instructions per vector element.
      return NumVectorizableInstsToAmortize * NumVecElts * 2;
    }
  }

  return LT.first;
}

int AArch64TTIImpl::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) {
  assert(Factor >= 2 && "Invalid interleave factor");
  auto *VecVTy = cast<FixedVectorType>(VecTy);

  if (!UseMaskForCond && !UseMaskForGaps &&
      Factor <= TLI->getMaxSupportedInterleaveFactor()) {
    unsigned NumElts = VecVTy->getNumElements();
    auto *SubVecTy =
        FixedVectorType::get(VecTy->getScalarType(), NumElts / Factor);

    // ldN/stN only support legal vector types of size 64 or 128 in bits.
    // Accesses having vector types that are a multiple of 128 bits can be
    // matched to more than one ldN/stN instruction.
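    // For example, an ld2 of <16 x i8> (Factor = 2, SubVecTy = <8 x i8>) maps
    // to a single 64-bit ld2 and is costed as Factor * 1 = 2.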
    if (NumElts % Factor == 0 &&
        TLI->isLegalInterleavedAccessType(SubVecTy, DL))
      return Factor * TLI->getNumInterleavedAccesses(SubVecTy, DL);
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace, CostKind,
                                           UseMaskForCond, UseMaskForGaps);
}

int AArch64TTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) {
  int Cost = 0;
  TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
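  // Charge each live 128-bit vector one store plus one load, on the assumption
  // that it must be spilled and refilled around the call.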
  for (auto *I : Tys) {
    if (!I->isVectorTy())
      continue;
    if (I->getScalarSizeInBits() * cast<FixedVectorType>(I)->getNumElements() ==
        128)
      Cost += getMemoryOpCost(Instruction::Store, I, Align(128), 0, CostKind) +
              getMemoryOpCost(Instruction::Load, I, Align(128), 0, CostKind);
  }
  return Cost;
}

unsigned AArch64TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  return ST->getMaxInterleaveFactor();
}

// For Falkor, we want to avoid having too many strided loads in a loop since
// that can exhaust the HW prefetcher resources.  We adjust the unroller
// MaxCount preference below to attempt to ensure unrolling doesn't create too
// many strided loads.
static void
getFalkorUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                              TargetTransformInfo::UnrollingPreferences &UP) {
  enum { MaxStridedLoads = 7 };
  auto countStridedLoads = [](Loop *L, ScalarEvolution &SE) {
    int StridedLoads = 0;
    // FIXME? We could make this more precise by looking at the CFG and
    // e.g. not counting loads in each side of an if-then-else diamond.
    for (const auto BB : L->blocks()) {
      for (auto &I : *BB) {
        LoadInst *LMemI = dyn_cast<LoadInst>(&I);
        if (!LMemI)
          continue;

        Value *PtrValue = LMemI->getPointerOperand();
        if (L->isLoopInvariant(PtrValue))
          continue;

        const SCEV *LSCEV = SE.getSCEV(PtrValue);
        const SCEVAddRecExpr *LSCEVAddRec = dyn_cast<SCEVAddRecExpr>(LSCEV);
        if (!LSCEVAddRec || !LSCEVAddRec->isAffine())
          continue;

        // FIXME? We could take pairing of unrolled load copies into account
        // by looking at the AddRec, but we would probably have to limit this
        // to loops with no stores or other memory optimization barriers.
        ++StridedLoads;
        // We've seen enough strided loads that seeing more won't make a
        // difference.
        if (StridedLoads > MaxStridedLoads / 2)
          return StridedLoads;
      }
    }
    return StridedLoads;
  };

  int StridedLoads = countStridedLoads(L, SE);
  LLVM_DEBUG(dbgs() << "falkor-hwpf: detected " << StridedLoads
                    << " strided loads\n");
  // Pick the largest power of 2 unroll count that won't result in too many
  // strided loads.
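  // For example, two strided loads give MaxCount = 1 << Log2_32(7 / 2) = 2.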
  if (StridedLoads) {
    UP.MaxCount = 1 << Log2_32(MaxStridedLoads / StridedLoads);
    LLVM_DEBUG(dbgs() << "falkor-hwpf: setting unroll MaxCount to "
                      << UP.MaxCount << '\n');
  }
}

void AArch64TTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                             TTI::UnrollingPreferences &UP) {
  // Enable partial unrolling and runtime unrolling.
  BaseT::getUnrollingPreferences(L, SE, UP);

  // An inner loop is more likely to be hot, and the runtime check can be
  // hoisted out by the LICM pass, so its overhead is lower; use a larger
  // threshold to unroll more loops.
  if (L->getLoopDepth() > 1)
    UP.PartialThreshold *= 2;

  // Disable partial & runtime unrolling on -Os.
  UP.PartialOptSizeThreshold = 0;

  if (ST->getProcFamily() == AArch64Subtarget::Falkor &&
      EnableFalkorHWPFUnrollFix)
    getFalkorUnrollingPreferences(L, SE, UP);
}

void AArch64TTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                           TTI::PeelingPreferences &PP) {
  BaseT::getPeelingPreferences(L, SE, PP);
}

Value *AArch64TTIImpl::getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                                         Type *ExpectedType) {
  switch (Inst->getIntrinsicID()) {
  default:
    return nullptr;
  case Intrinsic::aarch64_neon_st2:
  case Intrinsic::aarch64_neon_st3:
  case Intrinsic::aarch64_neon_st4: {
    // The expected type must be a struct whose element types match the stored
    // operands.
    StructType *ST = dyn_cast<StructType>(ExpectedType);
    if (!ST)
      return nullptr;
    unsigned NumElts = Inst->getNumArgOperands() - 1;
    if (ST->getNumElements() != NumElts)
      return nullptr;
    for (unsigned i = 0, e = NumElts; i != e; ++i) {
      if (Inst->getArgOperand(i)->getType() != ST->getElementType(i))
        return nullptr;
    }
    Value *Res = UndefValue::get(ExpectedType);
    IRBuilder<> Builder(Inst);
    for (unsigned i = 0, e = NumElts; i != e; ++i) {
      Value *L = Inst->getArgOperand(i);
      Res = Builder.CreateInsertValue(Res, L, i);
    }
    return Res;
  }
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_ld4:
    if (Inst->getType() == ExpectedType)
      return Inst;
    return nullptr;
  }
}

bool AArch64TTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                        MemIntrinsicInfo &Info) {
  switch (Inst->getIntrinsicID()) {
  default:
    break;
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_ld4:
    Info.ReadMem = true;
    Info.WriteMem = false;
    Info.PtrVal = Inst->getArgOperand(0);
    break;
  case Intrinsic::aarch64_neon_st2:
  case Intrinsic::aarch64_neon_st3:
  case Intrinsic::aarch64_neon_st4:
    Info.ReadMem = false;
    Info.WriteMem = true;
    Info.PtrVal = Inst->getArgOperand(Inst->getNumArgOperands() - 1);
    break;
  }

  switch (Inst->getIntrinsicID()) {
  default:
    return false;
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_st2:
    Info.MatchingId = VECTOR_LDST_TWO_ELEMENTS;
    break;
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_st3:
    Info.MatchingId = VECTOR_LDST_THREE_ELEMENTS;
    break;
  case Intrinsic::aarch64_neon_ld4:
  case Intrinsic::aarch64_neon_st4:
    Info.MatchingId = VECTOR_LDST_FOUR_ELEMENTS;
    break;
  }
  return true;
}

/// See if \p I should be considered for address type promotion. We check if
/// \p I is a sext with the right type that is used in memory accesses. If it
/// is used in a "complex" getelementptr, we allow it to be promoted without
/// finding other sext instructions that sign extended the same initial value.
/// A getelementptr is considered "complex" if it has more than 2 operands.
bool AArch64TTIImpl::shouldConsiderAddressTypePromotion(
    const Instruction &I, bool &AllowPromotionWithoutCommonHeader) {
  bool Considerable = false;
  AllowPromotionWithoutCommonHeader = false;
  if (!isa<SExtInst>(&I))
    return false;
  Type *ConsideredSExtType =
      Type::getInt64Ty(I.getParent()->getParent()->getContext());
  if (I.getType() != ConsideredSExtType)
    return false;
  // See if the sext is the one with the right type and used in at least one
  // GetElementPtrInst.
  for (const User *U : I.users()) {
    if (const GetElementPtrInst *GEPInst = dyn_cast<GetElementPtrInst>(U)) {
      Considerable = true;
      // A getelementptr is considered as "complex" if it has more than 2
      // operands. We will promote a SExt used in such complex GEP as we
      // expect some computation to be merged if they are done on 64 bits.
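      // For example, 'getelementptr [16 x i32], [16 x i32]* %p, i64 %a, i64 %b'
      // has three operands, so a sext feeding %a or %b qualifies.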
      if (GEPInst->getNumOperands() > 2) {
        AllowPromotionWithoutCommonHeader = true;
        break;
      }
    }
  }
  return Considerable;
}

bool AArch64TTIImpl::isLegalToVectorizeReduction(RecurrenceDescriptor RdxDesc,
                                                 ElementCount VF) const {
  if (!VF.isScalable())
    return true;

  Type *Ty = RdxDesc.getRecurrenceType();
  if (Ty->isBFloatTy() || !isLegalElementTypeForSVE(Ty))
    return false;

  switch (RdxDesc.getRecurrenceKind()) {
  case RecurKind::Add:
  case RecurKind::FAdd:
  case RecurKind::And:
  case RecurKind::Or:
  case RecurKind::Xor:
  case RecurKind::SMin:
  case RecurKind::SMax:
  case RecurKind::UMin:
  case RecurKind::UMax:
  case RecurKind::FMin:
  case RecurKind::FMax:
    return true;
  default:
    return false;
  }
}

int AArch64TTIImpl::getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy,
                                           bool IsPairwise, bool IsUnsigned,
                                           TTI::TargetCostKind CostKind) {
  if (!isa<ScalableVectorType>(Ty))
    return BaseT::getMinMaxReductionCost(Ty, CondTy, IsPairwise, IsUnsigned,
                                         CostKind);
  assert((isa<ScalableVectorType>(Ty) && isa<ScalableVectorType>(CondTy)) &&
         "Both vectors need to be scalable");

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  int LegalizationCost = 0;
  if (LT.first > 1) {
    Type *LegalVTy = EVT(LT.second).getTypeForEVT(Ty->getContext());
    unsigned CmpOpcode =
        Ty->isFPOrFPVectorTy() ? Instruction::FCmp : Instruction::ICmp;
    LegalizationCost =
        getCmpSelInstrCost(CmpOpcode, LegalVTy, LegalVTy,
                           CmpInst::BAD_ICMP_PREDICATE, CostKind) +
        getCmpSelInstrCost(Instruction::Select, LegalVTy, LegalVTy,
                           CmpInst::BAD_ICMP_PREDICATE, CostKind);
    LegalizationCost *= LT.first - 1;
  }

  return LegalizationCost + /*Cost of horizontal reduction*/ 2;
}

int AArch64TTIImpl::getArithmeticReductionCostSVE(
    unsigned Opcode, VectorType *ValTy, bool IsPairwise,
    TTI::TargetCostKind CostKind) {
  assert(!IsPairwise && "Cannot be pairwise to continue");

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
  int LegalizationCost = 0;
  if (LT.first > 1) {
    Type *LegalVTy = EVT(LT.second).getTypeForEVT(ValTy->getContext());
    LegalizationCost = getArithmeticInstrCost(Opcode, LegalVTy, CostKind);
    LegalizationCost *= LT.first - 1;
  }

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");
  // Add the final reduction cost for the legal horizontal reduction.
  switch (ISD) {
  case ISD::ADD:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
  case ISD::FADD:
    return LegalizationCost + 2;
  default:
    // TODO: Replace this with an invalid cost for the cases not supported by
    // SVE once InstructionCost is used here.
    return 16;
  }
}

int AArch64TTIImpl::getArithmeticReductionCost(unsigned Opcode,
                                               VectorType *ValTy,
                                               bool IsPairwiseForm,
                                               TTI::TargetCostKind CostKind) {

  if (isa<ScalableVectorType>(ValTy))
    return getArithmeticReductionCostSVE(Opcode, ValTy, IsPairwiseForm,
                                         CostKind);
  if (IsPairwiseForm)
    return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwiseForm,
                                             CostKind);

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
  MVT MTy = LT.second;
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // Horizontal adds can use the 'addv' instruction. We model the cost of these
  // instructions as normal vector adds. This is the only arithmetic vector
  // reduction operation for which we have an instruction.
  static const CostTblEntry CostTblNoPairwise[]{
      {ISD::ADD, MVT::v8i8,  1},
      {ISD::ADD, MVT::v16i8, 1},
      {ISD::ADD, MVT::v4i16, 1},
      {ISD::ADD, MVT::v8i16, 1},
      {ISD::ADD, MVT::v4i32, 1},
  };

  if (const auto *Entry = CostTableLookup(CostTblNoPairwise, ISD, MTy))
    return LT.first * Entry->Cost;

  return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwiseForm,
                                           CostKind);
}

int AArch64TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp,
                                   ArrayRef<int> Mask, int Index,
                                   VectorType *SubTp) {
  if (Kind == TTI::SK_Broadcast || Kind == TTI::SK_Transpose ||
      Kind == TTI::SK_Select || Kind == TTI::SK_PermuteSingleSrc ||
      Kind == TTI::SK_Reverse) {
    static const CostTblEntry ShuffleTbl[] = {
      // Broadcast shuffle kinds can be performed with 'dup'.
      { TTI::SK_Broadcast, MVT::v8i8,  1 },
      { TTI::SK_Broadcast, MVT::v16i8, 1 },
      { TTI::SK_Broadcast, MVT::v4i16, 1 },
      { TTI::SK_Broadcast, MVT::v8i16, 1 },
      { TTI::SK_Broadcast, MVT::v2i32, 1 },
      { TTI::SK_Broadcast, MVT::v4i32, 1 },
      { TTI::SK_Broadcast, MVT::v2i64, 1 },
      { TTI::SK_Broadcast, MVT::v2f32, 1 },
      { TTI::SK_Broadcast, MVT::v4f32, 1 },
      { TTI::SK_Broadcast, MVT::v2f64, 1 },
      // Transpose shuffle kinds can be performed with 'trn1/trn2' and
      // 'zip1/zip2' instructions.
      { TTI::SK_Transpose, MVT::v8i8,  1 },
      { TTI::SK_Transpose, MVT::v16i8, 1 },
      { TTI::SK_Transpose, MVT::v4i16, 1 },
      { TTI::SK_Transpose, MVT::v8i16, 1 },
      { TTI::SK_Transpose, MVT::v2i32, 1 },
      { TTI::SK_Transpose, MVT::v4i32, 1 },
      { TTI::SK_Transpose, MVT::v2i64, 1 },
      { TTI::SK_Transpose, MVT::v2f32, 1 },
      { TTI::SK_Transpose, MVT::v4f32, 1 },
      { TTI::SK_Transpose, MVT::v2f64, 1 },
      // Select shuffle kinds.
      // TODO: handle vXi8/vXi16.
      { TTI::SK_Select, MVT::v2i32, 1 }, // mov.
      { TTI::SK_Select, MVT::v4i32, 2 }, // rev+trn (or similar).
      { TTI::SK_Select, MVT::v2i64, 1 }, // mov.
      { TTI::SK_Select, MVT::v2f32, 1 }, // mov.
      { TTI::SK_Select, MVT::v4f32, 2 }, // rev+trn (or similar).
      { TTI::SK_Select, MVT::v2f64, 1 }, // mov.
      // PermuteSingleSrc shuffle kinds.
      // TODO: handle vXi8/vXi16.
      { TTI::SK_PermuteSingleSrc, MVT::v2i32, 1 }, // mov.
      { TTI::SK_PermuteSingleSrc, MVT::v4i32, 3 }, // perfectshuffle worst case.
      { TTI::SK_PermuteSingleSrc, MVT::v2i64, 1 }, // mov.
      { TTI::SK_PermuteSingleSrc, MVT::v2f32, 1 }, // mov.
      { TTI::SK_PermuteSingleSrc, MVT::v4f32, 3 }, // perfectshuffle worst case.
      { TTI::SK_PermuteSingleSrc, MVT::v2f64, 1 }, // mov.
      // Broadcast shuffle kinds for scalable vectors
      { TTI::SK_Broadcast, MVT::nxv16i8,  1 },
      { TTI::SK_Broadcast, MVT::nxv8i16,  1 },
      { TTI::SK_Broadcast, MVT::nxv4i32,  1 },
      { TTI::SK_Broadcast, MVT::nxv2i64,  1 },
      { TTI::SK_Broadcast, MVT::nxv8f16,  1 },
      { TTI::SK_Broadcast, MVT::nxv8bf16, 1 },
      { TTI::SK_Broadcast, MVT::nxv4f32,  1 },
      { TTI::SK_Broadcast, MVT::nxv2f64,  1 },
      // Handle the cases for vector.reverse with scalable vectors
      { TTI::SK_Reverse, MVT::nxv16i8,  1 },
      { TTI::SK_Reverse, MVT::nxv8i16,  1 },
      { TTI::SK_Reverse, MVT::nxv4i32,  1 },
      { TTI::SK_Reverse, MVT::nxv2i64,  1 },
      { TTI::SK_Reverse, MVT::nxv8f16,  1 },
      { TTI::SK_Reverse, MVT::nxv8bf16, 1 },
      { TTI::SK_Reverse, MVT::nxv4f32,  1 },
      { TTI::SK_Reverse, MVT::nxv2f64,  1 },
      { TTI::SK_Reverse, MVT::nxv16i1,  1 },
      { TTI::SK_Reverse, MVT::nxv8i1,   1 },
      { TTI::SK_Reverse, MVT::nxv4i1,   1 },
      { TTI::SK_Reverse, MVT::nxv2i1,   1 },
    };
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
    if (const auto *Entry = CostTableLookup(ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;
  }

  return BaseT::getShuffleCost(Kind, Tp, Mask, Index, SubTp);
}