1 //===-- AArch64TargetTransformInfo.cpp - AArch64 specific TTI -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "AArch64TargetTransformInfo.h"
10 #include "AArch64ExpandImm.h"
11 #include "MCTargetDesc/AArch64AddressingModes.h"
12 #include "llvm/Analysis/LoopInfo.h"
13 #include "llvm/Analysis/TargetTransformInfo.h"
14 #include "llvm/CodeGen/BasicTTIImpl.h"
15 #include "llvm/CodeGen/CostTable.h"
16 #include "llvm/CodeGen/TargetLowering.h"
17 #include "llvm/IR/IntrinsicInst.h"
18 #include "llvm/IR/IntrinsicsAArch64.h"
19 #include "llvm/IR/PatternMatch.h"
20 #include "llvm/Support/Debug.h"
21 #include "llvm/Transforms/InstCombine/InstCombiner.h"
22 #include <algorithm>
23 using namespace llvm;
24 using namespace llvm::PatternMatch;
25 
26 #define DEBUG_TYPE "aarch64tti"
27 
28 static cl::opt<bool> EnableFalkorHWPFUnrollFix("enable-falkor-hwpf-unroll-fix",
29                                                cl::init(true), cl::Hidden);
30 
31 bool AArch64TTIImpl::areInlineCompatible(const Function *Caller,
32                                          const Function *Callee) const {
33   const TargetMachine &TM = getTLI()->getTargetMachine();
34 
35   const FeatureBitset &CallerBits =
36       TM.getSubtargetImpl(*Caller)->getFeatureBits();
37   const FeatureBitset &CalleeBits =
38       TM.getSubtargetImpl(*Callee)->getFeatureBits();
39 
40   // Inline a callee if its target-features are a subset of the caller's
41   // target-features.
42   return (CallerBits & CalleeBits) == CalleeBits;
43 }
44 
45 /// Calculate the cost of materializing a 64-bit value. This helper
46 /// method might only calculate a fraction of a larger immediate. Therefore it
47 /// is valid to return a cost of ZERO.
48 int AArch64TTIImpl::getIntImmCost(int64_t Val) {
49   // Check if the immediate can be encoded within an instruction.
50   if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, 64))
51     return 0;
52 
53   if (Val < 0)
54     Val = ~Val;
55 
56   // Calculate how many moves we will need to materialize this constant.
57   SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn;
58   AArch64_IMM::expandMOVImm(Val, 64, Insn);
59   return Insn.size();
60 }
61 
62 /// Calculate the cost of materializing the given constant.
63 int AArch64TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
64                                   TTI::TargetCostKind CostKind) {
65   assert(Ty->isIntegerTy());
66 
67   unsigned BitSize = Ty->getPrimitiveSizeInBits();
68   if (BitSize == 0)
69     return ~0U;
70 
71   // Sign-extend all constants to a multiple of 64-bit.
72   APInt ImmVal = Imm;
73   if (BitSize & 0x3f)
74     ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);
75 
76   // Split the constant into 64-bit chunks and calculate the cost for each
77   // chunk.
78   int Cost = 0;
79   for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
80     APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
81     int64_t Val = Tmp.getSExtValue();
82     Cost += getIntImmCost(Val);
83   }
84   // We need at least one instruction to materialize the constant.
85   return std::max(1, Cost);
86 }
87 
88 int AArch64TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
89                                       const APInt &Imm, Type *Ty,
90                                       TTI::TargetCostKind CostKind,
91                                       Instruction *Inst) {
92   assert(Ty->isIntegerTy());
93 
94   unsigned BitSize = Ty->getPrimitiveSizeInBits();
95   // There is no cost model for constants with a bit size of 0. Return TCC_Free
96   // here, so that constant hoisting will ignore this constant.
97   if (BitSize == 0)
98     return TTI::TCC_Free;
99 
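  // ImmIdx is the operand index at which this opcode can usually fold an
  // immediate directly; ~0U means no operand of the instruction does.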
100   unsigned ImmIdx = ~0U;
101   switch (Opcode) {
102   default:
103     return TTI::TCC_Free;
104   case Instruction::GetElementPtr:
105     // Always hoist the base address of a GetElementPtr.
106     if (Idx == 0)
107       return 2 * TTI::TCC_Basic;
108     return TTI::TCC_Free;
109   case Instruction::Store:
110     ImmIdx = 0;
111     break;
112   case Instruction::Add:
113   case Instruction::Sub:
114   case Instruction::Mul:
115   case Instruction::UDiv:
116   case Instruction::SDiv:
117   case Instruction::URem:
118   case Instruction::SRem:
119   case Instruction::And:
120   case Instruction::Or:
121   case Instruction::Xor:
122   case Instruction::ICmp:
123     ImmIdx = 1;
124     break;
125   // Always return TCC_Free for the shift value of a shift instruction.
126   case Instruction::Shl:
127   case Instruction::LShr:
128   case Instruction::AShr:
129     if (Idx == 1)
130       return TTI::TCC_Free;
131     break;
132   case Instruction::Trunc:
133   case Instruction::ZExt:
134   case Instruction::SExt:
135   case Instruction::IntToPtr:
136   case Instruction::PtrToInt:
137   case Instruction::BitCast:
138   case Instruction::PHI:
139   case Instruction::Call:
140   case Instruction::Select:
141   case Instruction::Ret:
142   case Instruction::Load:
143     break;
144   }
145 
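  // If the immediate sits in the foldable operand slot and is cheap to
  // materialize (at most one instruction per 64-bit chunk), report it as free
  // so that constant hoisting leaves it alone.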
146   if (Idx == ImmIdx) {
147     int NumConstants = (BitSize + 63) / 64;
148     int Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
149     return (Cost <= NumConstants * TTI::TCC_Basic)
150                ? static_cast<int>(TTI::TCC_Free)
151                : Cost;
152   }
153   return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
154 }
155 
156 int AArch64TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
157                                         const APInt &Imm, Type *Ty,
158                                         TTI::TargetCostKind CostKind) {
159   assert(Ty->isIntegerTy());
160 
161   unsigned BitSize = Ty->getPrimitiveSizeInBits();
162   // There is no cost model for constants with a bit size of 0. Return TCC_Free
163   // here, so that constant hoisting will ignore this constant.
164   if (BitSize == 0)
165     return TTI::TCC_Free;
166 
167   // Most (all?) AArch64 intrinsics do not support folding immediates into the
168   // selected instruction, so we compute the materialization cost for the
169   // immediate directly.
170   if (IID >= Intrinsic::aarch64_addg && IID <= Intrinsic::aarch64_udiv)
171     return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
172 
173   switch (IID) {
174   default:
175     return TTI::TCC_Free;
176   case Intrinsic::sadd_with_overflow:
177   case Intrinsic::uadd_with_overflow:
178   case Intrinsic::ssub_with_overflow:
179   case Intrinsic::usub_with_overflow:
180   case Intrinsic::smul_with_overflow:
181   case Intrinsic::umul_with_overflow:
182     if (Idx == 1) {
183       int NumConstants = (BitSize + 63) / 64;
184       int Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
185       return (Cost <= NumConstants * TTI::TCC_Basic)
186                  ? static_cast<int>(TTI::TCC_Free)
187                  : Cost;
188     }
189     break;
190   case Intrinsic::experimental_stackmap:
191     if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
192       return TTI::TCC_Free;
193     break;
194   case Intrinsic::experimental_patchpoint_void:
195   case Intrinsic::experimental_patchpoint_i64:
196     if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
197       return TTI::TCC_Free;
198     break;
199   case Intrinsic::experimental_gc_statepoint:
200     if ((Idx < 5) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
201       return TTI::TCC_Free;
202     break;
203   }
204   return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
205 }
206 
207 TargetTransformInfo::PopcntSupportKind
208 AArch64TTIImpl::getPopcntSupport(unsigned TyWidth) {
209   assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
210   if (TyWidth == 32 || TyWidth == 64)
211     return TTI::PSK_FastHardware;
212   // TODO: AArch64TargetLowering::LowerCTPOP() supports 128bit popcount.
213   return TTI::PSK_Software;
214 }
215 
216 InstructionCost
217 AArch64TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
218                                       TTI::TargetCostKind CostKind) {
219   auto *RetTy = ICA.getReturnType();
220   switch (ICA.getID()) {
221   case Intrinsic::umin:
222   case Intrinsic::umax: {
223     auto LT = TLI->getTypeLegalizationCost(DL, RetTy);
224     // umin(x,y) -> sub(x,usubsat(x,y))
225     // umax(x,y) -> add(x,usubsat(y,x))
226     if (LT.second == MVT::v2i64)
227       return LT.first * 2;
228     LLVM_FALLTHROUGH;
229   }
230   case Intrinsic::smin:
231   case Intrinsic::smax: {
232     static const auto ValidMinMaxTys = {MVT::v8i8,  MVT::v16i8, MVT::v4i16,
233                                         MVT::v8i16, MVT::v2i32, MVT::v4i32};
234     auto LT = TLI->getTypeLegalizationCost(DL, RetTy);
235     if (any_of(ValidMinMaxTys, [&LT](MVT M) { return M == LT.second; }))
236       return LT.first;
237     break;
238   }
239   case Intrinsic::sadd_sat:
240   case Intrinsic::ssub_sat:
241   case Intrinsic::uadd_sat:
242   case Intrinsic::usub_sat: {
243     static const auto ValidSatTys = {MVT::v8i8,  MVT::v16i8, MVT::v4i16,
244                                      MVT::v8i16, MVT::v2i32, MVT::v4i32,
245                                      MVT::v2i64};
246     auto LT = TLI->getTypeLegalizationCost(DL, RetTy);
247     // This is a base cost of 1 for the vadd, plus 3 extract shifts if we
248     // need to extend the type, as it uses shr(qadd(shl, shl)).
249     unsigned Instrs =
250         LT.second.getScalarSizeInBits() == RetTy->getScalarSizeInBits() ? 1 : 4;
251     if (any_of(ValidSatTys, [&LT](MVT M) { return M == LT.second; }))
252       return LT.first * Instrs;
253     break;
254   }
255   case Intrinsic::abs: {
256     static const auto ValidAbsTys = {MVT::v8i8,  MVT::v16i8, MVT::v4i16,
257                                      MVT::v8i16, MVT::v2i32, MVT::v4i32,
258                                      MVT::v2i64};
259     auto LT = TLI->getTypeLegalizationCost(DL, RetTy);
260     if (any_of(ValidAbsTys, [&LT](MVT M) { return M == LT.second; }))
261       return LT.first;
262     break;
263   }
264   case Intrinsic::experimental_stepvector: {
265     InstructionCost Cost = 1; // Cost of the `index' instruction
266     auto LT = TLI->getTypeLegalizationCost(DL, RetTy);
267     // Legalisation of illegal vectors involves an `index' instruction plus
268     // (LT.first - 1) vector adds.
269     if (LT.first > 1) {
270       Type *LegalVTy = EVT(LT.second).getTypeForEVT(RetTy->getContext());
271       InstructionCost AddCost =
272           getArithmeticInstrCost(Instruction::Add, LegalVTy, CostKind);
273       Cost += AddCost * (LT.first - 1);
274     }
275     return Cost;
276   }
277   default:
278     break;
279   }
280   return BaseT::getIntrinsicInstrCost(ICA, CostKind);
281 }
282 
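// Simplify an SVE lasta/lastb call whose predicate is a known constant into a
// plain extractelement, when the extracted lane index can be determined.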
283 static Optional<Instruction *> instCombineSVELast(InstCombiner &IC,
284                                                   IntrinsicInst &II) {
285   Value *Pg = II.getArgOperand(0);
286   Value *Vec = II.getArgOperand(1);
287   bool IsAfter = II.getIntrinsicID() == Intrinsic::aarch64_sve_lasta;
288 
289   auto *C = dyn_cast<Constant>(Pg);
290   if (IsAfter && C && C->isNullValue()) {
291     // The intrinsic is extracting lane 0 so use an extract instead.
292     auto *IdxTy = Type::getInt64Ty(II.getContext());
293     auto *Extract = ExtractElementInst::Create(Vec, ConstantInt::get(IdxTy, 0));
294     Extract->insertBefore(&II);
295     Extract->takeName(&II);
296     return IC.replaceInstUsesWith(II, Extract);
297   }
298 
299   auto *IntrPG = dyn_cast<IntrinsicInst>(Pg);
300   if (!IntrPG)
301     return None;
302 
303   if (IntrPG->getIntrinsicID() != Intrinsic::aarch64_sve_ptrue)
304     return None;
305 
306   const auto PTruePattern =
307       cast<ConstantInt>(IntrPG->getOperand(0))->getZExtValue();
308 
309   // Can the intrinsic's predicate be converted to a known constant index?
310   unsigned Idx;
311   switch (PTruePattern) {
312   default:
313     return None;
314   case AArch64SVEPredPattern::vl1:
315     Idx = 0;
316     break;
317   case AArch64SVEPredPattern::vl2:
318     Idx = 1;
319     break;
320   case AArch64SVEPredPattern::vl3:
321     Idx = 2;
322     break;
323   case AArch64SVEPredPattern::vl4:
324     Idx = 3;
325     break;
326   case AArch64SVEPredPattern::vl5:
327     Idx = 4;
328     break;
329   case AArch64SVEPredPattern::vl6:
330     Idx = 5;
331     break;
332   case AArch64SVEPredPattern::vl7:
333     Idx = 6;
334     break;
335   case AArch64SVEPredPattern::vl8:
336     Idx = 7;
337     break;
338   case AArch64SVEPredPattern::vl16:
339     Idx = 15;
340     break;
341   }
342 
343   // Increment the index if extracting the element after the last active
344   // predicate element.
345   if (IsAfter)
346     ++Idx;
347 
348   // Ignore extracts whose index is larger than the known minimum vector
349   // length. NOTE: This is an artificial constraint where we prefer to
350   // maintain what the user asked for until an alternative is proven faster.
351   auto *PgVTy = cast<ScalableVectorType>(Pg->getType());
352   if (Idx >= PgVTy->getMinNumElements())
353     return None;
354 
355   // The intrinsic is extracting a fixed lane so use an extract instead.
356   auto *IdxTy = Type::getInt64Ty(II.getContext());
357   auto *Extract = ExtractElementInst::Create(Vec, ConstantInt::get(IdxTy, Idx));
358   Extract->insertBefore(&II);
359   Extract->takeName(&II);
360   return IC.replaceInstUsesWith(II, Extract);
361 }
362 
363 Optional<Instruction *>
364 AArch64TTIImpl::instCombineIntrinsic(InstCombiner &IC,
365                                      IntrinsicInst &II) const {
366   Intrinsic::ID IID = II.getIntrinsicID();
367   switch (IID) {
368   default:
369     break;
370   case Intrinsic::aarch64_sve_lasta:
371   case Intrinsic::aarch64_sve_lastb:
372     return instCombineSVELast(IC, II);
373   }
374 
375   return None;
376 }
377 
378 bool AArch64TTIImpl::isWideningInstruction(Type *DstTy, unsigned Opcode,
379                                            ArrayRef<const Value *> Args) {
380 
381   // A helper that returns a vector type whose scalar type matches the given
382   // type and whose element count matches that of DstTy.
383   auto toVectorTy = [&](Type *ArgTy) {
384     return VectorType::get(ArgTy->getScalarType(),
385                            cast<VectorType>(DstTy)->getElementCount());
386   };
387 
388   // Exit early if DstTy is not a vector type whose elements are at least
389   // 16-bits wide.
390   if (!DstTy->isVectorTy() || DstTy->getScalarSizeInBits() < 16)
391     return false;
392 
393   // Determine if the operation has a widening variant. We consider both the
394   // "long" (e.g., usubl) and "wide" (e.g., usubw) versions of the
395   // instructions.
396   //
397   // TODO: Add additional widening operations (e.g., mul, shl, etc.) once we
398   //       verify that their extending operands are eliminated during code
399   //       generation.
400   switch (Opcode) {
401   case Instruction::Add: // UADDL(2), SADDL(2), UADDW(2), SADDW(2).
402   case Instruction::Sub: // USUBL(2), SSUBL(2), USUBW(2), SSUBW(2).
403     break;
404   default:
405     return false;
406   }
407 
408   // To be a widening instruction (either the "wide" or "long" versions), the
409   // second operand must be a sign- or zero-extend having a single user. We
410   // only consider extends having a single user because they may otherwise not
411   // be eliminated.
412   if (Args.size() != 2 ||
413       (!isa<SExtInst>(Args[1]) && !isa<ZExtInst>(Args[1])) ||
414       !Args[1]->hasOneUse())
415     return false;
416   auto *Extend = cast<CastInst>(Args[1]);
417 
418   // Legalize the destination type and ensure it can be used in a widening
419   // operation.
420   auto DstTyL = TLI->getTypeLegalizationCost(DL, DstTy);
421   unsigned DstElTySize = DstTyL.second.getScalarSizeInBits();
422   if (!DstTyL.second.isVector() || DstElTySize != DstTy->getScalarSizeInBits())
423     return false;
424 
425   // Legalize the source type and ensure it can be used in a widening
426   // operation.
427   auto *SrcTy = toVectorTy(Extend->getSrcTy());
428   auto SrcTyL = TLI->getTypeLegalizationCost(DL, SrcTy);
429   unsigned SrcElTySize = SrcTyL.second.getScalarSizeInBits();
430   if (!SrcTyL.second.isVector() || SrcElTySize != SrcTy->getScalarSizeInBits())
431     return false;
432 
433   // Get the total number of vector elements in the legalized types.
434   unsigned NumDstEls = DstTyL.first * DstTyL.second.getVectorMinNumElements();
435   unsigned NumSrcEls = SrcTyL.first * SrcTyL.second.getVectorMinNumElements();
436 
437   // Return true if the legalized types have the same number of vector elements
438   // and the destination element type size is twice that of the source type.
439   return NumDstEls == NumSrcEls && 2 * SrcElTySize == DstElTySize;
440 }
441 
442 InstructionCost AArch64TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
443                                                  Type *Src,
444                                                  TTI::CastContextHint CCH,
445                                                  TTI::TargetCostKind CostKind,
446                                                  const Instruction *I) {
447   int ISD = TLI->InstructionOpcodeToISD(Opcode);
448   assert(ISD && "Invalid opcode");
449 
450   // If the cast is observable, and it is used by a widening instruction (e.g.,
451   // uaddl, saddw, etc.), it may be free.
452   if (I && I->hasOneUse()) {
453     auto *SingleUser = cast<Instruction>(*I->user_begin());
454     SmallVector<const Value *, 4> Operands(SingleUser->operand_values());
455     if (isWideningInstruction(Dst, SingleUser->getOpcode(), Operands)) {
456       // If the cast is the second operand, it is free. We will generate either
457       // a "wide" or "long" version of the widening instruction.
458       if (I == SingleUser->getOperand(1))
459         return 0;
460       // If the cast is not the second operand, it will be free if it looks the
461       // same as the second operand. In this case, we will generate a "long"
462       // version of the widening instruction.
463       if (auto *Cast = dyn_cast<CastInst>(SingleUser->getOperand(1)))
464         if (I->getOpcode() == unsigned(Cast->getOpcode()) &&
465             cast<CastInst>(I)->getSrcTy() == Cast->getSrcTy())
466           return 0;
467     }
468   }
469 
470   // TODO: Allow non-throughput costs that aren't binary.
471   auto AdjustCost = [&CostKind](InstructionCost Cost) -> InstructionCost {
472     if (CostKind != TTI::TCK_RecipThroughput)
473       return Cost == 0 ? 0 : 1;
474     return Cost;
475   };
476 
477   EVT SrcTy = TLI->getValueType(DL, Src);
478   EVT DstTy = TLI->getValueType(DL, Dst);
479 
480   if (!SrcTy.isSimple() || !DstTy.isSimple())
481     return AdjustCost(
482         BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
483 
484   static const TypeConversionCostTblEntry
485   ConversionTbl[] = {
486     { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32,  1 },
487     { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64,  0 },
488     { ISD::TRUNCATE, MVT::v8i8,  MVT::v8i32,  3 },
489     { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 6 },
490 
491     // Truncations on nxvmiN
492     { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i16, 1 },
493     { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i32, 1 },
494     { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i64, 1 },
495     { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i16, 1 },
496     { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i32, 1 },
497     { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i64, 2 },
498     { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i16, 1 },
499     { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i32, 3 },
500     { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i64, 5 },
501     { ISD::TRUNCATE, MVT::nxv2i16, MVT::nxv2i32, 1 },
502     { ISD::TRUNCATE, MVT::nxv2i32, MVT::nxv2i64, 1 },
503     { ISD::TRUNCATE, MVT::nxv4i16, MVT::nxv4i32, 1 },
504     { ISD::TRUNCATE, MVT::nxv4i32, MVT::nxv4i64, 2 },
505     { ISD::TRUNCATE, MVT::nxv8i16, MVT::nxv8i32, 3 },
506     { ISD::TRUNCATE, MVT::nxv8i32, MVT::nxv8i64, 6 },
507 
508     // The number of shll instructions for the extension.
509     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
510     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
511     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32, 2 },
512     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32, 2 },
513     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
514     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
515     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16, 2 },
516     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16, 2 },
517     { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i8,  7 },
518     { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i8,  7 },
519     { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i16, 6 },
520     { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i16, 6 },
521     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
522     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
523     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
524     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
525 
526     // LowerVectorINT_TO_FP:
527     { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
528     { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
529     { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
530     { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
531     { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
532     { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
533 
534     // Complex: to v2f32
535     { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
536     { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
537     { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },
538     { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
539     { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
540     { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },
541 
542     // Complex: to v4f32
543     { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8,  4 },
544     { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
545     { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8,  3 },
546     { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
547 
548     // Complex: to v8f32
549     { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8,  10 },
550     { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
551     { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8,  10 },
552     { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
553 
554     // Complex: to v16f32
555     { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },
556     { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },
557 
558     // Complex: to v2f64
559     { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
560     { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
561     { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
562     { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
563     { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
564     { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
565 
566 
567     // LowerVectorFP_TO_INT
568     { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f32, 1 },
569     { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
570     { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
571     { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 },
572     { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
573     { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },
574 
575     // Complex, from v2f32: legal type is v2i32 (no cost) or v2i64 (1 ext).
576     { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 2 },
577     { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 1 },
578     { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f32, 1 },
579     { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 2 },
580     { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 1 },
581     { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f32, 1 },
582 
583     // Complex, from v4f32: legal type is v4i16, 1 narrowing => ~2
584     { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 },
585     { ISD::FP_TO_SINT, MVT::v4i8,  MVT::v4f32, 2 },
586     { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 },
587     { ISD::FP_TO_UINT, MVT::v4i8,  MVT::v4f32, 2 },
588 
589     // Complex, from nxv2f32.
590     { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f32, 1 },
591     { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f32, 1 },
592     { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f32, 1 },
593     { ISD::FP_TO_SINT, MVT::nxv2i8,  MVT::nxv2f32, 1 },
594     { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f32, 1 },
595     { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f32, 1 },
596     { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f32, 1 },
597     { ISD::FP_TO_UINT, MVT::nxv2i8,  MVT::nxv2f32, 1 },
598 
599     // Complex, from v2f64: legal type is v2i32, 1 narrowing => ~2.
600     { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 },
601     { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f64, 2 },
602     { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f64, 2 },
603     { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 },
604     { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f64, 2 },
605     { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f64, 2 },
606 
607     // Complex, from nxv2f64.
608     { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f64, 1 },
609     { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f64, 1 },
610     { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f64, 1 },
611     { ISD::FP_TO_SINT, MVT::nxv2i8,  MVT::nxv2f64, 1 },
612     { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f64, 1 },
613     { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f64, 1 },
614     { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f64, 1 },
615     { ISD::FP_TO_UINT, MVT::nxv2i8,  MVT::nxv2f64, 1 },
616 
617     // Complex, from nxv4f32.
618     { ISD::FP_TO_SINT, MVT::nxv4i64, MVT::nxv4f32, 4 },
619     { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f32, 1 },
620     { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f32, 1 },
621     { ISD::FP_TO_SINT, MVT::nxv4i8,  MVT::nxv4f32, 1 },
622     { ISD::FP_TO_UINT, MVT::nxv4i64, MVT::nxv4f32, 4 },
623     { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f32, 1 },
624     { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f32, 1 },
625     { ISD::FP_TO_UINT, MVT::nxv4i8,  MVT::nxv4f32, 1 },
626 
627     // Complex, from nxv8f64. Illegal -> illegal conversions not required.
628     { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f64, 7 },
629     { ISD::FP_TO_SINT, MVT::nxv8i8,  MVT::nxv8f64, 7 },
630     { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f64, 7 },
631     { ISD::FP_TO_UINT, MVT::nxv8i8,  MVT::nxv8f64, 7 },
632 
633     // Complex, from nxv4f64. Illegal -> illegal conversions not required.
634     { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f64, 3 },
635     { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f64, 3 },
636     { ISD::FP_TO_SINT, MVT::nxv4i8,  MVT::nxv4f64, 3 },
637     { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f64, 3 },
638     { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f64, 3 },
639     { ISD::FP_TO_UINT, MVT::nxv4i8,  MVT::nxv4f64, 3 },
640 
641     // Complex, from nxv8f32. Illegal -> illegal conversions not required.
642     { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f32, 3 },
643     { ISD::FP_TO_SINT, MVT::nxv8i8,  MVT::nxv8f32, 3 },
644     { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f32, 3 },
645     { ISD::FP_TO_UINT, MVT::nxv8i8,  MVT::nxv8f32, 3 },
646 
647     // Complex, from nxv8f16.
648     { ISD::FP_TO_SINT, MVT::nxv8i64, MVT::nxv8f16, 10 },
649     { ISD::FP_TO_SINT, MVT::nxv8i32, MVT::nxv8f16, 4 },
650     { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f16, 1 },
651     { ISD::FP_TO_SINT, MVT::nxv8i8,  MVT::nxv8f16, 1 },
652     { ISD::FP_TO_UINT, MVT::nxv8i64, MVT::nxv8f16, 10 },
653     { ISD::FP_TO_UINT, MVT::nxv8i32, MVT::nxv8f16, 4 },
654     { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f16, 1 },
655     { ISD::FP_TO_UINT, MVT::nxv8i8,  MVT::nxv8f16, 1 },
656 
657     // Complex, from nxv4f16.
658     { ISD::FP_TO_SINT, MVT::nxv4i64, MVT::nxv4f16, 4 },
659     { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f16, 1 },
660     { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f16, 1 },
661     { ISD::FP_TO_SINT, MVT::nxv4i8,  MVT::nxv4f16, 1 },
662     { ISD::FP_TO_UINT, MVT::nxv4i64, MVT::nxv4f16, 4 },
663     { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f16, 1 },
664     { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f16, 1 },
665     { ISD::FP_TO_UINT, MVT::nxv4i8,  MVT::nxv4f16, 1 },
666 
667     // Complex, from nxv2f16.
668     { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f16, 1 },
669     { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f16, 1 },
670     { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f16, 1 },
671     { ISD::FP_TO_SINT, MVT::nxv2i8,  MVT::nxv2f16, 1 },
672     { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f16, 1 },
673     { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f16, 1 },
674     { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f16, 1 },
675     { ISD::FP_TO_UINT, MVT::nxv2i8,  MVT::nxv2f16, 1 },
676 
677     // Truncate from nxvmf32 to nxvmf16.
678     { ISD::FP_ROUND, MVT::nxv2f16, MVT::nxv2f32, 1 },
679     { ISD::FP_ROUND, MVT::nxv4f16, MVT::nxv4f32, 1 },
680     { ISD::FP_ROUND, MVT::nxv8f16, MVT::nxv8f32, 3 },
681 
682     // Truncate from nxvmf64 to nxvmf16.
683     { ISD::FP_ROUND, MVT::nxv2f16, MVT::nxv2f64, 1 },
684     { ISD::FP_ROUND, MVT::nxv4f16, MVT::nxv4f64, 3 },
685     { ISD::FP_ROUND, MVT::nxv8f16, MVT::nxv8f64, 7 },
686 
687     // Truncate from nxvmf64 to nxvmf32.
688     { ISD::FP_ROUND, MVT::nxv2f32, MVT::nxv2f64, 1 },
689     { ISD::FP_ROUND, MVT::nxv4f32, MVT::nxv4f64, 3 },
690     { ISD::FP_ROUND, MVT::nxv8f32, MVT::nxv8f64, 6 },
691 
692     // Extend from nxvmf16 to nxvmf32.
693     { ISD::FP_EXTEND, MVT::nxv2f32, MVT::nxv2f16, 1},
694     { ISD::FP_EXTEND, MVT::nxv4f32, MVT::nxv4f16, 1},
695     { ISD::FP_EXTEND, MVT::nxv8f32, MVT::nxv8f16, 2},
696 
697     // Extend from nxvmf16 to nxvmf64.
698     { ISD::FP_EXTEND, MVT::nxv2f64, MVT::nxv2f16, 1},
699     { ISD::FP_EXTEND, MVT::nxv4f64, MVT::nxv4f16, 2},
700     { ISD::FP_EXTEND, MVT::nxv8f64, MVT::nxv8f16, 4},
701 
702     // Extend from nxvmf32 to nxvmf64.
703     { ISD::FP_EXTEND, MVT::nxv2f64, MVT::nxv2f32, 1},
704     { ISD::FP_EXTEND, MVT::nxv4f64, MVT::nxv4f32, 2},
705     { ISD::FP_EXTEND, MVT::nxv8f64, MVT::nxv8f32, 6},
706 
707   };
708 
709   if (const auto *Entry = ConvertCostTableLookup(ConversionTbl, ISD,
710                                                  DstTy.getSimpleVT(),
711                                                  SrcTy.getSimpleVT()))
712     return AdjustCost(Entry->Cost);
713 
714   return AdjustCost(
715       BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
716 }
717 
718 InstructionCost AArch64TTIImpl::getExtractWithExtendCost(unsigned Opcode,
719                                                          Type *Dst,
720                                                          VectorType *VecTy,
721                                                          unsigned Index) {
722 
723   // Make sure we were given a valid extend opcode.
724   assert((Opcode == Instruction::SExt || Opcode == Instruction::ZExt) &&
725          "Invalid opcode");
726 
727   // We are extending an element we extract from a vector, so the source type
728   // of the extend is the element type of the vector.
729   auto *Src = VecTy->getElementType();
730 
731   // Sign- and zero-extends are for integer types only.
732   assert(isa<IntegerType>(Dst) && isa<IntegerType>(Src) && "Invalid type");
733 
734   // Get the cost for the extract. We compute the cost (if any) for the extend
735   // below.
736   InstructionCost Cost =
737       getVectorInstrCost(Instruction::ExtractElement, VecTy, Index);
738 
739   // Legalize the types.
740   auto VecLT = TLI->getTypeLegalizationCost(DL, VecTy);
741   auto DstVT = TLI->getValueType(DL, Dst);
742   auto SrcVT = TLI->getValueType(DL, Src);
743   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
744 
745   // If the resulting type is still a vector and the destination type is legal,
746   // we may get the extension for free. If not, get the default cost for the
747   // extend.
748   if (!VecLT.second.isVector() || !TLI->isTypeLegal(DstVT))
749     return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None,
750                                    CostKind);
751 
752   // The destination type should be larger than the element type. If not, get
753   // the default cost for the extend.
754   if (DstVT.getFixedSizeInBits() < SrcVT.getFixedSizeInBits())
755     return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None,
756                                    CostKind);
757 
758   switch (Opcode) {
759   default:
760     llvm_unreachable("Opcode should be either SExt or ZExt");
761 
762   // For sign-extends, we only need a smov, which performs the extension
763   // automatically.
764   case Instruction::SExt:
765     return Cost;
766 
767   // For zero-extends, the extend is performed automatically by a umov unless
768   // the destination type is i64 and the element type is i8 or i16.
769   case Instruction::ZExt:
770     if (DstVT.getSizeInBits() != 64u || SrcVT.getSizeInBits() == 32u)
771       return Cost;
772   }
773 
774   // If we are unable to perform the extend for free, get the default cost.
775   return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None,
776                                  CostKind);
777 }
778 
779 InstructionCost AArch64TTIImpl::getCFInstrCost(unsigned Opcode,
780                                                TTI::TargetCostKind CostKind,
781                                                const Instruction *I) {
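  // For size- and latency-oriented cost kinds, PHIs are modelled as free and
  // branches as a single instruction.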
782   if (CostKind != TTI::TCK_RecipThroughput)
783     return Opcode == Instruction::PHI ? 0 : 1;
784   assert(CostKind == TTI::TCK_RecipThroughput && "unexpected CostKind");
785   // Branches are assumed to be predicted.
786   return 0;
787 }
788 
789 InstructionCost AArch64TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
790                                                    unsigned Index) {
791   assert(Val->isVectorTy() && "This must be a vector type");
792 
793   if (Index != -1U) {
794     // Legalize the type.
795     std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);
796 
797     // This type is legalized to a scalar type.
798     if (!LT.second.isVector())
799       return 0;
800 
801     // The type may be split. Normalize the index to the new type.
802     unsigned Width = LT.second.getVectorNumElements();
803     Index = Index % Width;
804 
805     // The element at index zero is already inside the vector.
806     if (Index == 0)
807       return 0;
808   }
809 
810   // All other insert/extracts cost this much.
811   return ST->getVectorInsertExtractBaseCost();
812 }
813 
814 InstructionCost AArch64TTIImpl::getArithmeticInstrCost(
815     unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
816     TTI::OperandValueKind Opd1Info, TTI::OperandValueKind Opd2Info,
817     TTI::OperandValueProperties Opd1PropInfo,
818     TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
819     const Instruction *CxtI) {
820   // TODO: Handle more cost kinds.
821   if (CostKind != TTI::TCK_RecipThroughput)
822     return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
823                                          Opd2Info, Opd1PropInfo,
824                                          Opd2PropInfo, Args, CxtI);
825 
826   // Legalize the type.
827   std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
828 
829   // If the instruction is a widening instruction (e.g., uaddl, saddw, etc.),
830   // add in the widening overhead specified by the sub-target. Since the
831   // extends feeding widening instructions are performed automatically, they
832   // aren't present in the generated code and have a zero cost. By adding a
833   // widening overhead here, we attach the total cost of the combined operation
834   // to the widening instruction.
835   InstructionCost Cost = 0;
836   if (isWideningInstruction(Ty, Opcode, Args))
837     Cost += ST->getWideningBaseCost();
838 
839   int ISD = TLI->InstructionOpcodeToISD(Opcode);
840 
841   switch (ISD) {
842   default:
843     return Cost + BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
844                                                 Opd2Info,
845                                                 Opd1PropInfo, Opd2PropInfo);
846   case ISD::SDIV:
847     if (Opd2Info == TargetTransformInfo::OK_UniformConstantValue &&
848         Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
849       // On AArch64, scalar signed division by a power-of-two constant is
850       // normally expanded to the sequence ADD + CMP + SELECT + SRA.
851       // The OperandValue properties may not be the same as those of the
852       // previous operation; conservatively assume OP_None.
853       Cost += getArithmeticInstrCost(Instruction::Add, Ty, CostKind,
854                                      Opd1Info, Opd2Info,
855                                      TargetTransformInfo::OP_None,
856                                      TargetTransformInfo::OP_None);
857       Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind,
858                                      Opd1Info, Opd2Info,
859                                      TargetTransformInfo::OP_None,
860                                      TargetTransformInfo::OP_None);
861       Cost += getArithmeticInstrCost(Instruction::Select, Ty, CostKind,
862                                      Opd1Info, Opd2Info,
863                                      TargetTransformInfo::OP_None,
864                                      TargetTransformInfo::OP_None);
865       Cost += getArithmeticInstrCost(Instruction::AShr, Ty, CostKind,
866                                      Opd1Info, Opd2Info,
867                                      TargetTransformInfo::OP_None,
868                                      TargetTransformInfo::OP_None);
869       return Cost;
870     }
871     LLVM_FALLTHROUGH;
872   case ISD::UDIV:
873     if (Opd2Info == TargetTransformInfo::OK_UniformConstantValue) {
874       auto VT = TLI->getValueType(DL, Ty);
875       if (TLI->isOperationLegalOrCustom(ISD::MULHU, VT)) {
876         // Vector signed division by a constant is expanded to the
877         // sequence MULHS + ADD/SUB + SRA + SRL + ADD, and unsigned division
878         // to MULHS + SUB + SRL + ADD + SRL.
879         InstructionCost MulCost = getArithmeticInstrCost(
880             Instruction::Mul, Ty, CostKind, Opd1Info, Opd2Info,
881             TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
882         InstructionCost AddCost = getArithmeticInstrCost(
883             Instruction::Add, Ty, CostKind, Opd1Info, Opd2Info,
884             TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
885         InstructionCost ShrCost = getArithmeticInstrCost(
886             Instruction::AShr, Ty, CostKind, Opd1Info, Opd2Info,
887             TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
888         return MulCost * 2 + AddCost * 2 + ShrCost * 2 + 1;
889       }
890     }
891 
892     Cost += BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
893                                           Opd2Info,
894                                           Opd1PropInfo, Opd2PropInfo);
895     if (Ty->isVectorTy()) {
896       // On AArch64, vector divisions are not supported natively and are
897       // expanded into scalar divisions of each pair of elements.
898       Cost += getArithmeticInstrCost(Instruction::ExtractElement, Ty, CostKind,
899                                      Opd1Info, Opd2Info, Opd1PropInfo,
900                                      Opd2PropInfo);
901       Cost += getArithmeticInstrCost(Instruction::InsertElement, Ty, CostKind,
902                                      Opd1Info, Opd2Info, Opd1PropInfo,
903                                      Opd2PropInfo);
904       // TODO: if one of the arguments is scalar, then it's not necessary to
905       // double the cost of handling the vector elements.
906       Cost += Cost;
907     }
908     return Cost;
909 
910   case ISD::MUL:
911     if (LT.second != MVT::v2i64)
912       return (Cost + 1) * LT.first;
913     // Since we do not have a MUL.2d instruction, a mul <2 x i64> is expensive
914     // as elements are extracted from the vectors and the muls scalarized.
915     // As getScalarizationOverhead is a bit too pessimistic, we estimate the
916     // cost for a i64 vector directly here, which is:
917     // - four i64 extracts,
918     // - two i64 inserts, and
919     // - two muls.
920     // So, for a v2i64 with LT.first = 1 the cost is 8, and for a v4i64 with
921     // LT.first = 2 the cost is 16.
922     return LT.first * 8;
923   case ISD::ADD:
924   case ISD::XOR:
925   case ISD::OR:
926   case ISD::AND:
927     // These nodes are marked as 'custom' for combining purposes only.
928     // We know that they are legal. See LowerAdd in ISelLowering.
929     return (Cost + 1) * LT.first;
930 
931   case ISD::FADD:
932     // These nodes are marked as 'custom' just to lower them to SVE.
933     // We know said lowering will incur no additional cost.
934     if (isa<FixedVectorType>(Ty) && !Ty->getScalarType()->isFP128Ty())
935       return (Cost + 2) * LT.first;
936 
937     return Cost + BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
938                                                 Opd2Info,
939                                                 Opd1PropInfo, Opd2PropInfo);
940   }
941 }
942 
943 int AArch64TTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
944                                               const SCEV *Ptr) {
945   // Address computations in vectorized code with non-consecutive addresses will
946   // likely result in more instructions compared to scalar code where the
947   // computation can more often be merged into the index mode. The resulting
948   // extra micro-ops can significantly decrease throughput.
949   unsigned NumVectorInstToHideOverhead = 10;
950   int MaxMergeDistance = 64;
951 
952   if (Ty->isVectorTy() && SE &&
953       !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
954     return NumVectorInstToHideOverhead;
955 
956   // In many cases the address computation is not merged into the instruction
957   // addressing mode.
958   return 1;
959 }
960 
961 InstructionCost AArch64TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
962                                                    Type *CondTy,
963                                                    CmpInst::Predicate VecPred,
964                                                    TTI::TargetCostKind CostKind,
965                                                    const Instruction *I) {
966   // TODO: Handle other cost kinds.
967   if (CostKind != TTI::TCK_RecipThroughput)
968     return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
969                                      I);
970 
971   int ISD = TLI->InstructionOpcodeToISD(Opcode);
972   // We don't lower vector selects that are wider than the register width
973   // particularly well.
974   if (isa<FixedVectorType>(ValTy) && ISD == ISD::SELECT) {
975     // We would need this many instructions to hide the scalarization happening.
976     const int AmortizationCost = 20;
977 
978     // If VecPred is not set, check if we can get a predicate from the context
979     // instruction, if its type matches the requested ValTy.
980     if (VecPred == CmpInst::BAD_ICMP_PREDICATE && I && I->getType() == ValTy) {
981       CmpInst::Predicate CurrentPred;
982       if (match(I, m_Select(m_Cmp(CurrentPred, m_Value(), m_Value()), m_Value(),
983                             m_Value())))
984         VecPred = CurrentPred;
985     }
986     // Check if we have a compare/select chain that can be lowered using CMxx &
987     // BFI pair.
988     if (CmpInst::isIntPredicate(VecPred)) {
989       static const auto ValidMinMaxTys = {MVT::v8i8,  MVT::v16i8, MVT::v4i16,
990                                           MVT::v8i16, MVT::v2i32, MVT::v4i32,
991                                           MVT::v2i64};
992       auto LT = TLI->getTypeLegalizationCost(DL, ValTy);
993       if (any_of(ValidMinMaxTys, [&LT](MVT M) { return M == LT.second; }))
994         return LT.first;
995     }
996 
997     static const TypeConversionCostTblEntry
998     VectorSelectTbl[] = {
999       { ISD::SELECT, MVT::v16i1, MVT::v16i16, 16 },
1000       { ISD::SELECT, MVT::v8i1, MVT::v8i32, 8 },
1001       { ISD::SELECT, MVT::v16i1, MVT::v16i32, 16 },
1002       { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4 * AmortizationCost },
1003       { ISD::SELECT, MVT::v8i1, MVT::v8i64, 8 * AmortizationCost },
1004       { ISD::SELECT, MVT::v16i1, MVT::v16i64, 16 * AmortizationCost }
1005     };
1006 
1007     EVT SelCondTy = TLI->getValueType(DL, CondTy);
1008     EVT SelValTy = TLI->getValueType(DL, ValTy);
1009     if (SelCondTy.isSimple() && SelValTy.isSimple()) {
1010       if (const auto *Entry = ConvertCostTableLookup(VectorSelectTbl, ISD,
1011                                                      SelCondTy.getSimpleVT(),
1012                                                      SelValTy.getSimpleVT()))
1013         return Entry->Cost;
1014     }
1015   }
1016   // The base case handles scalable vectors fine for now, since it treats the
1017   // cost as 1 * legalization cost.
1018   return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
1019 }
1020 
1021 AArch64TTIImpl::TTI::MemCmpExpansionOptions
1022 AArch64TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
1023   TTI::MemCmpExpansionOptions Options;
1024   if (ST->requiresStrictAlign()) {
1025     // TODO: Add cost modeling for strict align. Misaligned loads expand to
1026     // a bunch of instructions when strict align is enabled.
1027     return Options;
1028   }
1029   Options.AllowOverlappingLoads = true;
1030   Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
1031   Options.NumLoadsPerBlock = Options.MaxNumLoads;
1032   // TODO: Though vector loads usually perform well on AArch64, on some targets
1033   // they may wake up the FP unit, which raises the power consumption.  Perhaps
1034   // they could be used with no holds barred (-O3).
1035   Options.LoadSizes = {8, 4, 2, 1};
1036   return Options;
1037 }
1038 
1039 InstructionCost AArch64TTIImpl::getGatherScatterOpCost(
1040     unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
1041     Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) {
1042 
1043   if (!isa<ScalableVectorType>(DataTy))
1044     return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
1045                                          Alignment, CostKind, I);
1046   auto *VT = cast<VectorType>(DataTy);
1047   auto LT = TLI->getTypeLegalizationCost(DL, DataTy);
1048   ElementCount LegalVF = LT.second.getVectorElementCount();
1049   Optional<unsigned> MaxNumVScale = getMaxVScale();
1050   assert(MaxNumVScale && "Expected valid max vscale value");
1051 
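  // Conservatively model the gather/scatter as one scalar memory access per
  // element of the largest possible legal vector, i.e. the known minimum
  // element count scaled by the maximum vscale.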
1052   InstructionCost MemOpCost =
1053       getMemoryOpCost(Opcode, VT->getElementType(), Alignment, 0, CostKind, I);
1054   unsigned MaxNumElementsPerGather =
1055       MaxNumVScale.getValue() * LegalVF.getKnownMinValue();
1056   return LT.first * MaxNumElementsPerGather * MemOpCost;
1057 }
1058 
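// Fixed-length vectors are handled with NEON unless the subtarget prefers to
// lower them with SVE.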
1059 bool AArch64TTIImpl::useNeonVector(const Type *Ty) const {
1060   return isa<FixedVectorType>(Ty) && !ST->useSVEForFixedLengthVectors();
1061 }
1062 
1063 InstructionCost AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Ty,
1064                                                 MaybeAlign Alignment,
1065                                                 unsigned AddressSpace,
1066                                                 TTI::TargetCostKind CostKind,
1067                                                 const Instruction *I) {
1068   // Type legalization can't handle structs
1069   if (TLI->getValueType(DL, Ty,  true) == MVT::Other)
1070     return BaseT::getMemoryOpCost(Opcode, Ty, Alignment, AddressSpace,
1071                                   CostKind);
1072 
1073   auto LT = TLI->getTypeLegalizationCost(DL, Ty);
1074 
1075   // TODO: consider latency as well for TCK_SizeAndLatency.
1076   if (CostKind == TTI::TCK_CodeSize || CostKind == TTI::TCK_SizeAndLatency)
1077     return LT.first;
1078 
1079   if (CostKind != TTI::TCK_RecipThroughput)
1080     return 1;
1081 
1082   if (ST->isMisaligned128StoreSlow() && Opcode == Instruction::Store &&
1083       LT.second.is128BitVector() && (!Alignment || *Alignment < Align(16))) {
1084     // Unaligned stores are extremely inefficient. We don't split all
1085     // unaligned 128-bit stores because of the negative impact that has been
1086     // shown in practice on inlined block copy code.
1087     // We make such stores expensive so that we will only vectorize if there
1088     // are 6 other instructions getting vectorized.
1089     const int AmortizationCost = 6;
1090 
1091     return LT.first * 2 * AmortizationCost;
1092   }
1093 
1094   if (useNeonVector(Ty) &&
1095       cast<VectorType>(Ty)->getElementType()->isIntegerTy(8)) {
1096     unsigned ProfitableNumElements;
1097     if (Opcode == Instruction::Store)
1098       // We use a custom trunc store lowering so v.4b should be profitable.
1099       ProfitableNumElements = 4;
1100     else
1101       // We scalarize the loads because there is no v.4b register and we
1102       // have to promote the elements to v.2.
1103       ProfitableNumElements = 8;
1104 
1105     if (cast<FixedVectorType>(Ty)->getNumElements() < ProfitableNumElements) {
1106       unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
1107       unsigned NumVectorizableInstsToAmortize = NumVecElts * 2;
1108       // We generate 2 instructions per vector element.
1109       return NumVectorizableInstsToAmortize * NumVecElts * 2;
1110     }
1111   }
1112 
1113   return LT.first;
1114 }
1115 
1116 InstructionCost AArch64TTIImpl::getInterleavedMemoryOpCost(
1117     unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1118     Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
1119     bool UseMaskForCond, bool UseMaskForGaps) {
1120   assert(Factor >= 2 && "Invalid interleave factor");
1121   auto *VecVTy = cast<FixedVectorType>(VecTy);
1122 
1123   if (!UseMaskForCond && !UseMaskForGaps &&
1124       Factor <= TLI->getMaxSupportedInterleaveFactor()) {
1125     unsigned NumElts = VecVTy->getNumElements();
1126     auto *SubVecTy =
1127         FixedVectorType::get(VecTy->getScalarType(), NumElts / Factor);
1128 
1129     // ldN/stN only support legal vector types of size 64 or 128 in bits.
1130     // Accesses having vector types that are a multiple of 128 bits can be
1131     // matched to more than one ldN/stN instruction.
1132     if (NumElts % Factor == 0 &&
1133         TLI->isLegalInterleavedAccessType(SubVecTy, DL))
1134       return Factor * TLI->getNumInterleavedAccesses(SubVecTy, DL);
1135   }
1136 
1137   return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
1138                                            Alignment, AddressSpace, CostKind,
1139                                            UseMaskForCond, UseMaskForGaps);
1140 }
1141 
1142 int AArch64TTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) {
1143   InstructionCost Cost = 0;
1144   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
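  // Assume each 128-bit vector value that is live across the call is spilled
  // and reloaded, so charge one store plus one load for it.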
1145   for (auto *I : Tys) {
1146     if (!I->isVectorTy())
1147       continue;
1148     if (I->getScalarSizeInBits() * cast<FixedVectorType>(I)->getNumElements() ==
1149         128)
1150       Cost += getMemoryOpCost(Instruction::Store, I, Align(128), 0, CostKind) +
1151               getMemoryOpCost(Instruction::Load, I, Align(128), 0, CostKind);
1152   }
1153   return *Cost.getValue();
1154 }
1155 
1156 unsigned AArch64TTIImpl::getMaxInterleaveFactor(unsigned VF) {
1157   return ST->getMaxInterleaveFactor();
1158 }
1159 
1160 // For Falkor, we want to avoid having too many strided loads in a loop since
1161 // that can exhaust the HW prefetcher resources.  We adjust the unroller
1162 // MaxCount preference below to attempt to ensure unrolling doesn't create too
1163 // many strided loads.
1164 static void
1165 getFalkorUnrollingPreferences(Loop *L, ScalarEvolution &SE,
1166                               TargetTransformInfo::UnrollingPreferences &UP) {
1167   enum { MaxStridedLoads = 7 };
1168   auto countStridedLoads = [](Loop *L, ScalarEvolution &SE) {
1169     int StridedLoads = 0;
1170     // FIXME? We could make this more precise by looking at the CFG and
1171     // e.g. not counting loads in each side of an if-then-else diamond.
1172     for (const auto BB : L->blocks()) {
1173       for (auto &I : *BB) {
1174         LoadInst *LMemI = dyn_cast<LoadInst>(&I);
1175         if (!LMemI)
1176           continue;
1177 
1178         Value *PtrValue = LMemI->getPointerOperand();
1179         if (L->isLoopInvariant(PtrValue))
1180           continue;
1181 
1182         const SCEV *LSCEV = SE.getSCEV(PtrValue);
1183         const SCEVAddRecExpr *LSCEVAddRec = dyn_cast<SCEVAddRecExpr>(LSCEV);
1184         if (!LSCEVAddRec || !LSCEVAddRec->isAffine())
1185           continue;
1186 
1187         // FIXME? We could take pairing of unrolled load copies into account
1188         // by looking at the AddRec, but we would probably have to limit this
1189         // to loops with no stores or other memory optimization barriers.
1190         ++StridedLoads;
1191         // We've seen enough strided loads that seeing more won't make a
1192         // difference.
1193         if (StridedLoads > MaxStridedLoads / 2)
1194           return StridedLoads;
1195       }
1196     }
1197     return StridedLoads;
1198   };
1199 
1200   int StridedLoads = countStridedLoads(L, SE);
1201   LLVM_DEBUG(dbgs() << "falkor-hwpf: detected " << StridedLoads
1202                     << " strided loads\n");
1203   // Pick the largest power of 2 unroll count that won't result in too many
1204   // strided loads.
1205   if (StridedLoads) {
1206     UP.MaxCount = 1 << Log2_32(MaxStridedLoads / StridedLoads);
1207     LLVM_DEBUG(dbgs() << "falkor-hwpf: setting unroll MaxCount to "
1208                       << UP.MaxCount << '\n');
1209   }
1210 }
1211 
1212 void AArch64TTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
1213                                              TTI::UnrollingPreferences &UP) {
1214   // Enable partial unrolling and runtime unrolling.
1215   BaseT::getUnrollingPreferences(L, SE, UP);
1216 
1217   // An inner loop is more likely to be hot, and its runtime checks can be
1218   // hoisted out by the LICM pass, so the overhead is lower; use a larger
1219   // threshold to unroll more loops.
1220   if (L->getLoopDepth() > 1)
1221     UP.PartialThreshold *= 2;
1222 
1223   // Disable partial & runtime unrolling on -Os.
1224   UP.PartialOptSizeThreshold = 0;
1225 
1226   if (ST->getProcFamily() == AArch64Subtarget::Falkor &&
1227       EnableFalkorHWPFUnrollFix)
1228     getFalkorUnrollingPreferences(L, SE, UP);
1229 }
1230 
1231 void AArch64TTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
1232                                            TTI::PeelingPreferences &PP) {
1233   BaseT::getPeelingPreferences(L, SE, PP);
1234 }
1235 
1236 Value *AArch64TTIImpl::getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
1237                                                          Type *ExpectedType) {
1238   switch (Inst->getIntrinsicID()) {
1239   default:
1240     return nullptr;
1241   case Intrinsic::aarch64_neon_st2:
1242   case Intrinsic::aarch64_neon_st3:
1243   case Intrinsic::aarch64_neon_st4: {
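    // For stN stores the in-register equivalent is a struct of the stored
    // vectors; rebuild it from the intrinsic's vector operands (the final
    // operand is the pointer).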
1244     // Create a struct type
1245     StructType *ST = dyn_cast<StructType>(ExpectedType);
1246     if (!ST)
1247       return nullptr;
1248     unsigned NumElts = Inst->getNumArgOperands() - 1;
1249     if (ST->getNumElements() != NumElts)
1250       return nullptr;
1251     for (unsigned i = 0, e = NumElts; i != e; ++i) {
1252       if (Inst->getArgOperand(i)->getType() != ST->getElementType(i))
1253         return nullptr;
1254     }
1255     Value *Res = UndefValue::get(ExpectedType);
1256     IRBuilder<> Builder(Inst);
1257     for (unsigned i = 0, e = NumElts; i != e; ++i) {
1258       Value *L = Inst->getArgOperand(i);
1259       Res = Builder.CreateInsertValue(Res, L, i);
1260     }
1261     return Res;
1262   }
1263   case Intrinsic::aarch64_neon_ld2:
1264   case Intrinsic::aarch64_neon_ld3:
1265   case Intrinsic::aarch64_neon_ld4:
1266     if (Inst->getType() == ExpectedType)
1267       return Inst;
1268     return nullptr;
1269   }
1270 }
1271 
1272 bool AArch64TTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
1273                                         MemIntrinsicInfo &Info) {
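  // The pointer is the first argument of the ld2/ld3/ld4 intrinsics and the
  // last argument of the st2/st3/st4 intrinsics, after the data operands.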
1274   switch (Inst->getIntrinsicID()) {
1275   default:
1276     break;
1277   case Intrinsic::aarch64_neon_ld2:
1278   case Intrinsic::aarch64_neon_ld3:
1279   case Intrinsic::aarch64_neon_ld4:
1280     Info.ReadMem = true;
1281     Info.WriteMem = false;
1282     Info.PtrVal = Inst->getArgOperand(0);
1283     break;
1284   case Intrinsic::aarch64_neon_st2:
1285   case Intrinsic::aarch64_neon_st3:
1286   case Intrinsic::aarch64_neon_st4:
1287     Info.ReadMem = false;
1288     Info.WriteMem = true;
1289     Info.PtrVal = Inst->getArgOperand(Inst->getNumArgOperands() - 1);
1290     break;
1291   }
1292 
1293   switch (Inst->getIntrinsicID()) {
1294   default:
1295     return false;
1296   case Intrinsic::aarch64_neon_ld2:
1297   case Intrinsic::aarch64_neon_st2:
1298     Info.MatchingId = VECTOR_LDST_TWO_ELEMENTS;
1299     break;
1300   case Intrinsic::aarch64_neon_ld3:
1301   case Intrinsic::aarch64_neon_st3:
1302     Info.MatchingId = VECTOR_LDST_THREE_ELEMENTS;
1303     break;
1304   case Intrinsic::aarch64_neon_ld4:
1305   case Intrinsic::aarch64_neon_st4:
1306     Info.MatchingId = VECTOR_LDST_FOUR_ELEMENTS;
1307     break;
1308   }
1309   return true;
1310 }
1311 
/// See if \p I should be considered for address type promotion. We check if \p
/// I is a sext with the right type that is used in memory accesses. If it is
/// used in a "complex" getelementptr, we allow it to be promoted without
/// finding other sext instructions that sign extended the same initial value.
/// A getelementptr is considered "complex" if it has more than 2 operands.
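/// For example (illustrative IR), in
///   %idxprom = sext i32 %i to i64
///   %arrayidx = getelementptr inbounds [256 x i32], [256 x i32]* %base,
///                                      i64 0, i64 %idxprom
/// the getelementptr has more than 2 operands, so the sext may be promoted
/// without requiring a common header.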
1317 bool AArch64TTIImpl::shouldConsiderAddressTypePromotion(
1318     const Instruction &I, bool &AllowPromotionWithoutCommonHeader) {
1319   bool Considerable = false;
1320   AllowPromotionWithoutCommonHeader = false;
1321   if (!isa<SExtInst>(&I))
1322     return false;
1323   Type *ConsideredSExtType =
1324       Type::getInt64Ty(I.getParent()->getParent()->getContext());
1325   if (I.getType() != ConsideredSExtType)
1326     return false;
  // See if the sext is the one with the right type and is used in at least
  // one GetElementPtrInst.
1329   for (const User *U : I.users()) {
1330     if (const GetElementPtrInst *GEPInst = dyn_cast<GetElementPtrInst>(U)) {
1331       Considerable = true;
      // A getelementptr is considered "complex" if it has more than 2
      // operands. We will promote a SExt used in such a complex GEP as we
      // expect some computation to be merged if it is done on 64 bits.
1335       if (GEPInst->getNumOperands() > 2) {
1336         AllowPromotionWithoutCommonHeader = true;
1337         break;
1338       }
1339     }
1340   }
1341   return Considerable;
1342 }
1343 
1344 bool AArch64TTIImpl::isLegalToVectorizeReduction(RecurrenceDescriptor RdxDesc,
1345                                                  ElementCount VF) const {
1346   if (!VF.isScalable())
1347     return true;
1348 
1349   Type *Ty = RdxDesc.getRecurrenceType();
1350   if (Ty->isBFloatTy() || !isLegalElementTypeForSVE(Ty))
1351     return false;
1352 
1353   switch (RdxDesc.getRecurrenceKind()) {
1354   case RecurKind::Add:
1355   case RecurKind::FAdd:
1356   case RecurKind::And:
1357   case RecurKind::Or:
1358   case RecurKind::Xor:
1359   case RecurKind::SMin:
1360   case RecurKind::SMax:
1361   case RecurKind::UMin:
1362   case RecurKind::UMax:
1363   case RecurKind::FMin:
1364   case RecurKind::FMax:
1365     return true;
1366   default:
1367     return false;
1368   }
1369 }
1370 
1371 InstructionCost
1372 AArch64TTIImpl::getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy,
1373                                        bool IsPairwise, bool IsUnsigned,
1374                                        TTI::TargetCostKind CostKind) {
1375   if (!isa<ScalableVectorType>(Ty))
1376     return BaseT::getMinMaxReductionCost(Ty, CondTy, IsPairwise, IsUnsigned,
1377                                          CostKind);
  assert((isa<ScalableVectorType>(Ty) && isa<ScalableVectorType>(CondTy)) &&
         "Both vectors need to be scalable");
1380 
1381   std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
1382   InstructionCost LegalizationCost = 0;
1383   if (LT.first > 1) {
1384     Type *LegalVTy = EVT(LT.second).getTypeForEVT(Ty->getContext());
1385     unsigned CmpOpcode =
1386         Ty->isFPOrFPVectorTy() ? Instruction::FCmp : Instruction::ICmp;
1387     LegalizationCost =
1388         getCmpSelInstrCost(CmpOpcode, LegalVTy, LegalVTy,
1389                            CmpInst::BAD_ICMP_PREDICATE, CostKind) +
1390         getCmpSelInstrCost(Instruction::Select, LegalVTy, LegalVTy,
1391                            CmpInst::BAD_ICMP_PREDICATE, CostKind);
1392     LegalizationCost *= LT.first - 1;
1393   }
1394 
1395   return LegalizationCost + /*Cost of horizontal reduction*/ 2;
1396 }
1397 
1398 InstructionCost AArch64TTIImpl::getArithmeticReductionCostSVE(
1399     unsigned Opcode, VectorType *ValTy, bool IsPairwise,
1400     TTI::TargetCostKind CostKind) {
  assert(!IsPairwise && "Pairwise reductions are not supported");
1402 
1403   std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
1404   InstructionCost LegalizationCost = 0;
1405   if (LT.first > 1) {
1406     Type *LegalVTy = EVT(LT.second).getTypeForEVT(ValTy->getContext());
1407     LegalizationCost = getArithmeticInstrCost(Opcode, LegalVTy, CostKind);
1408     LegalizationCost *= LT.first - 1;
1409   }
1410 
1411   int ISD = TLI->InstructionOpcodeToISD(Opcode);
1412   assert(ISD && "Invalid opcode");
1413   // Add the final reduction cost for the legal horizontal reduction
1414   switch (ISD) {
1415   case ISD::ADD:
1416   case ISD::AND:
1417   case ISD::OR:
1418   case ISD::XOR:
1419   case ISD::FADD:
1420     return LegalizationCost + 2;
1421   default:
1422     return InstructionCost::getInvalid();
1423   }
1424 }
1425 
1426 InstructionCost
1427 AArch64TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
1428                                            bool IsPairwiseForm,
1429                                            TTI::TargetCostKind CostKind) {
1430 
1431   if (isa<ScalableVectorType>(ValTy))
1432     return getArithmeticReductionCostSVE(Opcode, ValTy, IsPairwiseForm,
1433                                          CostKind);
1434   if (IsPairwiseForm)
1435     return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwiseForm,
1436                                              CostKind);
1437 
1438   std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
1439   MVT MTy = LT.second;
1440   int ISD = TLI->InstructionOpcodeToISD(Opcode);
1441   assert(ISD && "Invalid opcode");
1442 
1443   // Horizontal adds can use the 'addv' instruction. We model the cost of these
1444   // instructions as normal vector adds. This is the only arithmetic vector
1445   // reduction operation for which we have an instruction.
1446   static const CostTblEntry CostTblNoPairwise[]{
1447       {ISD::ADD, MVT::v8i8,  1},
1448       {ISD::ADD, MVT::v16i8, 1},
1449       {ISD::ADD, MVT::v4i16, 1},
1450       {ISD::ADD, MVT::v8i16, 1},
1451       {ISD::ADD, MVT::v4i32, 1},
1452   };
1453 
1454   if (const auto *Entry = CostTableLookup(CostTblNoPairwise, ISD, MTy))
1455     return LT.first * Entry->Cost;
1456 
1457   return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwiseForm,
1458                                            CostKind);
1459 }
1460 
1461 InstructionCost AArch64TTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
1462                                                VectorType *Tp,
1463                                                ArrayRef<int> Mask, int Index,
1464                                                VectorType *SubTp) {
1465   if (Kind == TTI::SK_Broadcast || Kind == TTI::SK_Transpose ||
1466       Kind == TTI::SK_Select || Kind == TTI::SK_PermuteSingleSrc ||
1467       Kind == TTI::SK_Reverse) {
1468     static const CostTblEntry ShuffleTbl[] = {
1469       // Broadcast shuffle kinds can be performed with 'dup'.
1470       { TTI::SK_Broadcast, MVT::v8i8,  1 },
1471       { TTI::SK_Broadcast, MVT::v16i8, 1 },
1472       { TTI::SK_Broadcast, MVT::v4i16, 1 },
1473       { TTI::SK_Broadcast, MVT::v8i16, 1 },
1474       { TTI::SK_Broadcast, MVT::v2i32, 1 },
1475       { TTI::SK_Broadcast, MVT::v4i32, 1 },
1476       { TTI::SK_Broadcast, MVT::v2i64, 1 },
1477       { TTI::SK_Broadcast, MVT::v2f32, 1 },
1478       { TTI::SK_Broadcast, MVT::v4f32, 1 },
1479       { TTI::SK_Broadcast, MVT::v2f64, 1 },
1480       // Transpose shuffle kinds can be performed with 'trn1/trn2' and
1481       // 'zip1/zip2' instructions.
1482       { TTI::SK_Transpose, MVT::v8i8,  1 },
1483       { TTI::SK_Transpose, MVT::v16i8, 1 },
1484       { TTI::SK_Transpose, MVT::v4i16, 1 },
1485       { TTI::SK_Transpose, MVT::v8i16, 1 },
1486       { TTI::SK_Transpose, MVT::v2i32, 1 },
1487       { TTI::SK_Transpose, MVT::v4i32, 1 },
1488       { TTI::SK_Transpose, MVT::v2i64, 1 },
1489       { TTI::SK_Transpose, MVT::v2f32, 1 },
1490       { TTI::SK_Transpose, MVT::v4f32, 1 },
1491       { TTI::SK_Transpose, MVT::v2f64, 1 },
1492       // Select shuffle kinds.
1493       // TODO: handle vXi8/vXi16.
1494       { TTI::SK_Select, MVT::v2i32, 1 }, // mov.
1495       { TTI::SK_Select, MVT::v4i32, 2 }, // rev+trn (or similar).
1496       { TTI::SK_Select, MVT::v2i64, 1 }, // mov.
1497       { TTI::SK_Select, MVT::v2f32, 1 }, // mov.
1498       { TTI::SK_Select, MVT::v4f32, 2 }, // rev+trn (or similar).
1499       { TTI::SK_Select, MVT::v2f64, 1 }, // mov.
1500       // PermuteSingleSrc shuffle kinds.
1501       // TODO: handle vXi8/vXi16.
1502       { TTI::SK_PermuteSingleSrc, MVT::v2i32, 1 }, // mov.
1503       { TTI::SK_PermuteSingleSrc, MVT::v4i32, 3 }, // perfectshuffle worst case.
1504       { TTI::SK_PermuteSingleSrc, MVT::v2i64, 1 }, // mov.
1505       { TTI::SK_PermuteSingleSrc, MVT::v2f32, 1 }, // mov.
1506       { TTI::SK_PermuteSingleSrc, MVT::v4f32, 3 }, // perfectshuffle worst case.
1507       { TTI::SK_PermuteSingleSrc, MVT::v2f64, 1 }, // mov.
1508       // Reverse can be lowered with `rev`.
1509       { TTI::SK_Reverse, MVT::v2i32, 1 }, // mov.
1510       { TTI::SK_Reverse, MVT::v4i32, 2 }, // REV64; EXT
1511       { TTI::SK_Reverse, MVT::v2i64, 1 }, // mov.
1512       { TTI::SK_Reverse, MVT::v2f32, 1 }, // mov.
1513       { TTI::SK_Reverse, MVT::v4f32, 2 }, // REV64; EXT
1514       { TTI::SK_Reverse, MVT::v2f64, 1 }, // mov.
1515       // Broadcast shuffle kinds for scalable vectors
1516       { TTI::SK_Broadcast, MVT::nxv16i8,  1 },
1517       { TTI::SK_Broadcast, MVT::nxv8i16,  1 },
1518       { TTI::SK_Broadcast, MVT::nxv4i32,  1 },
1519       { TTI::SK_Broadcast, MVT::nxv2i64,  1 },
1520       { TTI::SK_Broadcast, MVT::nxv8f16,  1 },
1521       { TTI::SK_Broadcast, MVT::nxv8bf16, 1 },
1522       { TTI::SK_Broadcast, MVT::nxv4f32,  1 },
1523       { TTI::SK_Broadcast, MVT::nxv2f64,  1 },
1524       // Handle the cases for vector.reverse with scalable vectors
1525       { TTI::SK_Reverse, MVT::nxv16i8,  1 },
1526       { TTI::SK_Reverse, MVT::nxv8i16,  1 },
1527       { TTI::SK_Reverse, MVT::nxv4i32,  1 },
1528       { TTI::SK_Reverse, MVT::nxv2i64,  1 },
1529       { TTI::SK_Reverse, MVT::nxv8f16,  1 },
1530       { TTI::SK_Reverse, MVT::nxv8bf16, 1 },
1531       { TTI::SK_Reverse, MVT::nxv4f32,  1 },
1532       { TTI::SK_Reverse, MVT::nxv2f64,  1 },
1533       { TTI::SK_Reverse, MVT::nxv16i1,  1 },
1534       { TTI::SK_Reverse, MVT::nxv8i1,   1 },
1535       { TTI::SK_Reverse, MVT::nxv4i1,   1 },
1536       { TTI::SK_Reverse, MVT::nxv2i1,   1 },
1537     };
1538     std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
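    // The table cost is per legalized part; e.g. reversing <8 x i32>
    // legalizes into two v4i32 reverses at cost 2 each, for a total of 4.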
1539     if (const auto *Entry = CostTableLookup(ShuffleTbl, Kind, LT.second))
1540       return LT.first * Entry->Cost;
1541   }
1542 
1543   return BaseT::getShuffleCost(Kind, Tp, Mask, Index, SubTp);
1544 }
1545