1 //===-- AArch64TargetTransformInfo.cpp - AArch64 specific TTI -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "AArch64TargetTransformInfo.h"
10 #include "AArch64ExpandImm.h"
11 #include "MCTargetDesc/AArch64AddressingModes.h"
12 #include "llvm/Analysis/IVDescriptors.h"
13 #include "llvm/Analysis/LoopInfo.h"
14 #include "llvm/Analysis/TargetTransformInfo.h"
15 #include "llvm/CodeGen/BasicTTIImpl.h"
16 #include "llvm/CodeGen/CostTable.h"
17 #include "llvm/CodeGen/TargetLowering.h"
18 #include "llvm/IR/Intrinsics.h"
19 #include "llvm/IR/IntrinsicInst.h"
20 #include "llvm/IR/IntrinsicsAArch64.h"
21 #include "llvm/IR/PatternMatch.h"
22 #include "llvm/Support/Debug.h"
23 #include "llvm/Transforms/InstCombine/InstCombiner.h"
24 #include <algorithm>
25 using namespace llvm;
26 using namespace llvm::PatternMatch;
27 
28 #define DEBUG_TYPE "aarch64tti"
29 
30 static cl::opt<bool> EnableFalkorHWPFUnrollFix("enable-falkor-hwpf-unroll-fix",
31                                                cl::init(true), cl::Hidden);
32 
33 bool AArch64TTIImpl::areInlineCompatible(const Function *Caller,
34                                          const Function *Callee) const {
35   const TargetMachine &TM = getTLI()->getTargetMachine();
36 
37   const FeatureBitset &CallerBits =
38       TM.getSubtargetImpl(*Caller)->getFeatureBits();
39   const FeatureBitset &CalleeBits =
40       TM.getSubtargetImpl(*Callee)->getFeatureBits();
41 
42   // Inline a callee if its target-features are a subset of the caller's
43   // target-features.
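  // For example (illustrative): a callee built with just "+sve" can be
  // inlined into a caller built with "+sve,+bf16", but not the other way
  // around.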
44   return (CallerBits & CalleeBits) == CalleeBits;
45 }
46 
47 /// Calculate the cost of materializing a 64-bit value. This helper
48 /// method might only calculate a fraction of a larger immediate. Therefore it
49 /// is valid to return a cost of ZERO.
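/// For example (illustrative): a value that is a valid 64-bit logical
/// immediate, such as 0xFFFF0000FFFF0000, costs 0 here, while an arbitrary
/// 64-bit constant typically expands to one MOVZ plus up to three MOVKs and
/// so costs up to 4.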
50 InstructionCost AArch64TTIImpl::getIntImmCost(int64_t Val) {
51   // Check if the immediate can be encoded within an instruction.
52   if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, 64))
53     return 0;
54 
55   if (Val < 0)
56     Val = ~Val;
57 
58   // Calculate how many moves we will need to materialize this constant.
59   SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn;
60   AArch64_IMM::expandMOVImm(Val, 64, Insn);
61   return Insn.size();
62 }
63 
64 /// Calculate the cost of materializing the given constant.
65 InstructionCost AArch64TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
66                                               TTI::TargetCostKind CostKind) {
67   assert(Ty->isIntegerTy());
68 
69   unsigned BitSize = Ty->getPrimitiveSizeInBits();
70   if (BitSize == 0)
71     return ~0U;
72 
73   // Sign-extend all constants to a multiple of 64 bits.
74   APInt ImmVal = Imm;
75   if (BitSize & 0x3f)
76     ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);
77 
78   // Split the constant into 64-bit chunks and calculate the cost for each
79   // chunk.
80   InstructionCost Cost = 0;
81   for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
82     APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
83     int64_t Val = Tmp.getSExtValue();
84     Cost += getIntImmCost(Val);
85   }
86   // We need at least one instruction to materialize the constant.
87   return std::max<InstructionCost>(1, Cost);
88 }
89 
90 InstructionCost AArch64TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
91                                                   const APInt &Imm, Type *Ty,
92                                                   TTI::TargetCostKind CostKind,
93                                                   Instruction *Inst) {
94   assert(Ty->isIntegerTy());
95 
96   unsigned BitSize = Ty->getPrimitiveSizeInBits();
97   // There is no cost model for constants with a bit size of 0. Return TCC_Free
98   // here, so that constant hoisting will ignore this constant.
99   if (BitSize == 0)
100     return TTI::TCC_Free;
101 
102   unsigned ImmIdx = ~0U;
103   switch (Opcode) {
104   default:
105     return TTI::TCC_Free;
106   case Instruction::GetElementPtr:
107     // Always hoist the base address of a GetElementPtr.
108     if (Idx == 0)
109       return 2 * TTI::TCC_Basic;
110     return TTI::TCC_Free;
111   case Instruction::Store:
112     ImmIdx = 0;
113     break;
114   case Instruction::Add:
115   case Instruction::Sub:
116   case Instruction::Mul:
117   case Instruction::UDiv:
118   case Instruction::SDiv:
119   case Instruction::URem:
120   case Instruction::SRem:
121   case Instruction::And:
122   case Instruction::Or:
123   case Instruction::Xor:
124   case Instruction::ICmp:
125     ImmIdx = 1;
126     break;
127   // Always return TCC_Free for the shift value of a shift instruction.
128   case Instruction::Shl:
129   case Instruction::LShr:
130   case Instruction::AShr:
131     if (Idx == 1)
132       return TTI::TCC_Free;
133     break;
134   case Instruction::Trunc:
135   case Instruction::ZExt:
136   case Instruction::SExt:
137   case Instruction::IntToPtr:
138   case Instruction::PtrToInt:
139   case Instruction::BitCast:
140   case Instruction::PHI:
141   case Instruction::Call:
142   case Instruction::Select:
143   case Instruction::Ret:
144   case Instruction::Load:
145     break;
146   }
147 
148   if (Idx == ImmIdx) {
149     int NumConstants = (BitSize + 63) / 64;
150     InstructionCost Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
151     return (Cost <= NumConstants * TTI::TCC_Basic)
152                ? static_cast<int>(TTI::TCC_Free)
153                : Cost;
154   }
155   return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
156 }
157 
158 InstructionCost
159 AArch64TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
160                                     const APInt &Imm, Type *Ty,
161                                     TTI::TargetCostKind CostKind) {
162   assert(Ty->isIntegerTy());
163 
164   unsigned BitSize = Ty->getPrimitiveSizeInBits();
165   // There is no cost model for constants with a bit size of 0. Return TCC_Free
166   // here, so that constant hoisting will ignore this constant.
167   if (BitSize == 0)
168     return TTI::TCC_Free;
169 
170   // Most (all?) AArch64 intrinsics do not support folding immediates into the
171   // selected instruction, so we compute the materialization cost for the
172   // immediate directly.
173   if (IID >= Intrinsic::aarch64_addg && IID <= Intrinsic::aarch64_udiv)
174     return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
175 
176   switch (IID) {
177   default:
178     return TTI::TCC_Free;
179   case Intrinsic::sadd_with_overflow:
180   case Intrinsic::uadd_with_overflow:
181   case Intrinsic::ssub_with_overflow:
182   case Intrinsic::usub_with_overflow:
183   case Intrinsic::smul_with_overflow:
184   case Intrinsic::umul_with_overflow:
185     if (Idx == 1) {
186       int NumConstants = (BitSize + 63) / 64;
187       InstructionCost Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
188       return (Cost <= NumConstants * TTI::TCC_Basic)
189                  ? static_cast<int>(TTI::TCC_Free)
190                  : Cost;
191     }
192     break;
193   case Intrinsic::experimental_stackmap:
194     if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
195       return TTI::TCC_Free;
196     break;
197   case Intrinsic::experimental_patchpoint_void:
198   case Intrinsic::experimental_patchpoint_i64:
199     if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
200       return TTI::TCC_Free;
201     break;
202   case Intrinsic::experimental_gc_statepoint:
203     if ((Idx < 5) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
204       return TTI::TCC_Free;
205     break;
206   }
207   return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
208 }
209 
210 TargetTransformInfo::PopcntSupportKind
211 AArch64TTIImpl::getPopcntSupport(unsigned TyWidth) {
212   assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
213   if (TyWidth == 32 || TyWidth == 64)
214     return TTI::PSK_FastHardware;
215   // TODO: AArch64TargetLowering::LowerCTPOP() supports 128-bit popcount.
216   return TTI::PSK_Software;
217 }
218 
219 InstructionCost
220 AArch64TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
221                                       TTI::TargetCostKind CostKind) {
222   auto *RetTy = ICA.getReturnType();
223   switch (ICA.getID()) {
224   case Intrinsic::umin:
225   case Intrinsic::umax:
226   case Intrinsic::smin:
227   case Intrinsic::smax: {
228     static const auto ValidMinMaxTys = {MVT::v8i8,  MVT::v16i8, MVT::v4i16,
229                                         MVT::v8i16, MVT::v2i32, MVT::v4i32};
230     auto LT = TLI->getTypeLegalizationCost(DL, RetTy);
231     // v2i64 types get converted to cmp+bif, hence the cost of 2.
232     if (LT.second == MVT::v2i64)
233       return LT.first * 2;
234     if (any_of(ValidMinMaxTys, [&LT](MVT M) { return M == LT.second; }))
235       return LT.first;
236     break;
237   }
238   case Intrinsic::sadd_sat:
239   case Intrinsic::ssub_sat:
240   case Intrinsic::uadd_sat:
241   case Intrinsic::usub_sat: {
242     static const auto ValidSatTys = {MVT::v8i8,  MVT::v16i8, MVT::v4i16,
243                                      MVT::v8i16, MVT::v2i32, MVT::v4i32,
244                                      MVT::v2i64};
245     auto LT = TLI->getTypeLegalizationCost(DL, RetTy);
246     // This is a base cost of 1 for the qadd, plus 3 extra shifts if we
247     // need to extend the type, as it uses shr(qadd(shl, shl)).
248     unsigned Instrs =
249         LT.second.getScalarSizeInBits() == RetTy->getScalarSizeInBits() ? 1 : 4;
250     if (any_of(ValidSatTys, [&LT](MVT M) { return M == LT.second; }))
251       return LT.first * Instrs;
252     break;
253   }
254   case Intrinsic::abs: {
255     static const auto ValidAbsTys = {MVT::v8i8,  MVT::v16i8, MVT::v4i16,
256                                      MVT::v8i16, MVT::v2i32, MVT::v4i32,
257                                      MVT::v2i64};
258     auto LT = TLI->getTypeLegalizationCost(DL, RetTy);
259     if (any_of(ValidAbsTys, [&LT](MVT M) { return M == LT.second; }))
260       return LT.first;
261     break;
262   }
263   case Intrinsic::experimental_stepvector: {
264     InstructionCost Cost = 1; // Cost of the `index' instruction
265     auto LT = TLI->getTypeLegalizationCost(DL, RetTy);
266     // Legalisation of illegal vectors involves an `index' instruction plus
267     // (LT.first - 1) vector adds.
268     if (LT.first > 1) {
269       Type *LegalVTy = EVT(LT.second).getTypeForEVT(RetTy->getContext());
270       InstructionCost AddCost =
271           getArithmeticInstrCost(Instruction::Add, LegalVTy, CostKind);
272       Cost += AddCost * (LT.first - 1);
273     }
274     return Cost;
275   }
276   case Intrinsic::bitreverse: {
277     static const CostTblEntry BitreverseTbl[] = {
278         {Intrinsic::bitreverse, MVT::i32, 1},
279         {Intrinsic::bitreverse, MVT::i64, 1},
280         {Intrinsic::bitreverse, MVT::v8i8, 1},
281         {Intrinsic::bitreverse, MVT::v16i8, 1},
282         {Intrinsic::bitreverse, MVT::v4i16, 2},
283         {Intrinsic::bitreverse, MVT::v8i16, 2},
284         {Intrinsic::bitreverse, MVT::v2i32, 2},
285         {Intrinsic::bitreverse, MVT::v4i32, 2},
286         {Intrinsic::bitreverse, MVT::v1i64, 2},
287         {Intrinsic::bitreverse, MVT::v2i64, 2},
288     };
289     const auto LegalisationCost = TLI->getTypeLegalizationCost(DL, RetTy);
290     const auto *Entry =
291         CostTableLookup(BitreverseTbl, ICA.getID(), LegalisationCost.second);
292     if (Entry) {
293       // The cost model uses the legal type (i32) that i8 and i16 are
294       // promoted to, plus 1 so that we match the actual lowering cost.
295       if (TLI->getValueType(DL, RetTy, true) == MVT::i8 ||
296           TLI->getValueType(DL, RetTy, true) == MVT::i16)
297         return LegalisationCost.first * Entry->Cost + 1;
298 
299       return LegalisationCost.first * Entry->Cost;
300     }
301     break;
302   }
303   case Intrinsic::ctpop: {
304     static const CostTblEntry CtpopCostTbl[] = {
305         {ISD::CTPOP, MVT::v2i64, 4},
306         {ISD::CTPOP, MVT::v4i32, 3},
307         {ISD::CTPOP, MVT::v8i16, 2},
308         {ISD::CTPOP, MVT::v16i8, 1},
309         {ISD::CTPOP, MVT::i64,   4},
310         {ISD::CTPOP, MVT::v2i32, 3},
311         {ISD::CTPOP, MVT::v4i16, 2},
312         {ISD::CTPOP, MVT::v8i8,  1},
313         {ISD::CTPOP, MVT::i32,   5},
314     };
315     auto LT = TLI->getTypeLegalizationCost(DL, RetTy);
316     MVT MTy = LT.second;
317     if (const auto *Entry = CostTableLookup(CtpopCostTbl, ISD::CTPOP, MTy)) {
318       // Extra cost of +1 when illegal vector types are legalized by promoting
319       // the integer type.
320       int ExtraCost = MTy.isVector() && MTy.getScalarSizeInBits() !=
321                                             RetTy->getScalarSizeInBits()
322                           ? 1
323                           : 0;
324       return LT.first * Entry->Cost + ExtraCost;
325     }
326     break;
327   }
328   default:
329     break;
330   }
331   return BaseT::getIntrinsicInstrCost(ICA, CostKind);
332 }
333 
334 /// The function removes redundant reinterpret casts in the presence of
335 /// control flow.
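/// For example (illustrative IR): if %b is
///   phi <vscale x 16 x i1> [ %a1.sv, %bb1 ], [ %a2.sv, %bb2 ]
/// where each incoming value is a convert.to.svbool of a <vscale x 4 x i1>,
/// then a convert.from.svbool(%b) back to <vscale x 4 x i1> can instead phi
/// the original <vscale x 4 x i1> values, and the conversions become dead.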
336 static Optional<Instruction *> processPhiNode(InstCombiner &IC,
337                                               IntrinsicInst &II) {
338   SmallVector<Instruction *, 32> Worklist;
339   auto RequiredType = II.getType();
340 
341   auto *PN = dyn_cast<PHINode>(II.getArgOperand(0));
342   assert(PN && "Expected Phi Node!");
343 
344   // Don't create a new Phi unless we can remove the old one.
345   if (!PN->hasOneUse())
346     return None;
347 
348   for (Value *IncValPhi : PN->incoming_values()) {
349     auto *Reinterpret = dyn_cast<IntrinsicInst>(IncValPhi);
350     if (!Reinterpret ||
351         Reinterpret->getIntrinsicID() !=
352             Intrinsic::aarch64_sve_convert_to_svbool ||
353         RequiredType != Reinterpret->getArgOperand(0)->getType())
354       return None;
355   }
356 
357   // Create the new Phi
358   LLVMContext &Ctx = PN->getContext();
359   IRBuilder<> Builder(Ctx);
360   Builder.SetInsertPoint(PN);
361   PHINode *NPN = Builder.CreatePHI(RequiredType, PN->getNumIncomingValues());
362   Worklist.push_back(PN);
363 
364   for (unsigned I = 0; I < PN->getNumIncomingValues(); I++) {
365     auto *Reinterpret = cast<Instruction>(PN->getIncomingValue(I));
366     NPN->addIncoming(Reinterpret->getOperand(0), PN->getIncomingBlock(I));
367     Worklist.push_back(Reinterpret);
368   }
369 
370   // Cleanup Phi Node and reinterprets
371   return IC.replaceInstUsesWith(II, NPN);
372 }
373 
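// Simplify a chain of to/from svbool conversions, e.g. (illustrative):
//   %sv = convert.to.svbool(<vscale x 4 x i1> %p)
//   %r  = convert.from.svbool(%sv) to <vscale x 4 x i1>
// can be replaced by %p directly, provided no step in the chain narrows below
// the element count of the final result (which would imply zeroing).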
374 static Optional<Instruction *> instCombineConvertFromSVBool(InstCombiner &IC,
375                                                             IntrinsicInst &II) {
376   // If the reinterpret instruction operand is a PHI Node
377   if (isa<PHINode>(II.getArgOperand(0)))
378     return processPhiNode(IC, II);
379 
380   SmallVector<Instruction *, 32> CandidatesForRemoval;
381   Value *Cursor = II.getOperand(0), *EarliestReplacement = nullptr;
382 
383   const auto *IVTy = cast<VectorType>(II.getType());
384 
385   // Walk the chain of conversions.
386   while (Cursor) {
387     // If the type of the cursor has fewer lanes than the final result, zeroing
388     // must take place, which breaks the equivalence chain.
389     const auto *CursorVTy = cast<VectorType>(Cursor->getType());
390     if (CursorVTy->getElementCount().getKnownMinValue() <
391         IVTy->getElementCount().getKnownMinValue())
392       break;
393 
394     // If the cursor has the same type as II, it is a viable replacement.
395     if (Cursor->getType() == IVTy)
396       EarliestReplacement = Cursor;
397 
398     auto *IntrinsicCursor = dyn_cast<IntrinsicInst>(Cursor);
399 
400     // If this is not an SVE conversion intrinsic, this is the end of the chain.
401     if (!IntrinsicCursor || !(IntrinsicCursor->getIntrinsicID() ==
402                                   Intrinsic::aarch64_sve_convert_to_svbool ||
403                               IntrinsicCursor->getIntrinsicID() ==
404                                   Intrinsic::aarch64_sve_convert_from_svbool))
405       break;
406 
407     CandidatesForRemoval.insert(CandidatesForRemoval.begin(), IntrinsicCursor);
408     Cursor = IntrinsicCursor->getOperand(0);
409   }
410 
411   // If no viable replacement in the conversion chain was found, there is
412   // nothing to do.
413   if (!EarliestReplacement)
414     return None;
415 
416   return IC.replaceInstUsesWith(II, EarliestReplacement);
417 }
418 
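// A dup whose predicate is ptrue with the vl1 pattern writes only lane 0, so
// it can be rewritten as a plain insertelement, e.g. (illustrative):
//   sve.dup(%passthru, ptrue(vl1), %x) --> insertelement %passthru, %x, i64 0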
419 static Optional<Instruction *> instCombineSVEDup(InstCombiner &IC,
420                                                  IntrinsicInst &II) {
421   IntrinsicInst *Pg = dyn_cast<IntrinsicInst>(II.getArgOperand(1));
422   if (!Pg)
423     return None;
424 
425   if (Pg->getIntrinsicID() != Intrinsic::aarch64_sve_ptrue)
426     return None;
427 
428   const auto PTruePattern =
429       cast<ConstantInt>(Pg->getOperand(0))->getZExtValue();
430   if (PTruePattern != AArch64SVEPredPattern::vl1)
431     return None;
432 
433   // The intrinsic is inserting into lane zero so use an insert instead.
434   auto *IdxTy = Type::getInt64Ty(II.getContext());
435   auto *Insert = InsertElementInst::Create(
436       II.getArgOperand(0), II.getArgOperand(2), ConstantInt::get(IdxTy, 0));
437   Insert->insertBefore(&II);
438   Insert->takeName(&II);
439 
440   return IC.replaceInstUsesWith(II, Insert);
441 }
442 
443 static Optional<Instruction *> instCombineSVEDupX(InstCombiner &IC,
444                                                   IntrinsicInst &II) {
445   // Replace DupX with a regular IR splat.
446   IRBuilder<> Builder(II.getContext());
447   Builder.SetInsertPoint(&II);
448   auto *RetTy = cast<ScalableVectorType>(II.getType());
449   Value *Splat =
450       Builder.CreateVectorSplat(RetTy->getElementCount(), II.getArgOperand(0));
451   Splat->takeName(&II);
452   return IC.replaceInstUsesWith(II, Splat);
453 }
454 
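// Under an all-active predicate, a cmpne of a dupq-replicated constant vector
// against zero is a known predicate: all-false when every element is zero, or,
// when the non-zero elements form a regular pattern, a ptrue at the matching
// element granularity reinterpreted to the result type.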
455 static Optional<Instruction *> instCombineSVECmpNE(InstCombiner &IC,
456                                                    IntrinsicInst &II) {
457   LLVMContext &Ctx = II.getContext();
458   IRBuilder<> Builder(Ctx);
459   Builder.SetInsertPoint(&II);
460 
461   // Check that the predicate is all active
462   auto *Pg = dyn_cast<IntrinsicInst>(II.getArgOperand(0));
463   if (!Pg || Pg->getIntrinsicID() != Intrinsic::aarch64_sve_ptrue)
464     return None;
465 
466   const auto PTruePattern =
467       cast<ConstantInt>(Pg->getOperand(0))->getZExtValue();
468   if (PTruePattern != AArch64SVEPredPattern::all)
469     return None;
470 
471   // Check that we have a compare of zero..
472   auto *SplatValue =
473       dyn_cast_or_null<ConstantInt>(getSplatValue(II.getArgOperand(2)));
474   if (!SplatValue || !SplatValue->isZero())
475     return None;
476 
477   // ..against a dupq
478   auto *DupQLane = dyn_cast<IntrinsicInst>(II.getArgOperand(1));
479   if (!DupQLane ||
480       DupQLane->getIntrinsicID() != Intrinsic::aarch64_sve_dupq_lane)
481     return None;
482 
483   // Where the dupq is a lane 0 replicate of a vector insert
484   if (!cast<ConstantInt>(DupQLane->getArgOperand(1))->isZero())
485     return None;
486 
487   auto *VecIns = dyn_cast<IntrinsicInst>(DupQLane->getArgOperand(0));
488   if (!VecIns ||
489       VecIns->getIntrinsicID() != Intrinsic::experimental_vector_insert)
490     return None;
491 
492   // Where the vector insert is a fixed constant vector insert into undef at
493   // index zero
494   if (!isa<UndefValue>(VecIns->getArgOperand(0)))
495     return None;
496 
497   if (!cast<ConstantInt>(VecIns->getArgOperand(2))->isZero())
498     return None;
499 
500   auto *ConstVec = dyn_cast<Constant>(VecIns->getArgOperand(1));
501   if (!ConstVec)
502     return None;
503 
504   auto *VecTy = dyn_cast<FixedVectorType>(ConstVec->getType());
505   auto *OutTy = dyn_cast<ScalableVectorType>(II.getType());
506   if (!VecTy || !OutTy || VecTy->getNumElements() != OutTy->getMinNumElements())
507     return None;
508 
509   unsigned NumElts = VecTy->getNumElements();
510   unsigned PredicateBits = 0;
511 
512   // Expand intrinsic operands to a 16-bit byte level predicate
513   for (unsigned I = 0; I < NumElts; ++I) {
514     auto *Arg = dyn_cast<ConstantInt>(ConstVec->getAggregateElement(I));
515     if (!Arg)
516       return None;
517     if (!Arg->isZero())
518       PredicateBits |= 1 << (I * (16 / NumElts));
519   }
520 
521   // If all bits are zero bail early with an empty predicate
522   if (PredicateBits == 0) {
523     auto *PFalse = Constant::getNullValue(II.getType());
524     PFalse->takeName(&II);
525     return IC.replaceInstUsesWith(II, PFalse);
526   }
527 
528   // Calculate largest predicate type used (where byte predicate is largest)
529   unsigned Mask = 8;
530   for (unsigned I = 0; I < 16; ++I)
531     if ((PredicateBits & (1 << I)) != 0)
532       Mask |= (I % 8);
533 
534   unsigned PredSize = Mask & -Mask;
535   auto *PredType = ScalableVectorType::get(
536       Type::getInt1Ty(Ctx), AArch64::SVEBitsPerBlock / (PredSize * 8));
537 
538   // Ensure all relevant bits are set
539   for (unsigned I = 0; I < 16; I += PredSize)
540     if ((PredicateBits & (1 << I)) == 0)
541       return None;
542 
543   auto *PTruePat =
544       ConstantInt::get(Type::getInt32Ty(Ctx), AArch64SVEPredPattern::all);
545   auto *PTrue = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue,
546                                         {PredType}, {PTruePat});
547   auto *ConvertToSVBool = Builder.CreateIntrinsic(
548       Intrinsic::aarch64_sve_convert_to_svbool, {PredType}, {PTrue});
549   auto *ConvertFromSVBool =
550       Builder.CreateIntrinsic(Intrinsic::aarch64_sve_convert_from_svbool,
551                               {II.getType()}, {ConvertToSVBool});
552 
553   ConvertFromSVBool->takeName(&II);
554   return IC.replaceInstUsesWith(II, ConvertFromSVBool);
555 }
556 
557 static Optional<Instruction *> instCombineSVELast(InstCombiner &IC,
558                                                   IntrinsicInst &II) {
559   IRBuilder<> Builder(II.getContext());
560   Builder.SetInsertPoint(&II);
561   Value *Pg = II.getArgOperand(0);
562   Value *Vec = II.getArgOperand(1);
563   auto IntrinsicID = II.getIntrinsicID();
564   bool IsAfter = IntrinsicID == Intrinsic::aarch64_sve_lasta;
565 
566   // lastX(splat(X)) --> X
567   if (auto *SplatVal = getSplatValue(Vec))
568     return IC.replaceInstUsesWith(II, SplatVal);
569 
570   // If x and/or y is a splat value then:
571   // lastX (binop (x, y)) --> binop(lastX(x), lastX(y))
572   Value *LHS, *RHS;
573   if (match(Vec, m_OneUse(m_BinOp(m_Value(LHS), m_Value(RHS))))) {
574     if (isSplatValue(LHS) || isSplatValue(RHS)) {
575       auto *OldBinOp = cast<BinaryOperator>(Vec);
576       auto OpC = OldBinOp->getOpcode();
577       auto *NewLHS =
578           Builder.CreateIntrinsic(IntrinsicID, {Vec->getType()}, {Pg, LHS});
579       auto *NewRHS =
580           Builder.CreateIntrinsic(IntrinsicID, {Vec->getType()}, {Pg, RHS});
581       auto *NewBinOp = BinaryOperator::CreateWithCopiedFlags(
582           OpC, NewLHS, NewRHS, OldBinOp, OldBinOp->getName(), &II);
583       return IC.replaceInstUsesWith(II, NewBinOp);
584     }
585   }
586 
587   auto *C = dyn_cast<Constant>(Pg);
588   if (IsAfter && C && C->isNullValue()) {
589     // The intrinsic is extracting lane 0 so use an extract instead.
590     auto *IdxTy = Type::getInt64Ty(II.getContext());
591     auto *Extract = ExtractElementInst::Create(Vec, ConstantInt::get(IdxTy, 0));
592     Extract->insertBefore(&II);
593     Extract->takeName(&II);
594     return IC.replaceInstUsesWith(II, Extract);
595   }
596 
597   auto *IntrPG = dyn_cast<IntrinsicInst>(Pg);
598   if (!IntrPG)
599     return None;
600 
601   if (IntrPG->getIntrinsicID() != Intrinsic::aarch64_sve_ptrue)
602     return None;
603 
604   const auto PTruePattern =
605       cast<ConstantInt>(IntrPG->getOperand(0))->getZExtValue();
606 
607   // Can the intrinsic's predicate be converted to a known constant index?
608   unsigned MinNumElts = getNumElementsFromSVEPredPattern(PTruePattern);
609   if (!MinNumElts)
610     return None;
611 
612   unsigned Idx = MinNumElts - 1;
613   // Increment the index if extracting the element after the last active
614   // predicate element.
615   if (IsAfter)
616     ++Idx;
617 
618   // Ignore extracts whose index is larger than the known minimum vector
619   // length. NOTE: This is an artificial constraint where we prefer to
620   // maintain what the user asked for until an alternative is proven faster.
621   auto *PgVTy = cast<ScalableVectorType>(Pg->getType());
622   if (Idx >= PgVTy->getMinNumElements())
623     return None;
624 
625   // The intrinsic is extracting a fixed lane so use an extract instead.
626   auto *IdxTy = Type::getInt64Ty(II.getContext());
627   auto *Extract = ExtractElementInst::Create(Vec, ConstantInt::get(IdxTy, Idx));
628   Extract->insertBefore(&II);
629   Extract->takeName(&II);
630   return IC.replaceInstUsesWith(II, Extract);
631 }
632 
633 static Optional<Instruction *> instCombineRDFFR(InstCombiner &IC,
634                                                 IntrinsicInst &II) {
635   LLVMContext &Ctx = II.getContext();
636   IRBuilder<> Builder(Ctx);
637   Builder.SetInsertPoint(&II);
638   // Replace rdffr with predicated rdffr.z intrinsic, so that optimizePTestInstr
639   // can work with RDFFR_PP for ptest elimination.
640   auto *AllPat =
641       ConstantInt::get(Type::getInt32Ty(Ctx), AArch64SVEPredPattern::all);
642   auto *PTrue = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue,
643                                         {II.getType()}, {AllPat});
644   auto *RDFFR =
645       Builder.CreateIntrinsic(Intrinsic::aarch64_sve_rdffr_z, {}, {PTrue});
646   RDFFR->takeName(&II);
647   return IC.replaceInstUsesWith(II, RDFFR);
648 }
649 
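// Fold the SVE cnt[bhwd] intrinsics: with the 'all' pattern the result is
// vscale * NumElts (e.g. cntw(all) --> vscale * 4, illustrative), and with an
// explicit vl pattern that the minimum vector length already guarantees it
// folds to that constant (e.g. cntd(vl2) --> 2).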
650 static Optional<Instruction *>
651 instCombineSVECntElts(InstCombiner &IC, IntrinsicInst &II, unsigned NumElts) {
652   const auto Pattern = cast<ConstantInt>(II.getArgOperand(0))->getZExtValue();
653 
654   if (Pattern == AArch64SVEPredPattern::all) {
655     LLVMContext &Ctx = II.getContext();
656     IRBuilder<> Builder(Ctx);
657     Builder.SetInsertPoint(&II);
658 
659     Constant *StepVal = ConstantInt::get(II.getType(), NumElts);
660     auto *VScale = Builder.CreateVScale(StepVal);
661     VScale->takeName(&II);
662     return IC.replaceInstUsesWith(II, VScale);
663   }
664 
665   unsigned MinNumElts = getNumElementsFromSVEPredPattern(Pattern);
666 
667   return MinNumElts && NumElts >= MinNumElts
668              ? Optional<Instruction *>(IC.replaceInstUsesWith(
669                    II, ConstantInt::get(II.getType(), MinNumElts)))
670              : None;
671 }
672 
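// ptest(convert.to.svbool(a), convert.to.svbool(b)) --> ptest(a, b) when a and
// b share the same (narrower) predicate type.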
673 static Optional<Instruction *> instCombineSVEPTest(InstCombiner &IC,
674                                                    IntrinsicInst &II) {
675   IntrinsicInst *Op1 = dyn_cast<IntrinsicInst>(II.getArgOperand(0));
676   IntrinsicInst *Op2 = dyn_cast<IntrinsicInst>(II.getArgOperand(1));
677 
678   if (Op1 && Op2 &&
679       Op1->getIntrinsicID() == Intrinsic::aarch64_sve_convert_to_svbool &&
680       Op2->getIntrinsicID() == Intrinsic::aarch64_sve_convert_to_svbool &&
681       Op1->getArgOperand(0)->getType() == Op2->getArgOperand(0)->getType()) {
682 
683     IRBuilder<> Builder(II.getContext());
684     Builder.SetInsertPoint(&II);
685 
686     Value *Ops[] = {Op1->getArgOperand(0), Op2->getArgOperand(0)};
687     Type *Tys[] = {Op1->getArgOperand(0)->getType()};
688 
689     auto *PTest = Builder.CreateIntrinsic(II.getIntrinsicID(), Tys, Ops);
690 
691     PTest->takeName(&II);
692     return IC.replaceInstUsesWith(II, PTest);
693   }
694 
695   return None;
696 }
697 
698 static Optional<Instruction *> instCombineSVEVectorFMLA(InstCombiner &IC,
699                                                         IntrinsicInst &II) {
700   // fold (fadd p a (fmul p b c)) -> (fma p a b c)
701   Value *P = II.getOperand(0);
702   Value *A = II.getOperand(1);
703   auto FMul = II.getOperand(2);
704   Value *B, *C;
705   if (!match(FMul, m_Intrinsic<Intrinsic::aarch64_sve_fmul>(
706                        m_Specific(P), m_Value(B), m_Value(C))))
707     return None;
708 
709   if (!FMul->hasOneUse())
710     return None;
711 
712   llvm::FastMathFlags FAddFlags = II.getFastMathFlags();
713   // Stop the combine if the fast-math flags on the inputs differ, in case
714   // dropping flags would cause us to miss more beneficial optimizations.
715   if (FAddFlags != cast<CallInst>(FMul)->getFastMathFlags())
716     return None;
717   if (!FAddFlags.allowContract())
718     return None;
719 
720   IRBuilder<> Builder(II.getContext());
721   Builder.SetInsertPoint(&II);
722   auto FMLA = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_fmla,
723                                       {II.getType()}, {P, A, B, C}, &II);
724   FMLA->setFastMathFlags(FAddFlags);
725   return IC.replaceInstUsesWith(II, FMLA);
726 }
727 
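// An sve.ld1 whose governing predicate is ptrue(all) is an unpredicated
// contiguous load and becomes a plain vector load; any other predicate is
// lowered to a llvm.masked.load with a zeroinitializer passthru.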
728 static Optional<Instruction *>
729 instCombineSVELD1(InstCombiner &IC, IntrinsicInst &II, const DataLayout &DL) {
730   IRBuilder<> Builder(II.getContext());
731   Builder.SetInsertPoint(&II);
732 
733   Value *Pred = II.getOperand(0);
734   Value *PtrOp = II.getOperand(1);
735   Type *VecTy = II.getType();
736   Value *VecPtr = Builder.CreateBitCast(PtrOp, VecTy->getPointerTo());
737 
738   if (match(Pred, m_Intrinsic<Intrinsic::aarch64_sve_ptrue>(
739                       m_ConstantInt<AArch64SVEPredPattern::all>()))) {
740     LoadInst *Load = Builder.CreateLoad(VecTy, VecPtr);
741     return IC.replaceInstUsesWith(II, Load);
742   }
743 
744   CallInst *MaskedLoad =
745       Builder.CreateMaskedLoad(VecTy, VecPtr, PtrOp->getPointerAlignment(DL),
746                                Pred, ConstantAggregateZero::get(VecTy));
747   return IC.replaceInstUsesWith(II, MaskedLoad);
748 }
749 
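// Likewise, an sve.st1 under ptrue(all) becomes a plain vector store, and any
// other predicate becomes a llvm.masked.store.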
750 static Optional<Instruction *>
751 instCombineSVEST1(InstCombiner &IC, IntrinsicInst &II, const DataLayout &DL) {
752   IRBuilder<> Builder(II.getContext());
753   Builder.SetInsertPoint(&II);
754 
755   Value *VecOp = II.getOperand(0);
756   Value *Pred = II.getOperand(1);
757   Value *PtrOp = II.getOperand(2);
758   Value *VecPtr =
759       Builder.CreateBitCast(PtrOp, VecOp->getType()->getPointerTo());
760 
761   if (match(Pred, m_Intrinsic<Intrinsic::aarch64_sve_ptrue>(
762                       m_ConstantInt<AArch64SVEPredPattern::all>()))) {
763     Builder.CreateStore(VecOp, VecPtr);
764     return IC.eraseInstFromFunction(II);
765   }
766 
767   Builder.CreateMaskedStore(VecOp, VecPtr, PtrOp->getPointerAlignment(DL),
768                             Pred);
769   return IC.eraseInstFromFunction(II);
770 }
771 
772 static Instruction::BinaryOps intrinsicIDToBinOpCode(unsigned Intrinsic) {
773   switch (Intrinsic) {
774   case Intrinsic::aarch64_sve_fmul:
775     return Instruction::BinaryOps::FMul;
776   case Intrinsic::aarch64_sve_fadd:
777     return Instruction::BinaryOps::FAdd;
778   case Intrinsic::aarch64_sve_fsub:
779     return Instruction::BinaryOps::FSub;
780   default:
781     return Instruction::BinaryOpsEnd;
782   }
783 }
784 
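// With an all-active governing predicate, the predicated fadd/fsub/fmul
// intrinsics are equivalent to the corresponding unpredicated IR binary
// operators, so emit a plain binop and preserve the fast-math flags.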
785 static Optional<Instruction *> instCombineSVEVectorBinOp(InstCombiner &IC,
786                                                          IntrinsicInst &II) {
787   auto *OpPredicate = II.getOperand(0);
788   auto BinOpCode = intrinsicIDToBinOpCode(II.getIntrinsicID());
789   if (BinOpCode == Instruction::BinaryOpsEnd ||
790       !match(OpPredicate, m_Intrinsic<Intrinsic::aarch64_sve_ptrue>(
791                               m_ConstantInt<AArch64SVEPredPattern::all>())))
792     return None;
793   IRBuilder<> Builder(II.getContext());
794   Builder.SetInsertPoint(&II);
795   Builder.setFastMathFlags(II.getFastMathFlags());
796   auto BinOp =
797       Builder.CreateBinOp(BinOpCode, II.getOperand(1), II.getOperand(2));
798   return IC.replaceInstUsesWith(II, BinOp);
799 }
800 
801 static Optional<Instruction *> instCombineSVEVectorFAdd(InstCombiner &IC,
802                                                         IntrinsicInst &II) {
803   if (auto FMLA = instCombineSVEVectorFMLA(IC, II))
804     return FMLA;
805   return instCombineSVEVectorBinOp(IC, II);
806 }
807 
808 static Optional<Instruction *> instCombineSVEVectorMul(InstCombiner &IC,
809                                                        IntrinsicInst &II) {
810   auto *OpPredicate = II.getOperand(0);
811   auto *OpMultiplicand = II.getOperand(1);
812   auto *OpMultiplier = II.getOperand(2);
813 
814   IRBuilder<> Builder(II.getContext());
815   Builder.SetInsertPoint(&II);
816 
817   // Return true if a given instruction is a unit splat value, false otherwise.
818   auto IsUnitSplat = [](auto *I) {
819     auto *SplatValue = getSplatValue(I);
820     if (!SplatValue)
821       return false;
822     return match(SplatValue, m_FPOne()) || match(SplatValue, m_One());
823   };
824 
825   // Return true if a given instruction is an aarch64_sve_dup intrinsic call
826   // with a unit splat value, false otherwise.
827   auto IsUnitDup = [](auto *I) {
828     auto *IntrI = dyn_cast<IntrinsicInst>(I);
829     if (!IntrI || IntrI->getIntrinsicID() != Intrinsic::aarch64_sve_dup)
830       return false;
831 
832     auto *SplatValue = IntrI->getOperand(2);
833     return match(SplatValue, m_FPOne()) || match(SplatValue, m_One());
834   };
835 
836   if (IsUnitSplat(OpMultiplier)) {
837     // [f]mul pg %n, (dupx 1) => %n
838     OpMultiplicand->takeName(&II);
839     return IC.replaceInstUsesWith(II, OpMultiplicand);
840   } else if (IsUnitDup(OpMultiplier)) {
841     // [f]mul pg %n, (dup pg 1) => %n
842     auto *DupInst = cast<IntrinsicInst>(OpMultiplier);
843     auto *DupPg = DupInst->getOperand(1);
844     // TODO: this is naive. The optimization is still valid if DupPg
845     // 'encompasses' OpPredicate, not only if they're the same predicate.
846     if (OpPredicate == DupPg) {
847       OpMultiplicand->takeName(&II);
848       return IC.replaceInstUsesWith(II, OpMultiplicand);
849     }
850   }
851 
852   return instCombineSVEVectorBinOp(IC, II);
853 }
854 
855 static Optional<Instruction *> instCombineSVEUnpack(InstCombiner &IC,
856                                                     IntrinsicInst &II) {
857   IRBuilder<> Builder(II.getContext());
858   Builder.SetInsertPoint(&II);
859   Value *UnpackArg = II.getArgOperand(0);
860   auto *RetTy = cast<ScalableVectorType>(II.getType());
861   bool IsSigned = II.getIntrinsicID() == Intrinsic::aarch64_sve_sunpkhi ||
862                   II.getIntrinsicID() == Intrinsic::aarch64_sve_sunpklo;
863 
864   // Hi = uunpkhi(splat(X)) --> Hi = splat(extend(X))
865   // Lo = uunpklo(splat(X)) --> Lo = splat(extend(X))
866   if (auto *ScalarArg = getSplatValue(UnpackArg)) {
867     ScalarArg =
868         Builder.CreateIntCast(ScalarArg, RetTy->getScalarType(), IsSigned);
869     Value *NewVal =
870         Builder.CreateVectorSplat(RetTy->getElementCount(), ScalarArg);
871     NewVal->takeName(&II);
872     return IC.replaceInstUsesWith(II, NewVal);
873   }
874 
875   return None;
876 }
877 static Optional<Instruction *> instCombineSVETBL(InstCombiner &IC,
878                                                  IntrinsicInst &II) {
879   auto *OpVal = II.getOperand(0);
880   auto *OpIndices = II.getOperand(1);
881   VectorType *VTy = cast<VectorType>(II.getType());
882 
883   // Check whether OpIndices is a constant splat value smaller than the
884   // minimum element count of the result.
885   auto *SplatValue = dyn_cast_or_null<ConstantInt>(getSplatValue(OpIndices));
886   if (!SplatValue ||
887       SplatValue->getValue().uge(VTy->getElementCount().getKnownMinValue()))
888     return None;
889 
890   // Convert sve_tbl(OpVal, sve_dup_x(SplatValue)) to
891   // splat_vector(extractelement(OpVal, SplatValue)) for further optimization.
892   IRBuilder<> Builder(II.getContext());
893   Builder.SetInsertPoint(&II);
894   auto *Extract = Builder.CreateExtractElement(OpVal, SplatValue);
895   auto *VectorSplat =
896       Builder.CreateVectorSplat(VTy->getElementCount(), Extract);
897 
898   VectorSplat->takeName(&II);
899   return IC.replaceInstUsesWith(II, VectorSplat);
900 }
901 
902 static Optional<Instruction *> instCombineSVETupleGet(InstCombiner &IC,
903                                                       IntrinsicInst &II) {
904   // Try to remove sequences of tuple get/set.
905   Value *SetTuple, *SetIndex, *SetValue;
906   auto *GetTuple = II.getArgOperand(0);
907   auto *GetIndex = II.getArgOperand(1);
908   // Check that we have tuple_get(GetTuple, GetIndex) where GetTuple is a
909   // call to tuple_set i.e. tuple_set(SetTuple, SetIndex, SetValue).
910   // Make sure that the types of the current intrinsic and SetValue match
911   // in order to safely remove the sequence.
912   if (!match(GetTuple,
913              m_Intrinsic<Intrinsic::aarch64_sve_tuple_set>(
914                  m_Value(SetTuple), m_Value(SetIndex), m_Value(SetValue))) ||
915       SetValue->getType() != II.getType())
916     return None;
917   // Case where we get the same index right after setting it.
918   // tuple_get(tuple_set(SetTuple, SetIndex, SetValue), GetIndex) --> SetValue
919   if (GetIndex == SetIndex)
920     return IC.replaceInstUsesWith(II, SetValue);
921   // If we are getting a different index than the one that was set in the
922   // tuple_set intrinsic, replace the input tuple with the one up the chain.
923   // tuple_get(tuple_set(SetTuple, SetIndex, SetValue), GetIndex)
924   // --> tuple_get(SetTuple, GetIndex)
925   return IC.replaceOperand(II, 0, SetTuple);
926 }
927 
928 static Optional<Instruction *> instCombineSVEZip(InstCombiner &IC,
929                                                  IntrinsicInst &II) {
930   // zip1(uzp1(A, B), uzp2(A, B)) --> A
931   // zip2(uzp1(A, B), uzp2(A, B)) --> B
932   Value *A, *B;
933   if (match(II.getArgOperand(0),
934             m_Intrinsic<Intrinsic::aarch64_sve_uzp1>(m_Value(A), m_Value(B))) &&
935       match(II.getArgOperand(1), m_Intrinsic<Intrinsic::aarch64_sve_uzp2>(
936                                      m_Specific(A), m_Specific(B))))
937     return IC.replaceInstUsesWith(
938         II, (II.getIntrinsicID() == Intrinsic::aarch64_sve_zip1 ? A : B));
939 
940   return None;
941 }
942 
943 static Optional<Instruction *> instCombineLD1GatherIndex(InstCombiner &IC,
944                                                          IntrinsicInst &II) {
945   Value *Mask = II.getOperand(0);
946   Value *BasePtr = II.getOperand(1);
947   Value *Index = II.getOperand(2);
948   Type *Ty = II.getType();
949   Type *BasePtrTy = BasePtr->getType();
950   Value *PassThru = ConstantAggregateZero::get(Ty);
951 
952   // Contiguous gather => masked load.
953   // (sve.ld1.gather.index Mask BasePtr (sve.index IndexBase 1))
954   // => (masked.load (gep BasePtr IndexBase) Align Mask zeroinitializer)
955   Value *IndexBase;
956   if (match(Index, m_Intrinsic<Intrinsic::aarch64_sve_index>(
957                        m_Value(IndexBase), m_SpecificInt(1)))) {
958     IRBuilder<> Builder(II.getContext());
959     Builder.SetInsertPoint(&II);
960 
961     Align Alignment =
962         BasePtr->getPointerAlignment(II.getModule()->getDataLayout());
963 
964     Type *VecPtrTy = PointerType::getUnqual(Ty);
965     Value *Ptr = Builder.CreateGEP(BasePtrTy->getPointerElementType(), BasePtr,
966                                    IndexBase);
967     Ptr = Builder.CreateBitCast(Ptr, VecPtrTy);
968     CallInst *MaskedLoad =
969         Builder.CreateMaskedLoad(Ty, Ptr, Alignment, Mask, PassThru);
970     MaskedLoad->takeName(&II);
971     return IC.replaceInstUsesWith(II, MaskedLoad);
972   }
973 
974   return None;
975 }
976 
977 static Optional<Instruction *> instCombineST1ScatterIndex(InstCombiner &IC,
978                                                           IntrinsicInst &II) {
979   Value *Val = II.getOperand(0);
980   Value *Mask = II.getOperand(1);
981   Value *BasePtr = II.getOperand(2);
982   Value *Index = II.getOperand(3);
983   Type *Ty = Val->getType();
984   Type *BasePtrTy = BasePtr->getType();
985 
986   // Contiguous scatter => masked store.
987   // (sve.st1.scatter.index Value Mask BasePtr (sve.index IndexBase 1))
988   // => (masked.store Value (gep BasePtr IndexBase) Align Mask)
989   Value *IndexBase;
990   if (match(Index, m_Intrinsic<Intrinsic::aarch64_sve_index>(
991                        m_Value(IndexBase), m_SpecificInt(1)))) {
992     IRBuilder<> Builder(II.getContext());
993     Builder.SetInsertPoint(&II);
994 
995     Align Alignment =
996         BasePtr->getPointerAlignment(II.getModule()->getDataLayout());
997 
998     Value *Ptr = Builder.CreateGEP(BasePtrTy->getPointerElementType(), BasePtr,
999                                    IndexBase);
1000     Type *VecPtrTy = PointerType::getUnqual(Ty);
1001     Ptr = Builder.CreateBitCast(Ptr, VecPtrTy);
1002 
1003     (void)Builder.CreateMaskedStore(Val, Ptr, Alignment, Mask);
1004 
1005     return IC.eraseInstFromFunction(II);
1006   }
1007 
1008   return None;
1009 }
1010 
1011 Optional<Instruction *>
1012 AArch64TTIImpl::instCombineIntrinsic(InstCombiner &IC,
1013                                      IntrinsicInst &II) const {
1014   Intrinsic::ID IID = II.getIntrinsicID();
1015   switch (IID) {
1016   default:
1017     break;
1018   case Intrinsic::aarch64_sve_convert_from_svbool:
1019     return instCombineConvertFromSVBool(IC, II);
1020   case Intrinsic::aarch64_sve_dup:
1021     return instCombineSVEDup(IC, II);
1022   case Intrinsic::aarch64_sve_dup_x:
1023     return instCombineSVEDupX(IC, II);
1024   case Intrinsic::aarch64_sve_cmpne:
1025   case Intrinsic::aarch64_sve_cmpne_wide:
1026     return instCombineSVECmpNE(IC, II);
1027   case Intrinsic::aarch64_sve_rdffr:
1028     return instCombineRDFFR(IC, II);
1029   case Intrinsic::aarch64_sve_lasta:
1030   case Intrinsic::aarch64_sve_lastb:
1031     return instCombineSVELast(IC, II);
1032   case Intrinsic::aarch64_sve_cntd:
1033     return instCombineSVECntElts(IC, II, 2);
1034   case Intrinsic::aarch64_sve_cntw:
1035     return instCombineSVECntElts(IC, II, 4);
1036   case Intrinsic::aarch64_sve_cnth:
1037     return instCombineSVECntElts(IC, II, 8);
1038   case Intrinsic::aarch64_sve_cntb:
1039     return instCombineSVECntElts(IC, II, 16);
1040   case Intrinsic::aarch64_sve_ptest_any:
1041   case Intrinsic::aarch64_sve_ptest_first:
1042   case Intrinsic::aarch64_sve_ptest_last:
1043     return instCombineSVEPTest(IC, II);
1044   case Intrinsic::aarch64_sve_mul:
1045   case Intrinsic::aarch64_sve_fmul:
1046     return instCombineSVEVectorMul(IC, II);
1047   case Intrinsic::aarch64_sve_fadd:
1048     return instCombineSVEVectorFAdd(IC, II);
1049   case Intrinsic::aarch64_sve_fsub:
1050     return instCombineSVEVectorBinOp(IC, II);
1051   case Intrinsic::aarch64_sve_tbl:
1052     return instCombineSVETBL(IC, II);
1053   case Intrinsic::aarch64_sve_uunpkhi:
1054   case Intrinsic::aarch64_sve_uunpklo:
1055   case Intrinsic::aarch64_sve_sunpkhi:
1056   case Intrinsic::aarch64_sve_sunpklo:
1057     return instCombineSVEUnpack(IC, II);
1058   case Intrinsic::aarch64_sve_tuple_get:
1059     return instCombineSVETupleGet(IC, II);
1060   case Intrinsic::aarch64_sve_zip1:
1061   case Intrinsic::aarch64_sve_zip2:
1062     return instCombineSVEZip(IC, II);
1063   case Intrinsic::aarch64_sve_ld1_gather_index:
1064     return instCombineLD1GatherIndex(IC, II);
1065   case Intrinsic::aarch64_sve_st1_scatter_index:
1066     return instCombineST1ScatterIndex(IC, II);
1067   case Intrinsic::aarch64_sve_ld1:
1068     return instCombineSVELD1(IC, II, DL);
1069   case Intrinsic::aarch64_sve_st1:
1070     return instCombineSVEST1(IC, II, DL);
1071   }
1072 
1073   return None;
1074 }
1075 
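// Returns true if an add/sub with these arguments can be lowered to a widening
// NEON instruction, e.g. (illustrative) 'add <8 x i16> %a, (zext <8 x i8> %b)'
// maps to UADDW, and an add where both operands are such extends maps to UADDL.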
1076 bool AArch64TTIImpl::isWideningInstruction(Type *DstTy, unsigned Opcode,
1077                                            ArrayRef<const Value *> Args) {
1078 
1079   // A helper that returns a vector type from the given type. The number of
1080   // elements in DstTy determines the vector width.
1081   auto toVectorTy = [&](Type *ArgTy) {
1082     return VectorType::get(ArgTy->getScalarType(),
1083                            cast<VectorType>(DstTy)->getElementCount());
1084   };
1085 
1086   // Exit early if DstTy is not a vector type whose elements are at least
1087   // 16 bits wide.
1088   if (!DstTy->isVectorTy() || DstTy->getScalarSizeInBits() < 16)
1089     return false;
1090 
1091   // Determine if the operation has a widening variant. We consider both the
1092   // "long" (e.g., usubl) and "wide" (e.g., usubw) versions of the
1093   // instructions.
1094   //
1095   // TODO: Add additional widening operations (e.g., mul, shl, etc.) once we
1096   //       verify that their extending operands are eliminated during code
1097   //       generation.
1098   switch (Opcode) {
1099   case Instruction::Add: // UADDL(2), SADDL(2), UADDW(2), SADDW(2).
1100   case Instruction::Sub: // USUBL(2), SSUBL(2), USUBW(2), SSUBW(2).
1101     break;
1102   default:
1103     return false;
1104   }
1105 
1106   // To be a widening instruction (either the "wide" or "long" versions), the
1107   // second operand must be a sign- or zero-extend having a single user. We
1108   // only consider extends having a single user because they may otherwise not
1109   // be eliminated.
1110   if (Args.size() != 2 ||
1111       (!isa<SExtInst>(Args[1]) && !isa<ZExtInst>(Args[1])) ||
1112       !Args[1]->hasOneUse())
1113     return false;
1114   auto *Extend = cast<CastInst>(Args[1]);
1115 
1116   // Legalize the destination type and ensure it can be used in a widening
1117   // operation.
1118   auto DstTyL = TLI->getTypeLegalizationCost(DL, DstTy);
1119   unsigned DstElTySize = DstTyL.second.getScalarSizeInBits();
1120   if (!DstTyL.second.isVector() || DstElTySize != DstTy->getScalarSizeInBits())
1121     return false;
1122 
1123   // Legalize the source type and ensure it can be used in a widening
1124   // operation.
1125   auto *SrcTy = toVectorTy(Extend->getSrcTy());
1126   auto SrcTyL = TLI->getTypeLegalizationCost(DL, SrcTy);
1127   unsigned SrcElTySize = SrcTyL.second.getScalarSizeInBits();
1128   if (!SrcTyL.second.isVector() || SrcElTySize != SrcTy->getScalarSizeInBits())
1129     return false;
1130 
1131   // Get the total number of vector elements in the legalized types.
1132   InstructionCost NumDstEls =
1133       DstTyL.first * DstTyL.second.getVectorMinNumElements();
1134   InstructionCost NumSrcEls =
1135       SrcTyL.first * SrcTyL.second.getVectorMinNumElements();
1136 
1137   // Return true if the legalized types have the same number of vector elements
1138   // and the destination element type size is twice that of the source type.
1139   return NumDstEls == NumSrcEls && 2 * SrcElTySize == DstElTySize;
1140 }
1141 
1142 InstructionCost AArch64TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
1143                                                  Type *Src,
1144                                                  TTI::CastContextHint CCH,
1145                                                  TTI::TargetCostKind CostKind,
1146                                                  const Instruction *I) {
1147   int ISD = TLI->InstructionOpcodeToISD(Opcode);
1148   assert(ISD && "Invalid opcode");
1149 
1150   // If the cast is observable, and it is used by a widening instruction (e.g.,
1151   // uaddl, saddw, etc.), it may be free.
1152   if (I && I->hasOneUse()) {
1153     auto *SingleUser = cast<Instruction>(*I->user_begin());
1154     SmallVector<const Value *, 4> Operands(SingleUser->operand_values());
1155     if (isWideningInstruction(Dst, SingleUser->getOpcode(), Operands)) {
1156       // If the cast is the second operand, it is free. We will generate either
1157       // a "wide" or "long" version of the widening instruction.
1158       if (I == SingleUser->getOperand(1))
1159         return 0;
1160       // If the cast is not the second operand, it will be free if it looks the
1161       // same as the second operand. In this case, we will generate a "long"
1162       // version of the widening instruction.
1163       if (auto *Cast = dyn_cast<CastInst>(SingleUser->getOperand(1)))
1164         if (I->getOpcode() == unsigned(Cast->getOpcode()) &&
1165             cast<CastInst>(I)->getSrcTy() == Cast->getSrcTy())
1166           return 0;
1167     }
1168   }
1169 
1170   // TODO: Allow non-throughput costs that aren't binary.
1171   auto AdjustCost = [&CostKind](InstructionCost Cost) -> InstructionCost {
1172     if (CostKind != TTI::TCK_RecipThroughput)
1173       return Cost == 0 ? 0 : 1;
1174     return Cost;
1175   };
1176 
1177   EVT SrcTy = TLI->getValueType(DL, Src);
1178   EVT DstTy = TLI->getValueType(DL, Dst);
1179 
1180   if (!SrcTy.isSimple() || !DstTy.isSimple())
1181     return AdjustCost(
1182         BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
1183 
1184   static const TypeConversionCostTblEntry
1185   ConversionTbl[] = {
1186     { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32,  1 },
1187     { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64,  0 },
1188     { ISD::TRUNCATE, MVT::v8i8,  MVT::v8i32,  3 },
1189     { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 6 },
1190 
1191     // Truncations on nxvmiN
1192     { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i16, 1 },
1193     { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i32, 1 },
1194     { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i64, 1 },
1195     { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i16, 1 },
1196     { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i32, 1 },
1197     { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i64, 2 },
1198     { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i16, 1 },
1199     { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i32, 3 },
1200     { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i64, 5 },
1201     { ISD::TRUNCATE, MVT::nxv16i1, MVT::nxv16i8, 1 },
1202     { ISD::TRUNCATE, MVT::nxv2i16, MVT::nxv2i32, 1 },
1203     { ISD::TRUNCATE, MVT::nxv2i32, MVT::nxv2i64, 1 },
1204     { ISD::TRUNCATE, MVT::nxv4i16, MVT::nxv4i32, 1 },
1205     { ISD::TRUNCATE, MVT::nxv4i32, MVT::nxv4i64, 2 },
1206     { ISD::TRUNCATE, MVT::nxv8i16, MVT::nxv8i32, 3 },
1207     { ISD::TRUNCATE, MVT::nxv8i32, MVT::nxv8i64, 6 },
1208 
1209     // The number of shll instructions for the extension.
1210     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
1211     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
1212     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32, 2 },
1213     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32, 2 },
1214     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
1215     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
1216     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16, 2 },
1217     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16, 2 },
1218     { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i8,  7 },
1219     { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i8,  7 },
1220     { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i16, 6 },
1221     { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i16, 6 },
1222     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
1223     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
1224     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
1225     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
1226 
1227     // LowerVectorINT_TO_FP:
1228     { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
1229     { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
1230     { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
1231     { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
1232     { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
1233     { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
1234 
1235     // Complex: to v2f32
1236     { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
1237     { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
1238     { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },
1239     { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
1240     { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
1241     { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },
1242 
1243     // Complex: to v4f32
1244     { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8,  4 },
1245     { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
1246     { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8,  3 },
1247     { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
1248 
1249     // Complex: to v8f32
1250     { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8,  10 },
1251     { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
1252     { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8,  10 },
1253     { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
1254 
1255     // Complex: to v16f32
1256     { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },
1257     { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },
1258 
1259     // Complex: to v2f64
1260     { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
1261     { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
1262     { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
1263     { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
1264     { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
1265     { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
1266 
1267 
1268     // LowerVectorFP_TO_INT
1269     { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f32, 1 },
1270     { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
1271     { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
1272     { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 },
1273     { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
1274     { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },
1275 
1276     // Complex, from v2f32: legal type is v2i32 (no cost) or v2i64 (1 ext).
1277     { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 2 },
1278     { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 1 },
1279     { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f32, 1 },
1280     { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 2 },
1281     { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 1 },
1282     { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f32, 1 },
1283 
1284     // Complex, from v4f32: legal type is v4i16, 1 narrowing => ~2
1285     { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 },
1286     { ISD::FP_TO_SINT, MVT::v4i8,  MVT::v4f32, 2 },
1287     { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 },
1288     { ISD::FP_TO_UINT, MVT::v4i8,  MVT::v4f32, 2 },
1289 
1290     // Complex, from nxv2f32.
1291     { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f32, 1 },
1292     { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f32, 1 },
1293     { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f32, 1 },
1294     { ISD::FP_TO_SINT, MVT::nxv2i8,  MVT::nxv2f32, 1 },
1295     { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f32, 1 },
1296     { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f32, 1 },
1297     { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f32, 1 },
1298     { ISD::FP_TO_UINT, MVT::nxv2i8,  MVT::nxv2f32, 1 },
1299 
1300     // Complex, from v2f64: legal type is v2i32, 1 narrowing => ~2.
1301     { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 },
1302     { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f64, 2 },
1303     { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f64, 2 },
1304     { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 },
1305     { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f64, 2 },
1306     { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f64, 2 },
1307 
1308     // Complex, from nxv2f64.
1309     { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f64, 1 },
1310     { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f64, 1 },
1311     { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f64, 1 },
1312     { ISD::FP_TO_SINT, MVT::nxv2i8,  MVT::nxv2f64, 1 },
1313     { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f64, 1 },
1314     { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f64, 1 },
1315     { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f64, 1 },
1316     { ISD::FP_TO_UINT, MVT::nxv2i8,  MVT::nxv2f64, 1 },
1317 
1318     // Complex, from nxv4f32.
1319     { ISD::FP_TO_SINT, MVT::nxv4i64, MVT::nxv4f32, 4 },
1320     { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f32, 1 },
1321     { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f32, 1 },
1322     { ISD::FP_TO_SINT, MVT::nxv4i8,  MVT::nxv4f32, 1 },
1323     { ISD::FP_TO_UINT, MVT::nxv4i64, MVT::nxv4f32, 4 },
1324     { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f32, 1 },
1325     { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f32, 1 },
1326     { ISD::FP_TO_UINT, MVT::nxv4i8,  MVT::nxv4f32, 1 },
1327 
1328     // Complex, from nxv8f64. Illegal -> illegal conversions not required.
1329     { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f64, 7 },
1330     { ISD::FP_TO_SINT, MVT::nxv8i8,  MVT::nxv8f64, 7 },
1331     { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f64, 7 },
1332     { ISD::FP_TO_UINT, MVT::nxv8i8,  MVT::nxv8f64, 7 },
1333 
1334     // Complex, from nxv4f64. Illegal -> illegal conversions not required.
1335     { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f64, 3 },
1336     { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f64, 3 },
1337     { ISD::FP_TO_SINT, MVT::nxv4i8,  MVT::nxv4f64, 3 },
1338     { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f64, 3 },
1339     { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f64, 3 },
1340     { ISD::FP_TO_UINT, MVT::nxv4i8,  MVT::nxv4f64, 3 },
1341 
1342     // Complex, from nxv8f32. Illegal -> illegal conversions not required.
1343     { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f32, 3 },
1344     { ISD::FP_TO_SINT, MVT::nxv8i8,  MVT::nxv8f32, 3 },
1345     { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f32, 3 },
1346     { ISD::FP_TO_UINT, MVT::nxv8i8,  MVT::nxv8f32, 3 },
1347 
1348     // Complex, from nxv8f16.
1349     { ISD::FP_TO_SINT, MVT::nxv8i64, MVT::nxv8f16, 10 },
1350     { ISD::FP_TO_SINT, MVT::nxv8i32, MVT::nxv8f16, 4 },
1351     { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f16, 1 },
1352     { ISD::FP_TO_SINT, MVT::nxv8i8,  MVT::nxv8f16, 1 },
1353     { ISD::FP_TO_UINT, MVT::nxv8i64, MVT::nxv8f16, 10 },
1354     { ISD::FP_TO_UINT, MVT::nxv8i32, MVT::nxv8f16, 4 },
1355     { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f16, 1 },
1356     { ISD::FP_TO_UINT, MVT::nxv8i8,  MVT::nxv8f16, 1 },
1357 
1358     // Complex, from nxv4f16.
1359     { ISD::FP_TO_SINT, MVT::nxv4i64, MVT::nxv4f16, 4 },
1360     { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f16, 1 },
1361     { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f16, 1 },
1362     { ISD::FP_TO_SINT, MVT::nxv4i8,  MVT::nxv4f16, 1 },
1363     { ISD::FP_TO_UINT, MVT::nxv4i64, MVT::nxv4f16, 4 },
1364     { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f16, 1 },
1365     { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f16, 1 },
1366     { ISD::FP_TO_UINT, MVT::nxv4i8,  MVT::nxv4f16, 1 },
1367 
1368     // Complex, from nxv2f16.
1369     { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f16, 1 },
1370     { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f16, 1 },
1371     { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f16, 1 },
1372     { ISD::FP_TO_SINT, MVT::nxv2i8,  MVT::nxv2f16, 1 },
1373     { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f16, 1 },
1374     { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f16, 1 },
1375     { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f16, 1 },
1376     { ISD::FP_TO_UINT, MVT::nxv2i8,  MVT::nxv2f16, 1 },
1377 
1378     // Truncate from nxvmf32 to nxvmf16.
1379     { ISD::FP_ROUND, MVT::nxv2f16, MVT::nxv2f32, 1 },
1380     { ISD::FP_ROUND, MVT::nxv4f16, MVT::nxv4f32, 1 },
1381     { ISD::FP_ROUND, MVT::nxv8f16, MVT::nxv8f32, 3 },
1382 
1383     // Truncate from nxvmf64 to nxvmf16.
1384     { ISD::FP_ROUND, MVT::nxv2f16, MVT::nxv2f64, 1 },
1385     { ISD::FP_ROUND, MVT::nxv4f16, MVT::nxv4f64, 3 },
1386     { ISD::FP_ROUND, MVT::nxv8f16, MVT::nxv8f64, 7 },
1387 
1388     // Truncate from nxvmf64 to nxvmf32.
1389     { ISD::FP_ROUND, MVT::nxv2f32, MVT::nxv2f64, 1 },
1390     { ISD::FP_ROUND, MVT::nxv4f32, MVT::nxv4f64, 3 },
1391     { ISD::FP_ROUND, MVT::nxv8f32, MVT::nxv8f64, 6 },
1392 
1393     // Extend from nxvmf16 to nxvmf32.
1394     { ISD::FP_EXTEND, MVT::nxv2f32, MVT::nxv2f16, 1},
1395     { ISD::FP_EXTEND, MVT::nxv4f32, MVT::nxv4f16, 1},
1396     { ISD::FP_EXTEND, MVT::nxv8f32, MVT::nxv8f16, 2},
1397 
1398     // Extend from nxvmf16 to nxvmf64.
1399     { ISD::FP_EXTEND, MVT::nxv2f64, MVT::nxv2f16, 1},
1400     { ISD::FP_EXTEND, MVT::nxv4f64, MVT::nxv4f16, 2},
1401     { ISD::FP_EXTEND, MVT::nxv8f64, MVT::nxv8f16, 4},
1402 
1403     // Extend from nxvmf32 to nxvmf64.
1404     { ISD::FP_EXTEND, MVT::nxv2f64, MVT::nxv2f32, 1},
1405     { ISD::FP_EXTEND, MVT::nxv4f64, MVT::nxv4f32, 2},
1406     { ISD::FP_EXTEND, MVT::nxv8f64, MVT::nxv8f32, 6},
1407 
1408   };
1409 
1410   if (const auto *Entry = ConvertCostTableLookup(ConversionTbl, ISD,
1411                                                  DstTy.getSimpleVT(),
1412                                                  SrcTy.getSimpleVT()))
1413     return AdjustCost(Entry->Cost);
1414 
1415   return AdjustCost(
1416       BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
1417 }
1418 
1419 InstructionCost AArch64TTIImpl::getExtractWithExtendCost(unsigned Opcode,
1420                                                          Type *Dst,
1421                                                          VectorType *VecTy,
1422                                                          unsigned Index) {
1423 
1424   // Make sure we were given a valid extend opcode.
1425   assert((Opcode == Instruction::SExt || Opcode == Instruction::ZExt) &&
1426          "Invalid opcode");
1427 
1428   // We are extending an element we extract from a vector, so the source type
1429   // of the extend is the element type of the vector.
1430   auto *Src = VecTy->getElementType();
1431 
1432   // Sign- and zero-extends are for integer types only.
1433   assert(isa<IntegerType>(Dst) && isa<IntegerType>(Src) && "Invalid type");
1434 
1435   // Get the cost for the extract. We compute the cost (if any) for the extend
1436   // below.
1437   InstructionCost Cost =
1438       getVectorInstrCost(Instruction::ExtractElement, VecTy, Index);
1439 
1440   // Legalize the types.
1441   auto VecLT = TLI->getTypeLegalizationCost(DL, VecTy);
1442   auto DstVT = TLI->getValueType(DL, Dst);
1443   auto SrcVT = TLI->getValueType(DL, Src);
1444   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
1445 
1446   // If the resulting type is still a vector and the destination type is legal,
1447   // we may get the extension for free. If not, get the default cost for the
1448   // extend.
1449   if (!VecLT.second.isVector() || !TLI->isTypeLegal(DstVT))
1450     return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None,
1451                                    CostKind);
1452 
1453   // The destination type should be larger than the element type. If not, get
1454   // the default cost for the extend.
1455   if (DstVT.getFixedSizeInBits() < SrcVT.getFixedSizeInBits())
1456     return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None,
1457                                    CostKind);
1458 
1459   switch (Opcode) {
1460   default:
1461     llvm_unreachable("Opcode should be either SExt or ZExt");
1462 
1463   // For sign-extends, we only need a smov, which performs the extension
1464   // automatically.
1465   case Instruction::SExt:
1466     return Cost;
1467 
1468   // For zero-extends, the extend is performed automatically by a umov unless
1469   // the destination type is i64 and the element type is i8 or i16.
1470   case Instruction::ZExt:
1471     if (DstVT.getSizeInBits() != 64u || SrcVT.getSizeInBits() == 32u)
1472       return Cost;
1473   }
1474 
1475   // If we are unable to perform the extend for free, get the default cost.
1476   return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None,
1477                                  CostKind);
1478 }
1479 
1480 InstructionCost AArch64TTIImpl::getCFInstrCost(unsigned Opcode,
1481                                                TTI::TargetCostKind CostKind,
1482                                                const Instruction *I) {
1483   if (CostKind != TTI::TCK_RecipThroughput)
1484     return Opcode == Instruction::PHI ? 0 : 1;
1485   assert(CostKind == TTI::TCK_RecipThroughput && "unexpected CostKind");
1486   // Branches are assumed to be predicted.
1487   return 0;
1488 }
1489 
1490 InstructionCost AArch64TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
1491                                                    unsigned Index) {
1492   assert(Val->isVectorTy() && "This must be a vector type");
1493 
1494   if (Index != -1U) {
1495     // Legalize the type.
1496     std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);
1497 
1498     // This type is legalized to a scalar type.
1499     if (!LT.second.isVector())
1500       return 0;
1501 
1502     // The type may be split. Normalize the index to the new type.
1503     unsigned Width = LT.second.getVectorNumElements();
1504     Index = Index % Width;
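    // Illustrative example: a v8i32 extract legalizes to two v4i32 registers,
    // so an extract at index 5 is normalized to 5 % 4 == 1 within its half of
    // the split vector.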
1505 
1506     // The element at index zero is already inside the vector.
1507     if (Index == 0)
1508       return 0;
1509   }
1510 
1511   // All other insert/extracts cost this much.
1512   return ST->getVectorInsertExtractBaseCost();
1513 }
1514 
1515 InstructionCost AArch64TTIImpl::getArithmeticInstrCost(
1516     unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
1517     TTI::OperandValueKind Opd1Info, TTI::OperandValueKind Opd2Info,
1518     TTI::OperandValueProperties Opd1PropInfo,
1519     TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
1520     const Instruction *CxtI) {
1521   // TODO: Handle more cost kinds.
1522   if (CostKind != TTI::TCK_RecipThroughput)
1523     return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
1524                                          Opd2Info, Opd1PropInfo,
1525                                          Opd2PropInfo, Args, CxtI);
1526 
1527   // Legalize the type.
1528   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
1529 
1530   // If the instruction is a widening instruction (e.g., uaddl, saddw, etc.),
1531   // add in the widening overhead specified by the sub-target. Since the
1532   // extends feeding widening instructions are performed automatically, they
1533   // aren't present in the generated code and have a zero cost. By adding a
1534   // widening overhead here, we attach the total cost of the combined operation
1535   // to the widening instruction.
1536   InstructionCost Cost = 0;
1537   if (isWideningInstruction(Ty, Opcode, Args))
1538     Cost += ST->getWideningBaseCost();
1539 
1540   int ISD = TLI->InstructionOpcodeToISD(Opcode);
1541 
1542   switch (ISD) {
1543   default:
1544     return Cost + BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
1545                                                 Opd2Info,
1546                                                 Opd1PropInfo, Opd2PropInfo);
1547   case ISD::SDIV:
1548     if (Opd2Info == TargetTransformInfo::OK_UniformConstantValue &&
1549         Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
1550       // On AArch64, scalar signed division by a power-of-two constant is
1551       // normally expanded to the sequence ADD + CMP + SELECT + SRA.
1552       // The OperandValue properties may not be the same as those of the
1553       // previous operation; conservatively assume OP_None.
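      // As a rough illustration (not exact codegen output), an sdiv i32 by 4
      // is assumed to expand to something like:
      //   add  w8, w0, #3
      //   cmp  w0, #0
      //   csel w8, w8, w0, lt
      //   asr  w0, w8, #2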
1554       Cost += getArithmeticInstrCost(Instruction::Add, Ty, CostKind,
1555                                      Opd1Info, Opd2Info,
1556                                      TargetTransformInfo::OP_None,
1557                                      TargetTransformInfo::OP_None);
1558       Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind,
1559                                      Opd1Info, Opd2Info,
1560                                      TargetTransformInfo::OP_None,
1561                                      TargetTransformInfo::OP_None);
1562       Cost += getArithmeticInstrCost(Instruction::Select, Ty, CostKind,
1563                                      Opd1Info, Opd2Info,
1564                                      TargetTransformInfo::OP_None,
1565                                      TargetTransformInfo::OP_None);
1566       Cost += getArithmeticInstrCost(Instruction::AShr, Ty, CostKind,
1567                                      Opd1Info, Opd2Info,
1568                                      TargetTransformInfo::OP_None,
1569                                      TargetTransformInfo::OP_None);
1570       return Cost;
1571     }
1572     LLVM_FALLTHROUGH;
1573   case ISD::UDIV:
1574     if (Opd2Info == TargetTransformInfo::OK_UniformConstantValue) {
1575       auto VT = TLI->getValueType(DL, Ty);
1576       if (TLI->isOperationLegalOrCustom(ISD::MULHU, VT)) {
1577         // Vector signed division by a constant is expanded to the
1578         // sequence MULHS + ADD/SUB + SRA + SRL + ADD, and unsigned division
1579         // to MULHU + SUB + SRL + ADD + SRL.
1580         InstructionCost MulCost = getArithmeticInstrCost(
1581             Instruction::Mul, Ty, CostKind, Opd1Info, Opd2Info,
1582             TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
1583         InstructionCost AddCost = getArithmeticInstrCost(
1584             Instruction::Add, Ty, CostKind, Opd1Info, Opd2Info,
1585             TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
1586         InstructionCost ShrCost = getArithmeticInstrCost(
1587             Instruction::AShr, Ty, CostKind, Opd1Info, Opd2Info,
1588             TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
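        // The sum below (two multiplies, two adds, two shifts, plus one) is a
        // conservative bound covering both the signed and unsigned sequences
        // described above.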
1589         return MulCost * 2 + AddCost * 2 + ShrCost * 2 + 1;
1590       }
1591     }
1592 
1593     Cost += BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
1594                                           Opd2Info,
1595                                           Opd1PropInfo, Opd2PropInfo);
1596     if (Ty->isVectorTy()) {
1597       // On AArch64, vector division is not supported natively and is
1598       // expanded into scalar divisions of the corresponding element pairs.
1599       Cost += getArithmeticInstrCost(Instruction::ExtractElement, Ty, CostKind,
1600                                      Opd1Info, Opd2Info, Opd1PropInfo,
1601                                      Opd2PropInfo);
1602       Cost += getArithmeticInstrCost(Instruction::InsertElement, Ty, CostKind,
1603                                      Opd1Info, Opd2Info, Opd1PropInfo,
1604                                      Opd2PropInfo);
1605       // TODO: if one of the arguments is scalar, then it's not necessary to
1606       // double the cost of handling the vector elements.
1607       Cost += Cost;
1608     }
1609     return Cost;
1610 
1611   case ISD::MUL:
1612     if (LT.second != MVT::v2i64)
1613       return (Cost + 1) * LT.first;
1614     // Since we do not have a MUL.2d instruction, a mul <2 x i64> is expensive
1615     // as elements are extracted from the vectors and the muls scalarized.
1616     // As getScalarizationOverhead is a bit too pessimistic, we estimate the
1617     // cost for a i64 vector directly here, which is:
1618     // - four i64 extracts,
1619     // - two i64 inserts, and
1620     // - two muls.
1621     // So, for a v2i64 with LT.first = 1 the cost is 8, and for a v4i64 with
1622     // LT.first = 2 the cost is 16.
1623     return LT.first * 8;
1624   case ISD::ADD:
1625   case ISD::XOR:
1626   case ISD::OR:
1627   case ISD::AND:
1628     // These nodes are marked as 'custom' for combining purposes only.
1629     // We know that they are legal. See LowerAdd in ISelLowering.
1630     return (Cost + 1) * LT.first;
1631 
1632   case ISD::FADD:
1633   case ISD::FSUB:
1634   case ISD::FMUL:
1635   case ISD::FDIV:
1636   case ISD::FNEG:
1637     // These nodes are marked as 'custom' just to lower them to SVE.
1638     // We know said lowering will incur no additional cost.
1639     if (!Ty->getScalarType()->isFP128Ty())
1640       return (Cost + 2) * LT.first;
1641 
1642     return Cost + BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
1643                                                 Opd2Info,
1644                                                 Opd1PropInfo, Opd2PropInfo);
1645   }
1646 }
1647 
1648 InstructionCost AArch64TTIImpl::getAddressComputationCost(Type *Ty,
1649                                                           ScalarEvolution *SE,
1650                                                           const SCEV *Ptr) {
1651   // Address computations in vectorized code with non-consecutive addresses will
1652   // likely result in more instructions compared to scalar code where the
1653   // computation can more often be merged into the index mode. The resulting
1654   // extra micro-ops can significantly decrease throughput.
1655   unsigned NumVectorInstToHideOverhead = 10;
1656   int MaxMergeDistance = 64;
1657 
1658   if (Ty->isVectorTy() && SE &&
1659       !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
1660     return NumVectorInstToHideOverhead;
1661 
1662   // In many cases the address computation is not merged into the instruction
1663   // addressing mode.
1664   return 1;
1665 }
1666 
1667 InstructionCost AArch64TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
1668                                                    Type *CondTy,
1669                                                    CmpInst::Predicate VecPred,
1670                                                    TTI::TargetCostKind CostKind,
1671                                                    const Instruction *I) {
1672   // TODO: Handle other cost kinds.
1673   if (CostKind != TTI::TCK_RecipThroughput)
1674     return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
1675                                      I);
1676 
1677   int ISD = TLI->InstructionOpcodeToISD(Opcode);
1678   // We don't lower vector selects that are wider than the register width
1679   // particularly well.
1680   if (isa<FixedVectorType>(ValTy) && ISD == ISD::SELECT) {
1681     // We would need this many instructions to hide the scalarization happening.
1682     const int AmortizationCost = 20;
1683 
1684     // If VecPred is not set, check if we can get a predicate from the context
1685     // instruction, if its type matches the requested ValTy.
1686     if (VecPred == CmpInst::BAD_ICMP_PREDICATE && I && I->getType() == ValTy) {
1687       CmpInst::Predicate CurrentPred;
1688       if (match(I, m_Select(m_Cmp(CurrentPred, m_Value(), m_Value()), m_Value(),
1689                             m_Value())))
1690         VecPred = CurrentPred;
1691     }
1692     // Check if we have a compare/select chain that can be lowered using CMxx &
1693     // BFI pair.
1694     if (CmpInst::isIntPredicate(VecPred)) {
1695       static const auto ValidMinMaxTys = {MVT::v8i8,  MVT::v16i8, MVT::v4i16,
1696                                           MVT::v8i16, MVT::v2i32, MVT::v4i32,
1697                                           MVT::v2i64};
1698       auto LT = TLI->getTypeLegalizationCost(DL, ValTy);
1699       if (any_of(ValidMinMaxTys, [&LT](MVT M) { return M == LT.second; }))
1700         return LT.first;
1701     }
1702 
1703     static const TypeConversionCostTblEntry
1704     VectorSelectTbl[] = {
1705       { ISD::SELECT, MVT::v16i1, MVT::v16i16, 16 },
1706       { ISD::SELECT, MVT::v8i1, MVT::v8i32, 8 },
1707       { ISD::SELECT, MVT::v16i1, MVT::v16i32, 16 },
1708       { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4 * AmortizationCost },
1709       { ISD::SELECT, MVT::v8i1, MVT::v8i64, 8 * AmortizationCost },
1710       { ISD::SELECT, MVT::v16i1, MVT::v16i64, 16 * AmortizationCost }
1711     };
1712 
1713     EVT SelCondTy = TLI->getValueType(DL, CondTy);
1714     EVT SelValTy = TLI->getValueType(DL, ValTy);
1715     if (SelCondTy.isSimple() && SelValTy.isSimple()) {
1716       if (const auto *Entry = ConvertCostTableLookup(VectorSelectTbl, ISD,
1717                                                      SelCondTy.getSimpleVT(),
1718                                                      SelValTy.getSimpleVT()))
1719         return Entry->Cost;
1720     }
1721   }
1722   // The base case handles scalable vectors fine for now, since it treats the
1723   // cost as 1 * legalization cost.
1724   return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
1725 }
1726 
1727 AArch64TTIImpl::TTI::MemCmpExpansionOptions
1728 AArch64TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
1729   TTI::MemCmpExpansionOptions Options;
1730   if (ST->requiresStrictAlign()) {
1731     // TODO: Add cost modeling for strict align. Misaligned loads expand to
1732     // a bunch of instructions when strict align is enabled.
1733     return Options;
1734   }
1735   Options.AllowOverlappingLoads = true;
1736   Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
1737   Options.NumLoadsPerBlock = Options.MaxNumLoads;
1738   // TODO: Though vector loads usually perform well on AArch64, on some targets
1739   // they may wake up the FP unit, which raises the power consumption. Perhaps
1740   // they could be used with no holds barred (-O3).
1741   Options.LoadSizes = {8, 4, 2, 1};
1742   return Options;
1743 }
1744 
1745 InstructionCost
1746 AArch64TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
1747                                       Align Alignment, unsigned AddressSpace,
1748                                       TTI::TargetCostKind CostKind) {
1749   if (!isa<ScalableVectorType>(Src))
1750     return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
1751                                         CostKind);
1752   auto LT = TLI->getTypeLegalizationCost(DL, Src);
1753   if (!LT.first.isValid())
1754     return InstructionCost::getInvalid();
1755 
1756   // The code-generator is currently not able to handle scalable vectors
1757   // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
1758   // it. This change will be removed when code-generation for these types is
1759   // sufficiently reliable.
1760   if (cast<VectorType>(Src)->getElementCount() == ElementCount::getScalable(1))
1761     return InstructionCost::getInvalid();
1762 
1763   return LT.first * 2;
1764 }
1765 
1766 InstructionCost AArch64TTIImpl::getGatherScatterOpCost(
1767     unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
1768     Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) {
1769   if (useNeonVector(DataTy))
1770     return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
1771                                          Alignment, CostKind, I);
1772   auto *VT = cast<VectorType>(DataTy);
1773   auto LT = TLI->getTypeLegalizationCost(DL, DataTy);
1774   if (!LT.first.isValid())
1775     return InstructionCost::getInvalid();
1776 
1777   // The code-generator is currently not able to handle scalable vectors
1778   // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
1779   // it. This change will be removed when code-generation for these types is
1780   // sufficiently reliable.
1781   if (cast<VectorType>(DataTy)->getElementCount() ==
1782       ElementCount::getScalable(1))
1783     return InstructionCost::getInvalid();
1784 
1785   ElementCount LegalVF = LT.second.getVectorElementCount();
1786   InstructionCost MemOpCost =
1787       getMemoryOpCost(Opcode, VT->getElementType(), Alignment, 0, CostKind, I);
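  // Model the gather/scatter as one scalar memory access per (maximum) lane of
  // the legalized type, repeated for each part the type is split into.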
1788   return LT.first * MemOpCost * getMaxNumElements(LegalVF);
1789 }
1790 
1791 bool AArch64TTIImpl::useNeonVector(const Type *Ty) const {
1792   return isa<FixedVectorType>(Ty) && !ST->useSVEForFixedLengthVectors();
1793 }
1794 
1795 InstructionCost AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Ty,
1796                                                 MaybeAlign Alignment,
1797                                                 unsigned AddressSpace,
1798                                                 TTI::TargetCostKind CostKind,
1799                                                 const Instruction *I) {
1800   EVT VT = TLI->getValueType(DL, Ty, true);
1801   // Type legalization can't handle structs
1802   if (VT == MVT::Other)
1803     return BaseT::getMemoryOpCost(Opcode, Ty, Alignment, AddressSpace,
1804                                   CostKind);
1805 
1806   auto LT = TLI->getTypeLegalizationCost(DL, Ty);
1807   if (!LT.first.isValid())
1808     return InstructionCost::getInvalid();
1809 
1810   // The code-generator is currently not able to handle scalable vectors
1811   // of <vscale x 1 x eltty> yet, so return an invalid cost to avoid selecting
1812   // it. This change will be removed when code-generation for these types is
1813   // sufficiently reliable.
1814   if (auto *VTy = dyn_cast<ScalableVectorType>(Ty))
1815     if (VTy->getElementCount() == ElementCount::getScalable(1))
1816       return InstructionCost::getInvalid();
1817 
1818   // TODO: consider latency as well for TCK_SizeAndLatency.
1819   if (CostKind == TTI::TCK_CodeSize || CostKind == TTI::TCK_SizeAndLatency)
1820     return LT.first;
1821 
1822   if (CostKind != TTI::TCK_RecipThroughput)
1823     return 1;
1824 
1825   if (ST->isMisaligned128StoreSlow() && Opcode == Instruction::Store &&
1826       LT.second.is128BitVector() && (!Alignment || *Alignment < Align(16))) {
1827     // Unaligned stores are extremely inefficient. We don't split all
1828     // unaligned 128-bit stores because of the negative impact that doing so
1829     // has shown in practice on inlined block copy code.
1830     // We make such stores expensive so that we will only vectorize if there
1831     // are 6 other instructions getting vectorized.
1832     const int AmortizationCost = 6;
1833 
1834     return LT.first * 2 * AmortizationCost;
1835   }
1836 
1837   // Check truncating stores and extending loads.
1838   if (useNeonVector(Ty) &&
1839       Ty->getScalarSizeInBits() != LT.second.getScalarSizeInBits()) {
1840     // v4i8 types are lowered to a scalar load/store and sshll/xtn.
1841     if (VT == MVT::v4i8)
1842       return 2;
1843     // Otherwise we need to scalarize.
1844     return cast<FixedVectorType>(Ty)->getNumElements() * 2;
1845   }
1846 
1847   return LT.first;
1848 }
1849 
1850 InstructionCost AArch64TTIImpl::getInterleavedMemoryOpCost(
1851     unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1852     Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
1853     bool UseMaskForCond, bool UseMaskForGaps) {
1854   assert(Factor >= 2 && "Invalid interleave factor");
1855   auto *VecVTy = cast<FixedVectorType>(VecTy);
1856 
1857   if (!UseMaskForCond && !UseMaskForGaps &&
1858       Factor <= TLI->getMaxSupportedInterleaveFactor()) {
1859     unsigned NumElts = VecVTy->getNumElements();
1860     auto *SubVecTy =
1861         FixedVectorType::get(VecTy->getScalarType(), NumElts / Factor);
1862 
1863     // ldN/stN only support legal vector types of size 64 or 128 in bits.
1864     // Accesses having vector types that are a multiple of 128 bits can be
1865     // matched to more than one ldN/stN instruction.
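    // Illustrative example: an interleaved <8 x i32> access with Factor == 2
    // uses v4i32 (128-bit) sub-vectors, each of which maps to a single
    // ld2/st2, for a total cost of 2 * 1 == 2.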
1866     bool UseScalable;
1867     if (NumElts % Factor == 0 &&
1868         TLI->isLegalInterleavedAccessType(SubVecTy, DL, UseScalable))
1869       return Factor * TLI->getNumInterleavedAccesses(SubVecTy, DL, UseScalable);
1870   }
1871 
1872   return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
1873                                            Alignment, AddressSpace, CostKind,
1874                                            UseMaskForCond, UseMaskForGaps);
1875 }
1876 
1877 InstructionCost
1878 AArch64TTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) {
1879   InstructionCost Cost = 0;
1880   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
1881   for (auto *I : Tys) {
1882     if (!I->isVectorTy())
1883       continue;
1884     if (I->getScalarSizeInBits() * cast<FixedVectorType>(I)->getNumElements() ==
1885         128)
1886       Cost += getMemoryOpCost(Instruction::Store, I, Align(128), 0, CostKind) +
1887               getMemoryOpCost(Instruction::Load, I, Align(128), 0, CostKind);
1888   }
1889   return Cost;
1890 }
1891 
1892 unsigned AArch64TTIImpl::getMaxInterleaveFactor(unsigned VF) {
1893   return ST->getMaxInterleaveFactor();
1894 }
1895 
1896 // For Falkor, we want to avoid having too many strided loads in a loop since
1897 // that can exhaust the HW prefetcher resources.  We adjust the unroller
1898 // MaxCount preference below to attempt to ensure unrolling doesn't create too
1899 // many strided loads.
1900 static void
1901 getFalkorUnrollingPreferences(Loop *L, ScalarEvolution &SE,
1902                               TargetTransformInfo::UnrollingPreferences &UP) {
1903   enum { MaxStridedLoads = 7 };
1904   auto countStridedLoads = [](Loop *L, ScalarEvolution &SE) {
1905     int StridedLoads = 0;
1906     // FIXME? We could make this more precise by looking at the CFG and
1907     // e.g. not counting loads in each side of an if-then-else diamond.
1908     for (const auto BB : L->blocks()) {
1909       for (auto &I : *BB) {
1910         LoadInst *LMemI = dyn_cast<LoadInst>(&I);
1911         if (!LMemI)
1912           continue;
1913 
1914         Value *PtrValue = LMemI->getPointerOperand();
1915         if (L->isLoopInvariant(PtrValue))
1916           continue;
1917 
1918         const SCEV *LSCEV = SE.getSCEV(PtrValue);
1919         const SCEVAddRecExpr *LSCEVAddRec = dyn_cast<SCEVAddRecExpr>(LSCEV);
1920         if (!LSCEVAddRec || !LSCEVAddRec->isAffine())
1921           continue;
1922 
1923         // FIXME? We could take pairing of unrolled load copies into account
1924         // by looking at the AddRec, but we would probably have to limit this
1925         // to loops with no stores or other memory optimization barriers.
1926         ++StridedLoads;
1927         // We've seen enough strided loads that seeing more won't make a
1928         // difference.
1929         if (StridedLoads > MaxStridedLoads / 2)
1930           return StridedLoads;
1931       }
1932     }
1933     return StridedLoads;
1934   };
1935 
1936   int StridedLoads = countStridedLoads(L, SE);
1937   LLVM_DEBUG(dbgs() << "falkor-hwpf: detected " << StridedLoads
1938                     << " strided loads\n");
1939   // Pick the largest power of 2 unroll count that won't result in too many
1940   // strided loads.
1941   if (StridedLoads) {
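    // Worked example: with MaxStridedLoads == 7, detecting 3 strided loads
    // gives 7 / 3 == 2, so MaxCount is capped at 1 << Log2_32(2) == 2.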
1942     UP.MaxCount = 1 << Log2_32(MaxStridedLoads / StridedLoads);
1943     LLVM_DEBUG(dbgs() << "falkor-hwpf: setting unroll MaxCount to "
1944                       << UP.MaxCount << '\n');
1945   }
1946 }
1947 
1948 void AArch64TTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
1949                                              TTI::UnrollingPreferences &UP,
1950                                              OptimizationRemarkEmitter *ORE) {
1951   // Enable partial unrolling and runtime unrolling.
1952   BaseT::getUnrollingPreferences(L, SE, UP, ORE);
1953 
1954   UP.UpperBound = true;
1955 
1956   // Inner loops are more likely to be hot, and the runtime check can be
1957   // hoisted out by the LICM pass, so the overhead is lower; use a larger
1958   // threshold to unroll more of them.
1959   if (L->getLoopDepth() > 1)
1960     UP.PartialThreshold *= 2;
1961 
1962   // Disable partial & runtime unrolling on -Os.
1963   UP.PartialOptSizeThreshold = 0;
1964 
1965   if (ST->getProcFamily() == AArch64Subtarget::Falkor &&
1966       EnableFalkorHWPFUnrollFix)
1967     getFalkorUnrollingPreferences(L, SE, UP);
1968 
1969   // Scan the loop: don't unroll loops with calls as this could prevent
1970   // inlining. Don't unroll vector loops either, as they don't benefit much from
1971   // unrolling.
1972   for (auto *BB : L->getBlocks()) {
1973     for (auto &I : *BB) {
1974       // Don't unroll vectorized loops.
1975       if (I.getType()->isVectorTy())
1976         return;
1977 
1978       if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
1979         if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
1980           if (!isLoweredToCall(F))
1981             continue;
1982         }
1983         return;
1984       }
1985     }
1986   }
1987 
1988   // Enable runtime unrolling for in-order models.
1989   // If -mcpu is omitted, getProcFamily() returns AArch64Subtarget::Others, so
1990   // by checking for that case we can ensure that the default behaviour is
1991   // unchanged.
1992   if (ST->getProcFamily() != AArch64Subtarget::Others &&
1993       !ST->getSchedModel().isOutOfOrder()) {
1994     UP.Runtime = true;
1995     UP.Partial = true;
1996     UP.UnrollRemainder = true;
1997     UP.DefaultUnrollRuntimeCount = 4;
1998 
1999     UP.UnrollAndJam = true;
2000     UP.UnrollAndJamInnerLoopThreshold = 60;
2001   }
2002 }
2003 
2004 void AArch64TTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
2005                                            TTI::PeelingPreferences &PP) {
2006   BaseT::getPeelingPreferences(L, SE, PP);
2007 }
2008 
2009 Value *AArch64TTIImpl::getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
2010                                                          Type *ExpectedType) {
2011   switch (Inst->getIntrinsicID()) {
2012   default:
2013     return nullptr;
2014   case Intrinsic::aarch64_neon_st2:
2015   case Intrinsic::aarch64_neon_st3:
2016   case Intrinsic::aarch64_neon_st4: {
2017     // Check that the expected type is a matching struct.
2018     StructType *ST = dyn_cast<StructType>(ExpectedType);
2019     if (!ST)
2020       return nullptr;
2021     unsigned NumElts = Inst->arg_size() - 1;
2022     if (ST->getNumElements() != NumElts)
2023       return nullptr;
2024     for (unsigned i = 0, e = NumElts; i != e; ++i) {
2025       if (Inst->getArgOperand(i)->getType() != ST->getElementType(i))
2026         return nullptr;
2027     }
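    // Rebuild the expected aggregate from the vector operands being stored,
    // e.g. a { <4 x i32>, <4 x i32> } value for an st2 of two v4i32 vectors.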
2028     Value *Res = UndefValue::get(ExpectedType);
2029     IRBuilder<> Builder(Inst);
2030     for (unsigned i = 0, e = NumElts; i != e; ++i) {
2031       Value *L = Inst->getArgOperand(i);
2032       Res = Builder.CreateInsertValue(Res, L, i);
2033     }
2034     return Res;
2035   }
2036   case Intrinsic::aarch64_neon_ld2:
2037   case Intrinsic::aarch64_neon_ld3:
2038   case Intrinsic::aarch64_neon_ld4:
2039     if (Inst->getType() == ExpectedType)
2040       return Inst;
2041     return nullptr;
2042   }
2043 }
2044 
2045 bool AArch64TTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
2046                                         MemIntrinsicInfo &Info) {
2047   switch (Inst->getIntrinsicID()) {
2048   default:
2049     break;
2050   case Intrinsic::aarch64_neon_ld2:
2051   case Intrinsic::aarch64_neon_ld3:
2052   case Intrinsic::aarch64_neon_ld4:
2053     Info.ReadMem = true;
2054     Info.WriteMem = false;
2055     Info.PtrVal = Inst->getArgOperand(0);
2056     break;
2057   case Intrinsic::aarch64_neon_st2:
2058   case Intrinsic::aarch64_neon_st3:
2059   case Intrinsic::aarch64_neon_st4:
2060     Info.ReadMem = false;
2061     Info.WriteMem = true;
2062     Info.PtrVal = Inst->getArgOperand(Inst->arg_size() - 1);
2063     break;
2064   }
2065 
2066   switch (Inst->getIntrinsicID()) {
2067   default:
2068     return false;
2069   case Intrinsic::aarch64_neon_ld2:
2070   case Intrinsic::aarch64_neon_st2:
2071     Info.MatchingId = VECTOR_LDST_TWO_ELEMENTS;
2072     break;
2073   case Intrinsic::aarch64_neon_ld3:
2074   case Intrinsic::aarch64_neon_st3:
2075     Info.MatchingId = VECTOR_LDST_THREE_ELEMENTS;
2076     break;
2077   case Intrinsic::aarch64_neon_ld4:
2078   case Intrinsic::aarch64_neon_st4:
2079     Info.MatchingId = VECTOR_LDST_FOUR_ELEMENTS;
2080     break;
2081   }
2082   return true;
2083 }
2084 
2085 /// See if \p I should be considered for address type promotion. We check if
2086 /// \p I is a sext with the right type that is used in memory accesses. If it
2087 /// is used in a "complex" getelementptr, we allow it to be promoted without
2088 /// finding other sext instructions that sign extended the same initial value.
2089 /// A getelementptr is considered "complex" if it has more than 2 operands.
2090 bool AArch64TTIImpl::shouldConsiderAddressTypePromotion(
2091     const Instruction &I, bool &AllowPromotionWithoutCommonHeader) {
2092   bool Considerable = false;
2093   AllowPromotionWithoutCommonHeader = false;
2094   if (!isa<SExtInst>(&I))
2095     return false;
2096   Type *ConsideredSExtType =
2097       Type::getInt64Ty(I.getParent()->getParent()->getContext());
2098   if (I.getType() != ConsideredSExtType)
2099     return false;
2100   // See if the sext is the one with the right type and used in at least one
2101   // GetElementPtrInst.
2102   for (const User *U : I.users()) {
2103     if (const GetElementPtrInst *GEPInst = dyn_cast<GetElementPtrInst>(U)) {
2104       Considerable = true;
2105       // A getelementptr is considered as "complex" if it has more than 2
2106       // operands. We will promote a SExt used in such complex GEP as we
2107       // expect some computation to be merged if they are done on 64 bits.
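      // For example, a GEP such as
      //   getelementptr [16 x i32], [16 x i32]* %base, i64 0, i64 %sexted
      // has three operands and is therefore treated as complex.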
2108       if (GEPInst->getNumOperands() > 2) {
2109         AllowPromotionWithoutCommonHeader = true;
2110         break;
2111       }
2112     }
2113   }
2114   return Considerable;
2115 }
2116 
2117 bool AArch64TTIImpl::isLegalToVectorizeReduction(
2118     const RecurrenceDescriptor &RdxDesc, ElementCount VF) const {
2119   if (!VF.isScalable())
2120     return true;
2121 
2122   Type *Ty = RdxDesc.getRecurrenceType();
2123   if (Ty->isBFloatTy() || !isElementTypeLegalForScalableVector(Ty))
2124     return false;
2125 
2126   switch (RdxDesc.getRecurrenceKind()) {
2127   case RecurKind::Add:
2128   case RecurKind::FAdd:
2129   case RecurKind::And:
2130   case RecurKind::Or:
2131   case RecurKind::Xor:
2132   case RecurKind::SMin:
2133   case RecurKind::SMax:
2134   case RecurKind::UMin:
2135   case RecurKind::UMax:
2136   case RecurKind::FMin:
2137   case RecurKind::FMax:
2138   case RecurKind::SelectICmp:
2139   case RecurKind::SelectFCmp:
2140   case RecurKind::FMulAdd:
2141     return true;
2142   default:
2143     return false;
2144   }
2145 }
2146 
2147 InstructionCost
2148 AArch64TTIImpl::getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy,
2149                                        bool IsUnsigned,
2150                                        TTI::TargetCostKind CostKind) {
2151   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
2152 
2153   if (LT.second.getScalarType() == MVT::f16 && !ST->hasFullFP16())
2154     return BaseT::getMinMaxReductionCost(Ty, CondTy, IsUnsigned, CostKind);
2155 
2156   assert((isa<ScalableVectorType>(Ty) == isa<ScalableVectorType>(CondTy)) &&
2157          "Both vector needs to be equally scalable");
2158 
2159   InstructionCost LegalizationCost = 0;
2160   if (LT.first > 1) {
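    // When the type is split across LT.first registers, the partial results
    // are assumed to be combined with LT.first - 1 pairwise min/max operations
    // before the final horizontal reduction.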
2161     Type *LegalVTy = EVT(LT.second).getTypeForEVT(Ty->getContext());
2162     unsigned MinMaxOpcode =
2163         Ty->isFPOrFPVectorTy()
2164             ? Intrinsic::maxnum
2165             : (IsUnsigned ? Intrinsic::umin : Intrinsic::smin);
2166     IntrinsicCostAttributes Attrs(MinMaxOpcode, LegalVTy, {LegalVTy, LegalVTy});
2167     LegalizationCost = getIntrinsicInstrCost(Attrs, CostKind) * (LT.first - 1);
2168   }
2169 
2170   return LegalizationCost + /*Cost of horizontal reduction*/ 2;
2171 }
2172 
2173 InstructionCost AArch64TTIImpl::getArithmeticReductionCostSVE(
2174     unsigned Opcode, VectorType *ValTy, TTI::TargetCostKind CostKind) {
2175   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
2176   InstructionCost LegalizationCost = 0;
2177   if (LT.first > 1) {
2178     Type *LegalVTy = EVT(LT.second).getTypeForEVT(ValTy->getContext());
2179     LegalizationCost = getArithmeticInstrCost(Opcode, LegalVTy, CostKind);
2180     LegalizationCost *= LT.first - 1;
2181   }
2182 
2183   int ISD = TLI->InstructionOpcodeToISD(Opcode);
2184   assert(ISD && "Invalid opcode");
2185   // Add the final reduction cost for the legal horizontal reduction
2186   switch (ISD) {
2187   case ISD::ADD:
2188   case ISD::AND:
2189   case ISD::OR:
2190   case ISD::XOR:
2191   case ISD::FADD:
2192     return LegalizationCost + 2;
2193   default:
2194     return InstructionCost::getInvalid();
2195   }
2196 }
2197 
2198 InstructionCost
2199 AArch64TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
2200                                            Optional<FastMathFlags> FMF,
2201                                            TTI::TargetCostKind CostKind) {
2202   if (TTI::requiresOrderedReduction(FMF)) {
2203     if (auto *FixedVTy = dyn_cast<FixedVectorType>(ValTy)) {
2204       InstructionCost BaseCost =
2205           BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
2206       // Add on extra cost to reflect the extra overhead on some CPUs. We still
2207       // end up vectorizing for more computationally intensive loops.
2208       return BaseCost + FixedVTy->getNumElements();
2209     }
2210 
2211     if (Opcode != Instruction::FAdd)
2212       return InstructionCost::getInvalid();
2213 
2214     auto *VTy = cast<ScalableVectorType>(ValTy);
2215     InstructionCost Cost =
2216         getArithmeticInstrCost(Opcode, VTy->getScalarType(), CostKind);
2217     Cost *= getMaxNumElements(VTy->getElementCount());
2218     return Cost;
2219   }
2220 
2221   if (isa<ScalableVectorType>(ValTy))
2222     return getArithmeticReductionCostSVE(Opcode, ValTy, CostKind);
2223 
2224   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
2225   MVT MTy = LT.second;
2226   int ISD = TLI->InstructionOpcodeToISD(Opcode);
2227   assert(ISD && "Invalid opcode");
2228 
2229   // Horizontal adds can use the 'addv' instruction. We model the cost of these
2230   // instructions as twice a normal vector add, plus 1 for each legalization
2231   // step (LT.first). This is the only arithmetic vector reduction operation for
2232   // which we have an instruction.
2233   // OR, XOR and AND costs should match the codegen from:
2234   // OR: llvm/test/CodeGen/AArch64/reduce-or.ll
2235   // XOR: llvm/test/CodeGen/AArch64/reduce-xor.ll
2236   // AND: llvm/test/CodeGen/AArch64/reduce-and.ll
2237   static const CostTblEntry CostTblNoPairwise[]{
2238       {ISD::ADD, MVT::v8i8,   2},
2239       {ISD::ADD, MVT::v16i8,  2},
2240       {ISD::ADD, MVT::v4i16,  2},
2241       {ISD::ADD, MVT::v8i16,  2},
2242       {ISD::ADD, MVT::v4i32,  2},
2243       {ISD::OR,  MVT::v8i8,  15},
2244       {ISD::OR,  MVT::v16i8, 17},
2245       {ISD::OR,  MVT::v4i16,  7},
2246       {ISD::OR,  MVT::v8i16,  9},
2247       {ISD::OR,  MVT::v2i32,  3},
2248       {ISD::OR,  MVT::v4i32,  5},
2249       {ISD::OR,  MVT::v2i64,  3},
2250       {ISD::XOR, MVT::v8i8,  15},
2251       {ISD::XOR, MVT::v16i8, 17},
2252       {ISD::XOR, MVT::v4i16,  7},
2253       {ISD::XOR, MVT::v8i16,  9},
2254       {ISD::XOR, MVT::v2i32,  3},
2255       {ISD::XOR, MVT::v4i32,  5},
2256       {ISD::XOR, MVT::v2i64,  3},
2257       {ISD::AND, MVT::v8i8,  15},
2258       {ISD::AND, MVT::v16i8, 17},
2259       {ISD::AND, MVT::v4i16,  7},
2260       {ISD::AND, MVT::v8i16,  9},
2261       {ISD::AND, MVT::v2i32,  3},
2262       {ISD::AND, MVT::v4i32,  5},
2263       {ISD::AND, MVT::v2i64,  3},
2264   };
2265   switch (ISD) {
2266   default:
2267     break;
2268   case ISD::ADD:
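    // Worked example: a v8i16 add reduction costs 2 from the table above,
    // while a v16i16 reduction needs one extra legalization step, giving
    // (2 - 1) + 2 == 3.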
2269     if (const auto *Entry = CostTableLookup(CostTblNoPairwise, ISD, MTy))
2270       return (LT.first - 1) + Entry->Cost;
2271     break;
2272   case ISD::XOR:
2273   case ISD::AND:
2274   case ISD::OR:
2275     const auto *Entry = CostTableLookup(CostTblNoPairwise, ISD, MTy);
2276     if (!Entry)
2277       break;
2278     auto *ValVTy = cast<FixedVectorType>(ValTy);
2279     if (!ValVTy->getElementType()->isIntegerTy(1) &&
2280         MTy.getVectorNumElements() <= ValVTy->getNumElements() &&
2281         isPowerOf2_32(ValVTy->getNumElements())) {
2282       InstructionCost ExtraCost = 0;
2283       if (LT.first != 1) {
2284         // Type needs to be split, so there is an extra cost of LT.first - 1
2285         // arithmetic ops.
2286         auto *Ty = FixedVectorType::get(ValTy->getElementType(),
2287                                         MTy.getVectorNumElements());
2288         ExtraCost = getArithmeticInstrCost(Opcode, Ty, CostKind);
2289         ExtraCost *= LT.first - 1;
2290       }
2291       return Entry->Cost + ExtraCost;
2292     }
2293     break;
2294   }
2295   return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
2296 }
2297 
2298 InstructionCost AArch64TTIImpl::getSpliceCost(VectorType *Tp, int Index) {
2299   static const CostTblEntry ShuffleTbl[] = {
2300       { TTI::SK_Splice, MVT::nxv16i8,  1 },
2301       { TTI::SK_Splice, MVT::nxv8i16,  1 },
2302       { TTI::SK_Splice, MVT::nxv4i32,  1 },
2303       { TTI::SK_Splice, MVT::nxv2i64,  1 },
2304       { TTI::SK_Splice, MVT::nxv2f16,  1 },
2305       { TTI::SK_Splice, MVT::nxv4f16,  1 },
2306       { TTI::SK_Splice, MVT::nxv8f16,  1 },
2307       { TTI::SK_Splice, MVT::nxv2bf16, 1 },
2308       { TTI::SK_Splice, MVT::nxv4bf16, 1 },
2309       { TTI::SK_Splice, MVT::nxv8bf16, 1 },
2310       { TTI::SK_Splice, MVT::nxv2f32,  1 },
2311       { TTI::SK_Splice, MVT::nxv4f32,  1 },
2312       { TTI::SK_Splice, MVT::nxv2f64,  1 },
2313   };
2314 
2315   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
2316   Type *LegalVTy = EVT(LT.second).getTypeForEVT(Tp->getContext());
2317   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
2318   EVT PromotedVT = LT.second.getScalarType() == MVT::i1
2319                        ? TLI->getPromotedVTForPredicate(EVT(LT.second))
2320                        : LT.second;
2321   Type *PromotedVTy = EVT(PromotedVT).getTypeForEVT(Tp->getContext());
2322   InstructionCost LegalizationCost = 0;
2323   if (Index < 0) {
2324     LegalizationCost =
2325         getCmpSelInstrCost(Instruction::ICmp, PromotedVTy, PromotedVTy,
2326                            CmpInst::BAD_ICMP_PREDICATE, CostKind) +
2327         getCmpSelInstrCost(Instruction::Select, PromotedVTy, LegalVTy,
2328                            CmpInst::BAD_ICMP_PREDICATE, CostKind);
2329   }
2330 
2331   // Predicated splices are promoted during lowering (see
2332   // AArch64ISelLowering.cpp), so the cost is computed on the promoted type.
2333   if (LT.second.getScalarType() == MVT::i1) {
2334     LegalizationCost +=
2335         getCastInstrCost(Instruction::ZExt, PromotedVTy, LegalVTy,
2336                          TTI::CastContextHint::None, CostKind) +
2337         getCastInstrCost(Instruction::Trunc, LegalVTy, PromotedVTy,
2338                          TTI::CastContextHint::None, CostKind);
2339   }
2340   const auto *Entry =
2341       CostTableLookup(ShuffleTbl, TTI::SK_Splice, PromotedVT.getSimpleVT());
2342   assert(Entry && "Illegal Type for Splice");
2343   LegalizationCost += Entry->Cost;
2344   return LegalizationCost * LT.first;
2345 }
2346 
2347 InstructionCost AArch64TTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
2348                                                VectorType *Tp,
2349                                                ArrayRef<int> Mask, int Index,
2350                                                VectorType *SubTp) {
2351   Kind = improveShuffleKindFromMask(Kind, Mask);
2352   if (Kind == TTI::SK_Broadcast || Kind == TTI::SK_Transpose ||
2353       Kind == TTI::SK_Select || Kind == TTI::SK_PermuteSingleSrc ||
2354       Kind == TTI::SK_Reverse) {
2355     static const CostTblEntry ShuffleTbl[] = {
2356       // Broadcast shuffle kinds can be performed with 'dup'.
2357       { TTI::SK_Broadcast, MVT::v8i8,  1 },
2358       { TTI::SK_Broadcast, MVT::v16i8, 1 },
2359       { TTI::SK_Broadcast, MVT::v4i16, 1 },
2360       { TTI::SK_Broadcast, MVT::v8i16, 1 },
2361       { TTI::SK_Broadcast, MVT::v2i32, 1 },
2362       { TTI::SK_Broadcast, MVT::v4i32, 1 },
2363       { TTI::SK_Broadcast, MVT::v2i64, 1 },
2364       { TTI::SK_Broadcast, MVT::v2f32, 1 },
2365       { TTI::SK_Broadcast, MVT::v4f32, 1 },
2366       { TTI::SK_Broadcast, MVT::v2f64, 1 },
2367       // Transpose shuffle kinds can be performed with 'trn1/trn2' and
2368       // 'zip1/zip2' instructions.
2369       { TTI::SK_Transpose, MVT::v8i8,  1 },
2370       { TTI::SK_Transpose, MVT::v16i8, 1 },
2371       { TTI::SK_Transpose, MVT::v4i16, 1 },
2372       { TTI::SK_Transpose, MVT::v8i16, 1 },
2373       { TTI::SK_Transpose, MVT::v2i32, 1 },
2374       { TTI::SK_Transpose, MVT::v4i32, 1 },
2375       { TTI::SK_Transpose, MVT::v2i64, 1 },
2376       { TTI::SK_Transpose, MVT::v2f32, 1 },
2377       { TTI::SK_Transpose, MVT::v4f32, 1 },
2378       { TTI::SK_Transpose, MVT::v2f64, 1 },
2379       // Select shuffle kinds.
2380       // TODO: handle vXi8/vXi16.
2381       { TTI::SK_Select, MVT::v2i32, 1 }, // mov.
2382       { TTI::SK_Select, MVT::v4i32, 2 }, // rev+trn (or similar).
2383       { TTI::SK_Select, MVT::v2i64, 1 }, // mov.
2384       { TTI::SK_Select, MVT::v2f32, 1 }, // mov.
2385       { TTI::SK_Select, MVT::v4f32, 2 }, // rev+trn (or similar).
2386       { TTI::SK_Select, MVT::v2f64, 1 }, // mov.
2387       // PermuteSingleSrc shuffle kinds.
2388       { TTI::SK_PermuteSingleSrc, MVT::v2i32, 1 }, // mov.
2389       { TTI::SK_PermuteSingleSrc, MVT::v4i32, 3 }, // perfectshuffle worst case.
2390       { TTI::SK_PermuteSingleSrc, MVT::v2i64, 1 }, // mov.
2391       { TTI::SK_PermuteSingleSrc, MVT::v2f32, 1 }, // mov.
2392       { TTI::SK_PermuteSingleSrc, MVT::v4f32, 3 }, // perfectshuffle worst case.
2393       { TTI::SK_PermuteSingleSrc, MVT::v2f64, 1 }, // mov.
2394       { TTI::SK_PermuteSingleSrc, MVT::v4i16, 3 }, // perfectshuffle worst case.
2395       { TTI::SK_PermuteSingleSrc, MVT::v4f16, 3 }, // perfectshuffle worst case.
2396       { TTI::SK_PermuteSingleSrc, MVT::v4bf16, 3 }, // perfectshuffle worst case.
2397       { TTI::SK_PermuteSingleSrc, MVT::v8i16, 8 }, // constpool + load + tbl
2398       { TTI::SK_PermuteSingleSrc, MVT::v8f16, 8 }, // constpool + load + tbl
2399       { TTI::SK_PermuteSingleSrc, MVT::v8bf16, 8 }, // constpool + load + tbl
2400       { TTI::SK_PermuteSingleSrc, MVT::v8i8, 8 }, // constpool + load + tbl
2401       { TTI::SK_PermuteSingleSrc, MVT::v16i8, 8 }, // constpool + load + tbl
2402       // Reverse can be lowered with `rev`.
2403       { TTI::SK_Reverse, MVT::v2i32, 1 }, // mov.
2404       { TTI::SK_Reverse, MVT::v4i32, 2 }, // REV64; EXT
2405       { TTI::SK_Reverse, MVT::v2i64, 1 }, // mov.
2406       { TTI::SK_Reverse, MVT::v2f32, 1 }, // mov.
2407       { TTI::SK_Reverse, MVT::v4f32, 2 }, // REV64; EXT
2408       { TTI::SK_Reverse, MVT::v2f64, 1 }, // mov.
2409       // Broadcast shuffle kinds for scalable vectors
2410       { TTI::SK_Broadcast, MVT::nxv16i8,  1 },
2411       { TTI::SK_Broadcast, MVT::nxv8i16,  1 },
2412       { TTI::SK_Broadcast, MVT::nxv4i32,  1 },
2413       { TTI::SK_Broadcast, MVT::nxv2i64,  1 },
2414       { TTI::SK_Broadcast, MVT::nxv2f16,  1 },
2415       { TTI::SK_Broadcast, MVT::nxv4f16,  1 },
2416       { TTI::SK_Broadcast, MVT::nxv8f16,  1 },
2417       { TTI::SK_Broadcast, MVT::nxv2bf16, 1 },
2418       { TTI::SK_Broadcast, MVT::nxv4bf16, 1 },
2419       { TTI::SK_Broadcast, MVT::nxv8bf16, 1 },
2420       { TTI::SK_Broadcast, MVT::nxv2f32,  1 },
2421       { TTI::SK_Broadcast, MVT::nxv4f32,  1 },
2422       { TTI::SK_Broadcast, MVT::nxv2f64,  1 },
2423       { TTI::SK_Broadcast, MVT::nxv16i1,  1 },
2424       { TTI::SK_Broadcast, MVT::nxv8i1,   1 },
2425       { TTI::SK_Broadcast, MVT::nxv4i1,   1 },
2426       { TTI::SK_Broadcast, MVT::nxv2i1,   1 },
2427       // Handle the cases for vector.reverse with scalable vectors
2428       { TTI::SK_Reverse, MVT::nxv16i8,  1 },
2429       { TTI::SK_Reverse, MVT::nxv8i16,  1 },
2430       { TTI::SK_Reverse, MVT::nxv4i32,  1 },
2431       { TTI::SK_Reverse, MVT::nxv2i64,  1 },
2432       { TTI::SK_Reverse, MVT::nxv2f16,  1 },
2433       { TTI::SK_Reverse, MVT::nxv4f16,  1 },
2434       { TTI::SK_Reverse, MVT::nxv8f16,  1 },
2435       { TTI::SK_Reverse, MVT::nxv2bf16, 1 },
2436       { TTI::SK_Reverse, MVT::nxv4bf16, 1 },
2437       { TTI::SK_Reverse, MVT::nxv8bf16, 1 },
2438       { TTI::SK_Reverse, MVT::nxv2f32,  1 },
2439       { TTI::SK_Reverse, MVT::nxv4f32,  1 },
2440       { TTI::SK_Reverse, MVT::nxv2f64,  1 },
2441       { TTI::SK_Reverse, MVT::nxv16i1,  1 },
2442       { TTI::SK_Reverse, MVT::nxv8i1,   1 },
2443       { TTI::SK_Reverse, MVT::nxv4i1,   1 },
2444       { TTI::SK_Reverse, MVT::nxv2i1,   1 },
2445     };
2446     std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
2447     if (const auto *Entry = CostTableLookup(ShuffleTbl, Kind, LT.second))
2448       return LT.first * Entry->Cost;
2449   }
2450   if (Kind == TTI::SK_Splice && isa<ScalableVectorType>(Tp))
2451     return getSpliceCost(Tp, Index);
2452   return BaseT::getShuffleCost(Kind, Tp, Mask, Index, SubTp);
2453 }
2454