1 //===-- AArch64TargetTransformInfo.cpp - AArch64 specific TTI -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "AArch64TargetTransformInfo.h"
10 #include "AArch64ExpandImm.h"
11 #include "MCTargetDesc/AArch64AddressingModes.h"
12 #include "llvm/Analysis/IVDescriptors.h"
13 #include "llvm/Analysis/LoopInfo.h"
14 #include "llvm/Analysis/TargetTransformInfo.h"
15 #include "llvm/CodeGen/BasicTTIImpl.h"
16 #include "llvm/CodeGen/CostTable.h"
17 #include "llvm/CodeGen/TargetLowering.h"
18 #include "llvm/IR/Intrinsics.h"
19 #include "llvm/IR/IntrinsicInst.h"
20 #include "llvm/IR/IntrinsicsAArch64.h"
21 #include "llvm/IR/PatternMatch.h"
22 #include "llvm/Support/Debug.h"
23 #include "llvm/Transforms/InstCombine/InstCombiner.h"
24 #include <algorithm>
25 using namespace llvm;
26 using namespace llvm::PatternMatch;
27 
28 #define DEBUG_TYPE "aarch64tti"
29 
30 static cl::opt<bool> EnableFalkorHWPFUnrollFix("enable-falkor-hwpf-unroll-fix",
31                                                cl::init(true), cl::Hidden);
32 
33 static cl::opt<unsigned> SVEGatherOverhead("sve-gather-overhead", cl::init(10),
34                                            cl::Hidden);
35 
36 static cl::opt<unsigned> SVEScatterOverhead("sve-scatter-overhead",
37                                             cl::init(10), cl::Hidden);
38 
39 bool AArch64TTIImpl::areInlineCompatible(const Function *Caller,
40                                          const Function *Callee) const {
41   const TargetMachine &TM = getTLI()->getTargetMachine();
42 
43   const FeatureBitset &CallerBits =
44       TM.getSubtargetImpl(*Caller)->getFeatureBits();
45   const FeatureBitset &CalleeBits =
46       TM.getSubtargetImpl(*Callee)->getFeatureBits();
47 
  // Inline a callee if its target-features are a subset of the caller's
  // target-features.
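  // For example, a callee compiled with "+sve" cannot be inlined into a caller
  // whose subtarget does not include SVE.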
50   return (CallerBits & CalleeBits) == CalleeBits;
51 }
52 
53 /// Calculate the cost of materializing a 64-bit value. This helper
54 /// method might only calculate a fraction of a larger immediate. Therefore it
55 /// is valid to return a cost of ZERO.
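/// For example, a wide immediate such as 0x0123456789ABCDEF typically expands
/// to one MOVZ plus three MOVKs, giving a cost of 4.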
56 InstructionCost AArch64TTIImpl::getIntImmCost(int64_t Val) {
57   // Check if the immediate can be encoded within an instruction.
58   if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, 64))
59     return 0;
60 
61   if (Val < 0)
62     Val = ~Val;
63 
64   // Calculate how many moves we will need to materialize this constant.
65   SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn;
66   AArch64_IMM::expandMOVImm(Val, 64, Insn);
67   return Insn.size();
68 }
69 
70 /// Calculate the cost of materializing the given constant.
71 InstructionCost AArch64TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
72                                               TTI::TargetCostKind CostKind) {
73   assert(Ty->isIntegerTy());
74 
75   unsigned BitSize = Ty->getPrimitiveSizeInBits();
76   if (BitSize == 0)
77     return ~0U;
78 
  // Sign-extend all constants to a multiple of 64 bits.
80   APInt ImmVal = Imm;
81   if (BitSize & 0x3f)
82     ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);
83 
84   // Split the constant into 64-bit chunks and calculate the cost for each
85   // chunk.
86   InstructionCost Cost = 0;
87   for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
88     APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
89     int64_t Val = Tmp.getSExtValue();
90     Cost += getIntImmCost(Val);
91   }
  // We need at least one instruction to materialize the constant.
93   return std::max<InstructionCost>(1, Cost);
94 }
95 
96 InstructionCost AArch64TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
97                                                   const APInt &Imm, Type *Ty,
98                                                   TTI::TargetCostKind CostKind,
99                                                   Instruction *Inst) {
100   assert(Ty->isIntegerTy());
101 
102   unsigned BitSize = Ty->getPrimitiveSizeInBits();
103   // There is no cost model for constants with a bit size of 0. Return TCC_Free
104   // here, so that constant hoisting will ignore this constant.
105   if (BitSize == 0)
106     return TTI::TCC_Free;
107 
108   unsigned ImmIdx = ~0U;
109   switch (Opcode) {
110   default:
111     return TTI::TCC_Free;
112   case Instruction::GetElementPtr:
113     // Always hoist the base address of a GetElementPtr.
114     if (Idx == 0)
115       return 2 * TTI::TCC_Basic;
116     return TTI::TCC_Free;
117   case Instruction::Store:
118     ImmIdx = 0;
119     break;
120   case Instruction::Add:
121   case Instruction::Sub:
122   case Instruction::Mul:
123   case Instruction::UDiv:
124   case Instruction::SDiv:
125   case Instruction::URem:
126   case Instruction::SRem:
127   case Instruction::And:
128   case Instruction::Or:
129   case Instruction::Xor:
130   case Instruction::ICmp:
131     ImmIdx = 1;
132     break;
133   // Always return TCC_Free for the shift value of a shift instruction.
134   case Instruction::Shl:
135   case Instruction::LShr:
136   case Instruction::AShr:
137     if (Idx == 1)
138       return TTI::TCC_Free;
139     break;
140   case Instruction::Trunc:
141   case Instruction::ZExt:
142   case Instruction::SExt:
143   case Instruction::IntToPtr:
144   case Instruction::PtrToInt:
145   case Instruction::BitCast:
146   case Instruction::PHI:
147   case Instruction::Call:
148   case Instruction::Select:
149   case Instruction::Ret:
150   case Instruction::Load:
151     break;
152   }
153 
154   if (Idx == ImmIdx) {
155     int NumConstants = (BitSize + 63) / 64;
156     InstructionCost Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
157     return (Cost <= NumConstants * TTI::TCC_Basic)
158                ? static_cast<int>(TTI::TCC_Free)
159                : Cost;
160   }
161   return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
162 }
163 
164 InstructionCost
165 AArch64TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
166                                     const APInt &Imm, Type *Ty,
167                                     TTI::TargetCostKind CostKind) {
168   assert(Ty->isIntegerTy());
169 
170   unsigned BitSize = Ty->getPrimitiveSizeInBits();
171   // There is no cost model for constants with a bit size of 0. Return TCC_Free
172   // here, so that constant hoisting will ignore this constant.
173   if (BitSize == 0)
174     return TTI::TCC_Free;
175 
176   // Most (all?) AArch64 intrinsics do not support folding immediates into the
177   // selected instruction, so we compute the materialization cost for the
178   // immediate directly.
179   if (IID >= Intrinsic::aarch64_addg && IID <= Intrinsic::aarch64_udiv)
180     return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
181 
182   switch (IID) {
183   default:
184     return TTI::TCC_Free;
185   case Intrinsic::sadd_with_overflow:
186   case Intrinsic::uadd_with_overflow:
187   case Intrinsic::ssub_with_overflow:
188   case Intrinsic::usub_with_overflow:
189   case Intrinsic::smul_with_overflow:
190   case Intrinsic::umul_with_overflow:
191     if (Idx == 1) {
192       int NumConstants = (BitSize + 63) / 64;
193       InstructionCost Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
194       return (Cost <= NumConstants * TTI::TCC_Basic)
195                  ? static_cast<int>(TTI::TCC_Free)
196                  : Cost;
197     }
198     break;
199   case Intrinsic::experimental_stackmap:
200     if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
201       return TTI::TCC_Free;
202     break;
203   case Intrinsic::experimental_patchpoint_void:
204   case Intrinsic::experimental_patchpoint_i64:
205     if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
206       return TTI::TCC_Free;
207     break;
208   case Intrinsic::experimental_gc_statepoint:
209     if ((Idx < 5) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
210       return TTI::TCC_Free;
211     break;
212   }
213   return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
214 }
215 
216 TargetTransformInfo::PopcntSupportKind
217 AArch64TTIImpl::getPopcntSupport(unsigned TyWidth) {
218   assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
219   if (TyWidth == 32 || TyWidth == 64)
220     return TTI::PSK_FastHardware;
  // TODO: AArch64TargetLowering::LowerCTPOP() supports 128-bit popcount.
222   return TTI::PSK_Software;
223 }
224 
225 InstructionCost
226 AArch64TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
227                                       TTI::TargetCostKind CostKind) {
228   auto *RetTy = ICA.getReturnType();
229   switch (ICA.getID()) {
230   case Intrinsic::umin:
231   case Intrinsic::umax:
232   case Intrinsic::smin:
233   case Intrinsic::smax: {
234     static const auto ValidMinMaxTys = {MVT::v8i8,  MVT::v16i8, MVT::v4i16,
235                                         MVT::v8i16, MVT::v2i32, MVT::v4i32};
236     auto LT = TLI->getTypeLegalizationCost(DL, RetTy);
    // v2i64 types get converted to cmp+bif, hence the cost of 2.
238     if (LT.second == MVT::v2i64)
239       return LT.first * 2;
240     if (any_of(ValidMinMaxTys, [&LT](MVT M) { return M == LT.second; }))
241       return LT.first;
242     break;
243   }
244   case Intrinsic::sadd_sat:
245   case Intrinsic::ssub_sat:
246   case Intrinsic::uadd_sat:
247   case Intrinsic::usub_sat: {
248     static const auto ValidSatTys = {MVT::v8i8,  MVT::v16i8, MVT::v4i16,
249                                      MVT::v8i16, MVT::v2i32, MVT::v4i32,
250                                      MVT::v2i64};
251     auto LT = TLI->getTypeLegalizationCost(DL, RetTy);
252     // This is a base cost of 1 for the vadd, plus 3 extract shifts if we
253     // need to extend the type, as it uses shr(qadd(shl, shl)).
254     unsigned Instrs =
255         LT.second.getScalarSizeInBits() == RetTy->getScalarSizeInBits() ? 1 : 4;
256     if (any_of(ValidSatTys, [&LT](MVT M) { return M == LT.second; }))
257       return LT.first * Instrs;
258     break;
259   }
260   case Intrinsic::abs: {
261     static const auto ValidAbsTys = {MVT::v8i8,  MVT::v16i8, MVT::v4i16,
262                                      MVT::v8i16, MVT::v2i32, MVT::v4i32,
263                                      MVT::v2i64};
264     auto LT = TLI->getTypeLegalizationCost(DL, RetTy);
265     if (any_of(ValidAbsTys, [&LT](MVT M) { return M == LT.second; }))
266       return LT.first;
267     break;
268   }
269   case Intrinsic::experimental_stepvector: {
270     InstructionCost Cost = 1; // Cost of the `index' instruction
271     auto LT = TLI->getTypeLegalizationCost(DL, RetTy);
272     // Legalisation of illegal vectors involves an `index' instruction plus
273     // (LT.first - 1) vector adds.
274     if (LT.first > 1) {
275       Type *LegalVTy = EVT(LT.second).getTypeForEVT(RetTy->getContext());
276       InstructionCost AddCost =
277           getArithmeticInstrCost(Instruction::Add, LegalVTy, CostKind);
278       Cost += AddCost * (LT.first - 1);
279     }
280     return Cost;
281   }
282   case Intrinsic::bitreverse: {
283     static const CostTblEntry BitreverseTbl[] = {
284         {Intrinsic::bitreverse, MVT::i32, 1},
285         {Intrinsic::bitreverse, MVT::i64, 1},
286         {Intrinsic::bitreverse, MVT::v8i8, 1},
287         {Intrinsic::bitreverse, MVT::v16i8, 1},
288         {Intrinsic::bitreverse, MVT::v4i16, 2},
289         {Intrinsic::bitreverse, MVT::v8i16, 2},
290         {Intrinsic::bitreverse, MVT::v2i32, 2},
291         {Intrinsic::bitreverse, MVT::v4i32, 2},
292         {Intrinsic::bitreverse, MVT::v1i64, 2},
293         {Intrinsic::bitreverse, MVT::v2i64, 2},
294     };
295     const auto LegalisationCost = TLI->getTypeLegalizationCost(DL, RetTy);
296     const auto *Entry =
297         CostTableLookup(BitreverseTbl, ICA.getID(), LegalisationCost.second);
298     if (Entry) {
      // The cost model uses the legal type (i32) that i8 and i16 are promoted
      // to, plus 1 so that we match the actual lowering cost.
301       if (TLI->getValueType(DL, RetTy, true) == MVT::i8 ||
302           TLI->getValueType(DL, RetTy, true) == MVT::i16)
303         return LegalisationCost.first * Entry->Cost + 1;
304 
305       return LegalisationCost.first * Entry->Cost;
306     }
307     break;
308   }
309   case Intrinsic::ctpop: {
310     static const CostTblEntry CtpopCostTbl[] = {
311         {ISD::CTPOP, MVT::v2i64, 4},
312         {ISD::CTPOP, MVT::v4i32, 3},
313         {ISD::CTPOP, MVT::v8i16, 2},
314         {ISD::CTPOP, MVT::v16i8, 1},
315         {ISD::CTPOP, MVT::i64,   4},
316         {ISD::CTPOP, MVT::v2i32, 3},
317         {ISD::CTPOP, MVT::v4i16, 2},
318         {ISD::CTPOP, MVT::v8i8,  1},
319         {ISD::CTPOP, MVT::i32,   5},
320     };
321     auto LT = TLI->getTypeLegalizationCost(DL, RetTy);
322     MVT MTy = LT.second;
323     if (const auto *Entry = CostTableLookup(CtpopCostTbl, ISD::CTPOP, MTy)) {
324       // Extra cost of +1 when illegal vector types are legalized by promoting
325       // the integer type.
326       int ExtraCost = MTy.isVector() && MTy.getScalarSizeInBits() !=
327                                             RetTy->getScalarSizeInBits()
328                           ? 1
329                           : 0;
330       return LT.first * Entry->Cost + ExtraCost;
331     }
332     break;
333   }
334   case Intrinsic::sadd_with_overflow:
335   case Intrinsic::uadd_with_overflow:
336   case Intrinsic::ssub_with_overflow:
337   case Intrinsic::usub_with_overflow:
338   case Intrinsic::smul_with_overflow:
339   case Intrinsic::umul_with_overflow: {
340     static const CostTblEntry WithOverflowCostTbl[] = {
341         {Intrinsic::sadd_with_overflow, MVT::i8, 3},
342         {Intrinsic::uadd_with_overflow, MVT::i8, 3},
343         {Intrinsic::sadd_with_overflow, MVT::i16, 3},
344         {Intrinsic::uadd_with_overflow, MVT::i16, 3},
345         {Intrinsic::sadd_with_overflow, MVT::i32, 1},
346         {Intrinsic::uadd_with_overflow, MVT::i32, 1},
347         {Intrinsic::sadd_with_overflow, MVT::i64, 1},
348         {Intrinsic::uadd_with_overflow, MVT::i64, 1},
349         {Intrinsic::ssub_with_overflow, MVT::i8, 3},
350         {Intrinsic::usub_with_overflow, MVT::i8, 3},
351         {Intrinsic::ssub_with_overflow, MVT::i16, 3},
352         {Intrinsic::usub_with_overflow, MVT::i16, 3},
353         {Intrinsic::ssub_with_overflow, MVT::i32, 1},
354         {Intrinsic::usub_with_overflow, MVT::i32, 1},
355         {Intrinsic::ssub_with_overflow, MVT::i64, 1},
356         {Intrinsic::usub_with_overflow, MVT::i64, 1},
357         {Intrinsic::smul_with_overflow, MVT::i8, 5},
358         {Intrinsic::umul_with_overflow, MVT::i8, 4},
359         {Intrinsic::smul_with_overflow, MVT::i16, 5},
360         {Intrinsic::umul_with_overflow, MVT::i16, 4},
361         {Intrinsic::smul_with_overflow, MVT::i32, 2}, // eg umull;tst
362         {Intrinsic::umul_with_overflow, MVT::i32, 2}, // eg umull;cmp sxtw
363         {Intrinsic::smul_with_overflow, MVT::i64, 3}, // eg mul;smulh;cmp
364         {Intrinsic::umul_with_overflow, MVT::i64, 3}, // eg mul;umulh;cmp asr
365     };
366     EVT MTy = TLI->getValueType(DL, RetTy->getContainedType(0), true);
367     if (MTy.isSimple())
368       if (const auto *Entry = CostTableLookup(WithOverflowCostTbl, ICA.getID(),
369                                               MTy.getSimpleVT()))
370         return Entry->Cost;
371     break;
372   }
373   default:
374     break;
375   }
376   return BaseT::getIntrinsicInstrCost(ICA, CostKind);
377 }
378 
/// The function removes redundant reinterpret casts (svbool conversions) in
/// the presence of control flow.
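/// For example, a phi whose incoming values are all convert.to.svbool casts,
/// immediately reinterpreted back to the incoming type, can be rewritten as a
/// phi over the original (pre-cast) operands.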
381 static Optional<Instruction *> processPhiNode(InstCombiner &IC,
382                                               IntrinsicInst &II) {
383   SmallVector<Instruction *, 32> Worklist;
384   auto RequiredType = II.getType();
385 
386   auto *PN = dyn_cast<PHINode>(II.getArgOperand(0));
387   assert(PN && "Expected Phi Node!");
388 
389   // Don't create a new Phi unless we can remove the old one.
390   if (!PN->hasOneUse())
391     return None;
392 
393   for (Value *IncValPhi : PN->incoming_values()) {
394     auto *Reinterpret = dyn_cast<IntrinsicInst>(IncValPhi);
395     if (!Reinterpret ||
396         Reinterpret->getIntrinsicID() !=
397             Intrinsic::aarch64_sve_convert_to_svbool ||
398         RequiredType != Reinterpret->getArgOperand(0)->getType())
399       return None;
400   }
401 
402   // Create the new Phi
403   LLVMContext &Ctx = PN->getContext();
404   IRBuilder<> Builder(Ctx);
405   Builder.SetInsertPoint(PN);
406   PHINode *NPN = Builder.CreatePHI(RequiredType, PN->getNumIncomingValues());
407   Worklist.push_back(PN);
408 
409   for (unsigned I = 0; I < PN->getNumIncomingValues(); I++) {
410     auto *Reinterpret = cast<Instruction>(PN->getIncomingValue(I));
411     NPN->addIncoming(Reinterpret->getOperand(0), PN->getIncomingBlock(I));
412     Worklist.push_back(Reinterpret);
413   }
414 
  // Clean up the Phi node and the redundant reinterprets.
416   return IC.replaceInstUsesWith(II, NPN);
417 }
418 
419 static Optional<Instruction *> instCombineConvertFromSVBool(InstCombiner &IC,
420                                                             IntrinsicInst &II) {
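  // e.g. convert.from.svbool(convert.to.svbool(x)) --> x, provided x already
  // has the required type and no lanes would need to be zeroed.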
421   // If the reinterpret instruction operand is a PHI Node
422   if (isa<PHINode>(II.getArgOperand(0)))
423     return processPhiNode(IC, II);
424 
425   SmallVector<Instruction *, 32> CandidatesForRemoval;
426   Value *Cursor = II.getOperand(0), *EarliestReplacement = nullptr;
427 
428   const auto *IVTy = cast<VectorType>(II.getType());
429 
430   // Walk the chain of conversions.
431   while (Cursor) {
432     // If the type of the cursor has fewer lanes than the final result, zeroing
433     // must take place, which breaks the equivalence chain.
434     const auto *CursorVTy = cast<VectorType>(Cursor->getType());
435     if (CursorVTy->getElementCount().getKnownMinValue() <
436         IVTy->getElementCount().getKnownMinValue())
437       break;
438 
    // If the cursor has the same type as II, it is a viable replacement.
440     if (Cursor->getType() == IVTy)
441       EarliestReplacement = Cursor;
442 
443     auto *IntrinsicCursor = dyn_cast<IntrinsicInst>(Cursor);
444 
445     // If this is not an SVE conversion intrinsic, this is the end of the chain.
446     if (!IntrinsicCursor || !(IntrinsicCursor->getIntrinsicID() ==
447                                   Intrinsic::aarch64_sve_convert_to_svbool ||
448                               IntrinsicCursor->getIntrinsicID() ==
449                                   Intrinsic::aarch64_sve_convert_from_svbool))
450       break;
451 
452     CandidatesForRemoval.insert(CandidatesForRemoval.begin(), IntrinsicCursor);
453     Cursor = IntrinsicCursor->getOperand(0);
454   }
455 
456   // If no viable replacement in the conversion chain was found, there is
457   // nothing to do.
458   if (!EarliestReplacement)
459     return None;
460 
461   return IC.replaceInstUsesWith(II, EarliestReplacement);
462 }
463 
464 static Optional<Instruction *> instCombineSVEDup(InstCombiner &IC,
465                                                  IntrinsicInst &II) {
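  // dup(vec, ptrue(vl1), x) only writes lane 0, so it can be rewritten as an
  // insertelement of x into lane 0 of vec.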
466   IntrinsicInst *Pg = dyn_cast<IntrinsicInst>(II.getArgOperand(1));
467   if (!Pg)
468     return None;
469 
470   if (Pg->getIntrinsicID() != Intrinsic::aarch64_sve_ptrue)
471     return None;
472 
473   const auto PTruePattern =
474       cast<ConstantInt>(Pg->getOperand(0))->getZExtValue();
475   if (PTruePattern != AArch64SVEPredPattern::vl1)
476     return None;
477 
478   // The intrinsic is inserting into lane zero so use an insert instead.
479   auto *IdxTy = Type::getInt64Ty(II.getContext());
480   auto *Insert = InsertElementInst::Create(
481       II.getArgOperand(0), II.getArgOperand(2), ConstantInt::get(IdxTy, 0));
482   Insert->insertBefore(&II);
483   Insert->takeName(&II);
484 
485   return IC.replaceInstUsesWith(II, Insert);
486 }
487 
488 static Optional<Instruction *> instCombineSVEDupX(InstCombiner &IC,
489                                                   IntrinsicInst &II) {
490   // Replace DupX with a regular IR splat.
491   IRBuilder<> Builder(II.getContext());
492   Builder.SetInsertPoint(&II);
493   auto *RetTy = cast<ScalableVectorType>(II.getType());
494   Value *Splat =
495       Builder.CreateVectorSplat(RetTy->getElementCount(), II.getArgOperand(0));
496   Splat->takeName(&II);
497   return IC.replaceInstUsesWith(II, Splat);
498 }
499 
500 static Optional<Instruction *> instCombineSVECmpNE(InstCombiner &IC,
501                                                    IntrinsicInst &II) {
502   LLVMContext &Ctx = II.getContext();
503   IRBuilder<> Builder(Ctx);
504   Builder.SetInsertPoint(&II);
505 
506   // Check that the predicate is all active
507   auto *Pg = dyn_cast<IntrinsicInst>(II.getArgOperand(0));
508   if (!Pg || Pg->getIntrinsicID() != Intrinsic::aarch64_sve_ptrue)
509     return None;
510 
511   const auto PTruePattern =
512       cast<ConstantInt>(Pg->getOperand(0))->getZExtValue();
513   if (PTruePattern != AArch64SVEPredPattern::all)
514     return None;
515 
516   // Check that we have a compare of zero..
517   auto *SplatValue =
518       dyn_cast_or_null<ConstantInt>(getSplatValue(II.getArgOperand(2)));
519   if (!SplatValue || !SplatValue->isZero())
520     return None;
521 
522   // ..against a dupq
523   auto *DupQLane = dyn_cast<IntrinsicInst>(II.getArgOperand(1));
524   if (!DupQLane ||
525       DupQLane->getIntrinsicID() != Intrinsic::aarch64_sve_dupq_lane)
526     return None;
527 
528   // Where the dupq is a lane 0 replicate of a vector insert
529   if (!cast<ConstantInt>(DupQLane->getArgOperand(1))->isZero())
530     return None;
531 
532   auto *VecIns = dyn_cast<IntrinsicInst>(DupQLane->getArgOperand(0));
533   if (!VecIns ||
534       VecIns->getIntrinsicID() != Intrinsic::experimental_vector_insert)
535     return None;
536 
537   // Where the vector insert is a fixed constant vector insert into undef at
538   // index zero
539   if (!isa<UndefValue>(VecIns->getArgOperand(0)))
540     return None;
541 
542   if (!cast<ConstantInt>(VecIns->getArgOperand(2))->isZero())
543     return None;
544 
545   auto *ConstVec = dyn_cast<Constant>(VecIns->getArgOperand(1));
546   if (!ConstVec)
547     return None;
548 
549   auto *VecTy = dyn_cast<FixedVectorType>(ConstVec->getType());
550   auto *OutTy = dyn_cast<ScalableVectorType>(II.getType());
551   if (!VecTy || !OutTy || VecTy->getNumElements() != OutTy->getMinNumElements())
552     return None;
553 
554   unsigned NumElts = VecTy->getNumElements();
555   unsigned PredicateBits = 0;
556 
  // Expand the intrinsic operands to a 16-bit, byte-granular predicate.
558   for (unsigned I = 0; I < NumElts; ++I) {
559     auto *Arg = dyn_cast<ConstantInt>(ConstVec->getAggregateElement(I));
560     if (!Arg)
561       return None;
562     if (!Arg->isZero())
563       PredicateBits |= 1 << (I * (16 / NumElts));
564   }
565 
  // If all bits are zero, bail out early with an empty predicate.
567   if (PredicateBits == 0) {
568     auto *PFalse = Constant::getNullValue(II.getType());
569     PFalse->takeName(&II);
570     return IC.replaceInstUsesWith(II, PFalse);
571   }
572 
573   // Calculate largest predicate type used (where byte predicate is largest)
574   unsigned Mask = 8;
575   for (unsigned I = 0; I < 16; ++I)
576     if ((PredicateBits & (1 << I)) != 0)
577       Mask |= (I % 8);
578 
579   unsigned PredSize = Mask & -Mask;
580   auto *PredType = ScalableVectorType::get(
581       Type::getInt1Ty(Ctx), AArch64::SVEBitsPerBlock / (PredSize * 8));
582 
583   // Ensure all relevant bits are set
584   for (unsigned I = 0; I < 16; I += PredSize)
585     if ((PredicateBits & (1 << I)) == 0)
586       return None;
587 
588   auto *PTruePat =
589       ConstantInt::get(Type::getInt32Ty(Ctx), AArch64SVEPredPattern::all);
590   auto *PTrue = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue,
591                                         {PredType}, {PTruePat});
592   auto *ConvertToSVBool = Builder.CreateIntrinsic(
593       Intrinsic::aarch64_sve_convert_to_svbool, {PredType}, {PTrue});
594   auto *ConvertFromSVBool =
595       Builder.CreateIntrinsic(Intrinsic::aarch64_sve_convert_from_svbool,
596                               {II.getType()}, {ConvertToSVBool});
597 
598   ConvertFromSVBool->takeName(&II);
599   return IC.replaceInstUsesWith(II, ConvertFromSVBool);
600 }
601 
602 static Optional<Instruction *> instCombineSVELast(InstCombiner &IC,
603                                                   IntrinsicInst &II) {
604   IRBuilder<> Builder(II.getContext());
605   Builder.SetInsertPoint(&II);
606   Value *Pg = II.getArgOperand(0);
607   Value *Vec = II.getArgOperand(1);
608   auto IntrinsicID = II.getIntrinsicID();
609   bool IsAfter = IntrinsicID == Intrinsic::aarch64_sve_lasta;
610 
611   // lastX(splat(X)) --> X
612   if (auto *SplatVal = getSplatValue(Vec))
613     return IC.replaceInstUsesWith(II, SplatVal);
614 
615   // If x and/or y is a splat value then:
616   // lastX (binop (x, y)) --> binop(lastX(x), lastX(y))
617   Value *LHS, *RHS;
618   if (match(Vec, m_OneUse(m_BinOp(m_Value(LHS), m_Value(RHS))))) {
619     if (isSplatValue(LHS) || isSplatValue(RHS)) {
620       auto *OldBinOp = cast<BinaryOperator>(Vec);
621       auto OpC = OldBinOp->getOpcode();
622       auto *NewLHS =
623           Builder.CreateIntrinsic(IntrinsicID, {Vec->getType()}, {Pg, LHS});
624       auto *NewRHS =
625           Builder.CreateIntrinsic(IntrinsicID, {Vec->getType()}, {Pg, RHS});
626       auto *NewBinOp = BinaryOperator::CreateWithCopiedFlags(
627           OpC, NewLHS, NewRHS, OldBinOp, OldBinOp->getName(), &II);
628       return IC.replaceInstUsesWith(II, NewBinOp);
629     }
630   }
631 
632   auto *C = dyn_cast<Constant>(Pg);
633   if (IsAfter && C && C->isNullValue()) {
634     // The intrinsic is extracting lane 0 so use an extract instead.
635     auto *IdxTy = Type::getInt64Ty(II.getContext());
636     auto *Extract = ExtractElementInst::Create(Vec, ConstantInt::get(IdxTy, 0));
637     Extract->insertBefore(&II);
638     Extract->takeName(&II);
639     return IC.replaceInstUsesWith(II, Extract);
640   }
641 
642   auto *IntrPG = dyn_cast<IntrinsicInst>(Pg);
643   if (!IntrPG)
644     return None;
645 
646   if (IntrPG->getIntrinsicID() != Intrinsic::aarch64_sve_ptrue)
647     return None;
648 
649   const auto PTruePattern =
650       cast<ConstantInt>(IntrPG->getOperand(0))->getZExtValue();
651 
652   // Can the intrinsic's predicate be converted to a known constant index?
653   unsigned MinNumElts = getNumElementsFromSVEPredPattern(PTruePattern);
654   if (!MinNumElts)
655     return None;
656 
657   unsigned Idx = MinNumElts - 1;
658   // Increment the index if extracting the element after the last active
659   // predicate element.
660   if (IsAfter)
661     ++Idx;
662 
663   // Ignore extracts whose index is larger than the known minimum vector
664   // length. NOTE: This is an artificial constraint where we prefer to
665   // maintain what the user asked for until an alternative is proven faster.
666   auto *PgVTy = cast<ScalableVectorType>(Pg->getType());
667   if (Idx >= PgVTy->getMinNumElements())
668     return None;
669 
670   // The intrinsic is extracting a fixed lane so use an extract instead.
671   auto *IdxTy = Type::getInt64Ty(II.getContext());
672   auto *Extract = ExtractElementInst::Create(Vec, ConstantInt::get(IdxTy, Idx));
673   Extract->insertBefore(&II);
674   Extract->takeName(&II);
675   return IC.replaceInstUsesWith(II, Extract);
676 }
677 
678 static Optional<Instruction *> instCombineRDFFR(InstCombiner &IC,
679                                                 IntrinsicInst &II) {
680   LLVMContext &Ctx = II.getContext();
681   IRBuilder<> Builder(Ctx);
682   Builder.SetInsertPoint(&II);
683   // Replace rdffr with predicated rdffr.z intrinsic, so that optimizePTestInstr
684   // can work with RDFFR_PP for ptest elimination.
685   auto *AllPat =
686       ConstantInt::get(Type::getInt32Ty(Ctx), AArch64SVEPredPattern::all);
687   auto *PTrue = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_ptrue,
688                                         {II.getType()}, {AllPat});
689   auto *RDFFR =
690       Builder.CreateIntrinsic(Intrinsic::aarch64_sve_rdffr_z, {}, {PTrue});
691   RDFFR->takeName(&II);
692   return IC.replaceInstUsesWith(II, RDFFR);
693 }
694 
695 static Optional<Instruction *>
696 instCombineSVECntElts(InstCombiner &IC, IntrinsicInst &II, unsigned NumElts) {
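  // e.g. cntd(all) --> vscale * 2, and cntb(vl16) --> 16, since the minimum
  // 128-bit SVE register always provides at least 16 byte-sized elements.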
697   const auto Pattern = cast<ConstantInt>(II.getArgOperand(0))->getZExtValue();
698 
699   if (Pattern == AArch64SVEPredPattern::all) {
700     LLVMContext &Ctx = II.getContext();
701     IRBuilder<> Builder(Ctx);
702     Builder.SetInsertPoint(&II);
703 
704     Constant *StepVal = ConstantInt::get(II.getType(), NumElts);
705     auto *VScale = Builder.CreateVScale(StepVal);
706     VScale->takeName(&II);
707     return IC.replaceInstUsesWith(II, VScale);
708   }
709 
710   unsigned MinNumElts = getNumElementsFromSVEPredPattern(Pattern);
711 
712   return MinNumElts && NumElts >= MinNumElts
713              ? Optional<Instruction *>(IC.replaceInstUsesWith(
714                    II, ConstantInt::get(II.getType(), MinNumElts)))
715              : None;
716 }
717 
718 static Optional<Instruction *> instCombineSVEPTest(InstCombiner &IC,
719                                                    IntrinsicInst &II) {
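  // ptest(convert.to.svbool(a), convert.to.svbool(b)) --> ptest(a, b) when a
  // and b have the same predicate type, making the svbool casts redundant.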
720   IntrinsicInst *Op1 = dyn_cast<IntrinsicInst>(II.getArgOperand(0));
721   IntrinsicInst *Op2 = dyn_cast<IntrinsicInst>(II.getArgOperand(1));
722 
723   if (Op1 && Op2 &&
724       Op1->getIntrinsicID() == Intrinsic::aarch64_sve_convert_to_svbool &&
725       Op2->getIntrinsicID() == Intrinsic::aarch64_sve_convert_to_svbool &&
726       Op1->getArgOperand(0)->getType() == Op2->getArgOperand(0)->getType()) {
727 
728     IRBuilder<> Builder(II.getContext());
729     Builder.SetInsertPoint(&II);
730 
731     Value *Ops[] = {Op1->getArgOperand(0), Op2->getArgOperand(0)};
732     Type *Tys[] = {Op1->getArgOperand(0)->getType()};
733 
734     auto *PTest = Builder.CreateIntrinsic(II.getIntrinsicID(), Tys, Ops);
735 
736     PTest->takeName(&II);
737     return IC.replaceInstUsesWith(II, PTest);
738   }
739 
740   return None;
741 }
742 
743 static Optional<Instruction *> instCombineSVEVectorFMLA(InstCombiner &IC,
744                                                         IntrinsicInst &II) {
745   // fold (fadd p a (fmul p b c)) -> (fma p a b c)
746   Value *P = II.getOperand(0);
747   Value *A = II.getOperand(1);
748   auto FMul = II.getOperand(2);
749   Value *B, *C;
750   if (!match(FMul, m_Intrinsic<Intrinsic::aarch64_sve_fmul>(
751                        m_Specific(P), m_Value(B), m_Value(C))))
752     return None;
753 
754   if (!FMul->hasOneUse())
755     return None;
756 
757   llvm::FastMathFlags FAddFlags = II.getFastMathFlags();
758   // Stop the combine when the flags on the inputs differ in case dropping flags
759   // would lead to us missing out on more beneficial optimizations.
760   if (FAddFlags != cast<CallInst>(FMul)->getFastMathFlags())
761     return None;
762   if (!FAddFlags.allowContract())
763     return None;
764 
765   IRBuilder<> Builder(II.getContext());
766   Builder.SetInsertPoint(&II);
767   auto FMLA = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_fmla,
768                                       {II.getType()}, {P, A, B, C}, &II);
769   FMLA->setFastMathFlags(FAddFlags);
770   return IC.replaceInstUsesWith(II, FMLA);
771 }
772 
773 static bool isAllActivePredicate(Value *Pred) {
  // Look through a convert.from.svbool(convert.to.svbool(...)) chain.
775   Value *UncastedPred;
776   if (match(Pred, m_Intrinsic<Intrinsic::aarch64_sve_convert_from_svbool>(
777                       m_Intrinsic<Intrinsic::aarch64_sve_convert_to_svbool>(
778                           m_Value(UncastedPred)))))
    // If the predicate has the same number of lanes as, or fewer lanes than,
    // the uncasted predicate, then we know the casting has no effect.
781     if (cast<ScalableVectorType>(Pred->getType())->getMinNumElements() <=
782         cast<ScalableVectorType>(UncastedPred->getType())->getMinNumElements())
783       Pred = UncastedPred;
784 
785   return match(Pred, m_Intrinsic<Intrinsic::aarch64_sve_ptrue>(
786                          m_ConstantInt<AArch64SVEPredPattern::all>()));
787 }
788 
789 static Optional<Instruction *>
790 instCombineSVELD1(InstCombiner &IC, IntrinsicInst &II, const DataLayout &DL) {
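  // ld1(ptrue(all), ptr) --> plain vector load; for any other predicate the
  // intrinsic becomes an llvm.masked.load with a zeroinitializer passthru.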
791   IRBuilder<> Builder(II.getContext());
792   Builder.SetInsertPoint(&II);
793 
794   Value *Pred = II.getOperand(0);
795   Value *PtrOp = II.getOperand(1);
796   Type *VecTy = II.getType();
797   Value *VecPtr = Builder.CreateBitCast(PtrOp, VecTy->getPointerTo());
798 
799   if (isAllActivePredicate(Pred)) {
800     LoadInst *Load = Builder.CreateLoad(VecTy, VecPtr);
801     return IC.replaceInstUsesWith(II, Load);
802   }
803 
804   CallInst *MaskedLoad =
805       Builder.CreateMaskedLoad(VecTy, VecPtr, PtrOp->getPointerAlignment(DL),
806                                Pred, ConstantAggregateZero::get(VecTy));
807   return IC.replaceInstUsesWith(II, MaskedLoad);
808 }
809 
810 static Optional<Instruction *>
811 instCombineSVEST1(InstCombiner &IC, IntrinsicInst &II, const DataLayout &DL) {
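  // st1(val, ptrue(all), ptr) --> plain vector store; for any other predicate
  // the intrinsic becomes an llvm.masked.store.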
812   IRBuilder<> Builder(II.getContext());
813   Builder.SetInsertPoint(&II);
814 
815   Value *VecOp = II.getOperand(0);
816   Value *Pred = II.getOperand(1);
817   Value *PtrOp = II.getOperand(2);
818   Value *VecPtr =
819       Builder.CreateBitCast(PtrOp, VecOp->getType()->getPointerTo());
820 
821   if (isAllActivePredicate(Pred)) {
822     Builder.CreateStore(VecOp, VecPtr);
823     return IC.eraseInstFromFunction(II);
824   }
825 
826   Builder.CreateMaskedStore(VecOp, VecPtr, PtrOp->getPointerAlignment(DL),
827                             Pred);
828   return IC.eraseInstFromFunction(II);
829 }
830 
831 static Instruction::BinaryOps intrinsicIDToBinOpCode(unsigned Intrinsic) {
832   switch (Intrinsic) {
833   case Intrinsic::aarch64_sve_fmul:
834     return Instruction::BinaryOps::FMul;
835   case Intrinsic::aarch64_sve_fadd:
836     return Instruction::BinaryOps::FAdd;
837   case Intrinsic::aarch64_sve_fsub:
838     return Instruction::BinaryOps::FSub;
839   default:
840     return Instruction::BinaryOpsEnd;
841   }
842 }
843 
844 static Optional<Instruction *> instCombineSVEVectorBinOp(InstCombiner &IC,
845                                                          IntrinsicInst &II) {
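  // e.g. fadd(ptrue(all), a, b) --> fadd a, b, since a binop predicated on an
  // all-active predicate is equivalent to the unpredicated IR binop.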
846   auto *OpPredicate = II.getOperand(0);
847   auto BinOpCode = intrinsicIDToBinOpCode(II.getIntrinsicID());
848   if (BinOpCode == Instruction::BinaryOpsEnd ||
849       !match(OpPredicate, m_Intrinsic<Intrinsic::aarch64_sve_ptrue>(
850                               m_ConstantInt<AArch64SVEPredPattern::all>())))
851     return None;
852   IRBuilder<> Builder(II.getContext());
853   Builder.SetInsertPoint(&II);
854   Builder.setFastMathFlags(II.getFastMathFlags());
855   auto BinOp =
856       Builder.CreateBinOp(BinOpCode, II.getOperand(1), II.getOperand(2));
857   return IC.replaceInstUsesWith(II, BinOp);
858 }
859 
860 static Optional<Instruction *> instCombineSVEVectorFAdd(InstCombiner &IC,
861                                                         IntrinsicInst &II) {
862   if (auto FMLA = instCombineSVEVectorFMLA(IC, II))
863     return FMLA;
864   return instCombineSVEVectorBinOp(IC, II);
865 }
866 
867 static Optional<Instruction *> instCombineSVEVectorMul(InstCombiner &IC,
868                                                        IntrinsicInst &II) {
869   auto *OpPredicate = II.getOperand(0);
870   auto *OpMultiplicand = II.getOperand(1);
871   auto *OpMultiplier = II.getOperand(2);
872 
873   IRBuilder<> Builder(II.getContext());
874   Builder.SetInsertPoint(&II);
875 
876   // Return true if a given instruction is a unit splat value, false otherwise.
877   auto IsUnitSplat = [](auto *I) {
878     auto *SplatValue = getSplatValue(I);
879     if (!SplatValue)
880       return false;
881     return match(SplatValue, m_FPOne()) || match(SplatValue, m_One());
882   };
883 
884   // Return true if a given instruction is an aarch64_sve_dup intrinsic call
885   // with a unit splat value, false otherwise.
886   auto IsUnitDup = [](auto *I) {
887     auto *IntrI = dyn_cast<IntrinsicInst>(I);
888     if (!IntrI || IntrI->getIntrinsicID() != Intrinsic::aarch64_sve_dup)
889       return false;
890 
891     auto *SplatValue = IntrI->getOperand(2);
892     return match(SplatValue, m_FPOne()) || match(SplatValue, m_One());
893   };
894 
895   if (IsUnitSplat(OpMultiplier)) {
896     // [f]mul pg %n, (dupx 1) => %n
897     OpMultiplicand->takeName(&II);
898     return IC.replaceInstUsesWith(II, OpMultiplicand);
899   } else if (IsUnitDup(OpMultiplier)) {
900     // [f]mul pg %n, (dup pg 1) => %n
901     auto *DupInst = cast<IntrinsicInst>(OpMultiplier);
902     auto *DupPg = DupInst->getOperand(1);
903     // TODO: this is naive. The optimization is still valid if DupPg
904     // 'encompasses' OpPredicate, not only if they're the same predicate.
905     if (OpPredicate == DupPg) {
906       OpMultiplicand->takeName(&II);
907       return IC.replaceInstUsesWith(II, OpMultiplicand);
908     }
909   }
910 
911   return instCombineSVEVectorBinOp(IC, II);
912 }
913 
914 static Optional<Instruction *> instCombineSVEUnpack(InstCombiner &IC,
915                                                     IntrinsicInst &II) {
916   IRBuilder<> Builder(II.getContext());
917   Builder.SetInsertPoint(&II);
918   Value *UnpackArg = II.getArgOperand(0);
919   auto *RetTy = cast<ScalableVectorType>(II.getType());
920   bool IsSigned = II.getIntrinsicID() == Intrinsic::aarch64_sve_sunpkhi ||
921                   II.getIntrinsicID() == Intrinsic::aarch64_sve_sunpklo;
922 
923   // Hi = uunpkhi(splat(X)) --> Hi = splat(extend(X))
924   // Lo = uunpklo(splat(X)) --> Lo = splat(extend(X))
925   if (auto *ScalarArg = getSplatValue(UnpackArg)) {
926     ScalarArg =
927         Builder.CreateIntCast(ScalarArg, RetTy->getScalarType(), IsSigned);
928     Value *NewVal =
929         Builder.CreateVectorSplat(RetTy->getElementCount(), ScalarArg);
930     NewVal->takeName(&II);
931     return IC.replaceInstUsesWith(II, NewVal);
932   }
933 
934   return None;
935 }
936 static Optional<Instruction *> instCombineSVETBL(InstCombiner &IC,
937                                                  IntrinsicInst &II) {
938   auto *OpVal = II.getOperand(0);
939   auto *OpIndices = II.getOperand(1);
940   VectorType *VTy = cast<VectorType>(II.getType());
941 
  // Check whether OpIndices is a constant splat value less than the minimum
  // element count of the result.
944   auto *SplatValue = dyn_cast_or_null<ConstantInt>(getSplatValue(OpIndices));
945   if (!SplatValue ||
946       SplatValue->getValue().uge(VTy->getElementCount().getKnownMinValue()))
947     return None;
948 
  // Convert sve_tbl(OpVal, sve_dup_x(SplatValue)) to
950   // splat_vector(extractelement(OpVal, SplatValue)) for further optimization.
951   IRBuilder<> Builder(II.getContext());
952   Builder.SetInsertPoint(&II);
953   auto *Extract = Builder.CreateExtractElement(OpVal, SplatValue);
954   auto *VectorSplat =
955       Builder.CreateVectorSplat(VTy->getElementCount(), Extract);
956 
957   VectorSplat->takeName(&II);
958   return IC.replaceInstUsesWith(II, VectorSplat);
959 }
960 
961 static Optional<Instruction *> instCombineSVETupleGet(InstCombiner &IC,
962                                                       IntrinsicInst &II) {
963   // Try to remove sequences of tuple get/set.
964   Value *SetTuple, *SetIndex, *SetValue;
965   auto *GetTuple = II.getArgOperand(0);
966   auto *GetIndex = II.getArgOperand(1);
967   // Check that we have tuple_get(GetTuple, GetIndex) where GetTuple is a
  // call to tuple_set, i.e. tuple_set(SetTuple, SetIndex, SetValue).
969   // Make sure that the types of the current intrinsic and SetValue match
970   // in order to safely remove the sequence.
971   if (!match(GetTuple,
972              m_Intrinsic<Intrinsic::aarch64_sve_tuple_set>(
973                  m_Value(SetTuple), m_Value(SetIndex), m_Value(SetValue))) ||
974       SetValue->getType() != II.getType())
975     return None;
976   // Case where we get the same index right after setting it.
977   // tuple_get(tuple_set(SetTuple, SetIndex, SetValue), GetIndex) --> SetValue
978   if (GetIndex == SetIndex)
979     return IC.replaceInstUsesWith(II, SetValue);
  // If we are getting a different index than the one that was set in the
  // tuple_set intrinsic, we can just set the input tuple to the one further up
  // the chain.
982   // tuple_get(tuple_set(SetTuple, SetIndex, SetValue), GetIndex)
983   // --> tuple_get(SetTuple, GetIndex)
984   return IC.replaceOperand(II, 0, SetTuple);
985 }
986 
987 static Optional<Instruction *> instCombineSVEZip(InstCombiner &IC,
988                                                  IntrinsicInst &II) {
989   // zip1(uzp1(A, B), uzp2(A, B)) --> A
990   // zip2(uzp1(A, B), uzp2(A, B)) --> B
991   Value *A, *B;
992   if (match(II.getArgOperand(0),
993             m_Intrinsic<Intrinsic::aarch64_sve_uzp1>(m_Value(A), m_Value(B))) &&
994       match(II.getArgOperand(1), m_Intrinsic<Intrinsic::aarch64_sve_uzp2>(
995                                      m_Specific(A), m_Specific(B))))
996     return IC.replaceInstUsesWith(
997         II, (II.getIntrinsicID() == Intrinsic::aarch64_sve_zip1 ? A : B));
998 
999   return None;
1000 }
1001 
1002 static Optional<Instruction *> instCombineLD1GatherIndex(InstCombiner &IC,
1003                                                          IntrinsicInst &II) {
1004   Value *Mask = II.getOperand(0);
1005   Value *BasePtr = II.getOperand(1);
1006   Value *Index = II.getOperand(2);
1007   Type *Ty = II.getType();
1008   Type *BasePtrTy = BasePtr->getType();
1009   Value *PassThru = ConstantAggregateZero::get(Ty);
1010 
1011   // Contiguous gather => masked load.
1012   // (sve.ld1.gather.index Mask BasePtr (sve.index IndexBase 1))
1013   // => (masked.load (gep BasePtr IndexBase) Align Mask zeroinitializer)
1014   Value *IndexBase;
1015   if (match(Index, m_Intrinsic<Intrinsic::aarch64_sve_index>(
1016                        m_Value(IndexBase), m_SpecificInt(1)))) {
1017     IRBuilder<> Builder(II.getContext());
1018     Builder.SetInsertPoint(&II);
1019 
1020     Align Alignment =
1021         BasePtr->getPointerAlignment(II.getModule()->getDataLayout());
1022 
1023     Type *VecPtrTy = PointerType::getUnqual(Ty);
1024     Value *Ptr = Builder.CreateGEP(BasePtrTy->getPointerElementType(), BasePtr,
1025                                    IndexBase);
1026     Ptr = Builder.CreateBitCast(Ptr, VecPtrTy);
1027     CallInst *MaskedLoad =
1028         Builder.CreateMaskedLoad(Ty, Ptr, Alignment, Mask, PassThru);
1029     MaskedLoad->takeName(&II);
1030     return IC.replaceInstUsesWith(II, MaskedLoad);
1031   }
1032 
1033   return None;
1034 }
1035 
1036 static Optional<Instruction *> instCombineST1ScatterIndex(InstCombiner &IC,
1037                                                           IntrinsicInst &II) {
1038   Value *Val = II.getOperand(0);
1039   Value *Mask = II.getOperand(1);
1040   Value *BasePtr = II.getOperand(2);
1041   Value *Index = II.getOperand(3);
1042   Type *Ty = Val->getType();
1043   Type *BasePtrTy = BasePtr->getType();
1044 
1045   // Contiguous scatter => masked store.
  // (sve.st1.scatter.index Value Mask BasePtr (sve.index IndexBase 1))
1047   // => (masked.store Value (gep BasePtr IndexBase) Align Mask)
1048   Value *IndexBase;
1049   if (match(Index, m_Intrinsic<Intrinsic::aarch64_sve_index>(
1050                        m_Value(IndexBase), m_SpecificInt(1)))) {
1051     IRBuilder<> Builder(II.getContext());
1052     Builder.SetInsertPoint(&II);
1053 
1054     Align Alignment =
1055         BasePtr->getPointerAlignment(II.getModule()->getDataLayout());
1056 
1057     Value *Ptr = Builder.CreateGEP(BasePtrTy->getPointerElementType(), BasePtr,
1058                                    IndexBase);
1059     Type *VecPtrTy = PointerType::getUnqual(Ty);
1060     Ptr = Builder.CreateBitCast(Ptr, VecPtrTy);
1061 
1062     (void)Builder.CreateMaskedStore(Val, Ptr, Alignment, Mask);
1063 
1064     return IC.eraseInstFromFunction(II);
1065   }
1066 
1067   return None;
1068 }
1069 
1070 static Optional<Instruction *> instCombineSVESDIV(InstCombiner &IC,
1071                                                   IntrinsicInst &II) {
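  // sdiv(pg, v, splat(2^k)) --> asrd(pg, v, k); for a divisor that is a
  // negated power of two, the asrd result is additionally negated.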
1072   IRBuilder<> Builder(II.getContext());
1073   Builder.SetInsertPoint(&II);
1074   Type *Int32Ty = Builder.getInt32Ty();
1075   Value *Pred = II.getOperand(0);
1076   Value *Vec = II.getOperand(1);
1077   Value *DivVec = II.getOperand(2);
1078 
1079   Value *SplatValue = getSplatValue(DivVec);
1080   ConstantInt *SplatConstantInt = dyn_cast_or_null<ConstantInt>(SplatValue);
1081   if (!SplatConstantInt)
1082     return None;
1083   APInt Divisor = SplatConstantInt->getValue();
1084 
1085   if (Divisor.isPowerOf2()) {
1086     Constant *DivisorLog2 = ConstantInt::get(Int32Ty, Divisor.logBase2());
1087     auto ASRD = Builder.CreateIntrinsic(
1088         Intrinsic::aarch64_sve_asrd, {II.getType()}, {Pred, Vec, DivisorLog2});
1089     return IC.replaceInstUsesWith(II, ASRD);
1090   }
1091   if (Divisor.isNegatedPowerOf2()) {
1092     Divisor.negate();
1093     Constant *DivisorLog2 = ConstantInt::get(Int32Ty, Divisor.logBase2());
1094     auto ASRD = Builder.CreateIntrinsic(
1095         Intrinsic::aarch64_sve_asrd, {II.getType()}, {Pred, Vec, DivisorLog2});
1096     auto NEG = Builder.CreateIntrinsic(Intrinsic::aarch64_sve_neg,
1097                                        {ASRD->getType()}, {ASRD, Pred, ASRD});
1098     return IC.replaceInstUsesWith(II, NEG);
1099   }
1100 
1101   return None;
1102 }
1103 
1104 Optional<Instruction *>
1105 AArch64TTIImpl::instCombineIntrinsic(InstCombiner &IC,
1106                                      IntrinsicInst &II) const {
1107   Intrinsic::ID IID = II.getIntrinsicID();
1108   switch (IID) {
1109   default:
1110     break;
1111   case Intrinsic::aarch64_sve_convert_from_svbool:
1112     return instCombineConvertFromSVBool(IC, II);
1113   case Intrinsic::aarch64_sve_dup:
1114     return instCombineSVEDup(IC, II);
1115   case Intrinsic::aarch64_sve_dup_x:
1116     return instCombineSVEDupX(IC, II);
1117   case Intrinsic::aarch64_sve_cmpne:
1118   case Intrinsic::aarch64_sve_cmpne_wide:
1119     return instCombineSVECmpNE(IC, II);
1120   case Intrinsic::aarch64_sve_rdffr:
1121     return instCombineRDFFR(IC, II);
1122   case Intrinsic::aarch64_sve_lasta:
1123   case Intrinsic::aarch64_sve_lastb:
1124     return instCombineSVELast(IC, II);
1125   case Intrinsic::aarch64_sve_cntd:
1126     return instCombineSVECntElts(IC, II, 2);
1127   case Intrinsic::aarch64_sve_cntw:
1128     return instCombineSVECntElts(IC, II, 4);
1129   case Intrinsic::aarch64_sve_cnth:
1130     return instCombineSVECntElts(IC, II, 8);
1131   case Intrinsic::aarch64_sve_cntb:
1132     return instCombineSVECntElts(IC, II, 16);
1133   case Intrinsic::aarch64_sve_ptest_any:
1134   case Intrinsic::aarch64_sve_ptest_first:
1135   case Intrinsic::aarch64_sve_ptest_last:
1136     return instCombineSVEPTest(IC, II);
1137   case Intrinsic::aarch64_sve_mul:
1138   case Intrinsic::aarch64_sve_fmul:
1139     return instCombineSVEVectorMul(IC, II);
1140   case Intrinsic::aarch64_sve_fadd:
1141     return instCombineSVEVectorFAdd(IC, II);
1142   case Intrinsic::aarch64_sve_fsub:
1143     return instCombineSVEVectorBinOp(IC, II);
1144   case Intrinsic::aarch64_sve_tbl:
1145     return instCombineSVETBL(IC, II);
1146   case Intrinsic::aarch64_sve_uunpkhi:
1147   case Intrinsic::aarch64_sve_uunpklo:
1148   case Intrinsic::aarch64_sve_sunpkhi:
1149   case Intrinsic::aarch64_sve_sunpklo:
1150     return instCombineSVEUnpack(IC, II);
1151   case Intrinsic::aarch64_sve_tuple_get:
1152     return instCombineSVETupleGet(IC, II);
1153   case Intrinsic::aarch64_sve_zip1:
1154   case Intrinsic::aarch64_sve_zip2:
1155     return instCombineSVEZip(IC, II);
1156   case Intrinsic::aarch64_sve_ld1_gather_index:
1157     return instCombineLD1GatherIndex(IC, II);
1158   case Intrinsic::aarch64_sve_st1_scatter_index:
1159     return instCombineST1ScatterIndex(IC, II);
1160   case Intrinsic::aarch64_sve_ld1:
1161     return instCombineSVELD1(IC, II, DL);
1162   case Intrinsic::aarch64_sve_st1:
1163     return instCombineSVEST1(IC, II, DL);
1164   case Intrinsic::aarch64_sve_sdiv:
1165     return instCombineSVESDIV(IC, II);
1166   }
1167 
1168   return None;
1169 }
1170 
1171 bool AArch64TTIImpl::isWideningInstruction(Type *DstTy, unsigned Opcode,
1172                                            ArrayRef<const Value *> Args) {
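  // For example, add(<8 x i16> %a, zext(<8 x i8> %b)) can be selected as a
  // single uaddw, and add(zext(%a), zext(%b)) as a uaddl, making the extends
  // effectively free.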
1173 
  // A helper that returns a vector type built from ArgTy's scalar type, with
  // the number of elements taken from DstTy.
1176   auto toVectorTy = [&](Type *ArgTy) {
1177     return VectorType::get(ArgTy->getScalarType(),
1178                            cast<VectorType>(DstTy)->getElementCount());
1179   };
1180 
1181   // Exit early if DstTy is not a vector type whose elements are at least
1182   // 16-bits wide.
1183   if (!DstTy->isVectorTy() || DstTy->getScalarSizeInBits() < 16)
1184     return false;
1185 
1186   // Determine if the operation has a widening variant. We consider both the
1187   // "long" (e.g., usubl) and "wide" (e.g., usubw) versions of the
1188   // instructions.
1189   //
1190   // TODO: Add additional widening operations (e.g., mul, shl, etc.) once we
1191   //       verify that their extending operands are eliminated during code
1192   //       generation.
1193   switch (Opcode) {
1194   case Instruction::Add: // UADDL(2), SADDL(2), UADDW(2), SADDW(2).
1195   case Instruction::Sub: // USUBL(2), SSUBL(2), USUBW(2), SSUBW(2).
1196     break;
1197   default:
1198     return false;
1199   }
1200 
  // To be a widening instruction (either the "wide" or "long" version), the
  // second operand must be a sign- or zero-extend with a single user. We only
  // consider extends with a single user because they may otherwise not be
  // eliminated.
1205   if (Args.size() != 2 ||
1206       (!isa<SExtInst>(Args[1]) && !isa<ZExtInst>(Args[1])) ||
1207       !Args[1]->hasOneUse())
1208     return false;
1209   auto *Extend = cast<CastInst>(Args[1]);
1210 
1211   // Legalize the destination type and ensure it can be used in a widening
1212   // operation.
1213   auto DstTyL = TLI->getTypeLegalizationCost(DL, DstTy);
1214   unsigned DstElTySize = DstTyL.second.getScalarSizeInBits();
1215   if (!DstTyL.second.isVector() || DstElTySize != DstTy->getScalarSizeInBits())
1216     return false;
1217 
1218   // Legalize the source type and ensure it can be used in a widening
1219   // operation.
1220   auto *SrcTy = toVectorTy(Extend->getSrcTy());
1221   auto SrcTyL = TLI->getTypeLegalizationCost(DL, SrcTy);
1222   unsigned SrcElTySize = SrcTyL.second.getScalarSizeInBits();
1223   if (!SrcTyL.second.isVector() || SrcElTySize != SrcTy->getScalarSizeInBits())
1224     return false;
1225 
1226   // Get the total number of vector elements in the legalized types.
1227   InstructionCost NumDstEls =
1228       DstTyL.first * DstTyL.second.getVectorMinNumElements();
1229   InstructionCost NumSrcEls =
1230       SrcTyL.first * SrcTyL.second.getVectorMinNumElements();
1231 
1232   // Return true if the legalized types have the same number of vector elements
1233   // and the destination element type size is twice that of the source type.
1234   return NumDstEls == NumSrcEls && 2 * SrcElTySize == DstElTySize;
1235 }
1236 
1237 InstructionCost AArch64TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
1238                                                  Type *Src,
1239                                                  TTI::CastContextHint CCH,
1240                                                  TTI::TargetCostKind CostKind,
1241                                                  const Instruction *I) {
1242   int ISD = TLI->InstructionOpcodeToISD(Opcode);
1243   assert(ISD && "Invalid opcode");
1244 
1245   // If the cast is observable, and it is used by a widening instruction (e.g.,
1246   // uaddl, saddw, etc.), it may be free.
1247   if (I && I->hasOneUse()) {
1248     auto *SingleUser = cast<Instruction>(*I->user_begin());
1249     SmallVector<const Value *, 4> Operands(SingleUser->operand_values());
1250     if (isWideningInstruction(Dst, SingleUser->getOpcode(), Operands)) {
1251       // If the cast is the second operand, it is free. We will generate either
1252       // a "wide" or "long" version of the widening instruction.
1253       if (I == SingleUser->getOperand(1))
1254         return 0;
1255       // If the cast is not the second operand, it will be free if it looks the
1256       // same as the second operand. In this case, we will generate a "long"
1257       // version of the widening instruction.
1258       if (auto *Cast = dyn_cast<CastInst>(SingleUser->getOperand(1)))
1259         if (I->getOpcode() == unsigned(Cast->getOpcode()) &&
1260             cast<CastInst>(I)->getSrcTy() == Cast->getSrcTy())
1261           return 0;
1262     }
1263   }
1264 
1265   // TODO: Allow non-throughput costs that aren't binary.
1266   auto AdjustCost = [&CostKind](InstructionCost Cost) -> InstructionCost {
1267     if (CostKind != TTI::TCK_RecipThroughput)
1268       return Cost == 0 ? 0 : 1;
1269     return Cost;
1270   };
1271 
1272   EVT SrcTy = TLI->getValueType(DL, Src);
1273   EVT DstTy = TLI->getValueType(DL, Dst);
1274 
1275   if (!SrcTy.isSimple() || !DstTy.isSimple())
1276     return AdjustCost(
1277         BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
1278 
1279   static const TypeConversionCostTblEntry
1280   ConversionTbl[] = {
1281     { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32,  1 },
1282     { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64,  0 },
1283     { ISD::TRUNCATE, MVT::v8i8,  MVT::v8i32,  3 },
1284     { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 6 },
1285 
1286     // Truncations on nxvmiN
1287     { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i16, 1 },
1288     { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i32, 1 },
1289     { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i64, 1 },
1290     { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i16, 1 },
1291     { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i32, 1 },
1292     { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i64, 2 },
1293     { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i16, 1 },
1294     { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i32, 3 },
1295     { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i64, 5 },
1296     { ISD::TRUNCATE, MVT::nxv16i1, MVT::nxv16i8, 1 },
1297     { ISD::TRUNCATE, MVT::nxv2i16, MVT::nxv2i32, 1 },
1298     { ISD::TRUNCATE, MVT::nxv2i32, MVT::nxv2i64, 1 },
1299     { ISD::TRUNCATE, MVT::nxv4i16, MVT::nxv4i32, 1 },
1300     { ISD::TRUNCATE, MVT::nxv4i32, MVT::nxv4i64, 2 },
1301     { ISD::TRUNCATE, MVT::nxv8i16, MVT::nxv8i32, 3 },
1302     { ISD::TRUNCATE, MVT::nxv8i32, MVT::nxv8i64, 6 },
1303 
1304     // The number of shll instructions for the extension.
1305     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
1306     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
1307     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32, 2 },
1308     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32, 2 },
1309     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
1310     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
1311     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16, 2 },
1312     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16, 2 },
1313     { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i8,  7 },
1314     { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i8,  7 },
1315     { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i16, 6 },
1316     { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i16, 6 },
1317     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
1318     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
1319     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
1320     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
1321 
1322     // LowerVectorINT_TO_FP:
1323     { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
1324     { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
1325     { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
1326     { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
1327     { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
1328     { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
1329 
1330     // Complex: to v2f32
1331     { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
1332     { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
1333     { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },
1334     { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
1335     { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
1336     { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },
1337 
1338     // Complex: to v4f32
1339     { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8,  4 },
1340     { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
1341     { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8,  3 },
1342     { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
1343 
1344     // Complex: to v8f32
1345     { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8,  10 },
1346     { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
1347     { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8,  10 },
1348     { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
1349 
1350     // Complex: to v16f32
1351     { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },
1352     { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },
1353 
1354     // Complex: to v2f64
1355     { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
1356     { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
1357     { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
1358     { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
1359     { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
1360     { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
1361 
1363     // LowerVectorFP_TO_INT
1364     { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f32, 1 },
1365     { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
1366     { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
1367     { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 },
1368     { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
1369     { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },
1370 
1371     // Complex, from v2f32: legal type is v2i32 (no cost) or v2i64 (1 ext).
1372     { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 2 },
1373     { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 1 },
1374     { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f32, 1 },
1375     { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 2 },
1376     { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 1 },
1377     { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f32, 1 },
1378 
1379     // Complex, from v4f32: legal type is v4i16, 1 narrowing => ~2
1380     { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 },
1381     { ISD::FP_TO_SINT, MVT::v4i8,  MVT::v4f32, 2 },
1382     { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 },
1383     { ISD::FP_TO_UINT, MVT::v4i8,  MVT::v4f32, 2 },
1384 
1385     // Complex, from nxv2f32.
1386     { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f32, 1 },
1387     { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f32, 1 },
1388     { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f32, 1 },
1389     { ISD::FP_TO_SINT, MVT::nxv2i8,  MVT::nxv2f32, 1 },
1390     { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f32, 1 },
1391     { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f32, 1 },
1392     { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f32, 1 },
1393     { ISD::FP_TO_UINT, MVT::nxv2i8,  MVT::nxv2f32, 1 },
1394 
1395     // Complex, from v2f64: legal type is v2i32, 1 narrowing => ~2.
1396     { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 },
1397     { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f64, 2 },
1398     { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f64, 2 },
1399     { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 },
1400     { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f64, 2 },
1401     { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f64, 2 },
1402 
1403     // Complex, from nxv2f64.
1404     { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f64, 1 },
1405     { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f64, 1 },
1406     { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f64, 1 },
1407     { ISD::FP_TO_SINT, MVT::nxv2i8,  MVT::nxv2f64, 1 },
1408     { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f64, 1 },
1409     { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f64, 1 },
1410     { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f64, 1 },
1411     { ISD::FP_TO_UINT, MVT::nxv2i8,  MVT::nxv2f64, 1 },
1412 
1413     // Complex, from nxv4f32.
1414     { ISD::FP_TO_SINT, MVT::nxv4i64, MVT::nxv4f32, 4 },
1415     { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f32, 1 },
1416     { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f32, 1 },
1417     { ISD::FP_TO_SINT, MVT::nxv4i8,  MVT::nxv4f32, 1 },
1418     { ISD::FP_TO_UINT, MVT::nxv4i64, MVT::nxv4f32, 4 },
1419     { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f32, 1 },
1420     { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f32, 1 },
1421     { ISD::FP_TO_UINT, MVT::nxv4i8,  MVT::nxv4f32, 1 },
1422 
1423     // Complex, from nxv8f64. Illegal -> illegal conversions not required.
1424     { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f64, 7 },
1425     { ISD::FP_TO_SINT, MVT::nxv8i8,  MVT::nxv8f64, 7 },
1426     { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f64, 7 },
1427     { ISD::FP_TO_UINT, MVT::nxv8i8,  MVT::nxv8f64, 7 },
1428 
1429     // Complex, from nxv4f64. Illegal -> illegal conversions not required.
1430     { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f64, 3 },
1431     { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f64, 3 },
1432     { ISD::FP_TO_SINT, MVT::nxv4i8,  MVT::nxv4f64, 3 },
1433     { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f64, 3 },
1434     { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f64, 3 },
1435     { ISD::FP_TO_UINT, MVT::nxv4i8,  MVT::nxv4f64, 3 },
1436 
1437     // Complex, from nxv8f32. Illegal -> illegal conversions not required.
1438     { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f32, 3 },
1439     { ISD::FP_TO_SINT, MVT::nxv8i8,  MVT::nxv8f32, 3 },
1440     { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f32, 3 },
1441     { ISD::FP_TO_UINT, MVT::nxv8i8,  MVT::nxv8f32, 3 },
1442 
1443     // Complex, from nxv8f16.
1444     { ISD::FP_TO_SINT, MVT::nxv8i64, MVT::nxv8f16, 10 },
1445     { ISD::FP_TO_SINT, MVT::nxv8i32, MVT::nxv8f16, 4 },
1446     { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f16, 1 },
1447     { ISD::FP_TO_SINT, MVT::nxv8i8,  MVT::nxv8f16, 1 },
1448     { ISD::FP_TO_UINT, MVT::nxv8i64, MVT::nxv8f16, 10 },
1449     { ISD::FP_TO_UINT, MVT::nxv8i32, MVT::nxv8f16, 4 },
1450     { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f16, 1 },
1451     { ISD::FP_TO_UINT, MVT::nxv8i8,  MVT::nxv8f16, 1 },
1452 
1453     // Complex, from nxv4f16.
1454     { ISD::FP_TO_SINT, MVT::nxv4i64, MVT::nxv4f16, 4 },
1455     { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f16, 1 },
1456     { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f16, 1 },
1457     { ISD::FP_TO_SINT, MVT::nxv4i8,  MVT::nxv4f16, 1 },
1458     { ISD::FP_TO_UINT, MVT::nxv4i64, MVT::nxv4f16, 4 },
1459     { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f16, 1 },
1460     { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f16, 1 },
1461     { ISD::FP_TO_UINT, MVT::nxv4i8,  MVT::nxv4f16, 1 },
1462 
1463     // Complex, from nxv2f16.
1464     { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f16, 1 },
1465     { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f16, 1 },
1466     { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f16, 1 },
1467     { ISD::FP_TO_SINT, MVT::nxv2i8,  MVT::nxv2f16, 1 },
1468     { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f16, 1 },
1469     { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f16, 1 },
1470     { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f16, 1 },
1471     { ISD::FP_TO_UINT, MVT::nxv2i8,  MVT::nxv2f16, 1 },
1472 
1473     // Truncate from nxvmf32 to nxvmf16.
1474     { ISD::FP_ROUND, MVT::nxv2f16, MVT::nxv2f32, 1 },
1475     { ISD::FP_ROUND, MVT::nxv4f16, MVT::nxv4f32, 1 },
1476     { ISD::FP_ROUND, MVT::nxv8f16, MVT::nxv8f32, 3 },
1477 
1478     // Truncate from nxvmf64 to nxvmf16.
1479     { ISD::FP_ROUND, MVT::nxv2f16, MVT::nxv2f64, 1 },
1480     { ISD::FP_ROUND, MVT::nxv4f16, MVT::nxv4f64, 3 },
1481     { ISD::FP_ROUND, MVT::nxv8f16, MVT::nxv8f64, 7 },
1482 
1483     // Truncate from nxvmf64 to nxvmf32.
1484     { ISD::FP_ROUND, MVT::nxv2f32, MVT::nxv2f64, 1 },
1485     { ISD::FP_ROUND, MVT::nxv4f32, MVT::nxv4f64, 3 },
1486     { ISD::FP_ROUND, MVT::nxv8f32, MVT::nxv8f64, 6 },
1487 
1488     // Extend from nxvmf16 to nxvmf32.
1489     { ISD::FP_EXTEND, MVT::nxv2f32, MVT::nxv2f16, 1},
1490     { ISD::FP_EXTEND, MVT::nxv4f32, MVT::nxv4f16, 1},
1491     { ISD::FP_EXTEND, MVT::nxv8f32, MVT::nxv8f16, 2},
1492 
1493     // Extend from nxvmf16 to nxvmf64.
1494     { ISD::FP_EXTEND, MVT::nxv2f64, MVT::nxv2f16, 1},
1495     { ISD::FP_EXTEND, MVT::nxv4f64, MVT::nxv4f16, 2},
1496     { ISD::FP_EXTEND, MVT::nxv8f64, MVT::nxv8f16, 4},
1497 
1498     // Extend from nxvmf32 to nxvmf64.
1499     { ISD::FP_EXTEND, MVT::nxv2f64, MVT::nxv2f32, 1},
1500     { ISD::FP_EXTEND, MVT::nxv4f64, MVT::nxv4f32, 2},
1501     { ISD::FP_EXTEND, MVT::nxv8f64, MVT::nxv8f32, 6},
1502 
1503   };
1504 
1505   if (const auto *Entry = ConvertCostTableLookup(ConversionTbl, ISD,
1506                                                  DstTy.getSimpleVT(),
1507                                                  SrcTy.getSimpleVT()))
1508     return AdjustCost(Entry->Cost);
1509 
1510   return AdjustCost(
1511       BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
1512 }
1513 
1514 InstructionCost AArch64TTIImpl::getExtractWithExtendCost(unsigned Opcode,
1515                                                          Type *Dst,
1516                                                          VectorType *VecTy,
1517                                                          unsigned Index) {
1518 
1519   // Make sure we were given a valid extend opcode.
1520   assert((Opcode == Instruction::SExt || Opcode == Instruction::ZExt) &&
1521          "Invalid opcode");
1522 
1523   // We are extending an element we extract from a vector, so the source type
1524   // of the extend is the element type of the vector.
1525   auto *Src = VecTy->getElementType();
1526 
1527   // Sign- and zero-extends are for integer types only.
1528   assert(isa<IntegerType>(Dst) && isa<IntegerType>(Src) && "Invalid type");
1529 
1530   // Get the cost for the extract. We compute the cost (if any) for the extend
1531   // below.
1532   InstructionCost Cost =
1533       getVectorInstrCost(Instruction::ExtractElement, VecTy, Index);
1534 
1535   // Legalize the types.
1536   auto VecLT = TLI->getTypeLegalizationCost(DL, VecTy);
1537   auto DstVT = TLI->getValueType(DL, Dst);
1538   auto SrcVT = TLI->getValueType(DL, Src);
1539   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
1540 
1541   // If the resulting type is still a vector and the destination type is legal,
1542   // we may get the extension for free. If not, get the default cost for the
1543   // extend.
1544   if (!VecLT.second.isVector() || !TLI->isTypeLegal(DstVT))
1545     return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None,
1546                                    CostKind);
1547 
1548   // The destination type should be larger than the element type. If not, get
1549   // the default cost for the extend.
1550   if (DstVT.getFixedSizeInBits() < SrcVT.getFixedSizeInBits())
1551     return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None,
1552                                    CostKind);
1553 
1554   switch (Opcode) {
1555   default:
1556     llvm_unreachable("Opcode should be either SExt or ZExt");
1557 
1558   // For sign-extends, we only need a smov, which performs the extension
1559   // automatically.
1560   case Instruction::SExt:
1561     return Cost;
1562 
1563   // For zero-extends, the extend is performed automatically by a umov unless
1564   // the destination type is i64 and the element type is i8 or i16.
1565   case Instruction::ZExt:
1566     if (DstVT.getSizeInBits() != 64u || SrcVT.getSizeInBits() == 32u)
1567       return Cost;
1568   }
1569 
1570   // If we are unable to perform the extend for free, get the default cost.
1571   return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None,
1572                                  CostKind);
1573 }
1574 
1575 InstructionCost AArch64TTIImpl::getCFInstrCost(unsigned Opcode,
1576                                                TTI::TargetCostKind CostKind,
1577                                                const Instruction *I) {
1578   if (CostKind != TTI::TCK_RecipThroughput)
1579     return Opcode == Instruction::PHI ? 0 : 1;
1580   assert(CostKind == TTI::TCK_RecipThroughput && "unexpected CostKind");
1581   // Branches are assumed to be predicted.
1582   return 0;
1583 }
1584 
1585 InstructionCost AArch64TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
1586                                                    unsigned Index) {
1587   assert(Val->isVectorTy() && "This must be a vector type");
1588 
1589   if (Index != -1U) {
1590     // Legalize the type.
1591     std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);
1592 
1593     // This type is legalized to a scalar type.
1594     if (!LT.second.isVector())
1595       return 0;
1596 
1597     // The type may be split. Normalize the index to the new type.
1598     unsigned Width = LT.second.getVectorNumElements();
1599     Index = Index % Width;
1600 
1601     // The element at index zero is already inside the vector.
1602     if (Index == 0)
1603       return 0;
1604   }
1605 
1606   // All other insert/extracts cost this much.
1607   return ST->getVectorInsertExtractBaseCost();
1608 }
1609 
1610 InstructionCost AArch64TTIImpl::getArithmeticInstrCost(
1611     unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
1612     TTI::OperandValueKind Opd1Info, TTI::OperandValueKind Opd2Info,
1613     TTI::OperandValueProperties Opd1PropInfo,
1614     TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
1615     const Instruction *CxtI) {
1616   // TODO: Handle more cost kinds.
1617   if (CostKind != TTI::TCK_RecipThroughput)
1618     return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
1619                                          Opd2Info, Opd1PropInfo,
1620                                          Opd2PropInfo, Args, CxtI);
1621 
1622   // Legalize the type.
1623   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
1624 
1625   // If the instruction is a widening instruction (e.g., uaddl, saddw, etc.),
1626   // add in the widening overhead specified by the sub-target. Since the
1627   // extends feeding widening instructions are performed automatically, they
1628   // aren't present in the generated code and have a zero cost. By adding a
1629   // widening overhead here, we attach the total cost of the combined operation
1630   // to the widening instruction.
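  // A rough illustration of the pattern being costed (an assumed example, not
  // a guarantee of codegen): IR such as
  //   %a.wide = sext <8 x i8> %a to <8 x i16>
  //   %b.wide = sext <8 x i8> %b to <8 x i16>
  //   %sum    = add <8 x i16> %a.wide, %b.wide
  // can be selected as a single widening add (e.g. "saddl v0.8h, v1.8b,
  // v2.8b"), so the extends are free and the combined cost lands on the add.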
1631   InstructionCost Cost = 0;
1632   if (isWideningInstruction(Ty, Opcode, Args))
1633     Cost += ST->getWideningBaseCost();
1634 
1635   int ISD = TLI->InstructionOpcodeToISD(Opcode);
1636 
1637   switch (ISD) {
1638   default:
1639     return Cost + BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
1640                                                 Opd2Info,
1641                                                 Opd1PropInfo, Opd2PropInfo);
1642   case ISD::SDIV:
1643     if (Opd2Info == TargetTransformInfo::OK_UniformConstantValue &&
1644         Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
      // On AArch64, scalar signed division by a power-of-two constant is
      // normally expanded to the sequence ADD + CMP + SELECT + SRA.
      // The OperandValue properties may not be the same as those of the
      // previous operation; conservatively assume OP_None.
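      // For example, "sdiv i32 %x, 4" is typically expanded to something like
      // the following (a rough sketch; exact codegen may differ):
      //   add  w8, w0, #3
      //   cmp  w0, #0
      //   csel w8, w8, w0, lt
      //   asr  w0, w8, #2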
1649       Cost += getArithmeticInstrCost(Instruction::Add, Ty, CostKind,
1650                                      Opd1Info, Opd2Info,
1651                                      TargetTransformInfo::OP_None,
1652                                      TargetTransformInfo::OP_None);
1653       Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind,
1654                                      Opd1Info, Opd2Info,
1655                                      TargetTransformInfo::OP_None,
1656                                      TargetTransformInfo::OP_None);
1657       Cost += getArithmeticInstrCost(Instruction::Select, Ty, CostKind,
1658                                      Opd1Info, Opd2Info,
1659                                      TargetTransformInfo::OP_None,
1660                                      TargetTransformInfo::OP_None);
1661       Cost += getArithmeticInstrCost(Instruction::AShr, Ty, CostKind,
1662                                      Opd1Info, Opd2Info,
1663                                      TargetTransformInfo::OP_None,
1664                                      TargetTransformInfo::OP_None);
1665       return Cost;
1666     }
1667     LLVM_FALLTHROUGH;
1668   case ISD::UDIV:
1669     if (Opd2Info == TargetTransformInfo::OK_UniformConstantValue) {
1670       auto VT = TLI->getValueType(DL, Ty);
1671       if (TLI->isOperationLegalOrCustom(ISD::MULHU, VT)) {
        // Vector signed division by a constant is expanded to the
        // sequence MULHS + ADD/SUB + SRA + SRL + ADD, and unsigned division
        // to MULHU + SUB + SRL + ADD + SRL.
1675         InstructionCost MulCost = getArithmeticInstrCost(
1676             Instruction::Mul, Ty, CostKind, Opd1Info, Opd2Info,
1677             TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
1678         InstructionCost AddCost = getArithmeticInstrCost(
1679             Instruction::Add, Ty, CostKind, Opd1Info, Opd2Info,
1680             TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
1681         InstructionCost ShrCost = getArithmeticInstrCost(
1682             Instruction::AShr, Ty, CostKind, Opd1Info, Opd2Info,
1683             TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
1684         return MulCost * 2 + AddCost * 2 + ShrCost * 2 + 1;
1685       }
1686     }
1687 
1688     Cost += BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
1689                                           Opd2Info,
1690                                           Opd1PropInfo, Opd2PropInfo);
1691     if (Ty->isVectorTy()) {
1692       // On AArch64, vector divisions are not supported natively and are
1693       // expanded into scalar divisions of each pair of elements.
1694       Cost += getArithmeticInstrCost(Instruction::ExtractElement, Ty, CostKind,
1695                                      Opd1Info, Opd2Info, Opd1PropInfo,
1696                                      Opd2PropInfo);
1697       Cost += getArithmeticInstrCost(Instruction::InsertElement, Ty, CostKind,
1698                                      Opd1Info, Opd2Info, Opd1PropInfo,
1699                                      Opd2PropInfo);
1700       // TODO: if one of the arguments is scalar, then it's not necessary to
1701       // double the cost of handling the vector elements.
1702       Cost += Cost;
1703     }
1704     return Cost;
1705 
1706   case ISD::MUL:
1707     if (LT.second != MVT::v2i64)
1708       return (Cost + 1) * LT.first;
    // Since we do not have a MUL.2d instruction, a mul <2 x i64> is expensive
    // because the elements are extracted from the vectors and the
    // multiplications are scalarized.
    // As getScalarizationOverhead is a bit too pessimistic, we estimate the
    // cost for an i64 vector directly here, which is:
    // - four i64 extracts,
    // - two i64 inserts, and
    // - two muls.
    // So, for a v2i64 with LT.first = 1 the cost is 8, and for a v4i64 with
    // LT.first = 2 the cost is 16.
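    // Roughly (a sketch of the assumed scalarization): per v2i64 mul, two
    // extracts per source operand, two scalar 64-bit "mul" instructions, and
    // two "ins" back into the result vector, i.e. 4 + 2 + 2 = 8 operations.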
1718     return LT.first * 8;
1719   case ISD::ADD:
1720   case ISD::XOR:
1721   case ISD::OR:
1722   case ISD::AND:
1723     // These nodes are marked as 'custom' for combining purposes only.
1724     // We know that they are legal. See LowerAdd in ISelLowering.
1725     return (Cost + 1) * LT.first;
1726 
1727   case ISD::FADD:
1728   case ISD::FSUB:
1729   case ISD::FMUL:
1730   case ISD::FDIV:
1731   case ISD::FNEG:
1732     // These nodes are marked as 'custom' just to lower them to SVE.
1733     // We know said lowering will incur no additional cost.
1734     if (!Ty->getScalarType()->isFP128Ty())
1735       return (Cost + 2) * LT.first;
1736 
1737     return Cost + BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
1738                                                 Opd2Info,
1739                                                 Opd1PropInfo, Opd2PropInfo);
1740   }
1741 }
1742 
1743 InstructionCost AArch64TTIImpl::getAddressComputationCost(Type *Ty,
1744                                                           ScalarEvolution *SE,
1745                                                           const SCEV *Ptr) {
1746   // Address computations in vectorized code with non-consecutive addresses will
1747   // likely result in more instructions compared to scalar code where the
1748   // computation can more often be merged into the index mode. The resulting
1749   // extra micro-ops can significantly decrease throughput.
1750   unsigned NumVectorInstToHideOverhead = 10;
1751   int MaxMergeDistance = 64;
1752 
1753   if (Ty->isVectorTy() && SE &&
1754       !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
1755     return NumVectorInstToHideOverhead;
1756 
1757   // In many cases the address computation is not merged into the instruction
1758   // addressing mode.
1759   return 1;
1760 }
1761 
1762 InstructionCost AArch64TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
1763                                                    Type *CondTy,
1764                                                    CmpInst::Predicate VecPred,
1765                                                    TTI::TargetCostKind CostKind,
1766                                                    const Instruction *I) {
1767   // TODO: Handle other cost kinds.
1768   if (CostKind != TTI::TCK_RecipThroughput)
1769     return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
1770                                      I);
1771 
1772   int ISD = TLI->InstructionOpcodeToISD(Opcode);
  // We don't lower some vector selects well when they are wider than the
  // register width.
1775   if (isa<FixedVectorType>(ValTy) && ISD == ISD::SELECT) {
1776     // We would need this many instructions to hide the scalarization happening.
1777     const int AmortizationCost = 20;
1778 
1779     // If VecPred is not set, check if we can get a predicate from the context
1780     // instruction, if its type matches the requested ValTy.
1781     if (VecPred == CmpInst::BAD_ICMP_PREDICATE && I && I->getType() == ValTy) {
1782       CmpInst::Predicate CurrentPred;
1783       if (match(I, m_Select(m_Cmp(CurrentPred, m_Value(), m_Value()), m_Value(),
1784                             m_Value())))
1785         VecPred = CurrentPred;
1786     }
1787     // Check if we have a compare/select chain that can be lowered using CMxx &
1788     // BFI pair.
1789     if (CmpInst::isIntPredicate(VecPred)) {
1790       static const auto ValidMinMaxTys = {MVT::v8i8,  MVT::v16i8, MVT::v4i16,
1791                                           MVT::v8i16, MVT::v2i32, MVT::v4i32,
1792                                           MVT::v2i64};
1793       auto LT = TLI->getTypeLegalizationCost(DL, ValTy);
1794       if (any_of(ValidMinMaxTys, [&LT](MVT M) { return M == LT.second; }))
1795         return LT.first;
1796     }
1797 
1798     static const TypeConversionCostTblEntry
1799     VectorSelectTbl[] = {
1800       { ISD::SELECT, MVT::v16i1, MVT::v16i16, 16 },
1801       { ISD::SELECT, MVT::v8i1, MVT::v8i32, 8 },
1802       { ISD::SELECT, MVT::v16i1, MVT::v16i32, 16 },
1803       { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4 * AmortizationCost },
1804       { ISD::SELECT, MVT::v8i1, MVT::v8i64, 8 * AmortizationCost },
1805       { ISD::SELECT, MVT::v16i1, MVT::v16i64, 16 * AmortizationCost }
1806     };
1807 
1808     EVT SelCondTy = TLI->getValueType(DL, CondTy);
1809     EVT SelValTy = TLI->getValueType(DL, ValTy);
1810     if (SelCondTy.isSimple() && SelValTy.isSimple()) {
1811       if (const auto *Entry = ConvertCostTableLookup(VectorSelectTbl, ISD,
1812                                                      SelCondTy.getSimpleVT(),
1813                                                      SelValTy.getSimpleVT()))
1814         return Entry->Cost;
1815     }
1816   }
1817   // The base case handles scalable vectors fine for now, since it treats the
1818   // cost as 1 * legalization cost.
1819   return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
1820 }
1821 
1822 AArch64TTIImpl::TTI::MemCmpExpansionOptions
1823 AArch64TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
1824   TTI::MemCmpExpansionOptions Options;
1825   if (ST->requiresStrictAlign()) {
1826     // TODO: Add cost modeling for strict align. Misaligned loads expand to
1827     // a bunch of instructions when strict align is enabled.
1828     return Options;
1829   }
1830   Options.AllowOverlappingLoads = true;
1831   Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
1832   Options.NumLoadsPerBlock = Options.MaxNumLoads;
  // TODO: Though vector loads usually perform well on AArch64, on some targets
  // they may wake up the FP unit, which raises the power consumption. Perhaps
  // they could be used with no holds barred (-O3).
1836   Options.LoadSizes = {8, 4, 2, 1};
1837   return Options;
1838 }
1839 
1840 InstructionCost
1841 AArch64TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
1842                                       Align Alignment, unsigned AddressSpace,
1843                                       TTI::TargetCostKind CostKind) {
1844   if (useNeonVector(Src))
1845     return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
1846                                         CostKind);
1847   auto LT = TLI->getTypeLegalizationCost(DL, Src);
1848   if (!LT.first.isValid())
1849     return InstructionCost::getInvalid();
1850 
  // The code-generator is not yet able to handle scalable vectors
  // of <vscale x 1 x eltty>, so return an invalid cost to avoid selecting
  // it. This change will be removed when code-generation for these types is
  // sufficiently reliable.
1855   if (cast<VectorType>(Src)->getElementCount() == ElementCount::getScalable(1))
1856     return InstructionCost::getInvalid();
1857 
1858   return LT.first * 2;
1859 }
1860 
1861 static unsigned getSVEGatherScatterOverhead(unsigned Opcode) {
1862   return Opcode == Instruction::Load ? SVEGatherOverhead : SVEScatterOverhead;
1863 }
1864 
1865 InstructionCost AArch64TTIImpl::getGatherScatterOpCost(
1866     unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
1867     Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) {
1868   if (useNeonVector(DataTy))
1869     return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
1870                                          Alignment, CostKind, I);
1871   auto *VT = cast<VectorType>(DataTy);
1872   auto LT = TLI->getTypeLegalizationCost(DL, DataTy);
1873   if (!LT.first.isValid())
1874     return InstructionCost::getInvalid();
1875 
  // The code-generator is not yet able to handle scalable vectors
  // of <vscale x 1 x eltty>, so return an invalid cost to avoid selecting
  // it. This change will be removed when code-generation for these types is
  // sufficiently reliable.
1880   if (cast<VectorType>(DataTy)->getElementCount() ==
1881       ElementCount::getScalable(1))
1882     return InstructionCost::getInvalid();
1883 
1884   ElementCount LegalVF = LT.second.getVectorElementCount();
1885   InstructionCost MemOpCost =
1886       getMemoryOpCost(Opcode, VT->getElementType(), Alignment, 0, CostKind, I);
1887   // Add on an overhead cost for using gathers/scatters.
  // TODO: At the moment this is applied uniformly to all CPUs, but at some
  // point we may want a per-CPU overhead.
1890   MemOpCost *= getSVEGatherScatterOverhead(Opcode);
1891   return LT.first * MemOpCost * getMaxNumElements(LegalVF);
1892 }
1893 
1894 bool AArch64TTIImpl::useNeonVector(const Type *Ty) const {
1895   return isa<FixedVectorType>(Ty) && !ST->useSVEForFixedLengthVectors();
1896 }
1897 
1898 InstructionCost AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Ty,
1899                                                 MaybeAlign Alignment,
1900                                                 unsigned AddressSpace,
1901                                                 TTI::TargetCostKind CostKind,
1902                                                 const Instruction *I) {
1903   EVT VT = TLI->getValueType(DL, Ty, true);
1904   // Type legalization can't handle structs
1905   if (VT == MVT::Other)
1906     return BaseT::getMemoryOpCost(Opcode, Ty, Alignment, AddressSpace,
1907                                   CostKind);
1908 
1909   auto LT = TLI->getTypeLegalizationCost(DL, Ty);
1910   if (!LT.first.isValid())
1911     return InstructionCost::getInvalid();
1912 
  // The code-generator is not yet able to handle scalable vectors
  // of <vscale x 1 x eltty>, so return an invalid cost to avoid selecting
  // it. This change will be removed when code-generation for these types is
  // sufficiently reliable.
1917   if (auto *VTy = dyn_cast<ScalableVectorType>(Ty))
1918     if (VTy->getElementCount() == ElementCount::getScalable(1))
1919       return InstructionCost::getInvalid();
1920 
1921   // TODO: consider latency as well for TCK_SizeAndLatency.
1922   if (CostKind == TTI::TCK_CodeSize || CostKind == TTI::TCK_SizeAndLatency)
1923     return LT.first;
1924 
1925   if (CostKind != TTI::TCK_RecipThroughput)
1926     return 1;
1927 
1928   if (ST->isMisaligned128StoreSlow() && Opcode == Instruction::Store &&
1929       LT.second.is128BitVector() && (!Alignment || *Alignment < Align(16))) {
    // Unaligned stores are extremely inefficient. We don't split all
    // unaligned 128-bit stores because of the negative impact that has been
    // observed in practice on inlined block copy code.
1933     // We make such stores expensive so that we will only vectorize if there
1934     // are 6 other instructions getting vectorized.
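    // For example, a misaligned 128-bit store whose type legalizes in a single
    // step (LT.first == 1) is costed as 1 * 2 * 6 == 12 below.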
1935     const int AmortizationCost = 6;
1936 
1937     return LT.first * 2 * AmortizationCost;
1938   }
1939 
1940   // Check truncating stores and extending loads.
1941   if (useNeonVector(Ty) &&
1942       Ty->getScalarSizeInBits() != LT.second.getScalarSizeInBits()) {
    // v4i8 types are lowered to a scalar load/store and sshll/xtn.
1944     if (VT == MVT::v4i8)
1945       return 2;
1946     // Otherwise we need to scalarize.
1947     return cast<FixedVectorType>(Ty)->getNumElements() * 2;
1948   }
1949 
1950   return LT.first;
1951 }
1952 
1953 InstructionCost AArch64TTIImpl::getInterleavedMemoryOpCost(
1954     unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1955     Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
1956     bool UseMaskForCond, bool UseMaskForGaps) {
1957   assert(Factor >= 2 && "Invalid interleave factor");
1958   auto *VecVTy = cast<FixedVectorType>(VecTy);
1959 
1960   if (!UseMaskForCond && !UseMaskForGaps &&
1961       Factor <= TLI->getMaxSupportedInterleaveFactor()) {
1962     unsigned NumElts = VecVTy->getNumElements();
1963     auto *SubVecTy =
1964         FixedVectorType::get(VecTy->getScalarType(), NumElts / Factor);
1965 
    // ldN/stN only support legal vector types that are 64 or 128 bits in size.
1967     // Accesses having vector types that are a multiple of 128 bits can be
1968     // matched to more than one ldN/stN instruction.
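    // For example (an assumed case), a <16 x i32> access with Factor == 2 uses
    // <8 x i32> sub-vectors; each is 256 bits, i.e. two 128-bit ldN/stN
    // accesses, so the cost returned below would be 2 * 2 == 4.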
1969     bool UseScalable;
1970     if (NumElts % Factor == 0 &&
1971         TLI->isLegalInterleavedAccessType(SubVecTy, DL, UseScalable))
1972       return Factor * TLI->getNumInterleavedAccesses(SubVecTy, DL, UseScalable);
1973   }
1974 
1975   return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
1976                                            Alignment, AddressSpace, CostKind,
1977                                            UseMaskForCond, UseMaskForGaps);
1978 }
1979 
1980 InstructionCost
1981 AArch64TTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) {
1982   InstructionCost Cost = 0;
1983   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
1984   for (auto *I : Tys) {
1985     if (!I->isVectorTy())
1986       continue;
1987     if (I->getScalarSizeInBits() * cast<FixedVectorType>(I)->getNumElements() ==
1988         128)
1989       Cost += getMemoryOpCost(Instruction::Store, I, Align(128), 0, CostKind) +
1990               getMemoryOpCost(Instruction::Load, I, Align(128), 0, CostKind);
1991   }
1992   return Cost;
1993 }
1994 
1995 unsigned AArch64TTIImpl::getMaxInterleaveFactor(unsigned VF) {
1996   return ST->getMaxInterleaveFactor();
1997 }
1998 
1999 // For Falkor, we want to avoid having too many strided loads in a loop since
2000 // that can exhaust the HW prefetcher resources.  We adjust the unroller
2001 // MaxCount preference below to attempt to ensure unrolling doesn't create too
2002 // many strided loads.
2003 static void
2004 getFalkorUnrollingPreferences(Loop *L, ScalarEvolution &SE,
2005                               TargetTransformInfo::UnrollingPreferences &UP) {
2006   enum { MaxStridedLoads = 7 };
2007   auto countStridedLoads = [](Loop *L, ScalarEvolution &SE) {
2008     int StridedLoads = 0;
2009     // FIXME? We could make this more precise by looking at the CFG and
2010     // e.g. not counting loads in each side of an if-then-else diamond.
2011     for (const auto BB : L->blocks()) {
2012       for (auto &I : *BB) {
2013         LoadInst *LMemI = dyn_cast<LoadInst>(&I);
2014         if (!LMemI)
2015           continue;
2016 
2017         Value *PtrValue = LMemI->getPointerOperand();
2018         if (L->isLoopInvariant(PtrValue))
2019           continue;
2020 
2021         const SCEV *LSCEV = SE.getSCEV(PtrValue);
2022         const SCEVAddRecExpr *LSCEVAddRec = dyn_cast<SCEVAddRecExpr>(LSCEV);
2023         if (!LSCEVAddRec || !LSCEVAddRec->isAffine())
2024           continue;
2025 
2026         // FIXME? We could take pairing of unrolled load copies into account
2027         // by looking at the AddRec, but we would probably have to limit this
2028         // to loops with no stores or other memory optimization barriers.
2029         ++StridedLoads;
2030         // We've seen enough strided loads that seeing more won't make a
2031         // difference.
2032         if (StridedLoads > MaxStridedLoads / 2)
2033           return StridedLoads;
2034       }
2035     }
2036     return StridedLoads;
2037   };
2038 
2039   int StridedLoads = countStridedLoads(L, SE);
2040   LLVM_DEBUG(dbgs() << "falkor-hwpf: detected " << StridedLoads
2041                     << " strided loads\n");
2042   // Pick the largest power of 2 unroll count that won't result in too many
2043   // strided loads.
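  // For example, with MaxStridedLoads == 7 and 3 strided loads detected,
  // MaxCount becomes 1 << Log2_32(7 / 3) == 2.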
2044   if (StridedLoads) {
2045     UP.MaxCount = 1 << Log2_32(MaxStridedLoads / StridedLoads);
2046     LLVM_DEBUG(dbgs() << "falkor-hwpf: setting unroll MaxCount to "
2047                       << UP.MaxCount << '\n');
2048   }
2049 }
2050 
2051 void AArch64TTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
2052                                              TTI::UnrollingPreferences &UP,
2053                                              OptimizationRemarkEmitter *ORE) {
2054   // Enable partial unrolling and runtime unrolling.
2055   BaseT::getUnrollingPreferences(L, SE, UP, ORE);
2056 
2057   UP.UpperBound = true;
2058 
  // An inner loop is more likely to be hot, and the runtime checks can be
  // hoisted out by the LICM pass, so the overhead is lower; try a larger
  // threshold to unroll more loops.
2062   if (L->getLoopDepth() > 1)
2063     UP.PartialThreshold *= 2;
2064 
2065   // Disable partial & runtime unrolling on -Os.
2066   UP.PartialOptSizeThreshold = 0;
2067 
2068   if (ST->getProcFamily() == AArch64Subtarget::Falkor &&
2069       EnableFalkorHWPFUnrollFix)
2070     getFalkorUnrollingPreferences(L, SE, UP);
2071 
2072   // Scan the loop: don't unroll loops with calls as this could prevent
2073   // inlining. Don't unroll vector loops either, as they don't benefit much from
2074   // unrolling.
2075   for (auto *BB : L->getBlocks()) {
2076     for (auto &I : *BB) {
      // Don't unroll vectorized loops.
2078       if (I.getType()->isVectorTy())
2079         return;
2080 
2081       if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
2082         if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
2083           if (!isLoweredToCall(F))
2084             continue;
2085         }
2086         return;
2087       }
2088     }
2089   }
2090 
  // Enable runtime unrolling for in-order models.
  // If mcpu is omitted, getProcFamily() returns AArch64Subtarget::Others, so by
  // checking for that case we can ensure that the default behaviour is
  // unchanged.
2095   if (ST->getProcFamily() != AArch64Subtarget::Others &&
2096       !ST->getSchedModel().isOutOfOrder()) {
2097     UP.Runtime = true;
2098     UP.Partial = true;
2099     UP.UnrollRemainder = true;
2100     UP.DefaultUnrollRuntimeCount = 4;
2101 
2102     UP.UnrollAndJam = true;
2103     UP.UnrollAndJamInnerLoopThreshold = 60;
2104   }
2105 }
2106 
2107 void AArch64TTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
2108                                            TTI::PeelingPreferences &PP) {
2109   BaseT::getPeelingPreferences(L, SE, PP);
2110 }
2111 
2112 Value *AArch64TTIImpl::getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
2113                                                          Type *ExpectedType) {
2114   switch (Inst->getIntrinsicID()) {
2115   default:
2116     return nullptr;
2117   case Intrinsic::aarch64_neon_st2:
2118   case Intrinsic::aarch64_neon_st3:
2119   case Intrinsic::aarch64_neon_st4: {
    // Build a struct value of the expected type from the stored operands.
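    // A hedged example: for an @llvm.aarch64.neon.st2 call that stores
    // <4 x i32> %a and <4 x i32> %b, and an ExpectedType of
    // { <4 x i32>, <4 x i32> }, we rebuild the aggregate { %a, %b } with
    // insertvalue instructions below.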
2121     StructType *ST = dyn_cast<StructType>(ExpectedType);
2122     if (!ST)
2123       return nullptr;
2124     unsigned NumElts = Inst->arg_size() - 1;
2125     if (ST->getNumElements() != NumElts)
2126       return nullptr;
2127     for (unsigned i = 0, e = NumElts; i != e; ++i) {
2128       if (Inst->getArgOperand(i)->getType() != ST->getElementType(i))
2129         return nullptr;
2130     }
2131     Value *Res = UndefValue::get(ExpectedType);
2132     IRBuilder<> Builder(Inst);
2133     for (unsigned i = 0, e = NumElts; i != e; ++i) {
2134       Value *L = Inst->getArgOperand(i);
2135       Res = Builder.CreateInsertValue(Res, L, i);
2136     }
2137     return Res;
2138   }
2139   case Intrinsic::aarch64_neon_ld2:
2140   case Intrinsic::aarch64_neon_ld3:
2141   case Intrinsic::aarch64_neon_ld4:
2142     if (Inst->getType() == ExpectedType)
2143       return Inst;
2144     return nullptr;
2145   }
2146 }
2147 
2148 bool AArch64TTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
2149                                         MemIntrinsicInfo &Info) {
2150   switch (Inst->getIntrinsicID()) {
2151   default:
2152     break;
2153   case Intrinsic::aarch64_neon_ld2:
2154   case Intrinsic::aarch64_neon_ld3:
2155   case Intrinsic::aarch64_neon_ld4:
2156     Info.ReadMem = true;
2157     Info.WriteMem = false;
2158     Info.PtrVal = Inst->getArgOperand(0);
2159     break;
2160   case Intrinsic::aarch64_neon_st2:
2161   case Intrinsic::aarch64_neon_st3:
2162   case Intrinsic::aarch64_neon_st4:
2163     Info.ReadMem = false;
2164     Info.WriteMem = true;
2165     Info.PtrVal = Inst->getArgOperand(Inst->arg_size() - 1);
2166     break;
2167   }
2168 
2169   switch (Inst->getIntrinsicID()) {
2170   default:
2171     return false;
2172   case Intrinsic::aarch64_neon_ld2:
2173   case Intrinsic::aarch64_neon_st2:
2174     Info.MatchingId = VECTOR_LDST_TWO_ELEMENTS;
2175     break;
2176   case Intrinsic::aarch64_neon_ld3:
2177   case Intrinsic::aarch64_neon_st3:
2178     Info.MatchingId = VECTOR_LDST_THREE_ELEMENTS;
2179     break;
2180   case Intrinsic::aarch64_neon_ld4:
2181   case Intrinsic::aarch64_neon_st4:
2182     Info.MatchingId = VECTOR_LDST_FOUR_ELEMENTS;
2183     break;
2184   }
2185   return true;
2186 }
2187 
/// See if \p I should be considered for address type promotion. We check if
/// \p I is a sext with the right type that is used in memory accesses. If it
/// is used in a "complex" getelementptr, we allow it to be promoted without
/// finding other sext instructions that sign extended the same initial value.
/// A getelementptr is considered "complex" if it has more than 2 operands.
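/// For example (a hypothetical snippet), the sext below feeds a GEP with three
/// operands, so it may be promoted without a common header:
///   %idx = sext i32 %i to i64
///   %p = getelementptr [16 x i32], [16 x i32]* %base, i64 0, i64 %idx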
2193 bool AArch64TTIImpl::shouldConsiderAddressTypePromotion(
2194     const Instruction &I, bool &AllowPromotionWithoutCommonHeader) {
2195   bool Considerable = false;
2196   AllowPromotionWithoutCommonHeader = false;
2197   if (!isa<SExtInst>(&I))
2198     return false;
2199   Type *ConsideredSExtType =
2200       Type::getInt64Ty(I.getParent()->getParent()->getContext());
2201   if (I.getType() != ConsideredSExtType)
2202     return false;
2203   // See if the sext is the one with the right type and used in at least one
2204   // GetElementPtrInst.
2205   for (const User *U : I.users()) {
2206     if (const GetElementPtrInst *GEPInst = dyn_cast<GetElementPtrInst>(U)) {
2207       Considerable = true;
      // A getelementptr is considered "complex" if it has more than 2
      // operands. We will promote a SExt used in such a complex GEP, as we
      // expect some computation to be merged if it is done on 64 bits.
2211       if (GEPInst->getNumOperands() > 2) {
2212         AllowPromotionWithoutCommonHeader = true;
2213         break;
2214       }
2215     }
2216   }
2217   return Considerable;
2218 }
2219 
2220 bool AArch64TTIImpl::isLegalToVectorizeReduction(
2221     const RecurrenceDescriptor &RdxDesc, ElementCount VF) const {
2222   if (!VF.isScalable())
2223     return true;
2224 
2225   Type *Ty = RdxDesc.getRecurrenceType();
2226   if (Ty->isBFloatTy() || !isElementTypeLegalForScalableVector(Ty))
2227     return false;
2228 
2229   switch (RdxDesc.getRecurrenceKind()) {
2230   case RecurKind::Add:
2231   case RecurKind::FAdd:
2232   case RecurKind::And:
2233   case RecurKind::Or:
2234   case RecurKind::Xor:
2235   case RecurKind::SMin:
2236   case RecurKind::SMax:
2237   case RecurKind::UMin:
2238   case RecurKind::UMax:
2239   case RecurKind::FMin:
2240   case RecurKind::FMax:
2241   case RecurKind::SelectICmp:
2242   case RecurKind::SelectFCmp:
2243   case RecurKind::FMulAdd:
2244     return true;
2245   default:
2246     return false;
2247   }
2248 }
2249 
2250 InstructionCost
2251 AArch64TTIImpl::getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy,
2252                                        bool IsUnsigned,
2253                                        TTI::TargetCostKind CostKind) {
2254   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
2255 
2256   if (LT.second.getScalarType() == MVT::f16 && !ST->hasFullFP16())
2257     return BaseT::getMinMaxReductionCost(Ty, CondTy, IsUnsigned, CostKind);
2258 
2259   assert((isa<ScalableVectorType>(Ty) == isa<ScalableVectorType>(CondTy)) &&
         "Both vectors need to be equally scalable");
2261 
2262   InstructionCost LegalizationCost = 0;
2263   if (LT.first > 1) {
2264     Type *LegalVTy = EVT(LT.second).getTypeForEVT(Ty->getContext());
2265     unsigned MinMaxOpcode =
2266         Ty->isFPOrFPVectorTy()
2267             ? Intrinsic::maxnum
2268             : (IsUnsigned ? Intrinsic::umin : Intrinsic::smin);
2269     IntrinsicCostAttributes Attrs(MinMaxOpcode, LegalVTy, {LegalVTy, LegalVTy});
2270     LegalizationCost = getIntrinsicInstrCost(Attrs, CostKind) * (LT.first - 1);
2271   }
2272 
2273   return LegalizationCost + /*Cost of horizontal reduction*/ 2;
2274 }
2275 
2276 InstructionCost AArch64TTIImpl::getArithmeticReductionCostSVE(
2277     unsigned Opcode, VectorType *ValTy, TTI::TargetCostKind CostKind) {
2278   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
2279   InstructionCost LegalizationCost = 0;
2280   if (LT.first > 1) {
2281     Type *LegalVTy = EVT(LT.second).getTypeForEVT(ValTy->getContext());
2282     LegalizationCost = getArithmeticInstrCost(Opcode, LegalVTy, CostKind);
2283     LegalizationCost *= LT.first - 1;
2284   }
2285 
2286   int ISD = TLI->InstructionOpcodeToISD(Opcode);
2287   assert(ISD && "Invalid opcode");
2288   // Add the final reduction cost for the legal horizontal reduction
2289   switch (ISD) {
2290   case ISD::ADD:
2291   case ISD::AND:
2292   case ISD::OR:
2293   case ISD::XOR:
2294   case ISD::FADD:
2295     return LegalizationCost + 2;
2296   default:
2297     return InstructionCost::getInvalid();
2298   }
2299 }
2300 
2301 InstructionCost
2302 AArch64TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
2303                                            Optional<FastMathFlags> FMF,
2304                                            TTI::TargetCostKind CostKind) {
2305   if (TTI::requiresOrderedReduction(FMF)) {
2306     if (auto *FixedVTy = dyn_cast<FixedVectorType>(ValTy)) {
2307       InstructionCost BaseCost =
2308           BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
2309       // Add on extra cost to reflect the extra overhead on some CPUs. We still
2310       // end up vectorizing for more computationally intensive loops.
2311       return BaseCost + FixedVTy->getNumElements();
2312     }
2313 
2314     if (Opcode != Instruction::FAdd)
2315       return InstructionCost::getInvalid();
2316 
2317     auto *VTy = cast<ScalableVectorType>(ValTy);
2318     InstructionCost Cost =
2319         getArithmeticInstrCost(Opcode, VTy->getScalarType(), CostKind);
2320     Cost *= getMaxNumElements(VTy->getElementCount());
2321     return Cost;
2322   }
2323 
2324   if (isa<ScalableVectorType>(ValTy))
2325     return getArithmeticReductionCostSVE(Opcode, ValTy, CostKind);
2326 
2327   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
2328   MVT MTy = LT.second;
2329   int ISD = TLI->InstructionOpcodeToISD(Opcode);
2330   assert(ISD && "Invalid opcode");
2331 
2332   // Horizontal adds can use the 'addv' instruction. We model the cost of these
2333   // instructions as twice a normal vector add, plus 1 for each legalization
2334   // step (LT.first). This is the only arithmetic vector reduction operation for
2335   // which we have an instruction.
2336   // OR, XOR and AND costs should match the codegen from:
2337   // OR: llvm/test/CodeGen/AArch64/reduce-or.ll
2338   // XOR: llvm/test/CodeGen/AArch64/reduce-xor.ll
2339   // AND: llvm/test/CodeGen/AArch64/reduce-and.ll
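  // For example, an add reduction of v8i16 costs 2, while v16i16 legalizes to
  // two v8i16 halves (LT.first == 2), giving a cost of (2 - 1) + 2 == 3.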
2340   static const CostTblEntry CostTblNoPairwise[]{
2341       {ISD::ADD, MVT::v8i8,   2},
2342       {ISD::ADD, MVT::v16i8,  2},
2343       {ISD::ADD, MVT::v4i16,  2},
2344       {ISD::ADD, MVT::v8i16,  2},
2345       {ISD::ADD, MVT::v4i32,  2},
2346       {ISD::OR,  MVT::v8i8,  15},
2347       {ISD::OR,  MVT::v16i8, 17},
2348       {ISD::OR,  MVT::v4i16,  7},
2349       {ISD::OR,  MVT::v8i16,  9},
2350       {ISD::OR,  MVT::v2i32,  3},
2351       {ISD::OR,  MVT::v4i32,  5},
2352       {ISD::OR,  MVT::v2i64,  3},
2353       {ISD::XOR, MVT::v8i8,  15},
2354       {ISD::XOR, MVT::v16i8, 17},
2355       {ISD::XOR, MVT::v4i16,  7},
2356       {ISD::XOR, MVT::v8i16,  9},
2357       {ISD::XOR, MVT::v2i32,  3},
2358       {ISD::XOR, MVT::v4i32,  5},
2359       {ISD::XOR, MVT::v2i64,  3},
2360       {ISD::AND, MVT::v8i8,  15},
2361       {ISD::AND, MVT::v16i8, 17},
2362       {ISD::AND, MVT::v4i16,  7},
2363       {ISD::AND, MVT::v8i16,  9},
2364       {ISD::AND, MVT::v2i32,  3},
2365       {ISD::AND, MVT::v4i32,  5},
2366       {ISD::AND, MVT::v2i64,  3},
2367   };
2368   switch (ISD) {
2369   default:
2370     break;
2371   case ISD::ADD:
2372     if (const auto *Entry = CostTableLookup(CostTblNoPairwise, ISD, MTy))
2373       return (LT.first - 1) + Entry->Cost;
2374     break;
2375   case ISD::XOR:
2376   case ISD::AND:
2377   case ISD::OR:
2378     const auto *Entry = CostTableLookup(CostTblNoPairwise, ISD, MTy);
2379     if (!Entry)
2380       break;
2381     auto *ValVTy = cast<FixedVectorType>(ValTy);
2382     if (!ValVTy->getElementType()->isIntegerTy(1) &&
2383         MTy.getVectorNumElements() <= ValVTy->getNumElements() &&
2384         isPowerOf2_32(ValVTy->getNumElements())) {
2385       InstructionCost ExtraCost = 0;
2386       if (LT.first != 1) {
2387         // Type needs to be split, so there is an extra cost of LT.first - 1
2388         // arithmetic ops.
2389         auto *Ty = FixedVectorType::get(ValTy->getElementType(),
2390                                         MTy.getVectorNumElements());
2391         ExtraCost = getArithmeticInstrCost(Opcode, Ty, CostKind);
2392         ExtraCost *= LT.first - 1;
2393       }
2394       return Entry->Cost + ExtraCost;
2395     }
2396     break;
2397   }
2398   return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
2399 }
2400 
2401 InstructionCost AArch64TTIImpl::getSpliceCost(VectorType *Tp, int Index) {
2402   static const CostTblEntry ShuffleTbl[] = {
2403       { TTI::SK_Splice, MVT::nxv16i8,  1 },
2404       { TTI::SK_Splice, MVT::nxv8i16,  1 },
2405       { TTI::SK_Splice, MVT::nxv4i32,  1 },
2406       { TTI::SK_Splice, MVT::nxv2i64,  1 },
2407       { TTI::SK_Splice, MVT::nxv2f16,  1 },
2408       { TTI::SK_Splice, MVT::nxv4f16,  1 },
2409       { TTI::SK_Splice, MVT::nxv8f16,  1 },
2410       { TTI::SK_Splice, MVT::nxv2bf16, 1 },
2411       { TTI::SK_Splice, MVT::nxv4bf16, 1 },
2412       { TTI::SK_Splice, MVT::nxv8bf16, 1 },
2413       { TTI::SK_Splice, MVT::nxv2f32,  1 },
2414       { TTI::SK_Splice, MVT::nxv4f32,  1 },
2415       { TTI::SK_Splice, MVT::nxv2f64,  1 },
2416   };
2417 
2418   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
2419   Type *LegalVTy = EVT(LT.second).getTypeForEVT(Tp->getContext());
2420   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
2421   EVT PromotedVT = LT.second.getScalarType() == MVT::i1
2422                        ? TLI->getPromotedVTForPredicate(EVT(LT.second))
2423                        : LT.second;
2424   Type *PromotedVTy = EVT(PromotedVT).getTypeForEVT(Tp->getContext());
2425   InstructionCost LegalizationCost = 0;
2426   if (Index < 0) {
2427     LegalizationCost =
2428         getCmpSelInstrCost(Instruction::ICmp, PromotedVTy, PromotedVTy,
2429                            CmpInst::BAD_ICMP_PREDICATE, CostKind) +
2430         getCmpSelInstrCost(Instruction::Select, PromotedVTy, LegalVTy,
2431                            CmpInst::BAD_ICMP_PREDICATE, CostKind);
2432   }
2433 
  // Predicated splices are promoted during lowering; see AArch64ISelLowering.cpp.
  // The cost is computed on the promoted type.
2436   if (LT.second.getScalarType() == MVT::i1) {
2437     LegalizationCost +=
2438         getCastInstrCost(Instruction::ZExt, PromotedVTy, LegalVTy,
2439                          TTI::CastContextHint::None, CostKind) +
2440         getCastInstrCost(Instruction::Trunc, LegalVTy, PromotedVTy,
2441                          TTI::CastContextHint::None, CostKind);
2442   }
2443   const auto *Entry =
2444       CostTableLookup(ShuffleTbl, TTI::SK_Splice, PromotedVT.getSimpleVT());
2445   assert(Entry && "Illegal Type for Splice");
2446   LegalizationCost += Entry->Cost;
2447   return LegalizationCost * LT.first;
2448 }
2449 
2450 InstructionCost AArch64TTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
2451                                                VectorType *Tp,
2452                                                ArrayRef<int> Mask, int Index,
2453                                                VectorType *SubTp) {
2454   Kind = improveShuffleKindFromMask(Kind, Mask);
2455   if (Kind == TTI::SK_Broadcast || Kind == TTI::SK_Transpose ||
2456       Kind == TTI::SK_Select || Kind == TTI::SK_PermuteSingleSrc ||
2457       Kind == TTI::SK_Reverse) {
2458     static const CostTblEntry ShuffleTbl[] = {
2459       // Broadcast shuffle kinds can be performed with 'dup'.
2460       { TTI::SK_Broadcast, MVT::v8i8,  1 },
2461       { TTI::SK_Broadcast, MVT::v16i8, 1 },
2462       { TTI::SK_Broadcast, MVT::v4i16, 1 },
2463       { TTI::SK_Broadcast, MVT::v8i16, 1 },
2464       { TTI::SK_Broadcast, MVT::v2i32, 1 },
2465       { TTI::SK_Broadcast, MVT::v4i32, 1 },
2466       { TTI::SK_Broadcast, MVT::v2i64, 1 },
2467       { TTI::SK_Broadcast, MVT::v2f32, 1 },
2468       { TTI::SK_Broadcast, MVT::v4f32, 1 },
2469       { TTI::SK_Broadcast, MVT::v2f64, 1 },
2470       // Transpose shuffle kinds can be performed with 'trn1/trn2' and
2471       // 'zip1/zip2' instructions.
2472       { TTI::SK_Transpose, MVT::v8i8,  1 },
2473       { TTI::SK_Transpose, MVT::v16i8, 1 },
2474       { TTI::SK_Transpose, MVT::v4i16, 1 },
2475       { TTI::SK_Transpose, MVT::v8i16, 1 },
2476       { TTI::SK_Transpose, MVT::v2i32, 1 },
2477       { TTI::SK_Transpose, MVT::v4i32, 1 },
2478       { TTI::SK_Transpose, MVT::v2i64, 1 },
2479       { TTI::SK_Transpose, MVT::v2f32, 1 },
2480       { TTI::SK_Transpose, MVT::v4f32, 1 },
2481       { TTI::SK_Transpose, MVT::v2f64, 1 },
2482       // Select shuffle kinds.
2483       // TODO: handle vXi8/vXi16.
2484       { TTI::SK_Select, MVT::v2i32, 1 }, // mov.
2485       { TTI::SK_Select, MVT::v4i32, 2 }, // rev+trn (or similar).
2486       { TTI::SK_Select, MVT::v2i64, 1 }, // mov.
2487       { TTI::SK_Select, MVT::v2f32, 1 }, // mov.
2488       { TTI::SK_Select, MVT::v4f32, 2 }, // rev+trn (or similar).
2489       { TTI::SK_Select, MVT::v2f64, 1 }, // mov.
2490       // PermuteSingleSrc shuffle kinds.
2491       { TTI::SK_PermuteSingleSrc, MVT::v2i32, 1 }, // mov.
2492       { TTI::SK_PermuteSingleSrc, MVT::v4i32, 3 }, // perfectshuffle worst case.
2493       { TTI::SK_PermuteSingleSrc, MVT::v2i64, 1 }, // mov.
2494       { TTI::SK_PermuteSingleSrc, MVT::v2f32, 1 }, // mov.
2495       { TTI::SK_PermuteSingleSrc, MVT::v4f32, 3 }, // perfectshuffle worst case.
2496       { TTI::SK_PermuteSingleSrc, MVT::v2f64, 1 }, // mov.
2497       { TTI::SK_PermuteSingleSrc, MVT::v4i16, 3 }, // perfectshuffle worst case.
2498       { TTI::SK_PermuteSingleSrc, MVT::v4f16, 3 }, // perfectshuffle worst case.
2499       { TTI::SK_PermuteSingleSrc, MVT::v4bf16, 3 }, // perfectshuffle worst case.
2500       { TTI::SK_PermuteSingleSrc, MVT::v8i16, 8 }, // constpool + load + tbl
2501       { TTI::SK_PermuteSingleSrc, MVT::v8f16, 8 }, // constpool + load + tbl
2502       { TTI::SK_PermuteSingleSrc, MVT::v8bf16, 8 }, // constpool + load + tbl
2503       { TTI::SK_PermuteSingleSrc, MVT::v8i8, 8 }, // constpool + load + tbl
2504       { TTI::SK_PermuteSingleSrc, MVT::v16i8, 8 }, // constpool + load + tbl
2505       // Reverse can be lowered with `rev`.
2506       { TTI::SK_Reverse, MVT::v2i32, 1 }, // mov.
2507       { TTI::SK_Reverse, MVT::v4i32, 2 }, // REV64; EXT
2508       { TTI::SK_Reverse, MVT::v2i64, 1 }, // mov.
2509       { TTI::SK_Reverse, MVT::v2f32, 1 }, // mov.
2510       { TTI::SK_Reverse, MVT::v4f32, 2 }, // REV64; EXT
2511       { TTI::SK_Reverse, MVT::v2f64, 1 }, // mov.
2512       // Broadcast shuffle kinds for scalable vectors
2513       { TTI::SK_Broadcast, MVT::nxv16i8,  1 },
2514       { TTI::SK_Broadcast, MVT::nxv8i16,  1 },
2515       { TTI::SK_Broadcast, MVT::nxv4i32,  1 },
2516       { TTI::SK_Broadcast, MVT::nxv2i64,  1 },
2517       { TTI::SK_Broadcast, MVT::nxv2f16,  1 },
2518       { TTI::SK_Broadcast, MVT::nxv4f16,  1 },
2519       { TTI::SK_Broadcast, MVT::nxv8f16,  1 },
2520       { TTI::SK_Broadcast, MVT::nxv2bf16, 1 },
2521       { TTI::SK_Broadcast, MVT::nxv4bf16, 1 },
2522       { TTI::SK_Broadcast, MVT::nxv8bf16, 1 },
2523       { TTI::SK_Broadcast, MVT::nxv2f32,  1 },
2524       { TTI::SK_Broadcast, MVT::nxv4f32,  1 },
2525       { TTI::SK_Broadcast, MVT::nxv2f64,  1 },
2526       { TTI::SK_Broadcast, MVT::nxv16i1,  1 },
2527       { TTI::SK_Broadcast, MVT::nxv8i1,   1 },
2528       { TTI::SK_Broadcast, MVT::nxv4i1,   1 },
2529       { TTI::SK_Broadcast, MVT::nxv2i1,   1 },
2530       // Handle the cases for vector.reverse with scalable vectors
2531       { TTI::SK_Reverse, MVT::nxv16i8,  1 },
2532       { TTI::SK_Reverse, MVT::nxv8i16,  1 },
2533       { TTI::SK_Reverse, MVT::nxv4i32,  1 },
2534       { TTI::SK_Reverse, MVT::nxv2i64,  1 },
2535       { TTI::SK_Reverse, MVT::nxv2f16,  1 },
2536       { TTI::SK_Reverse, MVT::nxv4f16,  1 },
2537       { TTI::SK_Reverse, MVT::nxv8f16,  1 },
2538       { TTI::SK_Reverse, MVT::nxv2bf16, 1 },
2539       { TTI::SK_Reverse, MVT::nxv4bf16, 1 },
2540       { TTI::SK_Reverse, MVT::nxv8bf16, 1 },
2541       { TTI::SK_Reverse, MVT::nxv2f32,  1 },
2542       { TTI::SK_Reverse, MVT::nxv4f32,  1 },
2543       { TTI::SK_Reverse, MVT::nxv2f64,  1 },
2544       { TTI::SK_Reverse, MVT::nxv16i1,  1 },
2545       { TTI::SK_Reverse, MVT::nxv8i1,   1 },
2546       { TTI::SK_Reverse, MVT::nxv4i1,   1 },
2547       { TTI::SK_Reverse, MVT::nxv2i1,   1 },
2548     };
2549     std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
2550     if (const auto *Entry = CostTableLookup(ShuffleTbl, Kind, LT.second))
2551       return LT.first * Entry->Cost;
2552   }
2553   if (Kind == TTI::SK_Splice && isa<ScalableVectorType>(Tp))
2554     return getSpliceCost(Tp, Index);
2555   return BaseT::getShuffleCost(Kind, Tp, Mask, Index, SubTp);
2556 }
2557