1 //===-- AArch64TargetTransformInfo.cpp - AArch64 specific TTI -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "AArch64TargetTransformInfo.h"
10 #include "AArch64ExpandImm.h"
11 #include "MCTargetDesc/AArch64AddressingModes.h"
12 #include "llvm/Analysis/LoopInfo.h"
13 #include "llvm/Analysis/TargetTransformInfo.h"
14 #include "llvm/CodeGen/BasicTTIImpl.h"
15 #include "llvm/CodeGen/CostTable.h"
16 #include "llvm/CodeGen/TargetLowering.h"
17 #include "llvm/IR/IntrinsicInst.h"
18 #include "llvm/IR/IntrinsicsAArch64.h"
19 #include "llvm/IR/PatternMatch.h"
20 #include "llvm/Support/Debug.h"
21 #include "llvm/Transforms/InstCombine/InstCombiner.h"
22 #include <algorithm>
23 using namespace llvm;
24 using namespace llvm::PatternMatch;
25 
26 #define DEBUG_TYPE "aarch64tti"
27 
28 static cl::opt<bool> EnableFalkorHWPFUnrollFix("enable-falkor-hwpf-unroll-fix",
29                                                cl::init(true), cl::Hidden);
30 
31 bool AArch64TTIImpl::areInlineCompatible(const Function *Caller,
32                                          const Function *Callee) const {
33   const TargetMachine &TM = getTLI()->getTargetMachine();
34 
35   const FeatureBitset &CallerBits =
36       TM.getSubtargetImpl(*Caller)->getFeatureBits();
37   const FeatureBitset &CalleeBits =
38       TM.getSubtargetImpl(*Callee)->getFeatureBits();
39 
40   // Inline a callee if its target-features are a subset of the callers
41   // target-features.
42   return (CallerBits & CalleeBits) == CalleeBits;
43 }
44 
45 /// Calculate the cost of materializing a 64-bit value. This helper
46 /// method might only calculate a fraction of a larger immediate. Therefore it
47 /// is valid to return a cost of ZERO.
48 InstructionCost AArch64TTIImpl::getIntImmCost(int64_t Val) {
49   // Check if the immediate can be encoded within an instruction.
50   if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, 64))
51     return 0;
52 
53   if (Val < 0)
54     Val = ~Val;
55 
56   // Calculate how many moves we will need to materialize this constant.
57   SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn;
58   AArch64_IMM::expandMOVImm(Val, 64, Insn);
59   return Insn.size();
60 }
61 
62 /// Calculate the cost of materializing the given constant.
63 InstructionCost AArch64TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
64                                               TTI::TargetCostKind CostKind) {
65   assert(Ty->isIntegerTy());
66 
67   unsigned BitSize = Ty->getPrimitiveSizeInBits();
68   if (BitSize == 0)
69     return ~0U;
70 
71   // Sign-extend all constants to a multiple of 64-bit.
72   APInt ImmVal = Imm;
73   if (BitSize & 0x3f)
74     ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);
75 
76   // Split the constant into 64-bit chunks and calculate the cost for each
77   // chunk.
78   InstructionCost Cost = 0;
79   for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
80     APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
81     int64_t Val = Tmp.getSExtValue();
82     Cost += getIntImmCost(Val);
83   }
84   // We need at least one instruction to materialze the constant.
85   return std::max<InstructionCost>(1, Cost);
86 }
87 
88 InstructionCost AArch64TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
89                                                   const APInt &Imm, Type *Ty,
90                                                   TTI::TargetCostKind CostKind,
91                                                   Instruction *Inst) {
92   assert(Ty->isIntegerTy());
93 
94   unsigned BitSize = Ty->getPrimitiveSizeInBits();
95   // There is no cost model for constants with a bit size of 0. Return TCC_Free
96   // here, so that constant hoisting will ignore this constant.
97   if (BitSize == 0)
98     return TTI::TCC_Free;
99 
100   unsigned ImmIdx = ~0U;
101   switch (Opcode) {
102   default:
103     return TTI::TCC_Free;
104   case Instruction::GetElementPtr:
105     // Always hoist the base address of a GetElementPtr.
106     if (Idx == 0)
107       return 2 * TTI::TCC_Basic;
108     return TTI::TCC_Free;
109   case Instruction::Store:
110     ImmIdx = 0;
111     break;
112   case Instruction::Add:
113   case Instruction::Sub:
114   case Instruction::Mul:
115   case Instruction::UDiv:
116   case Instruction::SDiv:
117   case Instruction::URem:
118   case Instruction::SRem:
119   case Instruction::And:
120   case Instruction::Or:
121   case Instruction::Xor:
122   case Instruction::ICmp:
123     ImmIdx = 1;
124     break;
125   // Always return TCC_Free for the shift value of a shift instruction.
126   case Instruction::Shl:
127   case Instruction::LShr:
128   case Instruction::AShr:
129     if (Idx == 1)
130       return TTI::TCC_Free;
131     break;
132   case Instruction::Trunc:
133   case Instruction::ZExt:
134   case Instruction::SExt:
135   case Instruction::IntToPtr:
136   case Instruction::PtrToInt:
137   case Instruction::BitCast:
138   case Instruction::PHI:
139   case Instruction::Call:
140   case Instruction::Select:
141   case Instruction::Ret:
142   case Instruction::Load:
143     break;
144   }
145 
146   if (Idx == ImmIdx) {
147     int NumConstants = (BitSize + 63) / 64;
148     InstructionCost Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
149     return (Cost <= NumConstants * TTI::TCC_Basic)
150                ? static_cast<int>(TTI::TCC_Free)
151                : Cost;
152   }
153   return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
154 }
155 
156 InstructionCost
157 AArch64TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
158                                     const APInt &Imm, Type *Ty,
159                                     TTI::TargetCostKind CostKind) {
160   assert(Ty->isIntegerTy());
161 
162   unsigned BitSize = Ty->getPrimitiveSizeInBits();
163   // There is no cost model for constants with a bit size of 0. Return TCC_Free
164   // here, so that constant hoisting will ignore this constant.
165   if (BitSize == 0)
166     return TTI::TCC_Free;
167 
168   // Most (all?) AArch64 intrinsics do not support folding immediates into the
169   // selected instruction, so we compute the materialization cost for the
170   // immediate directly.
171   if (IID >= Intrinsic::aarch64_addg && IID <= Intrinsic::aarch64_udiv)
172     return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
173 
174   switch (IID) {
175   default:
176     return TTI::TCC_Free;
177   case Intrinsic::sadd_with_overflow:
178   case Intrinsic::uadd_with_overflow:
179   case Intrinsic::ssub_with_overflow:
180   case Intrinsic::usub_with_overflow:
181   case Intrinsic::smul_with_overflow:
182   case Intrinsic::umul_with_overflow:
183     if (Idx == 1) {
184       int NumConstants = (BitSize + 63) / 64;
185       InstructionCost Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
186       return (Cost <= NumConstants * TTI::TCC_Basic)
187                  ? static_cast<int>(TTI::TCC_Free)
188                  : Cost;
189     }
190     break;
191   case Intrinsic::experimental_stackmap:
192     if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
193       return TTI::TCC_Free;
194     break;
195   case Intrinsic::experimental_patchpoint_void:
196   case Intrinsic::experimental_patchpoint_i64:
197     if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
198       return TTI::TCC_Free;
199     break;
200   case Intrinsic::experimental_gc_statepoint:
201     if ((Idx < 5) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
202       return TTI::TCC_Free;
203     break;
204   }
205   return AArch64TTIImpl::getIntImmCost(Imm, Ty, CostKind);
206 }
207 
208 TargetTransformInfo::PopcntSupportKind
209 AArch64TTIImpl::getPopcntSupport(unsigned TyWidth) {
210   assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
211   if (TyWidth == 32 || TyWidth == 64)
212     return TTI::PSK_FastHardware;
213   // TODO: AArch64TargetLowering::LowerCTPOP() supports 128bit popcount.
214   return TTI::PSK_Software;
215 }
216 
217 InstructionCost
218 AArch64TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
219                                       TTI::TargetCostKind CostKind) {
220   auto *RetTy = ICA.getReturnType();
221   switch (ICA.getID()) {
222   case Intrinsic::umin:
223   case Intrinsic::umax: {
224     auto LT = TLI->getTypeLegalizationCost(DL, RetTy);
225     // umin(x,y) -> sub(x,usubsat(x,y))
226     // umax(x,y) -> add(x,usubsat(y,x))
227     if (LT.second == MVT::v2i64)
228       return LT.first * 2;
229     LLVM_FALLTHROUGH;
230   }
231   case Intrinsic::smin:
232   case Intrinsic::smax: {
233     static const auto ValidMinMaxTys = {MVT::v8i8,  MVT::v16i8, MVT::v4i16,
234                                         MVT::v8i16, MVT::v2i32, MVT::v4i32};
235     auto LT = TLI->getTypeLegalizationCost(DL, RetTy);
236     if (any_of(ValidMinMaxTys, [&LT](MVT M) { return M == LT.second; }))
237       return LT.first;
238     break;
239   }
240   case Intrinsic::sadd_sat:
241   case Intrinsic::ssub_sat:
242   case Intrinsic::uadd_sat:
243   case Intrinsic::usub_sat: {
244     static const auto ValidSatTys = {MVT::v8i8,  MVT::v16i8, MVT::v4i16,
245                                      MVT::v8i16, MVT::v2i32, MVT::v4i32,
246                                      MVT::v2i64};
247     auto LT = TLI->getTypeLegalizationCost(DL, RetTy);
248     // This is a base cost of 1 for the vadd, plus 3 extract shifts if we
249     // need to extend the type, as it uses shr(qadd(shl, shl)).
250     unsigned Instrs =
251         LT.second.getScalarSizeInBits() == RetTy->getScalarSizeInBits() ? 1 : 4;
252     if (any_of(ValidSatTys, [&LT](MVT M) { return M == LT.second; }))
253       return LT.first * Instrs;
254     break;
255   }
256   case Intrinsic::abs: {
257     static const auto ValidAbsTys = {MVT::v8i8,  MVT::v16i8, MVT::v4i16,
258                                      MVT::v8i16, MVT::v2i32, MVT::v4i32,
259                                      MVT::v2i64};
260     auto LT = TLI->getTypeLegalizationCost(DL, RetTy);
261     if (any_of(ValidAbsTys, [&LT](MVT M) { return M == LT.second; }))
262       return LT.first;
263     break;
264   }
265   case Intrinsic::experimental_stepvector: {
266     InstructionCost Cost = 1; // Cost of the `index' instruction
267     auto LT = TLI->getTypeLegalizationCost(DL, RetTy);
268     // Legalisation of illegal vectors involves an `index' instruction plus
269     // (LT.first - 1) vector adds.
270     if (LT.first > 1) {
271       Type *LegalVTy = EVT(LT.second).getTypeForEVT(RetTy->getContext());
272       InstructionCost AddCost =
273           getArithmeticInstrCost(Instruction::Add, LegalVTy, CostKind);
274       Cost += AddCost * (LT.first - 1);
275     }
276     return Cost;
277   }
278   default:
279     break;
280   }
281   return BaseT::getIntrinsicInstrCost(ICA, CostKind);
282 }
283 
284 /// The function will remove redundant reinterprets casting in the presence
285 /// of the control flow
286 static Optional<Instruction *> processPhiNode(InstCombiner &IC,
287                                               IntrinsicInst &II) {
288   SmallVector<Instruction *, 32> Worklist;
289   auto RequiredType = II.getType();
290 
291   auto *PN = dyn_cast<PHINode>(II.getArgOperand(0));
292   assert(PN && "Expected Phi Node!");
293 
294   // Don't create a new Phi unless we can remove the old one.
295   if (!PN->hasOneUse())
296     return None;
297 
298   for (Value *IncValPhi : PN->incoming_values()) {
299     auto *Reinterpret = dyn_cast<IntrinsicInst>(IncValPhi);
300     if (!Reinterpret ||
301         Reinterpret->getIntrinsicID() !=
302             Intrinsic::aarch64_sve_convert_to_svbool ||
303         RequiredType != Reinterpret->getArgOperand(0)->getType())
304       return None;
305   }
306 
307   // Create the new Phi
308   LLVMContext &Ctx = PN->getContext();
309   IRBuilder<> Builder(Ctx);
310   Builder.SetInsertPoint(PN);
311   PHINode *NPN = Builder.CreatePHI(RequiredType, PN->getNumIncomingValues());
312   Worklist.push_back(PN);
313 
314   for (unsigned I = 0; I < PN->getNumIncomingValues(); I++) {
315     auto *Reinterpret = cast<Instruction>(PN->getIncomingValue(I));
316     NPN->addIncoming(Reinterpret->getOperand(0), PN->getIncomingBlock(I));
317     Worklist.push_back(Reinterpret);
318   }
319 
320   // Cleanup Phi Node and reinterprets
321   return IC.replaceInstUsesWith(II, NPN);
322 }
323 
324 static Optional<Instruction *> instCombineConvertFromSVBool(InstCombiner &IC,
325                                                             IntrinsicInst &II) {
326   // If the reinterpret instruction operand is a PHI Node
327   if (isa<PHINode>(II.getArgOperand(0)))
328     return processPhiNode(IC, II);
329 
330   SmallVector<Instruction *, 32> CandidatesForRemoval;
331   Value *Cursor = II.getOperand(0), *EarliestReplacement = nullptr;
332 
333   const auto *IVTy = cast<VectorType>(II.getType());
334 
335   // Walk the chain of conversions.
336   while (Cursor) {
337     // If the type of the cursor has fewer lanes than the final result, zeroing
338     // must take place, which breaks the equivalence chain.
339     const auto *CursorVTy = cast<VectorType>(Cursor->getType());
340     if (CursorVTy->getElementCount().getKnownMinValue() <
341         IVTy->getElementCount().getKnownMinValue())
342       break;
343 
344     // If the cursor has the same type as I, it is a viable replacement.
345     if (Cursor->getType() == IVTy)
346       EarliestReplacement = Cursor;
347 
348     auto *IntrinsicCursor = dyn_cast<IntrinsicInst>(Cursor);
349 
350     // If this is not an SVE conversion intrinsic, this is the end of the chain.
351     if (!IntrinsicCursor || !(IntrinsicCursor->getIntrinsicID() ==
352                                   Intrinsic::aarch64_sve_convert_to_svbool ||
353                               IntrinsicCursor->getIntrinsicID() ==
354                                   Intrinsic::aarch64_sve_convert_from_svbool))
355       break;
356 
357     CandidatesForRemoval.insert(CandidatesForRemoval.begin(), IntrinsicCursor);
358     Cursor = IntrinsicCursor->getOperand(0);
359   }
360 
361   // If no viable replacement in the conversion chain was found, there is
362   // nothing to do.
363   if (!EarliestReplacement)
364     return None;
365 
366   return IC.replaceInstUsesWith(II, EarliestReplacement);
367 }
368 
369 static Optional<Instruction *> instCombineSVEDup(InstCombiner &IC,
370                                                  IntrinsicInst &II) {
371   IntrinsicInst *Pg = dyn_cast<IntrinsicInst>(II.getArgOperand(1));
372   if (!Pg)
373     return None;
374 
375   if (Pg->getIntrinsicID() != Intrinsic::aarch64_sve_ptrue)
376     return None;
377 
378   const auto PTruePattern =
379       cast<ConstantInt>(Pg->getOperand(0))->getZExtValue();
380   if (PTruePattern != AArch64SVEPredPattern::vl1)
381     return None;
382 
383   // The intrinsic is inserting into lane zero so use an insert instead.
384   auto *IdxTy = Type::getInt64Ty(II.getContext());
385   auto *Insert = InsertElementInst::Create(
386       II.getArgOperand(0), II.getArgOperand(2), ConstantInt::get(IdxTy, 0));
387   Insert->insertBefore(&II);
388   Insert->takeName(&II);
389 
390   return IC.replaceInstUsesWith(II, Insert);
391 }
392 
393 static Optional<Instruction *> instCombineSVELast(InstCombiner &IC,
394                                                   IntrinsicInst &II) {
395   Value *Pg = II.getArgOperand(0);
396   Value *Vec = II.getArgOperand(1);
397   bool IsAfter = II.getIntrinsicID() == Intrinsic::aarch64_sve_lasta;
398 
399   auto *C = dyn_cast<Constant>(Pg);
400   if (IsAfter && C && C->isNullValue()) {
401     // The intrinsic is extracting lane 0 so use an extract instead.
402     auto *IdxTy = Type::getInt64Ty(II.getContext());
403     auto *Extract = ExtractElementInst::Create(Vec, ConstantInt::get(IdxTy, 0));
404     Extract->insertBefore(&II);
405     Extract->takeName(&II);
406     return IC.replaceInstUsesWith(II, Extract);
407   }
408 
409   auto *IntrPG = dyn_cast<IntrinsicInst>(Pg);
410   if (!IntrPG)
411     return None;
412 
413   if (IntrPG->getIntrinsicID() != Intrinsic::aarch64_sve_ptrue)
414     return None;
415 
416   const auto PTruePattern =
417       cast<ConstantInt>(IntrPG->getOperand(0))->getZExtValue();
418 
419   // Can the intrinsic's predicate be converted to a known constant index?
420   unsigned Idx;
421   switch (PTruePattern) {
422   default:
423     return None;
424   case AArch64SVEPredPattern::vl1:
425     Idx = 0;
426     break;
427   case AArch64SVEPredPattern::vl2:
428     Idx = 1;
429     break;
430   case AArch64SVEPredPattern::vl3:
431     Idx = 2;
432     break;
433   case AArch64SVEPredPattern::vl4:
434     Idx = 3;
435     break;
436   case AArch64SVEPredPattern::vl5:
437     Idx = 4;
438     break;
439   case AArch64SVEPredPattern::vl6:
440     Idx = 5;
441     break;
442   case AArch64SVEPredPattern::vl7:
443     Idx = 6;
444     break;
445   case AArch64SVEPredPattern::vl8:
446     Idx = 7;
447     break;
448   case AArch64SVEPredPattern::vl16:
449     Idx = 15;
450     break;
451   }
452 
453   // Increment the index if extracting the element after the last active
454   // predicate element.
455   if (IsAfter)
456     ++Idx;
457 
458   // Ignore extracts whose index is larger than the known minimum vector
459   // length. NOTE: This is an artificial constraint where we prefer to
460   // maintain what the user asked for until an alternative is proven faster.
461   auto *PgVTy = cast<ScalableVectorType>(Pg->getType());
462   if (Idx >= PgVTy->getMinNumElements())
463     return None;
464 
465   // The intrinsic is extracting a fixed lane so use an extract instead.
466   auto *IdxTy = Type::getInt64Ty(II.getContext());
467   auto *Extract = ExtractElementInst::Create(Vec, ConstantInt::get(IdxTy, Idx));
468   Extract->insertBefore(&II);
469   Extract->takeName(&II);
470   return IC.replaceInstUsesWith(II, Extract);
471 }
472 
473 Optional<Instruction *>
474 AArch64TTIImpl::instCombineIntrinsic(InstCombiner &IC,
475                                      IntrinsicInst &II) const {
476   Intrinsic::ID IID = II.getIntrinsicID();
477   switch (IID) {
478   default:
479     break;
480   case Intrinsic::aarch64_sve_convert_from_svbool:
481     return instCombineConvertFromSVBool(IC, II);
482   case Intrinsic::aarch64_sve_dup:
483     return instCombineSVEDup(IC, II);
484   case Intrinsic::aarch64_sve_lasta:
485   case Intrinsic::aarch64_sve_lastb:
486     return instCombineSVELast(IC, II);
487   }
488 
489   return None;
490 }
491 
492 bool AArch64TTIImpl::isWideningInstruction(Type *DstTy, unsigned Opcode,
493                                            ArrayRef<const Value *> Args) {
494 
495   // A helper that returns a vector type from the given type. The number of
496   // elements in type Ty determine the vector width.
497   auto toVectorTy = [&](Type *ArgTy) {
498     return VectorType::get(ArgTy->getScalarType(),
499                            cast<VectorType>(DstTy)->getElementCount());
500   };
501 
502   // Exit early if DstTy is not a vector type whose elements are at least
503   // 16-bits wide.
504   if (!DstTy->isVectorTy() || DstTy->getScalarSizeInBits() < 16)
505     return false;
506 
507   // Determine if the operation has a widening variant. We consider both the
508   // "long" (e.g., usubl) and "wide" (e.g., usubw) versions of the
509   // instructions.
510   //
511   // TODO: Add additional widening operations (e.g., mul, shl, etc.) once we
512   //       verify that their extending operands are eliminated during code
513   //       generation.
514   switch (Opcode) {
515   case Instruction::Add: // UADDL(2), SADDL(2), UADDW(2), SADDW(2).
516   case Instruction::Sub: // USUBL(2), SSUBL(2), USUBW(2), SSUBW(2).
517     break;
518   default:
519     return false;
520   }
521 
522   // To be a widening instruction (either the "wide" or "long" versions), the
523   // second operand must be a sign- or zero extend having a single user. We
524   // only consider extends having a single user because they may otherwise not
525   // be eliminated.
526   if (Args.size() != 2 ||
527       (!isa<SExtInst>(Args[1]) && !isa<ZExtInst>(Args[1])) ||
528       !Args[1]->hasOneUse())
529     return false;
530   auto *Extend = cast<CastInst>(Args[1]);
531 
532   // Legalize the destination type and ensure it can be used in a widening
533   // operation.
534   auto DstTyL = TLI->getTypeLegalizationCost(DL, DstTy);
535   unsigned DstElTySize = DstTyL.second.getScalarSizeInBits();
536   if (!DstTyL.second.isVector() || DstElTySize != DstTy->getScalarSizeInBits())
537     return false;
538 
539   // Legalize the source type and ensure it can be used in a widening
540   // operation.
541   auto *SrcTy = toVectorTy(Extend->getSrcTy());
542   auto SrcTyL = TLI->getTypeLegalizationCost(DL, SrcTy);
543   unsigned SrcElTySize = SrcTyL.second.getScalarSizeInBits();
544   if (!SrcTyL.second.isVector() || SrcElTySize != SrcTy->getScalarSizeInBits())
545     return false;
546 
547   // Get the total number of vector elements in the legalized types.
548   InstructionCost NumDstEls =
549       DstTyL.first * DstTyL.second.getVectorMinNumElements();
550   InstructionCost NumSrcEls =
551       SrcTyL.first * SrcTyL.second.getVectorMinNumElements();
552 
553   // Return true if the legalized types have the same number of vector elements
554   // and the destination element type size is twice that of the source type.
555   return NumDstEls == NumSrcEls && 2 * SrcElTySize == DstElTySize;
556 }
557 
558 InstructionCost AArch64TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
559                                                  Type *Src,
560                                                  TTI::CastContextHint CCH,
561                                                  TTI::TargetCostKind CostKind,
562                                                  const Instruction *I) {
563   int ISD = TLI->InstructionOpcodeToISD(Opcode);
564   assert(ISD && "Invalid opcode");
565 
566   // If the cast is observable, and it is used by a widening instruction (e.g.,
567   // uaddl, saddw, etc.), it may be free.
568   if (I && I->hasOneUse()) {
569     auto *SingleUser = cast<Instruction>(*I->user_begin());
570     SmallVector<const Value *, 4> Operands(SingleUser->operand_values());
571     if (isWideningInstruction(Dst, SingleUser->getOpcode(), Operands)) {
572       // If the cast is the second operand, it is free. We will generate either
573       // a "wide" or "long" version of the widening instruction.
574       if (I == SingleUser->getOperand(1))
575         return 0;
576       // If the cast is not the second operand, it will be free if it looks the
577       // same as the second operand. In this case, we will generate a "long"
578       // version of the widening instruction.
579       if (auto *Cast = dyn_cast<CastInst>(SingleUser->getOperand(1)))
580         if (I->getOpcode() == unsigned(Cast->getOpcode()) &&
581             cast<CastInst>(I)->getSrcTy() == Cast->getSrcTy())
582           return 0;
583     }
584   }
585 
586   // TODO: Allow non-throughput costs that aren't binary.
587   auto AdjustCost = [&CostKind](InstructionCost Cost) -> InstructionCost {
588     if (CostKind != TTI::TCK_RecipThroughput)
589       return Cost == 0 ? 0 : 1;
590     return Cost;
591   };
592 
593   EVT SrcTy = TLI->getValueType(DL, Src);
594   EVT DstTy = TLI->getValueType(DL, Dst);
595 
596   if (!SrcTy.isSimple() || !DstTy.isSimple())
597     return AdjustCost(
598         BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
599 
600   static const TypeConversionCostTblEntry
601   ConversionTbl[] = {
602     { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32,  1 },
603     { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64,  0 },
604     { ISD::TRUNCATE, MVT::v8i8,  MVT::v8i32,  3 },
605     { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 6 },
606 
607     // Truncations on nxvmiN
608     { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i16, 1 },
609     { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i32, 1 },
610     { ISD::TRUNCATE, MVT::nxv2i1, MVT::nxv2i64, 1 },
611     { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i16, 1 },
612     { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i32, 1 },
613     { ISD::TRUNCATE, MVT::nxv4i1, MVT::nxv4i64, 2 },
614     { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i16, 1 },
615     { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i32, 3 },
616     { ISD::TRUNCATE, MVT::nxv8i1, MVT::nxv8i64, 5 },
617     { ISD::TRUNCATE, MVT::nxv2i16, MVT::nxv2i32, 1 },
618     { ISD::TRUNCATE, MVT::nxv2i32, MVT::nxv2i64, 1 },
619     { ISD::TRUNCATE, MVT::nxv4i16, MVT::nxv4i32, 1 },
620     { ISD::TRUNCATE, MVT::nxv4i32, MVT::nxv4i64, 2 },
621     { ISD::TRUNCATE, MVT::nxv8i16, MVT::nxv8i32, 3 },
622     { ISD::TRUNCATE, MVT::nxv8i32, MVT::nxv8i64, 6 },
623 
624     // The number of shll instructions for the extension.
625     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
626     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
627     { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32, 2 },
628     { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32, 2 },
629     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
630     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
631     { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16, 2 },
632     { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16, 2 },
633     { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i8,  7 },
634     { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i8,  7 },
635     { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i16, 6 },
636     { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i16, 6 },
637     { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
638     { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
639     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
640     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
641 
642     // LowerVectorINT_TO_FP:
643     { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
644     { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
645     { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
646     { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
647     { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
648     { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
649 
650     // Complex: to v2f32
651     { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
652     { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
653     { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },
654     { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
655     { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
656     { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },
657 
658     // Complex: to v4f32
659     { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8,  4 },
660     { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
661     { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8,  3 },
662     { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
663 
664     // Complex: to v8f32
665     { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8,  10 },
666     { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
667     { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8,  10 },
668     { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
669 
670     // Complex: to v16f32
671     { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },
672     { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },
673 
674     // Complex: to v2f64
675     { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
676     { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
677     { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
678     { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
679     { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
680     { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
681 
682 
683     // LowerVectorFP_TO_INT
684     { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f32, 1 },
685     { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
686     { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
687     { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 },
688     { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
689     { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },
690 
691     // Complex, from v2f32: legal type is v2i32 (no cost) or v2i64 (1 ext).
692     { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 2 },
693     { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 1 },
694     { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f32, 1 },
695     { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 2 },
696     { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 1 },
697     { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f32, 1 },
698 
699     // Complex, from v4f32: legal type is v4i16, 1 narrowing => ~2
700     { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 },
701     { ISD::FP_TO_SINT, MVT::v4i8,  MVT::v4f32, 2 },
702     { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 },
703     { ISD::FP_TO_UINT, MVT::v4i8,  MVT::v4f32, 2 },
704 
705     // Complex, from nxv2f32.
706     { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f32, 1 },
707     { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f32, 1 },
708     { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f32, 1 },
709     { ISD::FP_TO_SINT, MVT::nxv2i8,  MVT::nxv2f32, 1 },
710     { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f32, 1 },
711     { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f32, 1 },
712     { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f32, 1 },
713     { ISD::FP_TO_UINT, MVT::nxv2i8,  MVT::nxv2f32, 1 },
714 
715     // Complex, from v2f64: legal type is v2i32, 1 narrowing => ~2.
716     { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 },
717     { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f64, 2 },
718     { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f64, 2 },
719     { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 },
720     { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f64, 2 },
721     { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f64, 2 },
722 
723     // Complex, from nxv2f64.
724     { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f64, 1 },
725     { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f64, 1 },
726     { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f64, 1 },
727     { ISD::FP_TO_SINT, MVT::nxv2i8,  MVT::nxv2f64, 1 },
728     { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f64, 1 },
729     { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f64, 1 },
730     { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f64, 1 },
731     { ISD::FP_TO_UINT, MVT::nxv2i8,  MVT::nxv2f64, 1 },
732 
733     // Complex, from nxv4f32.
734     { ISD::FP_TO_SINT, MVT::nxv4i64, MVT::nxv4f32, 4 },
735     { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f32, 1 },
736     { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f32, 1 },
737     { ISD::FP_TO_SINT, MVT::nxv4i8,  MVT::nxv4f32, 1 },
738     { ISD::FP_TO_UINT, MVT::nxv4i64, MVT::nxv4f32, 4 },
739     { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f32, 1 },
740     { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f32, 1 },
741     { ISD::FP_TO_UINT, MVT::nxv4i8,  MVT::nxv4f32, 1 },
742 
743     // Complex, from nxv8f64. Illegal -> illegal conversions not required.
744     { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f64, 7 },
745     { ISD::FP_TO_SINT, MVT::nxv8i8,  MVT::nxv8f64, 7 },
746     { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f64, 7 },
747     { ISD::FP_TO_UINT, MVT::nxv8i8,  MVT::nxv8f64, 7 },
748 
749     // Complex, from nxv4f64. Illegal -> illegal conversions not required.
750     { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f64, 3 },
751     { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f64, 3 },
752     { ISD::FP_TO_SINT, MVT::nxv4i8,  MVT::nxv4f64, 3 },
753     { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f64, 3 },
754     { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f64, 3 },
755     { ISD::FP_TO_UINT, MVT::nxv4i8,  MVT::nxv4f64, 3 },
756 
757     // Complex, from nxv8f32. Illegal -> illegal conversions not required.
758     { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f32, 3 },
759     { ISD::FP_TO_SINT, MVT::nxv8i8,  MVT::nxv8f32, 3 },
760     { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f32, 3 },
761     { ISD::FP_TO_UINT, MVT::nxv8i8,  MVT::nxv8f32, 3 },
762 
763     // Complex, from nxv8f16.
764     { ISD::FP_TO_SINT, MVT::nxv8i64, MVT::nxv8f16, 10 },
765     { ISD::FP_TO_SINT, MVT::nxv8i32, MVT::nxv8f16, 4 },
766     { ISD::FP_TO_SINT, MVT::nxv8i16, MVT::nxv8f16, 1 },
767     { ISD::FP_TO_SINT, MVT::nxv8i8,  MVT::nxv8f16, 1 },
768     { ISD::FP_TO_UINT, MVT::nxv8i64, MVT::nxv8f16, 10 },
769     { ISD::FP_TO_UINT, MVT::nxv8i32, MVT::nxv8f16, 4 },
770     { ISD::FP_TO_UINT, MVT::nxv8i16, MVT::nxv8f16, 1 },
771     { ISD::FP_TO_UINT, MVT::nxv8i8,  MVT::nxv8f16, 1 },
772 
773     // Complex, from nxv4f16.
774     { ISD::FP_TO_SINT, MVT::nxv4i64, MVT::nxv4f16, 4 },
775     { ISD::FP_TO_SINT, MVT::nxv4i32, MVT::nxv4f16, 1 },
776     { ISD::FP_TO_SINT, MVT::nxv4i16, MVT::nxv4f16, 1 },
777     { ISD::FP_TO_SINT, MVT::nxv4i8,  MVT::nxv4f16, 1 },
778     { ISD::FP_TO_UINT, MVT::nxv4i64, MVT::nxv4f16, 4 },
779     { ISD::FP_TO_UINT, MVT::nxv4i32, MVT::nxv4f16, 1 },
780     { ISD::FP_TO_UINT, MVT::nxv4i16, MVT::nxv4f16, 1 },
781     { ISD::FP_TO_UINT, MVT::nxv4i8,  MVT::nxv4f16, 1 },
782 
783     // Complex, from nxv2f16.
784     { ISD::FP_TO_SINT, MVT::nxv2i64, MVT::nxv2f16, 1 },
785     { ISD::FP_TO_SINT, MVT::nxv2i32, MVT::nxv2f16, 1 },
786     { ISD::FP_TO_SINT, MVT::nxv2i16, MVT::nxv2f16, 1 },
787     { ISD::FP_TO_SINT, MVT::nxv2i8,  MVT::nxv2f16, 1 },
788     { ISD::FP_TO_UINT, MVT::nxv2i64, MVT::nxv2f16, 1 },
789     { ISD::FP_TO_UINT, MVT::nxv2i32, MVT::nxv2f16, 1 },
790     { ISD::FP_TO_UINT, MVT::nxv2i16, MVT::nxv2f16, 1 },
791     { ISD::FP_TO_UINT, MVT::nxv2i8,  MVT::nxv2f16, 1 },
792 
793     // Truncate from nxvmf32 to nxvmf16.
794     { ISD::FP_ROUND, MVT::nxv2f16, MVT::nxv2f32, 1 },
795     { ISD::FP_ROUND, MVT::nxv4f16, MVT::nxv4f32, 1 },
796     { ISD::FP_ROUND, MVT::nxv8f16, MVT::nxv8f32, 3 },
797 
798     // Truncate from nxvmf64 to nxvmf16.
799     { ISD::FP_ROUND, MVT::nxv2f16, MVT::nxv2f64, 1 },
800     { ISD::FP_ROUND, MVT::nxv4f16, MVT::nxv4f64, 3 },
801     { ISD::FP_ROUND, MVT::nxv8f16, MVT::nxv8f64, 7 },
802 
803     // Truncate from nxvmf64 to nxvmf32.
804     { ISD::FP_ROUND, MVT::nxv2f32, MVT::nxv2f64, 1 },
805     { ISD::FP_ROUND, MVT::nxv4f32, MVT::nxv4f64, 3 },
806     { ISD::FP_ROUND, MVT::nxv8f32, MVT::nxv8f64, 6 },
807 
808     // Extend from nxvmf16 to nxvmf32.
809     { ISD::FP_EXTEND, MVT::nxv2f32, MVT::nxv2f16, 1},
810     { ISD::FP_EXTEND, MVT::nxv4f32, MVT::nxv4f16, 1},
811     { ISD::FP_EXTEND, MVT::nxv8f32, MVT::nxv8f16, 2},
812 
813     // Extend from nxvmf16 to nxvmf64.
814     { ISD::FP_EXTEND, MVT::nxv2f64, MVT::nxv2f16, 1},
815     { ISD::FP_EXTEND, MVT::nxv4f64, MVT::nxv4f16, 2},
816     { ISD::FP_EXTEND, MVT::nxv8f64, MVT::nxv8f16, 4},
817 
818     // Extend from nxvmf32 to nxvmf64.
819     { ISD::FP_EXTEND, MVT::nxv2f64, MVT::nxv2f32, 1},
820     { ISD::FP_EXTEND, MVT::nxv4f64, MVT::nxv4f32, 2},
821     { ISD::FP_EXTEND, MVT::nxv8f64, MVT::nxv8f32, 6},
822 
823   };
824 
825   if (const auto *Entry = ConvertCostTableLookup(ConversionTbl, ISD,
826                                                  DstTy.getSimpleVT(),
827                                                  SrcTy.getSimpleVT()))
828     return AdjustCost(Entry->Cost);
829 
830   return AdjustCost(
831       BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
832 }
833 
834 InstructionCost AArch64TTIImpl::getExtractWithExtendCost(unsigned Opcode,
835                                                          Type *Dst,
836                                                          VectorType *VecTy,
837                                                          unsigned Index) {
838 
839   // Make sure we were given a valid extend opcode.
840   assert((Opcode == Instruction::SExt || Opcode == Instruction::ZExt) &&
841          "Invalid opcode");
842 
843   // We are extending an element we extract from a vector, so the source type
844   // of the extend is the element type of the vector.
845   auto *Src = VecTy->getElementType();
846 
847   // Sign- and zero-extends are for integer types only.
848   assert(isa<IntegerType>(Dst) && isa<IntegerType>(Src) && "Invalid type");
849 
850   // Get the cost for the extract. We compute the cost (if any) for the extend
851   // below.
852   InstructionCost Cost =
853       getVectorInstrCost(Instruction::ExtractElement, VecTy, Index);
854 
855   // Legalize the types.
856   auto VecLT = TLI->getTypeLegalizationCost(DL, VecTy);
857   auto DstVT = TLI->getValueType(DL, Dst);
858   auto SrcVT = TLI->getValueType(DL, Src);
859   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
860 
861   // If the resulting type is still a vector and the destination type is legal,
862   // we may get the extension for free. If not, get the default cost for the
863   // extend.
864   if (!VecLT.second.isVector() || !TLI->isTypeLegal(DstVT))
865     return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None,
866                                    CostKind);
867 
868   // The destination type should be larger than the element type. If not, get
869   // the default cost for the extend.
870   if (DstVT.getFixedSizeInBits() < SrcVT.getFixedSizeInBits())
871     return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None,
872                                    CostKind);
873 
874   switch (Opcode) {
875   default:
876     llvm_unreachable("Opcode should be either SExt or ZExt");
877 
878   // For sign-extends, we only need a smov, which performs the extension
879   // automatically.
880   case Instruction::SExt:
881     return Cost;
882 
883   // For zero-extends, the extend is performed automatically by a umov unless
884   // the destination type is i64 and the element type is i8 or i16.
885   case Instruction::ZExt:
886     if (DstVT.getSizeInBits() != 64u || SrcVT.getSizeInBits() == 32u)
887       return Cost;
888   }
889 
890   // If we are unable to perform the extend for free, get the default cost.
891   return Cost + getCastInstrCost(Opcode, Dst, Src, TTI::CastContextHint::None,
892                                  CostKind);
893 }
894 
895 InstructionCost AArch64TTIImpl::getCFInstrCost(unsigned Opcode,
896                                                TTI::TargetCostKind CostKind,
897                                                const Instruction *I) {
898   if (CostKind != TTI::TCK_RecipThroughput)
899     return Opcode == Instruction::PHI ? 0 : 1;
900   assert(CostKind == TTI::TCK_RecipThroughput && "unexpected CostKind");
901   // Branches are assumed to be predicted.
902   return 0;
903 }
904 
905 InstructionCost AArch64TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
906                                                    unsigned Index) {
907   assert(Val->isVectorTy() && "This must be a vector type");
908 
909   if (Index != -1U) {
910     // Legalize the type.
911     std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);
912 
913     // This type is legalized to a scalar type.
914     if (!LT.second.isVector())
915       return 0;
916 
917     // The type may be split. Normalize the index to the new type.
918     unsigned Width = LT.second.getVectorNumElements();
919     Index = Index % Width;
920 
921     // The element at index zero is already inside the vector.
922     if (Index == 0)
923       return 0;
924   }
925 
926   // All other insert/extracts cost this much.
927   return ST->getVectorInsertExtractBaseCost();
928 }
929 
930 InstructionCost AArch64TTIImpl::getArithmeticInstrCost(
931     unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
932     TTI::OperandValueKind Opd1Info, TTI::OperandValueKind Opd2Info,
933     TTI::OperandValueProperties Opd1PropInfo,
934     TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
935     const Instruction *CxtI) {
936   // TODO: Handle more cost kinds.
937   if (CostKind != TTI::TCK_RecipThroughput)
938     return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
939                                          Opd2Info, Opd1PropInfo,
940                                          Opd2PropInfo, Args, CxtI);
941 
942   // Legalize the type.
943   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
944 
945   // If the instruction is a widening instruction (e.g., uaddl, saddw, etc.),
946   // add in the widening overhead specified by the sub-target. Since the
947   // extends feeding widening instructions are performed automatically, they
948   // aren't present in the generated code and have a zero cost. By adding a
949   // widening overhead here, we attach the total cost of the combined operation
950   // to the widening instruction.
951   InstructionCost Cost = 0;
952   if (isWideningInstruction(Ty, Opcode, Args))
953     Cost += ST->getWideningBaseCost();
954 
955   int ISD = TLI->InstructionOpcodeToISD(Opcode);
956 
957   switch (ISD) {
958   default:
959     return Cost + BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
960                                                 Opd2Info,
961                                                 Opd1PropInfo, Opd2PropInfo);
962   case ISD::SDIV:
963     if (Opd2Info == TargetTransformInfo::OK_UniformConstantValue &&
964         Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
965       // On AArch64, scalar signed division by constants power-of-two are
966       // normally expanded to the sequence ADD + CMP + SELECT + SRA.
967       // The OperandValue properties many not be same as that of previous
968       // operation; conservatively assume OP_None.
969       Cost += getArithmeticInstrCost(Instruction::Add, Ty, CostKind,
970                                      Opd1Info, Opd2Info,
971                                      TargetTransformInfo::OP_None,
972                                      TargetTransformInfo::OP_None);
973       Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind,
974                                      Opd1Info, Opd2Info,
975                                      TargetTransformInfo::OP_None,
976                                      TargetTransformInfo::OP_None);
977       Cost += getArithmeticInstrCost(Instruction::Select, Ty, CostKind,
978                                      Opd1Info, Opd2Info,
979                                      TargetTransformInfo::OP_None,
980                                      TargetTransformInfo::OP_None);
981       Cost += getArithmeticInstrCost(Instruction::AShr, Ty, CostKind,
982                                      Opd1Info, Opd2Info,
983                                      TargetTransformInfo::OP_None,
984                                      TargetTransformInfo::OP_None);
985       return Cost;
986     }
987     LLVM_FALLTHROUGH;
988   case ISD::UDIV:
989     if (Opd2Info == TargetTransformInfo::OK_UniformConstantValue) {
990       auto VT = TLI->getValueType(DL, Ty);
991       if (TLI->isOperationLegalOrCustom(ISD::MULHU, VT)) {
992         // Vector signed division by constant are expanded to the
993         // sequence MULHS + ADD/SUB + SRA + SRL + ADD, and unsigned division
994         // to MULHS + SUB + SRL + ADD + SRL.
995         InstructionCost MulCost = getArithmeticInstrCost(
996             Instruction::Mul, Ty, CostKind, Opd1Info, Opd2Info,
997             TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
998         InstructionCost AddCost = getArithmeticInstrCost(
999             Instruction::Add, Ty, CostKind, Opd1Info, Opd2Info,
1000             TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
1001         InstructionCost ShrCost = getArithmeticInstrCost(
1002             Instruction::AShr, Ty, CostKind, Opd1Info, Opd2Info,
1003             TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
1004         return MulCost * 2 + AddCost * 2 + ShrCost * 2 + 1;
1005       }
1006     }
1007 
1008     Cost += BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
1009                                           Opd2Info,
1010                                           Opd1PropInfo, Opd2PropInfo);
1011     if (Ty->isVectorTy()) {
1012       // On AArch64, vector divisions are not supported natively and are
1013       // expanded into scalar divisions of each pair of elements.
1014       Cost += getArithmeticInstrCost(Instruction::ExtractElement, Ty, CostKind,
1015                                      Opd1Info, Opd2Info, Opd1PropInfo,
1016                                      Opd2PropInfo);
1017       Cost += getArithmeticInstrCost(Instruction::InsertElement, Ty, CostKind,
1018                                      Opd1Info, Opd2Info, Opd1PropInfo,
1019                                      Opd2PropInfo);
1020       // TODO: if one of the arguments is scalar, then it's not necessary to
1021       // double the cost of handling the vector elements.
1022       Cost += Cost;
1023     }
1024     return Cost;
1025 
1026   case ISD::MUL:
1027     if (LT.second != MVT::v2i64)
1028       return (Cost + 1) * LT.first;
1029     // Since we do not have a MUL.2d instruction, a mul <2 x i64> is expensive
1030     // as elements are extracted from the vectors and the muls scalarized.
1031     // As getScalarizationOverhead is a bit too pessimistic, we estimate the
1032     // cost for a i64 vector directly here, which is:
1033     // - four i64 extracts,
1034     // - two i64 inserts, and
1035     // - two muls.
1036     // So, for a v2i64 with LT.First = 1 the cost is 8, and for a v4i64 with
1037     // LT.first = 2 the cost is 16.
1038     return LT.first * 8;
1039   case ISD::ADD:
1040   case ISD::XOR:
1041   case ISD::OR:
1042   case ISD::AND:
1043     // These nodes are marked as 'custom' for combining purposes only.
1044     // We know that they are legal. See LowerAdd in ISelLowering.
1045     return (Cost + 1) * LT.first;
1046 
1047   case ISD::FADD:
1048     // These nodes are marked as 'custom' just to lower them to SVE.
1049     // We know said lowering will incur no additional cost.
1050     if (isa<FixedVectorType>(Ty) && !Ty->getScalarType()->isFP128Ty())
1051       return (Cost + 2) * LT.first;
1052 
1053     return Cost + BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
1054                                                 Opd2Info,
1055                                                 Opd1PropInfo, Opd2PropInfo);
1056   }
1057 }
1058 
1059 InstructionCost AArch64TTIImpl::getAddressComputationCost(Type *Ty,
1060                                                           ScalarEvolution *SE,
1061                                                           const SCEV *Ptr) {
1062   // Address computations in vectorized code with non-consecutive addresses will
1063   // likely result in more instructions compared to scalar code where the
1064   // computation can more often be merged into the index mode. The resulting
1065   // extra micro-ops can significantly decrease throughput.
1066   unsigned NumVectorInstToHideOverhead = 10;
1067   int MaxMergeDistance = 64;
1068 
1069   if (Ty->isVectorTy() && SE &&
1070       !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
1071     return NumVectorInstToHideOverhead;
1072 
1073   // In many cases the address computation is not merged into the instruction
1074   // addressing mode.
1075   return 1;
1076 }
1077 
1078 InstructionCost AArch64TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
1079                                                    Type *CondTy,
1080                                                    CmpInst::Predicate VecPred,
1081                                                    TTI::TargetCostKind CostKind,
1082                                                    const Instruction *I) {
1083   // TODO: Handle other cost kinds.
1084   if (CostKind != TTI::TCK_RecipThroughput)
1085     return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
1086                                      I);
1087 
1088   int ISD = TLI->InstructionOpcodeToISD(Opcode);
1089   // We don't lower some vector selects well that are wider than the register
1090   // width.
1091   if (isa<FixedVectorType>(ValTy) && ISD == ISD::SELECT) {
1092     // We would need this many instructions to hide the scalarization happening.
1093     const int AmortizationCost = 20;
1094 
1095     // If VecPred is not set, check if we can get a predicate from the context
1096     // instruction, if its type matches the requested ValTy.
1097     if (VecPred == CmpInst::BAD_ICMP_PREDICATE && I && I->getType() == ValTy) {
1098       CmpInst::Predicate CurrentPred;
1099       if (match(I, m_Select(m_Cmp(CurrentPred, m_Value(), m_Value()), m_Value(),
1100                             m_Value())))
1101         VecPred = CurrentPred;
1102     }
1103     // Check if we have a compare/select chain that can be lowered using CMxx &
1104     // BFI pair.
1105     if (CmpInst::isIntPredicate(VecPred)) {
1106       static const auto ValidMinMaxTys = {MVT::v8i8,  MVT::v16i8, MVT::v4i16,
1107                                           MVT::v8i16, MVT::v2i32, MVT::v4i32,
1108                                           MVT::v2i64};
1109       auto LT = TLI->getTypeLegalizationCost(DL, ValTy);
1110       if (any_of(ValidMinMaxTys, [&LT](MVT M) { return M == LT.second; }))
1111         return LT.first;
1112     }
1113 
1114     static const TypeConversionCostTblEntry
1115     VectorSelectTbl[] = {
1116       { ISD::SELECT, MVT::v16i1, MVT::v16i16, 16 },
1117       { ISD::SELECT, MVT::v8i1, MVT::v8i32, 8 },
1118       { ISD::SELECT, MVT::v16i1, MVT::v16i32, 16 },
1119       { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4 * AmortizationCost },
1120       { ISD::SELECT, MVT::v8i1, MVT::v8i64, 8 * AmortizationCost },
1121       { ISD::SELECT, MVT::v16i1, MVT::v16i64, 16 * AmortizationCost }
1122     };
1123 
1124     EVT SelCondTy = TLI->getValueType(DL, CondTy);
1125     EVT SelValTy = TLI->getValueType(DL, ValTy);
1126     if (SelCondTy.isSimple() && SelValTy.isSimple()) {
1127       if (const auto *Entry = ConvertCostTableLookup(VectorSelectTbl, ISD,
1128                                                      SelCondTy.getSimpleVT(),
1129                                                      SelValTy.getSimpleVT()))
1130         return Entry->Cost;
1131     }
1132   }
1133   // The base case handles scalable vectors fine for now, since it treats the
1134   // cost as 1 * legalization cost.
1135   return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
1136 }
1137 
1138 AArch64TTIImpl::TTI::MemCmpExpansionOptions
1139 AArch64TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
1140   TTI::MemCmpExpansionOptions Options;
1141   if (ST->requiresStrictAlign()) {
1142     // TODO: Add cost modeling for strict align. Misaligned loads expand to
1143     // a bunch of instructions when strict align is enabled.
1144     return Options;
1145   }
1146   Options.AllowOverlappingLoads = true;
1147   Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
1148   Options.NumLoadsPerBlock = Options.MaxNumLoads;
1149   // TODO: Though vector loads usually perform well on AArch64, in some targets
1150   // they may wake up the FP unit, which raises the power consumption.  Perhaps
1151   // they could be used with no holds barred (-O3).
1152   Options.LoadSizes = {8, 4, 2, 1};
1153   return Options;
1154 }
1155 
1156 InstructionCost
1157 AArch64TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
1158                                       Align Alignment, unsigned AddressSpace,
1159                                       TTI::TargetCostKind CostKind) {
1160   if (!isa<ScalableVectorType>(Src))
1161     return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
1162                                         CostKind);
1163   auto LT = TLI->getTypeLegalizationCost(DL, Src);
1164   return LT.first * 2;
1165 }
1166 
1167 InstructionCost AArch64TTIImpl::getGatherScatterOpCost(
1168     unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
1169     Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) {
1170 
1171   if (!isa<ScalableVectorType>(DataTy))
1172     return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
1173                                          Alignment, CostKind, I);
1174   auto *VT = cast<VectorType>(DataTy);
1175   auto LT = TLI->getTypeLegalizationCost(DL, DataTy);
1176   ElementCount LegalVF = LT.second.getVectorElementCount();
1177   Optional<unsigned> MaxNumVScale = getMaxVScale();
1178   assert(MaxNumVScale && "Expected valid max vscale value");
1179 
1180   InstructionCost MemOpCost =
1181       getMemoryOpCost(Opcode, VT->getElementType(), Alignment, 0, CostKind, I);
1182   unsigned MaxNumElementsPerGather =
1183       MaxNumVScale.getValue() * LegalVF.getKnownMinValue();
1184   return LT.first * MaxNumElementsPerGather * MemOpCost;
1185 }
1186 
1187 bool AArch64TTIImpl::useNeonVector(const Type *Ty) const {
1188   return isa<FixedVectorType>(Ty) && !ST->useSVEForFixedLengthVectors();
1189 }
1190 
1191 InstructionCost AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Ty,
1192                                                 MaybeAlign Alignment,
1193                                                 unsigned AddressSpace,
1194                                                 TTI::TargetCostKind CostKind,
1195                                                 const Instruction *I) {
1196   // Type legalization can't handle structs
1197   if (TLI->getValueType(DL, Ty,  true) == MVT::Other)
1198     return BaseT::getMemoryOpCost(Opcode, Ty, Alignment, AddressSpace,
1199                                   CostKind);
1200 
1201   auto LT = TLI->getTypeLegalizationCost(DL, Ty);
1202 
1203   // TODO: consider latency as well for TCK_SizeAndLatency.
1204   if (CostKind == TTI::TCK_CodeSize || CostKind == TTI::TCK_SizeAndLatency)
1205     return LT.first;
1206 
1207   if (CostKind != TTI::TCK_RecipThroughput)
1208     return 1;
1209 
1210   if (ST->isMisaligned128StoreSlow() && Opcode == Instruction::Store &&
1211       LT.second.is128BitVector() && (!Alignment || *Alignment < Align(16))) {
1212     // Unaligned stores are extremely inefficient. We don't split all
1213     // unaligned 128-bit stores because the negative impact that has shown in
1214     // practice on inlined block copy code.
1215     // We make such stores expensive so that we will only vectorize if there
1216     // are 6 other instructions getting vectorized.
1217     const int AmortizationCost = 6;
1218 
1219     return LT.first * 2 * AmortizationCost;
1220   }
1221 
1222   if (useNeonVector(Ty) &&
1223       cast<VectorType>(Ty)->getElementType()->isIntegerTy(8)) {
1224     unsigned ProfitableNumElements;
1225     if (Opcode == Instruction::Store)
1226       // We use a custom trunc store lowering so v.4b should be profitable.
1227       ProfitableNumElements = 4;
1228     else
1229       // We scalarize the loads because there is not v.4b register and we
1230       // have to promote the elements to v.2.
1231       ProfitableNumElements = 8;
1232 
1233     if (cast<FixedVectorType>(Ty)->getNumElements() < ProfitableNumElements) {
1234       unsigned NumVecElts = cast<FixedVectorType>(Ty)->getNumElements();
1235       unsigned NumVectorizableInstsToAmortize = NumVecElts * 2;
1236       // We generate 2 instructions per vector element.
1237       return NumVectorizableInstsToAmortize * NumVecElts * 2;
1238     }
1239   }
1240 
1241   return LT.first;
1242 }
1243 
1244 InstructionCost AArch64TTIImpl::getInterleavedMemoryOpCost(
1245     unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1246     Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
1247     bool UseMaskForCond, bool UseMaskForGaps) {
1248   assert(Factor >= 2 && "Invalid interleave factor");
1249   auto *VecVTy = cast<FixedVectorType>(VecTy);
1250 
1251   if (!UseMaskForCond && !UseMaskForGaps &&
1252       Factor <= TLI->getMaxSupportedInterleaveFactor()) {
1253     unsigned NumElts = VecVTy->getNumElements();
1254     auto *SubVecTy =
1255         FixedVectorType::get(VecTy->getScalarType(), NumElts / Factor);
1256 
1257     // ldN/stN only support legal vector types of size 64 or 128 in bits.
1258     // Accesses having vector types that are a multiple of 128 bits can be
1259     // matched to more than one ldN/stN instruction.
1260     if (NumElts % Factor == 0 &&
1261         TLI->isLegalInterleavedAccessType(SubVecTy, DL))
1262       return Factor * TLI->getNumInterleavedAccesses(SubVecTy, DL);
1263   }
1264 
1265   return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
1266                                            Alignment, AddressSpace, CostKind,
1267                                            UseMaskForCond, UseMaskForGaps);
1268 }
1269 
1270 int AArch64TTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) {
1271   InstructionCost Cost = 0;
1272   TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
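  // Only the low 64 bits of v8-v15 are callee-saved under AAPCS64, so a
  // 128-bit vector value that is live across a call is assumed to be spilled
  // and reloaded; model that as one store plus one load of the type.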
1273   for (auto *I : Tys) {
1274     if (!I->isVectorTy())
1275       continue;
1276     if (I->getScalarSizeInBits() * cast<FixedVectorType>(I)->getNumElements() ==
1277         128)
1278       Cost += getMemoryOpCost(Instruction::Store, I, Align(128), 0, CostKind) +
1279               getMemoryOpCost(Instruction::Load, I, Align(128), 0, CostKind);
1280   }
1281   return *Cost.getValue();
1282 }
1283 
1284 unsigned AArch64TTIImpl::getMaxInterleaveFactor(unsigned VF) {
1285   return ST->getMaxInterleaveFactor();
1286 }
1287 
1288 // For Falkor, we want to avoid having too many strided loads in a loop since
1289 // that can exhaust the HW prefetcher resources.  We adjust the unroller
1290 // MaxCount preference below to attempt to ensure unrolling doesn't create too
1291 // many strided loads.
1292 static void
1293 getFalkorUnrollingPreferences(Loop *L, ScalarEvolution &SE,
1294                               TargetTransformInfo::UnrollingPreferences &UP) {
1295   enum { MaxStridedLoads = 7 };
1296   auto countStridedLoads = [](Loop *L, ScalarEvolution &SE) {
1297     int StridedLoads = 0;
1298     // FIXME? We could make this more precise by looking at the CFG and
1299     // e.g. not counting loads in each side of an if-then-else diamond.
1300     for (const auto BB : L->blocks()) {
1301       for (auto &I : *BB) {
1302         LoadInst *LMemI = dyn_cast<LoadInst>(&I);
1303         if (!LMemI)
1304           continue;
1305 
1306         Value *PtrValue = LMemI->getPointerOperand();
1307         if (L->isLoopInvariant(PtrValue))
1308           continue;
1309 
1310         const SCEV *LSCEV = SE.getSCEV(PtrValue);
1311         const SCEVAddRecExpr *LSCEVAddRec = dyn_cast<SCEVAddRecExpr>(LSCEV);
1312         if (!LSCEVAddRec || !LSCEVAddRec->isAffine())
1313           continue;
1314 
1315         // FIXME? We could take pairing of unrolled load copies into account
1316         // by looking at the AddRec, but we would probably have to limit this
1317         // to loops with no stores or other memory optimization barriers.
1318         ++StridedLoads;
1319         // We've seen enough strided loads that seeing more won't make a
1320         // difference.
1321         if (StridedLoads > MaxStridedLoads / 2)
1322           return StridedLoads;
1323       }
1324     }
1325     return StridedLoads;
1326   };
1327 
1328   int StridedLoads = countStridedLoads(L, SE);
1329   LLVM_DEBUG(dbgs() << "falkor-hwpf: detected " << StridedLoads
1330                     << " strided loads\n");
1331   // Pick the largest power of 2 unroll count that won't result in too many
1332   // strided loads.
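  // For example, if 3 strided loads are detected, MaxCount becomes
  // 1 << Log2_32(7 / 3) == 2.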
1333   if (StridedLoads) {
1334     UP.MaxCount = 1 << Log2_32(MaxStridedLoads / StridedLoads);
1335     LLVM_DEBUG(dbgs() << "falkor-hwpf: setting unroll MaxCount to "
1336                       << UP.MaxCount << '\n');
1337   }
1338 }
1339 
1340 void AArch64TTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
1341                                              TTI::UnrollingPreferences &UP) {
1342   // Enable partial unrolling and runtime unrolling.
1343   BaseT::getUnrollingPreferences(L, SE, UP);
1344 
  // An inner loop is more likely to be hot, and its runtime check can be
  // hoisted out by the LICM pass, so the overhead is lower; try a larger
  // threshold to unroll more loops.
1348   if (L->getLoopDepth() > 1)
1349     UP.PartialThreshold *= 2;
1350 
1351   // Disable partial & runtime unrolling on -Os.
1352   UP.PartialOptSizeThreshold = 0;
1353 
1354   if (ST->getProcFamily() == AArch64Subtarget::Falkor &&
1355       EnableFalkorHWPFUnrollFix)
1356     getFalkorUnrollingPreferences(L, SE, UP);
1357 
1358   // Scan the loop: don't unroll loops with calls as this could prevent
1359   // inlining. Don't unroll vector loops either, as they don't benefit much from
1360   // unrolling.
1361   for (auto *BB : L->getBlocks()) {
1362     for (auto &I : *BB) {
      // Don't unroll vectorized loops.
1364       if (I.getType()->isVectorTy())
1365         return;
1366 
1367       if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
1368         if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
1369           if (!isLoweredToCall(F))
1370             continue;
1371         }
1372         return;
1373       }
1374     }
1375   }
1376 
  // Enable runtime unrolling for in-order models.
  // If -mcpu is omitted, getProcFamily() returns AArch64Subtarget::Others, so
  // by checking for that case we can ensure that the default behaviour is
  // unchanged.
1381   if (ST->getProcFamily() != AArch64Subtarget::Others &&
1382       !ST->getSchedModel().isOutOfOrder()) {
1383     UP.Runtime = true;
1384     UP.Partial = true;
1385     UP.UpperBound = true;
1386     UP.UnrollRemainder = true;
1387     UP.DefaultUnrollRuntimeCount = 4;
1388   }
1389 }
1390 
1391 void AArch64TTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
1392                                            TTI::PeelingPreferences &PP) {
1393   BaseT::getPeelingPreferences(L, SE, PP);
1394 }
1395 
1396 Value *AArch64TTIImpl::getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
1397                                                          Type *ExpectedType) {
1398   switch (Inst->getIntrinsicID()) {
1399   default:
1400     return nullptr;
1401   case Intrinsic::aarch64_neon_st2:
1402   case Intrinsic::aarch64_neon_st3:
1403   case Intrinsic::aarch64_neon_st4: {
    // The expected type must be a struct whose element types match the values
    // being stored.
1405     StructType *ST = dyn_cast<StructType>(ExpectedType);
1406     if (!ST)
1407       return nullptr;
1408     unsigned NumElts = Inst->getNumArgOperands() - 1;
1409     if (ST->getNumElements() != NumElts)
1410       return nullptr;
1411     for (unsigned i = 0, e = NumElts; i != e; ++i) {
1412       if (Inst->getArgOperand(i)->getType() != ST->getElementType(i))
1413         return nullptr;
1414     }
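    // Repackage the stored values as an aggregate of the expected struct type
    // so that a redundant ldN of the same memory can reuse them; e.g. an st3
    // of three <4 x i32> values yields a { <4 x i32>, <4 x i32>, <4 x i32> }.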
1415     Value *Res = UndefValue::get(ExpectedType);
1416     IRBuilder<> Builder(Inst);
1417     for (unsigned i = 0, e = NumElts; i != e; ++i) {
1418       Value *L = Inst->getArgOperand(i);
1419       Res = Builder.CreateInsertValue(Res, L, i);
1420     }
1421     return Res;
1422   }
1423   case Intrinsic::aarch64_neon_ld2:
1424   case Intrinsic::aarch64_neon_ld3:
1425   case Intrinsic::aarch64_neon_ld4:
1426     if (Inst->getType() == ExpectedType)
1427       return Inst;
1428     return nullptr;
1429   }
1430 }
1431 
1432 bool AArch64TTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
1433                                         MemIntrinsicInfo &Info) {
1434   switch (Inst->getIntrinsicID()) {
1435   default:
1436     break;
1437   case Intrinsic::aarch64_neon_ld2:
1438   case Intrinsic::aarch64_neon_ld3:
1439   case Intrinsic::aarch64_neon_ld4:
1440     Info.ReadMem = true;
1441     Info.WriteMem = false;
1442     Info.PtrVal = Inst->getArgOperand(0);
1443     break;
1444   case Intrinsic::aarch64_neon_st2:
1445   case Intrinsic::aarch64_neon_st3:
1446   case Intrinsic::aarch64_neon_st4:
1447     Info.ReadMem = false;
1448     Info.WriteMem = true;
1449     Info.PtrVal = Inst->getArgOperand(Inst->getNumArgOperands() - 1);
1450     break;
1451   }
1452 
1453   switch (Inst->getIntrinsicID()) {
1454   default:
1455     return false;
1456   case Intrinsic::aarch64_neon_ld2:
1457   case Intrinsic::aarch64_neon_st2:
1458     Info.MatchingId = VECTOR_LDST_TWO_ELEMENTS;
1459     break;
1460   case Intrinsic::aarch64_neon_ld3:
1461   case Intrinsic::aarch64_neon_st3:
1462     Info.MatchingId = VECTOR_LDST_THREE_ELEMENTS;
1463     break;
1464   case Intrinsic::aarch64_neon_ld4:
1465   case Intrinsic::aarch64_neon_st4:
1466     Info.MatchingId = VECTOR_LDST_FOUR_ELEMENTS;
1467     break;
1468   }
1469   return true;
1470 }
1471 
/// See if \p I should be considered for address type promotion. We check if
/// \p I is a sext with the right type that is used in memory accesses. If it
/// is used in a "complex" getelementptr, we allow it to be promoted without
/// finding other sext instructions that sign extended the same initial value.
/// A getelementptr is considered "complex" if it has more than 2 operands.
1477 bool AArch64TTIImpl::shouldConsiderAddressTypePromotion(
1478     const Instruction &I, bool &AllowPromotionWithoutCommonHeader) {
1479   bool Considerable = false;
1480   AllowPromotionWithoutCommonHeader = false;
1481   if (!isa<SExtInst>(&I))
1482     return false;
1483   Type *ConsideredSExtType =
1484       Type::getInt64Ty(I.getParent()->getParent()->getContext());
1485   if (I.getType() != ConsideredSExtType)
1486     return false;
1487   // See if the sext is the one with the right type and used in at least one
1488   // GetElementPtrInst.
1489   for (const User *U : I.users()) {
1490     if (const GetElementPtrInst *GEPInst = dyn_cast<GetElementPtrInst>(U)) {
1491       Considerable = true;
      // A getelementptr is considered "complex" if it has more than 2
      // operands. We will promote a SExt used in such a complex GEP, as we
      // expect some of the computation to be merged if it is done on 64 bits.
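      // For example, a GEP with two indices (three operands), such as
      // getelementptr [16 x i32], [16 x i32]* %base, i64 %sext, i64 %idx,
      // counts as complex, while a single-index GEP does not.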
1495       if (GEPInst->getNumOperands() > 2) {
1496         AllowPromotionWithoutCommonHeader = true;
1497         break;
1498       }
1499     }
1500   }
1501   return Considerable;
1502 }
1503 
1504 bool AArch64TTIImpl::isLegalToVectorizeReduction(RecurrenceDescriptor RdxDesc,
1505                                                  ElementCount VF) const {
1506   if (!VF.isScalable())
1507     return true;
1508 
1509   Type *Ty = RdxDesc.getRecurrenceType();
1510   if (Ty->isBFloatTy() || !isLegalElementTypeForSVE(Ty))
1511     return false;
1512 
1513   switch (RdxDesc.getRecurrenceKind()) {
1514   case RecurKind::Add:
1515   case RecurKind::FAdd:
1516   case RecurKind::And:
1517   case RecurKind::Or:
1518   case RecurKind::Xor:
1519   case RecurKind::SMin:
1520   case RecurKind::SMax:
1521   case RecurKind::UMin:
1522   case RecurKind::UMax:
1523   case RecurKind::FMin:
1524   case RecurKind::FMax:
1525     return true;
1526   default:
1527     return false;
1528   }
1529 }
1530 
1531 InstructionCost
1532 AArch64TTIImpl::getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy,
1533                                        bool IsPairwise, bool IsUnsigned,
1534                                        TTI::TargetCostKind CostKind) {
1535   if (!isa<ScalableVectorType>(Ty))
1536     return BaseT::getMinMaxReductionCost(Ty, CondTy, IsPairwise, IsUnsigned,
1537                                          CostKind);
  assert((isa<ScalableVectorType>(Ty) && isa<ScalableVectorType>(CondTy)) &&
         "Both vectors need to be scalable");
1540 
1541   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
1542   InstructionCost LegalizationCost = 0;
1543   if (LT.first > 1) {
1544     Type *LegalVTy = EVT(LT.second).getTypeForEVT(Ty->getContext());
1545     unsigned CmpOpcode =
1546         Ty->isFPOrFPVectorTy() ? Instruction::FCmp : Instruction::ICmp;
1547     LegalizationCost =
1548         getCmpSelInstrCost(CmpOpcode, LegalVTy, LegalVTy,
1549                            CmpInst::BAD_ICMP_PREDICATE, CostKind) +
1550         getCmpSelInstrCost(Instruction::Select, LegalVTy, LegalVTy,
1551                            CmpInst::BAD_ICMP_PREDICATE, CostKind);
1552     LegalizationCost *= LT.first - 1;
1553   }
1554 
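  // The remaining cost models the final in-register horizontal min/max
  // reduction (e.g. an SVE sminv/umaxv/fminnmv-style instruction) as a flat
  // cost of 2.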
1555   return LegalizationCost + /*Cost of horizontal reduction*/ 2;
1556 }
1557 
1558 InstructionCost AArch64TTIImpl::getArithmeticReductionCostSVE(
1559     unsigned Opcode, VectorType *ValTy, bool IsPairwise,
1560     TTI::TargetCostKind CostKind) {
  assert(!IsPairwise && "Pairwise reductions are not supported");
1562 
1563   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
1564   InstructionCost LegalizationCost = 0;
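  // If the type needs splitting, each extra legal part is combined with one
  // more vector op of the same opcode; e.g. an nxv8i32 add reduction splits
  // into two nxv4i32 halves and pays for one extra add before the final
  // reduction.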
1565   if (LT.first > 1) {
1566     Type *LegalVTy = EVT(LT.second).getTypeForEVT(ValTy->getContext());
1567     LegalizationCost = getArithmeticInstrCost(Opcode, LegalVTy, CostKind);
1568     LegalizationCost *= LT.first - 1;
1569   }
1570 
1571   int ISD = TLI->InstructionOpcodeToISD(Opcode);
1572   assert(ISD && "Invalid opcode");
1573   // Add the final reduction cost for the legal horizontal reduction
1574   switch (ISD) {
1575   case ISD::ADD:
1576   case ISD::AND:
1577   case ISD::OR:
1578   case ISD::XOR:
1579   case ISD::FADD:
1580     return LegalizationCost + 2;
1581   default:
1582     return InstructionCost::getInvalid();
1583   }
1584 }
1585 
1586 InstructionCost
1587 AArch64TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
1588                                            bool IsPairwiseForm,
1589                                            TTI::TargetCostKind CostKind) {
1590 
1591   if (isa<ScalableVectorType>(ValTy))
1592     return getArithmeticReductionCostSVE(Opcode, ValTy, IsPairwiseForm,
1593                                          CostKind);
1594   if (IsPairwiseForm)
1595     return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwiseForm,
1596                                              CostKind);
1597 
1598   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
1599   MVT MTy = LT.second;
1600   int ISD = TLI->InstructionOpcodeToISD(Opcode);
1601   assert(ISD && "Invalid opcode");
1602 
1603   // Horizontal adds can use the 'addv' instruction. We model the cost of these
1604   // instructions as normal vector adds. This is the only arithmetic vector
1605   // reduction operation for which we have an instruction.
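  // For example, a <4 x i32> add reduction maps to a single addv, which the
  // table below costs at 1.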
1606   static const CostTblEntry CostTblNoPairwise[]{
1607       {ISD::ADD, MVT::v8i8,  1},
1608       {ISD::ADD, MVT::v16i8, 1},
1609       {ISD::ADD, MVT::v4i16, 1},
1610       {ISD::ADD, MVT::v8i16, 1},
1611       {ISD::ADD, MVT::v4i32, 1},
1612   };
1613 
1614   if (const auto *Entry = CostTableLookup(CostTblNoPairwise, ISD, MTy))
1615     return LT.first * Entry->Cost;
1616 
1617   return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwiseForm,
1618                                            CostKind);
1619 }
1620 
1621 InstructionCost AArch64TTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
1622                                                VectorType *Tp,
1623                                                ArrayRef<int> Mask, int Index,
1624                                                VectorType *SubTp) {
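  // Try to refine the generic shuffle kind using the actual mask; e.g. a
  // two-source permute whose mask takes every lane from the matching position
  // of one of its inputs can be treated as SK_Select.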
1625   Kind = improveShuffleKindFromMask(Kind, Mask);
1626   if (Kind == TTI::SK_Broadcast || Kind == TTI::SK_Transpose ||
1627       Kind == TTI::SK_Select || Kind == TTI::SK_PermuteSingleSrc ||
1628       Kind == TTI::SK_Reverse) {
1629     static const CostTblEntry ShuffleTbl[] = {
1630       // Broadcast shuffle kinds can be performed with 'dup'.
1631       { TTI::SK_Broadcast, MVT::v8i8,  1 },
1632       { TTI::SK_Broadcast, MVT::v16i8, 1 },
1633       { TTI::SK_Broadcast, MVT::v4i16, 1 },
1634       { TTI::SK_Broadcast, MVT::v8i16, 1 },
1635       { TTI::SK_Broadcast, MVT::v2i32, 1 },
1636       { TTI::SK_Broadcast, MVT::v4i32, 1 },
1637       { TTI::SK_Broadcast, MVT::v2i64, 1 },
1638       { TTI::SK_Broadcast, MVT::v2f32, 1 },
1639       { TTI::SK_Broadcast, MVT::v4f32, 1 },
1640       { TTI::SK_Broadcast, MVT::v2f64, 1 },
1641       // Transpose shuffle kinds can be performed with 'trn1/trn2' and
1642       // 'zip1/zip2' instructions.
1643       { TTI::SK_Transpose, MVT::v8i8,  1 },
1644       { TTI::SK_Transpose, MVT::v16i8, 1 },
1645       { TTI::SK_Transpose, MVT::v4i16, 1 },
1646       { TTI::SK_Transpose, MVT::v8i16, 1 },
1647       { TTI::SK_Transpose, MVT::v2i32, 1 },
1648       { TTI::SK_Transpose, MVT::v4i32, 1 },
1649       { TTI::SK_Transpose, MVT::v2i64, 1 },
1650       { TTI::SK_Transpose, MVT::v2f32, 1 },
1651       { TTI::SK_Transpose, MVT::v4f32, 1 },
1652       { TTI::SK_Transpose, MVT::v2f64, 1 },
1653       // Select shuffle kinds.
1654       // TODO: handle vXi8/vXi16.
1655       { TTI::SK_Select, MVT::v2i32, 1 }, // mov.
1656       { TTI::SK_Select, MVT::v4i32, 2 }, // rev+trn (or similar).
1657       { TTI::SK_Select, MVT::v2i64, 1 }, // mov.
1658       { TTI::SK_Select, MVT::v2f32, 1 }, // mov.
1659       { TTI::SK_Select, MVT::v4f32, 2 }, // rev+trn (or similar).
1660       { TTI::SK_Select, MVT::v2f64, 1 }, // mov.
1661       // PermuteSingleSrc shuffle kinds.
1662       // TODO: handle vXi8/vXi16.
1663       { TTI::SK_PermuteSingleSrc, MVT::v2i32, 1 }, // mov.
1664       { TTI::SK_PermuteSingleSrc, MVT::v4i32, 3 }, // perfectshuffle worst case.
1665       { TTI::SK_PermuteSingleSrc, MVT::v2i64, 1 }, // mov.
1666       { TTI::SK_PermuteSingleSrc, MVT::v2f32, 1 }, // mov.
1667       { TTI::SK_PermuteSingleSrc, MVT::v4f32, 3 }, // perfectshuffle worst case.
1668       { TTI::SK_PermuteSingleSrc, MVT::v2f64, 1 }, // mov.
1669       // Reverse can be lowered with `rev`.
1670       { TTI::SK_Reverse, MVT::v2i32, 1 }, // mov.
1671       { TTI::SK_Reverse, MVT::v4i32, 2 }, // REV64; EXT
1672       { TTI::SK_Reverse, MVT::v2i64, 1 }, // mov.
1673       { TTI::SK_Reverse, MVT::v2f32, 1 }, // mov.
1674       { TTI::SK_Reverse, MVT::v4f32, 2 }, // REV64; EXT
1675       { TTI::SK_Reverse, MVT::v2f64, 1 }, // mov.
1676       // Broadcast shuffle kinds for scalable vectors
1677       { TTI::SK_Broadcast, MVT::nxv16i8,  1 },
1678       { TTI::SK_Broadcast, MVT::nxv8i16,  1 },
1679       { TTI::SK_Broadcast, MVT::nxv4i32,  1 },
1680       { TTI::SK_Broadcast, MVT::nxv2i64,  1 },
1681       { TTI::SK_Broadcast, MVT::nxv8f16,  1 },
1682       { TTI::SK_Broadcast, MVT::nxv8bf16, 1 },
1683       { TTI::SK_Broadcast, MVT::nxv4f32,  1 },
1684       { TTI::SK_Broadcast, MVT::nxv2f64,  1 },
1685       // Handle the cases for vector.reverse with scalable vectors
1686       { TTI::SK_Reverse, MVT::nxv16i8,  1 },
1687       { TTI::SK_Reverse, MVT::nxv8i16,  1 },
1688       { TTI::SK_Reverse, MVT::nxv4i32,  1 },
1689       { TTI::SK_Reverse, MVT::nxv2i64,  1 },
1690       { TTI::SK_Reverse, MVT::nxv8f16,  1 },
1691       { TTI::SK_Reverse, MVT::nxv8bf16, 1 },
1692       { TTI::SK_Reverse, MVT::nxv4f32,  1 },
1693       { TTI::SK_Reverse, MVT::nxv2f64,  1 },
1694       { TTI::SK_Reverse, MVT::nxv16i1,  1 },
1695       { TTI::SK_Reverse, MVT::nxv8i1,   1 },
1696       { TTI::SK_Reverse, MVT::nxv4i1,   1 },
1697       { TTI::SK_Reverse, MVT::nxv2i1,   1 },
1698     };
1699     std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
1700     if (const auto *Entry = CostTableLookup(ShuffleTbl, Kind, LT.second))
1701       return LT.first * Entry->Cost;
1702   }
1703 
1704   return BaseT::getShuffleCost(Kind, Tp, Mask, Index, SubTp);
1705 }
1706