1 //===- ARMTargetTransformInfo.cpp - ARM specific TTI ----------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "ARMTargetTransformInfo.h"
10 #include "ARMSubtarget.h"
11 #include "MCTargetDesc/ARMAddressingModes.h"
12 #include "llvm/ADT/APInt.h"
13 #include "llvm/ADT/SmallVector.h"
14 #include "llvm/Analysis/LoopInfo.h"
15 #include "llvm/CodeGen/CostTable.h"
16 #include "llvm/CodeGen/ISDOpcodes.h"
17 #include "llvm/CodeGen/ValueTypes.h"
18 #include "llvm/IR/BasicBlock.h"
19 #include "llvm/IR/DataLayout.h"
20 #include "llvm/IR/DerivedTypes.h"
21 #include "llvm/IR/Instruction.h"
22 #include "llvm/IR/Instructions.h"
23 #include "llvm/IR/Intrinsics.h"
24 #include "llvm/IR/IntrinsicInst.h"
25 #include "llvm/IR/IntrinsicsARM.h"
26 #include "llvm/IR/PatternMatch.h"
27 #include "llvm/IR/Type.h"
28 #include "llvm/MC/SubtargetFeature.h"
29 #include "llvm/Support/Casting.h"
30 #include "llvm/Support/KnownBits.h"
31 #include "llvm/Support/MachineValueType.h"
32 #include "llvm/Target/TargetMachine.h"
33 #include "llvm/Transforms/InstCombine/InstCombiner.h"
34 #include "llvm/Transforms/Utils/Local.h"
35 #include "llvm/Transforms/Utils/LoopUtils.h"
36 #include <algorithm>
37 #include <cassert>
38 #include <cstdint>
39 #include <utility>
40 
41 using namespace llvm;
42 
43 #define DEBUG_TYPE "armtti"
44 
45 static cl::opt<bool> EnableMaskedLoadStores(
46   "enable-arm-maskedldst", cl::Hidden, cl::init(true),
47   cl::desc("Enable the generation of masked loads and stores"));
48 
49 static cl::opt<bool> DisableLowOverheadLoops(
50   "disable-arm-loloops", cl::Hidden, cl::init(false),
51   cl::desc("Disable the generation of low-overhead loops"));
52 
53 static cl::opt<bool>
54     AllowWLSLoops("allow-arm-wlsloops", cl::Hidden, cl::init(true),
55                   cl::desc("Enable the generation of WLS loops"));
56 
57 extern cl::opt<TailPredication::Mode> EnableTailPredication;
58 
59 extern cl::opt<bool> EnableMaskedGatherScatters;
60 
61 extern cl::opt<unsigned> MVEMaxSupportedInterleaveFactor;
62 
63 /// Convert a vector load intrinsic into a simple llvm load instruction.
64 /// This is beneficial when the underlying object being addressed comes
65 /// from a constant, since we get constant-folding for free.
66 static Value *simplifyNeonVld1(const IntrinsicInst &II, unsigned MemAlign,
67                                InstCombiner::BuilderTy &Builder) {
68   auto *IntrAlign = dyn_cast<ConstantInt>(II.getArgOperand(1));
69 
70   if (!IntrAlign)
71     return nullptr;
72 
73   unsigned Alignment = IntrAlign->getLimitedValue() < MemAlign
74                            ? MemAlign
75                            : IntrAlign->getLimitedValue();
76 
77   if (!isPowerOf2_32(Alignment))
78     return nullptr;
79 
80   auto *BCastInst = Builder.CreateBitCast(II.getArgOperand(0),
81                                           PointerType::get(II.getType(), 0));
82   return Builder.CreateAlignedLoad(II.getType(), BCastInst, Align(Alignment));
83 }
84 
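/// Determine whether Callee can be inlined into Caller based on their
/// subtarget feature sets.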
85 bool ARMTTIImpl::areInlineCompatible(const Function *Caller,
86                                      const Function *Callee) const {
87   const TargetMachine &TM = getTLI()->getTargetMachine();
88   const FeatureBitset &CallerBits =
89       TM.getSubtargetImpl(*Caller)->getFeatureBits();
90   const FeatureBitset &CalleeBits =
91       TM.getSubtargetImpl(*Callee)->getFeatureBits();
92 
  // To inline a callee, all features not in the allowed list must match
  // exactly.
94   bool MatchExact = (CallerBits & ~InlineFeaturesAllowed) ==
95                     (CalleeBits & ~InlineFeaturesAllowed);
  // For features in the allowed list, the callee's features must be a subset
  // of the caller's.
98   bool MatchSubset = ((CallerBits & CalleeBits) & InlineFeaturesAllowed) ==
99                      (CalleeBits & InlineFeaturesAllowed);
100   return MatchExact && MatchSubset;
101 }
102 
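/// Pick the preferred addressing mode for loops: post-indexed when MVE is
/// available, pre-indexed for single-block Thumb2 M-class loops that are not
/// optimised for size, and no preference otherwise.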
103 TTI::AddressingModeKind
104 ARMTTIImpl::getPreferredAddressingMode(const Loop *L,
105                                        ScalarEvolution *SE) const {
106   if (ST->hasMVEIntegerOps())
107     return TTI::AMK_PostIndexed;
108 
109   if (L->getHeader()->getParent()->hasOptSize())
110     return TTI::AMK_None;
111 
112   if (ST->isMClass() && ST->isThumb2() &&
113       L->getNumBlocks() == 1)
114     return TTI::AMK_PreIndexed;
115 
116   return TTI::AMK_None;
117 }
118 
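/// ARM-specific InstCombine folds for NEON and MVE intrinsics, such as
/// turning vld1 into a plain load, raising alignment arguments to the known
/// alignment, and cancelling out mve.pred.i2v/v2i conversion pairs.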
119 Optional<Instruction *>
120 ARMTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
121   using namespace PatternMatch;
122   Intrinsic::ID IID = II.getIntrinsicID();
123   switch (IID) {
124   default:
125     break;
126   case Intrinsic::arm_neon_vld1: {
127     Align MemAlign =
128         getKnownAlignment(II.getArgOperand(0), IC.getDataLayout(), &II,
129                           &IC.getAssumptionCache(), &IC.getDominatorTree());
130     if (Value *V = simplifyNeonVld1(II, MemAlign.value(), IC.Builder)) {
131       return IC.replaceInstUsesWith(II, V);
132     }
133     break;
134   }
135 
136   case Intrinsic::arm_neon_vld2:
137   case Intrinsic::arm_neon_vld3:
138   case Intrinsic::arm_neon_vld4:
139   case Intrinsic::arm_neon_vld2lane:
140   case Intrinsic::arm_neon_vld3lane:
141   case Intrinsic::arm_neon_vld4lane:
142   case Intrinsic::arm_neon_vst1:
143   case Intrinsic::arm_neon_vst2:
144   case Intrinsic::arm_neon_vst3:
145   case Intrinsic::arm_neon_vst4:
146   case Intrinsic::arm_neon_vst2lane:
147   case Intrinsic::arm_neon_vst3lane:
148   case Intrinsic::arm_neon_vst4lane: {
149     Align MemAlign =
150         getKnownAlignment(II.getArgOperand(0), IC.getDataLayout(), &II,
151                           &IC.getAssumptionCache(), &IC.getDominatorTree());
152     unsigned AlignArg = II.getNumArgOperands() - 1;
153     Value *AlignArgOp = II.getArgOperand(AlignArg);
154     MaybeAlign Align = cast<ConstantInt>(AlignArgOp)->getMaybeAlignValue();
155     if (Align && *Align < MemAlign) {
156       return IC.replaceOperand(
157           II, AlignArg,
158           ConstantInt::get(Type::getInt32Ty(II.getContext()), MemAlign.value(),
159                            false));
160     }
161     break;
162   }
163 
164   case Intrinsic::arm_mve_pred_i2v: {
165     Value *Arg = II.getArgOperand(0);
166     Value *ArgArg;
167     if (match(Arg, PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_v2i>(
168                        PatternMatch::m_Value(ArgArg))) &&
169         II.getType() == ArgArg->getType()) {
170       return IC.replaceInstUsesWith(II, ArgArg);
171     }
172     Constant *XorMask;
173     if (match(Arg, m_Xor(PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_v2i>(
174                              PatternMatch::m_Value(ArgArg)),
175                          PatternMatch::m_Constant(XorMask))) &&
176         II.getType() == ArgArg->getType()) {
177       if (auto *CI = dyn_cast<ConstantInt>(XorMask)) {
178         if (CI->getValue().trunc(16).isAllOnesValue()) {
179           auto TrueVector = IC.Builder.CreateVectorSplat(
180               cast<FixedVectorType>(II.getType())->getNumElements(),
181               IC.Builder.getTrue());
182           return BinaryOperator::Create(Instruction::Xor, ArgArg, TrueVector);
183         }
184       }
185     }
186     KnownBits ScalarKnown(32);
187     if (IC.SimplifyDemandedBits(&II, 0, APInt::getLowBitsSet(32, 16),
188                                 ScalarKnown, 0)) {
189       return &II;
190     }
191     break;
192   }
193   case Intrinsic::arm_mve_pred_v2i: {
194     Value *Arg = II.getArgOperand(0);
195     Value *ArgArg;
196     if (match(Arg, PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_i2v>(
197                        PatternMatch::m_Value(ArgArg)))) {
198       return IC.replaceInstUsesWith(II, ArgArg);
199     }
200     if (!II.getMetadata(LLVMContext::MD_range)) {
201       Type *IntTy32 = Type::getInt32Ty(II.getContext());
202       Metadata *M[] = {
203           ConstantAsMetadata::get(ConstantInt::get(IntTy32, 0)),
204           ConstantAsMetadata::get(ConstantInt::get(IntTy32, 0x10000))};
205       II.setMetadata(LLVMContext::MD_range, MDNode::get(II.getContext(), M));
206       return &II;
207     }
208     break;
209   }
210   case Intrinsic::arm_mve_vadc:
211   case Intrinsic::arm_mve_vadc_predicated: {
212     unsigned CarryOp =
213         (II.getIntrinsicID() == Intrinsic::arm_mve_vadc_predicated) ? 3 : 2;
214     assert(II.getArgOperand(CarryOp)->getType()->getScalarSizeInBits() == 32 &&
215            "Bad type for intrinsic!");
216 
217     KnownBits CarryKnown(32);
218     if (IC.SimplifyDemandedBits(&II, CarryOp, APInt::getOneBitSet(32, 29),
219                                 CarryKnown)) {
220       return &II;
221     }
222     break;
223   }
224   case Intrinsic::arm_mve_vmldava: {
225     Instruction *I = cast<Instruction>(&II);
226     if (I->hasOneUse()) {
227       auto *User = cast<Instruction>(*I->user_begin());
228       Value *OpZ;
229       if (match(User, m_c_Add(m_Specific(I), m_Value(OpZ))) &&
230           match(I->getOperand(3), m_Zero())) {
231         Value *OpX = I->getOperand(4);
232         Value *OpY = I->getOperand(5);
233         Type *OpTy = OpX->getType();
234 
235         IC.Builder.SetInsertPoint(User);
236         Value *V =
237             IC.Builder.CreateIntrinsic(Intrinsic::arm_mve_vmldava, {OpTy},
238                                        {I->getOperand(0), I->getOperand(1),
239                                         I->getOperand(2), OpZ, OpX, OpY});
240 
241         IC.replaceInstUsesWith(*User, V);
242         return IC.eraseInstFromFunction(*User);
243       }
244     }
245     return None;
246   }
247   }
248   return None;
249 }
250 
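/// Estimate the cost of materialising the integer immediate Imm for the
/// current instruction set (ARM, Thumb2 or Thumb1).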
251 InstructionCost ARMTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
252                                           TTI::TargetCostKind CostKind) {
253   assert(Ty->isIntegerTy());
254 
  unsigned Bits = Ty->getPrimitiveSizeInBits();
  if (Bits == 0 || Imm.getActiveBits() >= 64)
    return 4;
258 
259   int64_t SImmVal = Imm.getSExtValue();
260   uint64_t ZImmVal = Imm.getZExtValue();
261   if (!ST->isThumb()) {
262     if ((SImmVal >= 0 && SImmVal < 65536) ||
263         (ARM_AM::getSOImmVal(ZImmVal) != -1) ||
264         (ARM_AM::getSOImmVal(~ZImmVal) != -1))
265       return 1;
266     return ST->hasV6T2Ops() ? 2 : 3;
267   }
268   if (ST->isThumb2()) {
269     if ((SImmVal >= 0 && SImmVal < 65536) ||
270         (ARM_AM::getT2SOImmVal(ZImmVal) != -1) ||
271         (ARM_AM::getT2SOImmVal(~ZImmVal) != -1))
272       return 1;
273     return ST->hasV6T2Ops() ? 2 : 3;
274   }
  // Thumb1: any i8 immediate costs 1.
276   if (Bits == 8 || (SImmVal >= 0 && SImmVal < 256))
277     return 1;
278   if ((~SImmVal < 256) || ARM_AM::isThumbImmShiftedVal(ZImmVal))
279     return 2;
280   // Load from constantpool.
281   return 3;
282 }
283 
// Constants smaller than 256 fit in the immediate field of Thumb1
// instructions, so we return a cost of zero for them and 1 otherwise.
286 InstructionCost ARMTTIImpl::getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
287                                                   const APInt &Imm, Type *Ty) {
288   if (Imm.isNonNegative() && Imm.getLimitedValue() < 256)
289     return 0;
290 
291   return 1;
292 }
293 
// Checks whether Inst is part of a min(max()) or max(min()) pattern
// that will be matched to an SSAT instruction.
296 static bool isSSATMinMaxPattern(Instruction *Inst, const APInt &Imm) {
297   Value *LHS, *RHS;
298   ConstantInt *C;
299   SelectPatternFlavor InstSPF = matchSelectPattern(Inst, LHS, RHS).Flavor;
300 
301   if (InstSPF == SPF_SMAX &&
302       PatternMatch::match(RHS, PatternMatch::m_ConstantInt(C)) &&
303       C->getValue() == Imm && Imm.isNegative() && (-Imm).isPowerOf2()) {
304 
305     auto isSSatMin = [&](Value *MinInst) {
306       if (isa<SelectInst>(MinInst)) {
307         Value *MinLHS, *MinRHS;
308         ConstantInt *MinC;
309         SelectPatternFlavor MinSPF =
310             matchSelectPattern(MinInst, MinLHS, MinRHS).Flavor;
311         if (MinSPF == SPF_SMIN &&
312             PatternMatch::match(MinRHS, PatternMatch::m_ConstantInt(MinC)) &&
313             MinC->getValue() == ((-Imm) - 1))
314           return true;
315       }
316       return false;
317     };
318 
319     if (isSSatMin(Inst->getOperand(1)) ||
320         (Inst->hasNUses(2) && (isSSatMin(*Inst->user_begin()) ||
321                                isSSatMin(*(++Inst->user_begin())))))
322       return true;
323   }
324   return false;
325 }
326 
327 InstructionCost ARMTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
328                                               const APInt &Imm, Type *Ty,
329                                               TTI::TargetCostKind CostKind,
330                                               Instruction *Inst) {
331   // Division by a constant can be turned into multiplication, but only if we
332   // know it's constant. So it's not so much that the immediate is cheap (it's
333   // not), but that the alternative is worse.
334   // FIXME: this is probably unneeded with GlobalISel.
335   if ((Opcode == Instruction::SDiv || Opcode == Instruction::UDiv ||
336        Opcode == Instruction::SRem || Opcode == Instruction::URem) &&
337       Idx == 1)
338     return 0;
339 
  // Leave any gep offsets for CodeGenPrepare, which will do a better job of
  // splitting any large offsets.
342   if (Opcode == Instruction::GetElementPtr && Idx != 0)
343     return 0;
344 
345   if (Opcode == Instruction::And) {
346     // UXTB/UXTH
347     if (Imm == 255 || Imm == 65535)
348       return 0;
349     // Conversion to BIC is free, and means we can use ~Imm instead.
350     return std::min(getIntImmCost(Imm, Ty, CostKind),
351                     getIntImmCost(~Imm, Ty, CostKind));
352   }
353 
354   if (Opcode == Instruction::Add)
355     // Conversion to SUB is free, and means we can use -Imm instead.
356     return std::min(getIntImmCost(Imm, Ty, CostKind),
357                     getIntImmCost(-Imm, Ty, CostKind));
358 
359   if (Opcode == Instruction::ICmp && Imm.isNegative() &&
360       Ty->getIntegerBitWidth() == 32) {
361     int64_t NegImm = -Imm.getSExtValue();
362     if (ST->isThumb2() && NegImm < 1<<12)
363       // icmp X, #-C -> cmn X, #C
364       return 0;
365     if (ST->isThumb() && NegImm < 1<<8)
366       // icmp X, #-C -> adds X, #C
367       return 0;
368   }
369 
370   // xor a, -1 can always be folded to MVN
371   if (Opcode == Instruction::Xor && Imm.isAllOnesValue())
372     return 0;
373 
  // Ensure that negative constants in min(max()) or max(min()) patterns that
  // match SSAT instructions don't get hoisted.
376   if (Inst && ((ST->hasV6Ops() && !ST->isThumb()) || ST->isThumb2()) &&
377       Ty->getIntegerBitWidth() <= 32) {
378     if (isSSATMinMaxPattern(Inst, Imm) ||
379         (isa<ICmpInst>(Inst) && Inst->hasOneUse() &&
380          isSSATMinMaxPattern(cast<Instruction>(*Inst->user_begin()), Imm)))
381       return 0;
382   }
383 
384   return getIntImmCost(Imm, Ty, CostKind);
385 }
386 
387 InstructionCost ARMTTIImpl::getCFInstrCost(unsigned Opcode,
388                                            TTI::TargetCostKind CostKind,
389                                            const Instruction *I) {
390   if (CostKind == TTI::TCK_RecipThroughput &&
391       (ST->hasNEON() || ST->hasMVEIntegerOps())) {
    // FIXME: The vectorizer is highly sensitive to the cost of these
393     // instructions, which suggests that it may be using the costs incorrectly.
394     // But, for now, just make them free to avoid performance regressions for
395     // vector targets.
396     return 0;
397   }
398   return BaseT::getCFInstrCost(Opcode, CostKind, I);
399 }
400 
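/// Cost of a cast (extend, truncate or FP conversion). Most costs come from
/// per-ISA tables keyed on the source and destination value types; extends
/// and truncates that can fold into an adjacent load or store are cheap or
/// free.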
401 InstructionCost ARMTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
402                                              Type *Src,
403                                              TTI::CastContextHint CCH,
404                                              TTI::TargetCostKind CostKind,
405                                              const Instruction *I) {
406   int ISD = TLI->InstructionOpcodeToISD(Opcode);
407   assert(ISD && "Invalid opcode");
408 
409   // TODO: Allow non-throughput costs that aren't binary.
410   auto AdjustCost = [&CostKind](InstructionCost Cost) -> InstructionCost {
411     if (CostKind != TTI::TCK_RecipThroughput)
412       return Cost == 0 ? 0 : 1;
413     return Cost;
414   };
415   auto IsLegalFPType = [this](EVT VT) {
416     EVT EltVT = VT.getScalarType();
417     return (EltVT == MVT::f32 && ST->hasVFP2Base()) ||
418             (EltVT == MVT::f64 && ST->hasFP64()) ||
419             (EltVT == MVT::f16 && ST->hasFullFP16());
420   };
421 
422   EVT SrcTy = TLI->getValueType(DL, Src);
423   EVT DstTy = TLI->getValueType(DL, Dst);
424 
425   if (!SrcTy.isSimple() || !DstTy.isSimple())
426     return AdjustCost(
427         BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
428 
  // Extending masked loads and truncating masked stores are expensive because
  // we currently don't split them. This means that we'll likely end up
  // loading/storing each element individually (hence the high cost).
432   if ((ST->hasMVEIntegerOps() &&
433        (Opcode == Instruction::Trunc || Opcode == Instruction::ZExt ||
434         Opcode == Instruction::SExt)) ||
435       (ST->hasMVEFloatOps() &&
436        (Opcode == Instruction::FPExt || Opcode == Instruction::FPTrunc) &&
437        IsLegalFPType(SrcTy) && IsLegalFPType(DstTy)))
438     if (CCH == TTI::CastContextHint::Masked && DstTy.getSizeInBits() > 128)
439       return 2 * DstTy.getVectorNumElements() *
440              ST->getMVEVectorCostFactor(CostKind);
441 
442   // The extend of other kinds of load is free
443   if (CCH == TTI::CastContextHint::Normal ||
444       CCH == TTI::CastContextHint::Masked) {
445     static const TypeConversionCostTblEntry LoadConversionTbl[] = {
446         {ISD::SIGN_EXTEND, MVT::i32, MVT::i16, 0},
447         {ISD::ZERO_EXTEND, MVT::i32, MVT::i16, 0},
448         {ISD::SIGN_EXTEND, MVT::i32, MVT::i8, 0},
449         {ISD::ZERO_EXTEND, MVT::i32, MVT::i8, 0},
450         {ISD::SIGN_EXTEND, MVT::i16, MVT::i8, 0},
451         {ISD::ZERO_EXTEND, MVT::i16, MVT::i8, 0},
452         {ISD::SIGN_EXTEND, MVT::i64, MVT::i32, 1},
453         {ISD::ZERO_EXTEND, MVT::i64, MVT::i32, 1},
454         {ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 1},
455         {ISD::ZERO_EXTEND, MVT::i64, MVT::i16, 1},
456         {ISD::SIGN_EXTEND, MVT::i64, MVT::i8, 1},
457         {ISD::ZERO_EXTEND, MVT::i64, MVT::i8, 1},
458     };
459     if (const auto *Entry = ConvertCostTableLookup(
460             LoadConversionTbl, ISD, DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
461       return AdjustCost(Entry->Cost);
462 
463     static const TypeConversionCostTblEntry MVELoadConversionTbl[] = {
464         {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 0},
465         {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 0},
466         {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 0},
467         {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 0},
468         {ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 0},
469         {ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 0},
        // The following extend from a legal type to an illegal type, so we
        // need to split the load. This introduces an extra load operation,
        // but the extend is still "free".
473         {ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1},
474         {ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1},
475         {ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 3},
476         {ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 3},
477         {ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1},
478         {ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1},
479     };
480     if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
481       if (const auto *Entry =
482               ConvertCostTableLookup(MVELoadConversionTbl, ISD,
483                                      DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
484         return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
485     }
486 
487     static const TypeConversionCostTblEntry MVEFLoadConversionTbl[] = {
488         // FPExtends are similar but also require the VCVT instructions.
489         {ISD::FP_EXTEND, MVT::v4f32, MVT::v4f16, 1},
490         {ISD::FP_EXTEND, MVT::v8f32, MVT::v8f16, 3},
491     };
492     if (SrcTy.isVector() && ST->hasMVEFloatOps()) {
493       if (const auto *Entry =
494               ConvertCostTableLookup(MVEFLoadConversionTbl, ISD,
495                                      DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
496         return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
497     }
498 
499     // The truncate of a store is free. This is the mirror of extends above.
500     static const TypeConversionCostTblEntry MVEStoreConversionTbl[] = {
501         {ISD::TRUNCATE, MVT::v4i32, MVT::v4i16, 0},
502         {ISD::TRUNCATE, MVT::v4i32, MVT::v4i8, 0},
503         {ISD::TRUNCATE, MVT::v8i16, MVT::v8i8, 0},
504         {ISD::TRUNCATE, MVT::v8i32, MVT::v8i16, 1},
505         {ISD::TRUNCATE, MVT::v8i32, MVT::v8i8, 1},
506         {ISD::TRUNCATE, MVT::v16i32, MVT::v16i8, 3},
507         {ISD::TRUNCATE, MVT::v16i16, MVT::v16i8, 1},
508     };
509     if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
510       if (const auto *Entry =
511               ConvertCostTableLookup(MVEStoreConversionTbl, ISD,
512                                      SrcTy.getSimpleVT(), DstTy.getSimpleVT()))
513         return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
514     }
515 
516     static const TypeConversionCostTblEntry MVEFStoreConversionTbl[] = {
517         {ISD::FP_ROUND, MVT::v4f32, MVT::v4f16, 1},
518         {ISD::FP_ROUND, MVT::v8f32, MVT::v8f16, 3},
519     };
520     if (SrcTy.isVector() && ST->hasMVEFloatOps()) {
521       if (const auto *Entry =
522               ConvertCostTableLookup(MVEFStoreConversionTbl, ISD,
523                                      SrcTy.getSimpleVT(), DstTy.getSimpleVT()))
524         return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
525     }
526   }
527 
528   // NEON vector operations that can extend their inputs.
529   if ((ISD == ISD::SIGN_EXTEND || ISD == ISD::ZERO_EXTEND) &&
530       I && I->hasOneUse() && ST->hasNEON() && SrcTy.isVector()) {
531     static const TypeConversionCostTblEntry NEONDoubleWidthTbl[] = {
532       // vaddl
533       { ISD::ADD, MVT::v4i32, MVT::v4i16, 0 },
534       { ISD::ADD, MVT::v8i16, MVT::v8i8,  0 },
535       // vsubl
536       { ISD::SUB, MVT::v4i32, MVT::v4i16, 0 },
537       { ISD::SUB, MVT::v8i16, MVT::v8i8,  0 },
538       // vmull
539       { ISD::MUL, MVT::v4i32, MVT::v4i16, 0 },
540       { ISD::MUL, MVT::v8i16, MVT::v8i8,  0 },
541       // vshll
542       { ISD::SHL, MVT::v4i32, MVT::v4i16, 0 },
543       { ISD::SHL, MVT::v8i16, MVT::v8i8,  0 },
544     };
545 
546     auto *User = cast<Instruction>(*I->user_begin());
547     int UserISD = TLI->InstructionOpcodeToISD(User->getOpcode());
548     if (auto *Entry = ConvertCostTableLookup(NEONDoubleWidthTbl, UserISD,
549                                              DstTy.getSimpleVT(),
550                                              SrcTy.getSimpleVT())) {
551       return AdjustCost(Entry->Cost);
552     }
553   }
554 
555   // Single to/from double precision conversions.
556   if (Src->isVectorTy() && ST->hasNEON() &&
557       ((ISD == ISD::FP_ROUND && SrcTy.getScalarType() == MVT::f64 &&
558         DstTy.getScalarType() == MVT::f32) ||
559        (ISD == ISD::FP_EXTEND && SrcTy.getScalarType() == MVT::f32 &&
560         DstTy.getScalarType() == MVT::f64))) {
561     static const CostTblEntry NEONFltDblTbl[] = {
562         // Vector fptrunc/fpext conversions.
563         {ISD::FP_ROUND, MVT::v2f64, 2},
564         {ISD::FP_EXTEND, MVT::v2f32, 2},
565         {ISD::FP_EXTEND, MVT::v4f32, 4}};
566 
567     std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
568     if (const auto *Entry = CostTableLookup(NEONFltDblTbl, ISD, LT.second))
569       return AdjustCost(LT.first * Entry->Cost);
570   }
571 
572   // Some arithmetic, load and store operations have specific instructions
573   // to cast up/down their types automatically at no extra cost.
574   // TODO: Get these tables to know at least what the related operations are.
575   static const TypeConversionCostTblEntry NEONVectorConversionTbl[] = {
576     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
577     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
578     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
579     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
580     { ISD::TRUNCATE,    MVT::v4i32, MVT::v4i64, 0 },
581     { ISD::TRUNCATE,    MVT::v4i16, MVT::v4i32, 1 },
582 
583     // The number of vmovl instructions for the extension.
584     { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8,  1 },
585     { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8,  1 },
586     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8,  2 },
587     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8,  2 },
588     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8,  3 },
589     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8,  3 },
590     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
591     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
592     { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
593     { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
594     { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
595     { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
596     { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
597     { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
598     { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
599     { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
600     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
601     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
602 
603     // Operations that we legalize using splitting.
604     { ISD::TRUNCATE,    MVT::v16i8, MVT::v16i32, 6 },
605     { ISD::TRUNCATE,    MVT::v8i8, MVT::v8i32, 3 },
606 
607     // Vector float <-> i32 conversions.
608     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
609     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
610 
611     { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
612     { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
613     { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
614     { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
615     { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
616     { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
617     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
618     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
619     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
620     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
621     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
622     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
623     { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
624     { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
625     { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
626     { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
627     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
628     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
629     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },
630     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },
631 
632     { ISD::FP_TO_SINT,  MVT::v4i32, MVT::v4f32, 1 },
633     { ISD::FP_TO_UINT,  MVT::v4i32, MVT::v4f32, 1 },
634     { ISD::FP_TO_SINT,  MVT::v4i8, MVT::v4f32, 3 },
635     { ISD::FP_TO_UINT,  MVT::v4i8, MVT::v4f32, 3 },
636     { ISD::FP_TO_SINT,  MVT::v4i16, MVT::v4f32, 2 },
637     { ISD::FP_TO_UINT,  MVT::v4i16, MVT::v4f32, 2 },
638 
639     // Vector double <-> i32 conversions.
640     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
641     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
642 
643     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
644     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
645     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
646     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
647     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
648     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
649 
650     { ISD::FP_TO_SINT,  MVT::v2i32, MVT::v2f64, 2 },
651     { ISD::FP_TO_UINT,  MVT::v2i32, MVT::v2f64, 2 },
652     { ISD::FP_TO_SINT,  MVT::v8i16, MVT::v8f32, 4 },
653     { ISD::FP_TO_UINT,  MVT::v8i16, MVT::v8f32, 4 },
654     { ISD::FP_TO_SINT,  MVT::v16i16, MVT::v16f32, 8 },
655     { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v16f32, 8 }
656   };
657 
658   if (SrcTy.isVector() && ST->hasNEON()) {
659     if (const auto *Entry = ConvertCostTableLookup(NEONVectorConversionTbl, ISD,
660                                                    DstTy.getSimpleVT(),
661                                                    SrcTy.getSimpleVT()))
662       return AdjustCost(Entry->Cost);
663   }
664 
665   // Scalar float to integer conversions.
666   static const TypeConversionCostTblEntry NEONFloatConversionTbl[] = {
667     { ISD::FP_TO_SINT,  MVT::i1, MVT::f32, 2 },
668     { ISD::FP_TO_UINT,  MVT::i1, MVT::f32, 2 },
669     { ISD::FP_TO_SINT,  MVT::i1, MVT::f64, 2 },
670     { ISD::FP_TO_UINT,  MVT::i1, MVT::f64, 2 },
671     { ISD::FP_TO_SINT,  MVT::i8, MVT::f32, 2 },
672     { ISD::FP_TO_UINT,  MVT::i8, MVT::f32, 2 },
673     { ISD::FP_TO_SINT,  MVT::i8, MVT::f64, 2 },
674     { ISD::FP_TO_UINT,  MVT::i8, MVT::f64, 2 },
675     { ISD::FP_TO_SINT,  MVT::i16, MVT::f32, 2 },
676     { ISD::FP_TO_UINT,  MVT::i16, MVT::f32, 2 },
677     { ISD::FP_TO_SINT,  MVT::i16, MVT::f64, 2 },
678     { ISD::FP_TO_UINT,  MVT::i16, MVT::f64, 2 },
679     { ISD::FP_TO_SINT,  MVT::i32, MVT::f32, 2 },
680     { ISD::FP_TO_UINT,  MVT::i32, MVT::f32, 2 },
681     { ISD::FP_TO_SINT,  MVT::i32, MVT::f64, 2 },
682     { ISD::FP_TO_UINT,  MVT::i32, MVT::f64, 2 },
683     { ISD::FP_TO_SINT,  MVT::i64, MVT::f32, 10 },
684     { ISD::FP_TO_UINT,  MVT::i64, MVT::f32, 10 },
685     { ISD::FP_TO_SINT,  MVT::i64, MVT::f64, 10 },
686     { ISD::FP_TO_UINT,  MVT::i64, MVT::f64, 10 }
687   };
688   if (SrcTy.isFloatingPoint() && ST->hasNEON()) {
689     if (const auto *Entry = ConvertCostTableLookup(NEONFloatConversionTbl, ISD,
690                                                    DstTy.getSimpleVT(),
691                                                    SrcTy.getSimpleVT()))
692       return AdjustCost(Entry->Cost);
693   }
694 
695   // Scalar integer to float conversions.
696   static const TypeConversionCostTblEntry NEONIntegerConversionTbl[] = {
697     { ISD::SINT_TO_FP,  MVT::f32, MVT::i1, 2 },
698     { ISD::UINT_TO_FP,  MVT::f32, MVT::i1, 2 },
699     { ISD::SINT_TO_FP,  MVT::f64, MVT::i1, 2 },
700     { ISD::UINT_TO_FP,  MVT::f64, MVT::i1, 2 },
701     { ISD::SINT_TO_FP,  MVT::f32, MVT::i8, 2 },
702     { ISD::UINT_TO_FP,  MVT::f32, MVT::i8, 2 },
703     { ISD::SINT_TO_FP,  MVT::f64, MVT::i8, 2 },
704     { ISD::UINT_TO_FP,  MVT::f64, MVT::i8, 2 },
705     { ISD::SINT_TO_FP,  MVT::f32, MVT::i16, 2 },
706     { ISD::UINT_TO_FP,  MVT::f32, MVT::i16, 2 },
707     { ISD::SINT_TO_FP,  MVT::f64, MVT::i16, 2 },
708     { ISD::UINT_TO_FP,  MVT::f64, MVT::i16, 2 },
709     { ISD::SINT_TO_FP,  MVT::f32, MVT::i32, 2 },
710     { ISD::UINT_TO_FP,  MVT::f32, MVT::i32, 2 },
711     { ISD::SINT_TO_FP,  MVT::f64, MVT::i32, 2 },
712     { ISD::UINT_TO_FP,  MVT::f64, MVT::i32, 2 },
713     { ISD::SINT_TO_FP,  MVT::f32, MVT::i64, 10 },
714     { ISD::UINT_TO_FP,  MVT::f32, MVT::i64, 10 },
715     { ISD::SINT_TO_FP,  MVT::f64, MVT::i64, 10 },
716     { ISD::UINT_TO_FP,  MVT::f64, MVT::i64, 10 }
717   };
718 
719   if (SrcTy.isInteger() && ST->hasNEON()) {
720     if (const auto *Entry = ConvertCostTableLookup(NEONIntegerConversionTbl,
721                                                    ISD, DstTy.getSimpleVT(),
722                                                    SrcTy.getSimpleVT()))
723       return AdjustCost(Entry->Cost);
724   }
725 
  // MVE extend costs, taken from codegen tests. i8->i16 or i16->i32 is one
  // instruction, i8->i32 is two. i64 zexts are a VAND with a constant; sexts
  // are linearised so take more.
729   static const TypeConversionCostTblEntry MVEVectorConversionTbl[] = {
730     { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
731     { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
732     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
733     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
734     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8, 10 },
735     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8, 2 },
736     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
737     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
738     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 10 },
739     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
740     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 8 },
741     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 2 },
742   };
743 
744   if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
745     if (const auto *Entry = ConvertCostTableLookup(MVEVectorConversionTbl,
746                                                    ISD, DstTy.getSimpleVT(),
747                                                    SrcTy.getSimpleVT()))
748       return Entry->Cost * ST->getMVEVectorCostFactor(CostKind);
749   }
750 
751   if (ISD == ISD::FP_ROUND || ISD == ISD::FP_EXTEND) {
    // As a general rule, fp converts that were not matched above are
    // scalarized and cost 1 vcvt for each lane, so long as the instruction
    // is available. If not, it will become a series of function calls.
755     const InstructionCost CallCost =
756         getCallInstrCost(nullptr, Dst, {Src}, CostKind);
757     int Lanes = 1;
758     if (SrcTy.isFixedLengthVector())
759       Lanes = SrcTy.getVectorNumElements();
760 
761     if (IsLegalFPType(SrcTy) && IsLegalFPType(DstTy))
762       return Lanes;
763     else
764       return Lanes * CallCost;
765   }
766 
767   if (ISD == ISD::TRUNCATE && ST->hasMVEIntegerOps() &&
768       SrcTy.isFixedLengthVector()) {
    // Treat a truncate with a larger than legal source (128 bits for MVE) as
    // expensive, 2 instructions per lane.
771     if ((SrcTy.getScalarType() == MVT::i8 ||
772          SrcTy.getScalarType() == MVT::i16 ||
773          SrcTy.getScalarType() == MVT::i32) &&
774         SrcTy.getSizeInBits() > 128 &&
775         SrcTy.getSizeInBits() > DstTy.getSizeInBits())
776       return SrcTy.getVectorNumElements() * 2;
777   }
778 
779   // Scalar integer conversion costs.
780   static const TypeConversionCostTblEntry ARMIntegerConversionTbl[] = {
781     // i16 -> i64 requires two dependent operations.
782     { ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 2 },
783 
784     // Truncates on i64 are assumed to be free.
785     { ISD::TRUNCATE,    MVT::i32, MVT::i64, 0 },
786     { ISD::TRUNCATE,    MVT::i16, MVT::i64, 0 },
787     { ISD::TRUNCATE,    MVT::i8,  MVT::i64, 0 },
788     { ISD::TRUNCATE,    MVT::i1,  MVT::i64, 0 }
789   };
790 
791   if (SrcTy.isInteger()) {
792     if (const auto *Entry = ConvertCostTableLookup(ARMIntegerConversionTbl, ISD,
793                                                    DstTy.getSimpleVT(),
794                                                    SrcTy.getSimpleVT()))
795       return AdjustCost(Entry->Cost);
796   }
797 
798   int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
799                      ? ST->getMVEVectorCostFactor(CostKind)
800                      : 1;
801   return AdjustCost(
802       BaseCost * BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
803 }
804 
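/// Cost of inserting or extracting a single vector lane. Lane moves that
/// cross between NEON/MVE and integer registers are given higher costs than
/// the base implementation would assign.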
805 InstructionCost ARMTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
806                                                unsigned Index) {
  // Penalize inserting into a D-subregister. We end up with a three times
  // lower estimated throughput on Swift.
809   if (ST->hasSlowLoadDSubregister() && Opcode == Instruction::InsertElement &&
810       ValTy->isVectorTy() && ValTy->getScalarSizeInBits() <= 32)
811     return 3;
812 
813   if (ST->hasNEON() && (Opcode == Instruction::InsertElement ||
814                         Opcode == Instruction::ExtractElement)) {
815     // Cross-class copies are expensive on many microarchitectures,
816     // so assume they are expensive by default.
817     if (cast<VectorType>(ValTy)->getElementType()->isIntegerTy())
818       return 3;
819 
820     // Even if it's not a cross class copy, this likely leads to mixing
821     // of NEON and VFP code and should be therefore penalized.
822     if (ValTy->isVectorTy() &&
823         ValTy->getScalarSizeInBits() <= 32)
824       return std::max<InstructionCost>(
825           BaseT::getVectorInstrCost(Opcode, ValTy, Index), 2U);
826   }
827 
828   if (ST->hasMVEIntegerOps() && (Opcode == Instruction::InsertElement ||
829                                  Opcode == Instruction::ExtractElement)) {
    // Integer cross-lane moves are more expensive than float, which can
    // sometimes just be vmovs. Integers involve being passed to GPR registers,
    // causing more of a delay.
833     std::pair<InstructionCost, MVT> LT =
834         getTLI()->getTypeLegalizationCost(DL, ValTy->getScalarType());
835     return LT.first * (ValTy->getScalarType()->isIntegerTy() ? 4 : 1);
836   }
837 
838   return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
839 }
840 
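/// Cost of compare and select instructions. Thumb scalar selects, min/max
/// patterns that map onto intrinsics, NEON vector selects and MVE vector
/// compares are each handled specially before falling back to the base cost.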
841 InstructionCost ARMTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
842                                                Type *CondTy,
843                                                CmpInst::Predicate VecPred,
844                                                TTI::TargetCostKind CostKind,
845                                                const Instruction *I) {
846   int ISD = TLI->InstructionOpcodeToISD(Opcode);
847 
848   // Thumb scalar code size cost for select.
849   if (CostKind == TTI::TCK_CodeSize && ISD == ISD::SELECT &&
850       ST->isThumb() && !ValTy->isVectorTy()) {
851     // Assume expensive structs.
852     if (TLI->getValueType(DL, ValTy, true) == MVT::Other)
853       return TTI::TCC_Expensive;
854 
855     // Select costs can vary because they:
856     // - may require one or more conditional mov (including an IT),
857     // - can't operate directly on immediates,
858     // - require live flags, which we can't copy around easily.
859     InstructionCost Cost = TLI->getTypeLegalizationCost(DL, ValTy).first;
860 
861     // Possible IT instruction for Thumb2, or more for Thumb1.
862     ++Cost;
863 
864     // i1 values may need rematerialising by using mov immediates and/or
865     // flag setting instructions.
866     if (ValTy->isIntegerTy(1))
867       ++Cost;
868 
869     return Cost;
870   }
871 
872   // If this is a vector min/max/abs, use the cost of that intrinsic directly
873   // instead. Hopefully when min/max intrinsics are more prevalent this code
874   // will not be needed.
875   const Instruction *Sel = I;
876   if ((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) && Sel &&
877       Sel->hasOneUse())
878     Sel = cast<Instruction>(Sel->user_back());
879   if (Sel && ValTy->isVectorTy() &&
880       (ValTy->isIntOrIntVectorTy() || ValTy->isFPOrFPVectorTy())) {
881     const Value *LHS, *RHS;
882     SelectPatternFlavor SPF = matchSelectPattern(Sel, LHS, RHS).Flavor;
883     unsigned IID = 0;
884     switch (SPF) {
885     case SPF_ABS:
886       IID = Intrinsic::abs;
887       break;
888     case SPF_SMIN:
889       IID = Intrinsic::smin;
890       break;
891     case SPF_SMAX:
892       IID = Intrinsic::smax;
893       break;
894     case SPF_UMIN:
895       IID = Intrinsic::umin;
896       break;
897     case SPF_UMAX:
898       IID = Intrinsic::umax;
899       break;
900     case SPF_FMINNUM:
901       IID = Intrinsic::minnum;
902       break;
903     case SPF_FMAXNUM:
904       IID = Intrinsic::maxnum;
905       break;
906     default:
907       break;
908     }
909     if (IID) {
910       // The ICmp is free, the select gets the cost of the min/max/etc
911       if (Sel != I)
912         return 0;
913       IntrinsicCostAttributes CostAttrs(IID, ValTy, {ValTy, ValTy});
914       return getIntrinsicInstrCost(CostAttrs, CostKind);
915     }
916   }
917 
918   // On NEON a vector select gets lowered to vbsl.
919   if (ST->hasNEON() && ValTy->isVectorTy() && ISD == ISD::SELECT && CondTy) {
920     // Lowering of some vector selects is currently far from perfect.
921     static const TypeConversionCostTblEntry NEONVectorSelectTbl[] = {
922       { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4*4 + 1*2 + 1 },
923       { ISD::SELECT, MVT::v8i1, MVT::v8i64, 50 },
924       { ISD::SELECT, MVT::v16i1, MVT::v16i64, 100 }
925     };
926 
927     EVT SelCondTy = TLI->getValueType(DL, CondTy);
928     EVT SelValTy = TLI->getValueType(DL, ValTy);
929     if (SelCondTy.isSimple() && SelValTy.isSimple()) {
930       if (const auto *Entry = ConvertCostTableLookup(NEONVectorSelectTbl, ISD,
931                                                      SelCondTy.getSimpleVT(),
932                                                      SelValTy.getSimpleVT()))
933         return Entry->Cost;
934     }
935 
936     std::pair<InstructionCost, MVT> LT =
937         TLI->getTypeLegalizationCost(DL, ValTy);
938     return LT.first;
939   }
940 
941   if (ST->hasMVEIntegerOps() && ValTy->isVectorTy() &&
942       (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
943       cast<FixedVectorType>(ValTy)->getNumElements() > 1) {
944     FixedVectorType *VecValTy = cast<FixedVectorType>(ValTy);
945     FixedVectorType *VecCondTy = dyn_cast_or_null<FixedVectorType>(CondTy);
946     if (!VecCondTy)
947       VecCondTy = cast<FixedVectorType>(CmpInst::makeCmpResultType(VecValTy));
948 
949     // If we don't have mve.fp any fp operations will need to be scalarized.
950     if (Opcode == Instruction::FCmp && !ST->hasMVEFloatOps()) {
      // One scalarization insert, one scalarization extract and the cost of
      // the fcmps.
953       return BaseT::getScalarizationOverhead(VecValTy, false, true) +
954              BaseT::getScalarizationOverhead(VecCondTy, true, false) +
955              VecValTy->getNumElements() *
956                  getCmpSelInstrCost(Opcode, ValTy->getScalarType(),
957                                     VecCondTy->getScalarType(), VecPred, CostKind,
958                                     I);
959     }
960 
961     std::pair<InstructionCost, MVT> LT =
962         TLI->getTypeLegalizationCost(DL, ValTy);
963     int BaseCost = ST->getMVEVectorCostFactor(CostKind);
964     // There are two types - the input that specifies the type of the compare
965     // and the output vXi1 type. Because we don't know how the output will be
966     // split, we may need an expensive shuffle to get two in sync. This has the
967     // effect of making larger than legal compares (v8i32 for example)
968     // expensive.
969     if (LT.second.getVectorNumElements() > 2) {
970       if (LT.first > 1)
971         return LT.first * BaseCost +
972                BaseT::getScalarizationOverhead(VecCondTy, true, false);
973       return BaseCost;
974     }
975   }
976 
977   // Default to cheap (throughput/size of 1 instruction) but adjust throughput
978   // for "multiple beats" potentially needed by MVE instructions.
979   int BaseCost = 1;
980   if (ST->hasMVEIntegerOps() && ValTy->isVectorTy())
981     BaseCost = ST->getMVEVectorCostFactor(CostKind);
982 
983   return BaseCost *
984          BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
985 }
986 
987 InstructionCost ARMTTIImpl::getAddressComputationCost(Type *Ty,
988                                                       ScalarEvolution *SE,
989                                                       const SCEV *Ptr) {
990   // Address computations in vectorized code with non-consecutive addresses will
991   // likely result in more instructions compared to scalar code where the
992   // computation can more often be merged into the index mode. The resulting
993   // extra micro-ops can significantly decrease throughput.
994   unsigned NumVectorInstToHideOverhead = 10;
995   int MaxMergeDistance = 64;
996 
997   if (ST->hasNEON()) {
998     if (Ty->isVectorTy() && SE &&
999         !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
1000       return NumVectorInstToHideOverhead;
1001 
1002     // In many cases the address computation is not merged into the instruction
1003     // addressing mode.
1004     return 1;
1005   }
1006   return BaseT::getAddressComputationCost(Ty, SE, Ptr);
1007 }
1008 
1009 bool ARMTTIImpl::isProfitableLSRChainElement(Instruction *I) {
1010   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1011     // If a VCTP is part of a chain, it's already profitable and shouldn't be
1012     // optimized, else LSR may block tail-predication.
1013     switch (II->getIntrinsicID()) {
1014     case Intrinsic::arm_mve_vctp8:
1015     case Intrinsic::arm_mve_vctp16:
1016     case Intrinsic::arm_mve_vctp32:
1017     case Intrinsic::arm_mve_vctp64:
1018       return true;
1019     default:
1020       break;
1021     }
1022   }
1023   return false;
1024 }
1025 
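/// Masked loads and stores are only legal with MVE, for 8-bit elements or
/// sufficiently aligned 16/32-bit elements. 2-element vectors and fp vectors
/// that are not exactly 128 bits wide are rejected.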
1026 bool ARMTTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) {
1027   if (!EnableMaskedLoadStores || !ST->hasMVEIntegerOps())
1028     return false;
1029 
1030   if (auto *VecTy = dyn_cast<FixedVectorType>(DataTy)) {
1031     // Don't support v2i1 yet.
1032     if (VecTy->getNumElements() == 2)
1033       return false;
1034 
1035     // We don't support extending fp types.
    unsigned VecWidth = DataTy->getPrimitiveSizeInBits();
1037     if (VecWidth != 128 && VecTy->getElementType()->isFloatingPointTy())
1038       return false;
1039   }
1040 
1041   unsigned EltWidth = DataTy->getScalarSizeInBits();
1042   return (EltWidth == 32 && Alignment >= 4) ||
1043          (EltWidth == 16 && Alignment >= 2) || (EltWidth == 8);
1044 }
1045 
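/// Whether a masked gather/scatter with this element type and alignment can
/// be kept as an intrinsic rather than scalarised. See the comments below on
/// how the vectorizer and the intrinsic lowering pass use this differently.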
1046 bool ARMTTIImpl::isLegalMaskedGather(Type *Ty, Align Alignment) {
1047   if (!EnableMaskedGatherScatters || !ST->hasMVEIntegerOps())
1048     return false;
1049 
1050   // This method is called in 2 places:
1051   //  - from the vectorizer with a scalar type, in which case we need to get
1052   //  this as good as we can with the limited info we have (and rely on the cost
1053   //  model for the rest).
1054   //  - from the masked intrinsic lowering pass with the actual vector type.
1055   // For MVE, we have a custom lowering pass that will already have custom
1056   // legalised any gathers that we can to MVE intrinsics, and want to expand all
1057   // the rest. The pass runs before the masked intrinsic lowering pass, so if we
1058   // are here, we know we want to expand.
1059   if (isa<VectorType>(Ty))
1060     return false;
1061 
1062   unsigned EltWidth = Ty->getScalarSizeInBits();
1063   return ((EltWidth == 32 && Alignment >= 4) ||
1064           (EltWidth == 16 && Alignment >= 2) || EltWidth == 8);
1065 }
1066 
1067 /// Given a memcpy/memset/memmove instruction, return the number of memory
1068 /// operations performed, via querying findOptimalMemOpLowering. Returns -1 if a
1069 /// call is used.
1070 int ARMTTIImpl::getNumMemOps(const IntrinsicInst *I) const {
1071   MemOp MOp;
1072   unsigned DstAddrSpace = ~0u;
1073   unsigned SrcAddrSpace = ~0u;
1074   const Function *F = I->getParent()->getParent();
1075 
1076   if (const auto *MC = dyn_cast<MemTransferInst>(I)) {
1077     ConstantInt *C = dyn_cast<ConstantInt>(MC->getLength());
1078     // If 'size' is not a constant, a library call will be generated.
1079     if (!C)
1080       return -1;
1081 
1082     const unsigned Size = C->getValue().getZExtValue();
1083     const Align DstAlign = *MC->getDestAlign();
1084     const Align SrcAlign = *MC->getSourceAlign();
1085 
1086     MOp = MemOp::Copy(Size, /*DstAlignCanChange*/ false, DstAlign, SrcAlign,
1087                       /*IsVolatile*/ false);
1088     DstAddrSpace = MC->getDestAddressSpace();
1089     SrcAddrSpace = MC->getSourceAddressSpace();
  } else if (const auto *MS = dyn_cast<MemSetInst>(I)) {
1092     ConstantInt *C = dyn_cast<ConstantInt>(MS->getLength());
1093     // If 'size' is not a constant, a library call will be generated.
1094     if (!C)
1095       return -1;
1096 
1097     const unsigned Size = C->getValue().getZExtValue();
1098     const Align DstAlign = *MS->getDestAlign();
1099 
1100     MOp = MemOp::Set(Size, /*DstAlignCanChange*/ false, DstAlign,
1101                      /*IsZeroMemset*/ false, /*IsVolatile*/ false);
1102     DstAddrSpace = MS->getDestAddressSpace();
  } else
    llvm_unreachable("Expected a memcpy/move or memset!");
1106 
1107   unsigned Limit, Factor = 2;
1108   switch(I->getIntrinsicID()) {
1109     case Intrinsic::memcpy:
1110       Limit = TLI->getMaxStoresPerMemcpy(F->hasMinSize());
1111       break;
1112     case Intrinsic::memmove:
1113       Limit = TLI->getMaxStoresPerMemmove(F->hasMinSize());
1114       break;
1115     case Intrinsic::memset:
1116       Limit = TLI->getMaxStoresPerMemset(F->hasMinSize());
1117       Factor = 1;
1118       break;
1119     default:
1120       llvm_unreachable("Expected a memcpy/move or memset!");
1121   }
1122 
  // MemOps will be populated with a list of data types that need to be
  // loaded and stored. That's why we multiply the number of elements by 2 to
  // get the cost for this memcpy.
1126   std::vector<EVT> MemOps;
1127   if (getTLI()->findOptimalMemOpLowering(
1128           MemOps, Limit, MOp, DstAddrSpace,
1129           SrcAddrSpace, F->getAttributes()))
1130     return MemOps.size() * Factor;
1131 
1132   // If we can't find an optimal memop lowering, return the default cost
1133   return -1;
1134 }
1135 
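/// Cost of a memcpy/memmove/memset intrinsic, expressed as the number of
/// load/store operations it lowers to, or a fixed cost of 4 when a library
/// call is needed (1 for the call plus 3 for argument setup).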
1136 InstructionCost ARMTTIImpl::getMemcpyCost(const Instruction *I) {
1137   int NumOps = getNumMemOps(cast<IntrinsicInst>(I));
1138 
1139   // To model the cost of a library call, we assume 1 for the call, and
1140   // 3 for the argument setup.
1141   if (NumOps == -1)
1142     return 4;
1143   return NumOps;
1144 }
1145 
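/// Shuffle costs. Broadcast, reverse and select shuffles on NEON, and
/// broadcast and VREV-style shuffles on MVE, are costed from tables;
/// everything else falls back to the (MVE-scaled) base implementation.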
1146 InstructionCost ARMTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
1147                                            VectorType *Tp, ArrayRef<int> Mask,
1148                                            int Index, VectorType *SubTp) {
1149   Kind = improveShuffleKindFromMask(Kind, Mask);
1150   if (ST->hasNEON()) {
1151     if (Kind == TTI::SK_Broadcast) {
1152       static const CostTblEntry NEONDupTbl[] = {
1153           // VDUP handles these cases.
1154           {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
1155           {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
1156           {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
1157           {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
1158           {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
1159           {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},
1160 
1161           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
1162           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
1163           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
1164           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1}};
1165 
1166       std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
1167       if (const auto *Entry =
1168               CostTableLookup(NEONDupTbl, ISD::VECTOR_SHUFFLE, LT.second))
1169         return LT.first * Entry->Cost;
1170     }
1171     if (Kind == TTI::SK_Reverse) {
1172       static const CostTblEntry NEONShuffleTbl[] = {
1173           // Reverse shuffle cost one instruction if we are shuffling within a
1174           // double word (vrev) or two if we shuffle a quad word (vrev, vext).
1175           {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
1176           {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
1177           {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
1178           {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
1179           {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
1180           {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},
1181 
1182           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
1183           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
1184           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 2},
1185           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 2}};
1186 
1187       std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
1188       if (const auto *Entry =
1189               CostTableLookup(NEONShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second))
1190         return LT.first * Entry->Cost;
1191     }
1192     if (Kind == TTI::SK_Select) {
1193       static const CostTblEntry NEONSelShuffleTbl[] = {
          // Select shuffle cost table for ARM. Cost is the number of
          // instructions required to create the shuffled vector.
1197 
1198           {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
1199           {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
1200           {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
1201           {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
1202 
1203           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
1204           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
1205           {ISD::VECTOR_SHUFFLE, MVT::v4i16, 2},
1206 
1207           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 16},
1208 
1209           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 32}};
1210 
1211       std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
1212       if (const auto *Entry = CostTableLookup(NEONSelShuffleTbl,
1213                                               ISD::VECTOR_SHUFFLE, LT.second))
1214         return LT.first * Entry->Cost;
1215     }
1216   }
1217   if (ST->hasMVEIntegerOps()) {
1218     if (Kind == TTI::SK_Broadcast) {
1219       static const CostTblEntry MVEDupTbl[] = {
1220           // VDUP handles these cases.
1221           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
1222           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
1223           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1},
1224           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
1225           {ISD::VECTOR_SHUFFLE, MVT::v8f16, 1}};
1226 
1227       std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
1228       if (const auto *Entry = CostTableLookup(MVEDupTbl, ISD::VECTOR_SHUFFLE,
1229                                               LT.second))
1230         return LT.first * Entry->Cost *
1231                ST->getMVEVectorCostFactor(TTI::TCK_RecipThroughput);
1232     }
1233 
1234     if (!Mask.empty()) {
1235       std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
1236       if (Mask.size() <= LT.second.getVectorNumElements() &&
1237           (isVREVMask(Mask, LT.second, 16) || isVREVMask(Mask, LT.second, 32) ||
1238            isVREVMask(Mask, LT.second, 64)))
1239         return ST->getMVEVectorCostFactor(TTI::TCK_RecipThroughput) * LT.first;
1240     }
1241   }
1242 
1243   int BaseCost = ST->hasMVEIntegerOps() && Tp->isVectorTy()
1244                      ? ST->getMVEVectorCostFactor(TTI::TCK_RecipThroughput)
1245                      : 1;
1246   return BaseCost * BaseT::getShuffleCost(Kind, Tp, Mask, Index, SubTp);
1247 }
1248 
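/// Cost of arithmetic operations. i1 operations under Thumb code-size
/// costing, NEON vector division (which is lowered to library calls) and
/// shifts that fold into a following ALU instruction are handled specially.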
1249 InstructionCost ARMTTIImpl::getArithmeticInstrCost(
1250     unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
1251     TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
1252     TTI::OperandValueProperties Opd1PropInfo,
1253     TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
1254     const Instruction *CxtI) {
1255   int ISDOpcode = TLI->InstructionOpcodeToISD(Opcode);
1256   if (ST->isThumb() && CostKind == TTI::TCK_CodeSize && Ty->isIntegerTy(1)) {
1257     // Make operations on i1 relatively expensive as this often involves
1258     // combining predicates. AND and XOR should be easier to handle with IT
1259     // blocks.
1260     switch (ISDOpcode) {
1261     default:
1262       break;
1263     case ISD::AND:
1264     case ISD::XOR:
1265       return 2;
1266     case ISD::OR:
1267       return 3;
1268     }
1269   }
1270 
1271   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
1272 
1273   if (ST->hasNEON()) {
1274     const unsigned FunctionCallDivCost = 20;
1275     const unsigned ReciprocalDivCost = 10;
1276     static const CostTblEntry CostTbl[] = {
1277       // Division.
      // These costs are somewhat random. Choose a cost of 20 to indicate that
      // vectorizing division (added function call) is going to be very
      // expensive.
1280       // Double registers types.
1281       { ISD::SDIV, MVT::v1i64, 1 * FunctionCallDivCost},
1282       { ISD::UDIV, MVT::v1i64, 1 * FunctionCallDivCost},
1283       { ISD::SREM, MVT::v1i64, 1 * FunctionCallDivCost},
1284       { ISD::UREM, MVT::v1i64, 1 * FunctionCallDivCost},
1285       { ISD::SDIV, MVT::v2i32, 2 * FunctionCallDivCost},
1286       { ISD::UDIV, MVT::v2i32, 2 * FunctionCallDivCost},
1287       { ISD::SREM, MVT::v2i32, 2 * FunctionCallDivCost},
1288       { ISD::UREM, MVT::v2i32, 2 * FunctionCallDivCost},
1289       { ISD::SDIV, MVT::v4i16,     ReciprocalDivCost},
1290       { ISD::UDIV, MVT::v4i16,     ReciprocalDivCost},
1291       { ISD::SREM, MVT::v4i16, 4 * FunctionCallDivCost},
1292       { ISD::UREM, MVT::v4i16, 4 * FunctionCallDivCost},
1293       { ISD::SDIV, MVT::v8i8,      ReciprocalDivCost},
1294       { ISD::UDIV, MVT::v8i8,      ReciprocalDivCost},
1295       { ISD::SREM, MVT::v8i8,  8 * FunctionCallDivCost},
1296       { ISD::UREM, MVT::v8i8,  8 * FunctionCallDivCost},
1297       // Quad register types.
1298       { ISD::SDIV, MVT::v2i64, 2 * FunctionCallDivCost},
1299       { ISD::UDIV, MVT::v2i64, 2 * FunctionCallDivCost},
1300       { ISD::SREM, MVT::v2i64, 2 * FunctionCallDivCost},
1301       { ISD::UREM, MVT::v2i64, 2 * FunctionCallDivCost},
1302       { ISD::SDIV, MVT::v4i32, 4 * FunctionCallDivCost},
1303       { ISD::UDIV, MVT::v4i32, 4 * FunctionCallDivCost},
1304       { ISD::SREM, MVT::v4i32, 4 * FunctionCallDivCost},
1305       { ISD::UREM, MVT::v4i32, 4 * FunctionCallDivCost},
1306       { ISD::SDIV, MVT::v8i16, 8 * FunctionCallDivCost},
1307       { ISD::UDIV, MVT::v8i16, 8 * FunctionCallDivCost},
1308       { ISD::SREM, MVT::v8i16, 8 * FunctionCallDivCost},
1309       { ISD::UREM, MVT::v8i16, 8 * FunctionCallDivCost},
1310       { ISD::SDIV, MVT::v16i8, 16 * FunctionCallDivCost},
1311       { ISD::UDIV, MVT::v16i8, 16 * FunctionCallDivCost},
1312       { ISD::SREM, MVT::v16i8, 16 * FunctionCallDivCost},
1313       { ISD::UREM, MVT::v16i8, 16 * FunctionCallDivCost},
1314       // Multiplication.
1315     };
1316 
1317     if (const auto *Entry = CostTableLookup(CostTbl, ISDOpcode, LT.second))
1318       return LT.first * Entry->Cost;
1319 
1320     InstructionCost Cost = BaseT::getArithmeticInstrCost(
1321         Opcode, Ty, CostKind, Op1Info, Op2Info, Opd1PropInfo, Opd2PropInfo);
1322 
1323     // This is somewhat of a hack. The problem that we are facing is that SROA
1324     // creates a sequence of shift, and, or instructions to construct values.
1325     // These sequences are recognized by the ISel and have zero-cost. Not so for
1326     // the vectorized code. Because we have support for v2i64 but not i64 those
1327     // sequences look particularly beneficial to vectorize.
1328     // To work around this we increase the cost of v2i64 operations to make them
1329     // seem less beneficial.
1330     if (LT.second == MVT::v2i64 &&
1331         Op2Info == TargetTransformInfo::OK_UniformConstantValue)
1332       Cost += 4;
1333 
1334     return Cost;
1335   }
1336 
1337   // If this operation is a shift on arm/thumb2, it might well be folded into
1338   // the following instruction, hence having a cost of 0.
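  // For example, "add r0, r1, r2, lsl #2" performs the shift for free as the
  // flexible second operand of the add.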
1339   auto LooksLikeAFreeShift = [&]() {
1340     if (ST->isThumb1Only() || Ty->isVectorTy())
1341       return false;
1342 
1343     if (!CxtI || !CxtI->hasOneUse() || !CxtI->isShift())
1344       return false;
1345     if (Op2Info != TargetTransformInfo::OK_UniformConstantValue)
1346       return false;
1347 
    // Folded into an ADC/ADD/AND/BIC/CMP/EOR/MVN/ORR/ORN/RSB/SBC/SUB.
1349     switch (cast<Instruction>(CxtI->user_back())->getOpcode()) {
1350     case Instruction::Add:
1351     case Instruction::Sub:
1352     case Instruction::And:
1353     case Instruction::Xor:
1354     case Instruction::Or:
1355     case Instruction::ICmp:
1356       return true;
1357     default:
1358       return false;
1359     }
1360   };
1361   if (LooksLikeAFreeShift())
1362     return 0;
1363 
1364   // Default to cheap (throughput/size of 1 instruction) but adjust throughput
1365   // for "multiple beats" potentially needed by MVE instructions.
1366   int BaseCost = 1;
1367   if (ST->hasMVEIntegerOps() && Ty->isVectorTy())
1368     BaseCost = ST->getMVEVectorCostFactor(CostKind);
1369 
  // The rest of this mostly follows what is done in
  // BaseT::getArithmeticInstrCost, without treating floats as more expensive
  // than scalars or increasing the costs for custom operations. The result is
  // also multiplied by the MVEVectorCostFactor where appropriate.
1374   if (TLI->isOperationLegalOrCustomOrPromote(ISDOpcode, LT.second))
1375     return LT.first * BaseCost;
1376 
  // Otherwise this operation is expanded; assume that we need to scalarize it.
1378   if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
1379     unsigned Num = VTy->getNumElements();
1380     InstructionCost Cost =
1381         getArithmeticInstrCost(Opcode, Ty->getScalarType(), CostKind);
    // Return the cost of multiple scalar invocations plus the cost of
    // inserting and extracting the values.
1384     SmallVector<Type *> Tys(Args.size(), Ty);
1385     return BaseT::getScalarizationOverhead(VTy, Args, Tys) + Num * Cost;
1386   }
1387 
1388   return BaseCost;
1389 }
1390 
1391 InstructionCost ARMTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
1392                                             MaybeAlign Alignment,
1393                                             unsigned AddressSpace,
1394                                             TTI::TargetCostKind CostKind,
1395                                             const Instruction *I) {
1396   // TODO: Handle other cost kinds.
1397   if (CostKind != TTI::TCK_RecipThroughput)
1398     return 1;
1399 
1400   // Type legalization can't handle structs
1401   if (TLI->getValueType(DL, Src, true) == MVT::Other)
1402     return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
1403                                   CostKind);
1404 
1405   if (ST->hasNEON() && Src->isVectorTy() &&
1406       (Alignment && *Alignment != Align(16)) &&
1407       cast<VectorType>(Src)->getElementType()->isDoubleTy()) {
    // Unaligned loads/stores are extremely inefficient.
    // We need 4 uops for vst.1/vld.1 vs 1 uop for vldr/vstr.
1410     std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
1411     return LT.first * 4;
1412   }
1413 
1414   // MVE can optimize a fpext(load(4xhalf)) using an extending integer load.
1415   // Same for stores.
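  // For example, the load pattern matched here is:
  //   %l = load <4 x half>, <4 x half>* %p
  //   %e = fpext <4 x half> %l to <4 x float>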
1416   if (ST->hasMVEFloatOps() && isa<FixedVectorType>(Src) && I &&
1417       ((Opcode == Instruction::Load && I->hasOneUse() &&
1418         isa<FPExtInst>(*I->user_begin())) ||
1419        (Opcode == Instruction::Store && isa<FPTruncInst>(I->getOperand(0))))) {
1420     FixedVectorType *SrcVTy = cast<FixedVectorType>(Src);
1421     Type *DstTy =
1422         Opcode == Instruction::Load
1423             ? (*I->user_begin())->getType()
1424             : cast<Instruction>(I->getOperand(0))->getOperand(0)->getType();
1425     if (SrcVTy->getNumElements() == 4 && SrcVTy->getScalarType()->isHalfTy() &&
1426         DstTy->getScalarType()->isFloatTy())
1427       return ST->getMVEVectorCostFactor(CostKind);
1428   }
1429 
1430   int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
1431                      ? ST->getMVEVectorCostFactor(CostKind)
1432                      : 1;
1433   return BaseCost * BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
1434                                            CostKind, I);
1435 }
1436 
1437 InstructionCost
1438 ARMTTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
1439                                   unsigned AddressSpace,
1440                                   TTI::TargetCostKind CostKind) {
1441   if (ST->hasMVEIntegerOps()) {
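    // A legal masked load/store is expected to become a single predicated
    // VLDR/VSTR inside a VPT block, so charge one beat-adjusted instruction.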
1442     if (Opcode == Instruction::Load && isLegalMaskedLoad(Src, Alignment))
1443       return ST->getMVEVectorCostFactor(CostKind);
1444     if (Opcode == Instruction::Store && isLegalMaskedStore(Src, Alignment))
1445       return ST->getMVEVectorCostFactor(CostKind);
1446   }
1447   if (!isa<FixedVectorType>(Src))
1448     return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
1449                                         CostKind);
  // Scalar cost, which is currently very high due to the inefficiency of the
  // generated code.
1452   return cast<FixedVectorType>(Src)->getNumElements() * 8;
1453 }
1454 
1455 InstructionCost ARMTTIImpl::getInterleavedMemoryOpCost(
1456     unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1457     Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
1458     bool UseMaskForCond, bool UseMaskForGaps) {
1459   assert(Factor >= 2 && "Invalid interleave factor");
1460   assert(isa<VectorType>(VecTy) && "Expect a vector type");
1461 
  // vldN/vstN don't support vector types with i64/f64 elements.
1463   bool EltIs64Bits = DL.getTypeSizeInBits(VecTy->getScalarType()) == 64;
1464 
1465   if (Factor <= TLI->getMaxSupportedInterleaveFactor() && !EltIs64Bits &&
1466       !UseMaskForCond && !UseMaskForGaps) {
1467     unsigned NumElts = cast<FixedVectorType>(VecTy)->getNumElements();
1468     auto *SubVecTy =
1469         FixedVectorType::get(VecTy->getScalarType(), NumElts / Factor);
1470 
1471     // vldN/vstN only support legal vector types of size 64 or 128 in bits.
1472     // Accesses having vector types that are a multiple of 128 bits can be
1473     // matched to more than one vldN/vstN instruction.
1474     int BaseCost =
1475         ST->hasMVEIntegerOps() ? ST->getMVEVectorCostFactor(CostKind) : 1;
1476     if (NumElts % Factor == 0 &&
1477         TLI->isLegalInterleavedAccessType(Factor, SubVecTy, Alignment, DL))
1478       return Factor * BaseCost * TLI->getNumInterleavedAccesses(SubVecTy, DL);
1479 
1480     // Some smaller than legal interleaved patterns are cheap as we can make
1481     // use of the vmovn or vrev patterns to interleave a standard load. This is
1482     // true for v4i8, v8i8 and v4i16 at least (but not for v4f16 as it is
1483     // promoted differently). The cost of 2 here is then a load and vrev or
1484     // vmovn.
1485     if (ST->hasMVEIntegerOps() && Factor == 2 && NumElts / Factor > 2 &&
1486         VecTy->isIntOrIntVectorTy() &&
1487         DL.getTypeSizeInBits(SubVecTy).getFixedSize() <= 64)
1488       return 2 * BaseCost;
1489   }
1490 
1491   return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
1492                                            Alignment, AddressSpace, CostKind,
1493                                            UseMaskForCond, UseMaskForGaps);
1494 }
1495 
1496 InstructionCost ARMTTIImpl::getGatherScatterOpCost(
1497     unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
1498     Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) {
1499   using namespace PatternMatch;
1500   if (!ST->hasMVEIntegerOps() || !EnableMaskedGatherScatters)
1501     return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
1502                                          Alignment, CostKind, I);
1503 
1504   assert(DataTy->isVectorTy() && "Can't do gather/scatters on scalar!");
1505   auto *VTy = cast<FixedVectorType>(DataTy);
1506 
1507   // TODO: Splitting, once we do that.
1508 
1509   unsigned NumElems = VTy->getNumElements();
1510   unsigned EltSize = VTy->getScalarSizeInBits();
1511   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, DataTy);
1512 
1513   // For now, it is assumed that for the MVE gather instructions the loads are
1514   // all effectively serialised. This means the cost is the scalar cost
1515   // multiplied by the number of elements being loaded. This is possibly very
1516   // conservative, but even so we still end up vectorising loops because the
1517   // cost per iteration for many loops is lower than for scalar loops.
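  // For example, a gather of 4 x i32 elements is costed below as
  // 4 * LT.first * MVEVectorCostFactor.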
1518   InstructionCost VectorCost =
1519       NumElems * LT.first * ST->getMVEVectorCostFactor(CostKind);
1520   // The scalarization cost should be a lot higher. We use the number of vector
1521   // elements plus the scalarization overhead.
1522   InstructionCost ScalarCost =
1523       NumElems * LT.first + BaseT::getScalarizationOverhead(VTy, true, false) +
1524       BaseT::getScalarizationOverhead(VTy, false, true);
1525 
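  // Sub-byte elements or under-aligned accesses cannot use the gather/scatter
  // instructions, so assume they are scalarised.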
1526   if (EltSize < 8 || Alignment < EltSize / 8)
1527     return ScalarCost;
1528 
1529   unsigned ExtSize = EltSize;
1530   // Check whether there's a single user that asks for an extended type
1531   if (I != nullptr) {
    // Depending on the caller of this function, a gather instruction will
    // either have opcode Instruction::Load or be a call to the masked_gather
    // intrinsic.
1535     if ((I->getOpcode() == Instruction::Load ||
1536          match(I, m_Intrinsic<Intrinsic::masked_gather>())) &&
1537         I->hasOneUse()) {
1538       const User *Us = *I->users().begin();
1539       if (isa<ZExtInst>(Us) || isa<SExtInst>(Us)) {
        // Only allow valid type combinations.
1541         unsigned TypeSize =
1542             cast<Instruction>(Us)->getType()->getScalarSizeInBits();
1543         if (((TypeSize == 32 && (EltSize == 8 || EltSize == 16)) ||
1544              (TypeSize == 16 && EltSize == 8)) &&
1545             TypeSize * NumElems == 128) {
1546           ExtSize = TypeSize;
1547         }
1548       }
1549     }
1550     // Check whether the input data needs to be truncated
1551     TruncInst *T;
1552     if ((I->getOpcode() == Instruction::Store ||
1553          match(I, m_Intrinsic<Intrinsic::masked_scatter>())) &&
1554         (T = dyn_cast<TruncInst>(I->getOperand(0)))) {
1555       // Only allow valid type combinations
1556       unsigned TypeSize = T->getOperand(0)->getType()->getScalarSizeInBits();
1557       if (((EltSize == 16 && TypeSize == 32) ||
1558            (EltSize == 8 && (TypeSize == 32 || TypeSize == 16))) &&
1559           TypeSize * NumElems == 128)
1560         ExtSize = TypeSize;
1561     }
1562   }
1563 
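  // Only patterns that fill a full 128-bit vector with at least 4 (possibly
  // extended) elements are costed as vector gathers/scatters; anything else is
  // scalarised.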
1564   if (ExtSize * NumElems != 128 || NumElems < 4)
1565     return ScalarCost;
1566 
1567   // Any (aligned) i32 gather will not need to be scalarised.
1568   if (ExtSize == 32)
1569     return VectorCost;
1570   // For smaller types, we need to ensure that the gep's inputs are correctly
1571   // extended from a small enough value. Other sizes (including i64) are
1572   // scalarized for now.
1573   if (ExtSize != 8 && ExtSize != 16)
1574     return ScalarCost;
1575 
1576   if (const auto *BC = dyn_cast<BitCastInst>(Ptr))
1577     Ptr = BC->getOperand(0);
1578   if (const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
1579     if (GEP->getNumOperands() != 2)
1580       return ScalarCost;
1581     unsigned Scale = DL.getTypeAllocSize(GEP->getResultElementType());
1582     // Scale needs to be correct (which is only relevant for i16s).
1583     if (Scale != 1 && Scale * 8 != ExtSize)
1584       return ScalarCost;
1585     // And we need to zext (not sext) the indexes from a small enough type.
1586     if (const auto *ZExt = dyn_cast<ZExtInst>(GEP->getOperand(1))) {
1587       if (ZExt->getOperand(0)->getType()->getScalarSizeInBits() <= ExtSize)
1588         return VectorCost;
1589     }
1590     return ScalarCost;
1591   }
1592   return ScalarCost;
1593 }
1594 
1595 InstructionCost
1596 ARMTTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
1597                                        Optional<FastMathFlags> FMF,
1598                                        TTI::TargetCostKind CostKind) {
1599   if (TTI::requiresOrderedReduction(FMF))
1600     return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
1601 
1602   EVT ValVT = TLI->getValueType(DL, ValTy);
1603   int ISD = TLI->InstructionOpcodeToISD(Opcode);
1604   if (!ST->hasMVEIntegerOps() || !ValVT.isSimple() || ISD != ISD::ADD)
1605     return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
1606 
1607   std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
1608 
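  // An integer add reduction over a legal MVE vector type maps onto a single
  // VADDV/VADDVA, hence the entry cost of 1 below.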
1609   static const CostTblEntry CostTblAdd[]{
1610       {ISD::ADD, MVT::v16i8, 1},
1611       {ISD::ADD, MVT::v8i16, 1},
1612       {ISD::ADD, MVT::v4i32, 1},
1613   };
1614   if (const auto *Entry = CostTableLookup(CostTblAdd, ISD, LT.second))
1615     return Entry->Cost * ST->getMVEVectorCostFactor(CostKind) * LT.first;
1616 
1617   return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
1618 }
1619 
1620 InstructionCost
1621 ARMTTIImpl::getExtendedAddReductionCost(bool IsMLA, bool IsUnsigned,
1622                                         Type *ResTy, VectorType *ValTy,
1623                                         TTI::TargetCostKind CostKind) {
1624   EVT ValVT = TLI->getValueType(DL, ValTy);
1625   EVT ResVT = TLI->getValueType(DL, ResTy);
1626 
1627   if (ST->hasMVEIntegerOps() && ValVT.isSimple() && ResVT.isSimple()) {
1628     std::pair<InstructionCost, MVT> LT =
1629         TLI->getTypeLegalizationCost(DL, ValTy);
1630 
1631     // The legal cases are:
1632     //   VADDV u/s 8/16/32
1633     //   VMLAV u/s 8/16/32
1634     //   VADDLV u/s 32
1635     //   VMLALV u/s 16/32
1636     // Codegen currently cannot always handle larger than legal vectors very
1637     // well, especially for predicated reductions where the mask needs to be
1638     // split, so restrict to 128bit or smaller input types.
    unsigned ResVTSize = ResVT.getSizeInBits();
    if (ValVT.getSizeInBits() <= 128 &&
        ((LT.second == MVT::v16i8 && ResVTSize <= 32) ||
         (LT.second == MVT::v8i16 && ResVTSize <= (IsMLA ? 64u : 32u)) ||
         (LT.second == MVT::v4i32 && ResVTSize <= 64)))
1644       return ST->getMVEVectorCostFactor(CostKind) * LT.first;
1645   }
1646 
1647   return BaseT::getExtendedAddReductionCost(IsMLA, IsUnsigned, ResTy, ValTy,
1648                                             CostKind);
1649 }
1650 
1651 InstructionCost
1652 ARMTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
1653                                   TTI::TargetCostKind CostKind) {
1654   switch (ICA.getID()) {
1655   case Intrinsic::get_active_lane_mask:
    // Currently we make a somewhat optimistic assumption that an
    // active_lane_mask is always free. In reality it may be freely folded
    // into a tail-predicated loop, expanded into a VCTP or expanded into a lot
    // of add/icmp code. We may need to improve this in the future, but being
    // able to detect if it is free or not involves looking at a lot of other
    // code. We currently assume that the vectorizer inserted these, and knew
    // what it was doing in adding one.
1663     if (ST->hasMVEIntegerOps())
1664       return 0;
1665     break;
1666   case Intrinsic::sadd_sat:
1667   case Intrinsic::ssub_sat:
1668   case Intrinsic::uadd_sat:
1669   case Intrinsic::usub_sat: {
1670     if (!ST->hasMVEIntegerOps())
1671       break;
1672     Type *VT = ICA.getReturnType();
1673 
1674     std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, VT);
1675     if (LT.second == MVT::v4i32 || LT.second == MVT::v8i16 ||
1676         LT.second == MVT::v16i8) {
      // This is a base cost of 1 for the vqadd, plus 3 extra shifts if we
      // need to extend the type, as it uses shr(qadd(shl, shl)).
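      // For example, a saturating add on elements narrower than the legalized
      // lane size becomes shl + shl + vqadd + shr, i.e. 4 instructions.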
1679       unsigned Instrs =
1680           LT.second.getScalarSizeInBits() == VT->getScalarSizeInBits() ? 1 : 4;
1681       return LT.first * ST->getMVEVectorCostFactor(CostKind) * Instrs;
1682     }
1683     break;
1684   }
1685   case Intrinsic::abs:
1686   case Intrinsic::smin:
1687   case Intrinsic::smax:
1688   case Intrinsic::umin:
1689   case Intrinsic::umax: {
1690     if (!ST->hasMVEIntegerOps())
1691       break;
1692     Type *VT = ICA.getReturnType();
1693 
1694     std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, VT);
1695     if (LT.second == MVT::v4i32 || LT.second == MVT::v8i16 ||
1696         LT.second == MVT::v16i8)
1697       return LT.first * ST->getMVEVectorCostFactor(CostKind);
1698     break;
1699   }
1700   case Intrinsic::minnum:
1701   case Intrinsic::maxnum: {
1702     if (!ST->hasMVEFloatOps())
1703       break;
1704     Type *VT = ICA.getReturnType();
1705     std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, VT);
1706     if (LT.second == MVT::v4f32 || LT.second == MVT::v8f16)
1707       return LT.first * ST->getMVEVectorCostFactor(CostKind);
1708     break;
1709   }
1710   }
1711 
1712   return BaseT::getIntrinsicInstrCost(ICA, CostKind);
1713 }
1714 
1715 bool ARMTTIImpl::isLoweredToCall(const Function *F) {
1716   if (!F->isIntrinsic())
    return BaseT::isLoweredToCall(F);
1718 
1719   // Assume all Arm-specific intrinsics map to an instruction.
1720   if (F->getName().startswith("llvm.arm"))
1721     return false;
1722 
1723   switch (F->getIntrinsicID()) {
1724   default: break;
1725   case Intrinsic::powi:
1726   case Intrinsic::sin:
1727   case Intrinsic::cos:
1728   case Intrinsic::pow:
1729   case Intrinsic::log:
1730   case Intrinsic::log10:
1731   case Intrinsic::log2:
1732   case Intrinsic::exp:
1733   case Intrinsic::exp2:
1734     return true;
1735   case Intrinsic::sqrt:
1736   case Intrinsic::fabs:
1737   case Intrinsic::copysign:
1738   case Intrinsic::floor:
1739   case Intrinsic::ceil:
1740   case Intrinsic::trunc:
1741   case Intrinsic::rint:
1742   case Intrinsic::nearbyint:
1743   case Intrinsic::round:
1744   case Intrinsic::canonicalize:
1745   case Intrinsic::lround:
1746   case Intrinsic::llround:
1747   case Intrinsic::lrint:
1748   case Intrinsic::llrint:
1749     if (F->getReturnType()->isDoubleTy() && !ST->hasFP64())
1750       return true;
1751     if (F->getReturnType()->isHalfTy() && !ST->hasFullFP16())
1752       return true;
1753     // Some operations can be handled by vector instructions and assume
1754     // unsupported vectors will be expanded into supported scalar ones.
1755     // TODO Handle scalar operations properly.
1756     return !ST->hasFPARMv8Base() && !ST->hasVFP2Base();
1757   case Intrinsic::masked_store:
1758   case Intrinsic::masked_load:
1759   case Intrinsic::masked_gather:
1760   case Intrinsic::masked_scatter:
1761     return !ST->hasMVEIntegerOps();
1762   case Intrinsic::sadd_with_overflow:
1763   case Intrinsic::uadd_with_overflow:
1764   case Intrinsic::ssub_with_overflow:
1765   case Intrinsic::usub_with_overflow:
1766   case Intrinsic::sadd_sat:
1767   case Intrinsic::uadd_sat:
1768   case Intrinsic::ssub_sat:
1769   case Intrinsic::usub_sat:
1770     return false;
1771   }
1772 
1773   return BaseT::isLoweredToCall(F);
1774 }
1775 
1776 bool ARMTTIImpl::maybeLoweredToCall(Instruction &I) {
1777   unsigned ISD = TLI->InstructionOpcodeToISD(I.getOpcode());
1778   EVT VT = TLI->getValueType(DL, I.getType(), true);
1779   if (TLI->getOperationAction(ISD, VT) == TargetLowering::LibCall)
1780     return true;
1781 
1782   // Check if an intrinsic will be lowered to a call and assume that any
1783   // other CallInst will generate a bl.
1784   if (auto *Call = dyn_cast<CallInst>(&I)) {
1785     if (auto *II = dyn_cast<IntrinsicInst>(Call)) {
1786       switch(II->getIntrinsicID()) {
1787         case Intrinsic::memcpy:
1788         case Intrinsic::memset:
1789         case Intrinsic::memmove:
1790           return getNumMemOps(II) == -1;
1791         default:
1792           if (const Function *F = Call->getCalledFunction())
1793             return isLoweredToCall(F);
1794       }
1795     }
1796     return true;
1797   }
1798 
1799   // FPv5 provides conversions between integer, double-precision,
1800   // single-precision, and half-precision formats.
1801   switch (I.getOpcode()) {
1802   default:
1803     break;
1804   case Instruction::FPToSI:
1805   case Instruction::FPToUI:
1806   case Instruction::SIToFP:
1807   case Instruction::UIToFP:
1808   case Instruction::FPTrunc:
1809   case Instruction::FPExt:
1810     return !ST->hasFPARMv8Base();
1811   }
1812 
  // FIXME: Unfortunately the approach of checking the Operation Action does
  // not catch all cases of Legalization that use library calls. Our
  // Legalization step categorizes some transformations that use library calls
  // as Custom, Expand or even Legal when doing type legalization. So for now
  // we have to special-case, for instance, the SDIV of 64-bit integers and the
  // use of floating-point emulation.
1819   if (VT.isInteger() && VT.getSizeInBits() >= 64) {
1820     switch (ISD) {
1821     default:
1822       break;
1823     case ISD::SDIV:
1824     case ISD::UDIV:
1825     case ISD::SREM:
1826     case ISD::UREM:
1827     case ISD::SDIVREM:
1828     case ISD::UDIVREM:
1829       return true;
1830     }
1831   }
1832 
1833   // Assume all other non-float operations are supported.
1834   if (!VT.isFloatingPoint())
1835     return false;
1836 
  // We'll need a library call to handle most floats when using soft float.
1838   if (TLI->useSoftFloat()) {
1839     switch (I.getOpcode()) {
1840     default:
1841       return true;
1842     case Instruction::Alloca:
1843     case Instruction::Load:
1844     case Instruction::Store:
1845     case Instruction::Select:
1846     case Instruction::PHI:
1847       return false;
1848     }
1849   }
1850 
  // We'll need a libcall to perform double-precision operations on a
  // single-precision-only FPU.
1853   if (I.getType()->isDoubleTy() && !ST->hasFP64())
1854     return true;
1855 
1856   // Likewise for half precision arithmetic.
1857   if (I.getType()->isHalfTy() && !ST->hasFullFP16())
1858     return true;
1859 
1860   return false;
1861 }
1862 
1863 bool ARMTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
1864                                           AssumptionCache &AC,
1865                                           TargetLibraryInfo *LibInfo,
1866                                           HardwareLoopInfo &HWLoopInfo) {
1867   // Low-overhead branches are only supported in the 'low-overhead branch'
1868   // extension of v8.1-m.
1869   if (!ST->hasLOB() || DisableLowOverheadLoops) {
1870     LLVM_DEBUG(dbgs() << "ARMHWLoops: Disabled\n");
1871     return false;
1872   }
1873 
1874   if (!SE.hasLoopInvariantBackedgeTakenCount(L)) {
1875     LLVM_DEBUG(dbgs() << "ARMHWLoops: No BETC\n");
1876     return false;
1877   }
1878 
1879   const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
1880   if (isa<SCEVCouldNotCompute>(BackedgeTakenCount)) {
1881     LLVM_DEBUG(dbgs() << "ARMHWLoops: Uncomputable BETC\n");
1882     return false;
1883   }
1884 
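  // The trip count is the backedge-taken count plus one.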
1885   const SCEV *TripCountSCEV =
1886     SE.getAddExpr(BackedgeTakenCount,
1887                   SE.getOne(BackedgeTakenCount->getType()));
1888 
1889   // We need to store the trip count in LR, a 32-bit register.
1890   if (SE.getUnsignedRangeMax(TripCountSCEV).getBitWidth() > 32) {
1891     LLVM_DEBUG(dbgs() << "ARMHWLoops: Trip count does not fit into 32bits\n");
1892     return false;
1893   }
1894 
1895   // Making a call will trash LR and clear LO_BRANCH_INFO, so there's little
1896   // point in generating a hardware loop if that's going to happen.
1897 
1898   auto IsHardwareLoopIntrinsic = [](Instruction &I) {
1899     if (auto *Call = dyn_cast<IntrinsicInst>(&I)) {
1900       switch (Call->getIntrinsicID()) {
1901       default:
1902         break;
1903       case Intrinsic::start_loop_iterations:
1904       case Intrinsic::test_start_loop_iterations:
1905       case Intrinsic::loop_decrement:
1906       case Intrinsic::loop_decrement_reg:
1907         return true;
1908       }
1909     }
1910     return false;
1911   };
1912 
1913   // Scan the instructions to see if there's any that we know will turn into a
1914   // call or if this loop is already a low-overhead loop or will become a tail
1915   // predicated loop.
1916   bool IsTailPredLoop = false;
1917   auto ScanLoop = [&](Loop *L) {
1918     for (auto *BB : L->getBlocks()) {
1919       for (auto &I : *BB) {
1920         if (maybeLoweredToCall(I) || IsHardwareLoopIntrinsic(I) ||
1921             isa<InlineAsm>(I)) {
1922           LLVM_DEBUG(dbgs() << "ARMHWLoops: Bad instruction: " << I << "\n");
1923           return false;
1924         }
1925         if (auto *II = dyn_cast<IntrinsicInst>(&I))
1926           IsTailPredLoop |=
1927               II->getIntrinsicID() == Intrinsic::get_active_lane_mask ||
1928               II->getIntrinsicID() == Intrinsic::arm_mve_vctp8 ||
1929               II->getIntrinsicID() == Intrinsic::arm_mve_vctp16 ||
1930               II->getIntrinsicID() == Intrinsic::arm_mve_vctp32 ||
1931               II->getIntrinsicID() == Intrinsic::arm_mve_vctp64;
1932       }
1933     }
1934     return true;
1935   };
1936 
1937   // Visit inner loops.
1938   for (auto Inner : *L)
1939     if (!ScanLoop(Inner))
1940       return false;
1941 
1942   if (!ScanLoop(L))
1943     return false;
1944 
1945   // TODO: Check whether the trip count calculation is expensive. If L is the
1946   // inner loop but we know it has a low trip count, calculating that trip
1947   // count (in the parent loop) may be detrimental.
1948 
1949   LLVMContext &C = L->getHeader()->getContext();
1950   HWLoopInfo.CounterInReg = true;
1951   HWLoopInfo.IsNestingLegal = false;
1952   HWLoopInfo.PerformEntryTest = AllowWLSLoops && !IsTailPredLoop;
1953   HWLoopInfo.CountType = Type::getInt32Ty(C);
1954   HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1);
1955   return true;
1956 }
1957 
1958 static bool canTailPredicateInstruction(Instruction &I, int &ICmpCount) {
  // We don't allow icmps, and because we only look at single-block loops,
  // we simply count the icmps, i.e. there should only be one for the backedge.
1961   if (isa<ICmpInst>(&I) && ++ICmpCount > 1)
1962     return false;
1963 
1964   if (isa<FCmpInst>(&I))
1965     return false;
1966 
1967   // We could allow extending/narrowing FP loads/stores, but codegen is
1968   // too inefficient so reject this for now.
1969   if (isa<FPExtInst>(&I) || isa<FPTruncInst>(&I))
1970     return false;
1971 
1972   // Extends have to be extending-loads
  if (isa<SExtInst>(&I) || isa<ZExtInst>(&I))
1974     if (!I.getOperand(0)->hasOneUse() || !isa<LoadInst>(I.getOperand(0)))
1975       return false;
1976 
1977   // Truncs have to be narrowing-stores
  if (isa<TruncInst>(&I))
1979     if (!I.hasOneUse() || !isa<StoreInst>(*I.user_begin()))
1980       return false;
1981 
1982   return true;
1983 }
1984 
1985 // To set up a tail-predicated loop, we need to know the total number of
1986 // elements processed by that loop. Thus, we need to determine the element
1987 // size and:
1988 // 1) it should be uniform for all operations in the vector loop, so we
1989 //    e.g. don't want any widening/narrowing operations.
1990 // 2) it should be smaller than i64s because we don't have vector operations
1991 //    that work on i64s.
1992 // 3) we don't want elements to be reversed or shuffled, to make sure the
1993 //    tail-predication masks/predicates the right lanes.
1994 //
1995 static bool canTailPredicateLoop(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
1996                                  const DataLayout &DL,
1997                                  const LoopAccessInfo *LAI) {
1998   LLVM_DEBUG(dbgs() << "Tail-predication: checking allowed instructions\n");
1999 
2000   // If there are live-out values, it is probably a reduction. We can predicate
2001   // most reduction operations freely under MVE using a combination of
2002   // prefer-predicated-reduction-select and inloop reductions. We limit this to
2003   // floating point and integer reductions, but don't check for operators
2004   // specifically here. If the value ends up not being a reduction (and so the
2005   // vectorizer cannot tailfold the loop), we should fall back to standard
2006   // vectorization automatically.
  SmallVector<Instruction *, 8> LiveOuts = llvm::findDefsUsedOutsideOfLoop(L);
2009   bool ReductionsDisabled =
2010       EnableTailPredication == TailPredication::EnabledNoReductions ||
2011       EnableTailPredication == TailPredication::ForceEnabledNoReductions;
2012 
2013   for (auto *I : LiveOuts) {
2014     if (!I->getType()->isIntegerTy() && !I->getType()->isFloatTy() &&
2015         !I->getType()->isHalfTy()) {
2016       LLVM_DEBUG(dbgs() << "Don't tail-predicate loop with non-integer/float "
2017                            "live-out value\n");
2018       return false;
2019     }
2020     if (ReductionsDisabled) {
2021       LLVM_DEBUG(dbgs() << "Reductions not enabled\n");
2022       return false;
2023     }
2024   }
2025 
2026   // Next, check that all instructions can be tail-predicated.
2027   PredicatedScalarEvolution PSE = LAI->getPSE();
2028   SmallVector<Instruction *, 16> LoadStores;
2029   int ICmpCount = 0;
2030 
2031   for (BasicBlock *BB : L->blocks()) {
2032     for (Instruction &I : BB->instructionsWithoutDebug()) {
2033       if (isa<PHINode>(&I))
2034         continue;
2035       if (!canTailPredicateInstruction(I, ICmpCount)) {
2036         LLVM_DEBUG(dbgs() << "Instruction not allowed: "; I.dump());
2037         return false;
2038       }
2039 
      Type *T = I.getType();
2041       if (T->isPointerTy())
2042         T = T->getPointerElementType();
2043 
2044       if (T->getScalarSizeInBits() > 32) {
2045         LLVM_DEBUG(dbgs() << "Unsupported Type: "; T->dump());
2046         return false;
2047       }
2048       if (isa<StoreInst>(I) || isa<LoadInst>(I)) {
2049         Value *Ptr = isa<LoadInst>(I) ? I.getOperand(0) : I.getOperand(1);
2050         int64_t NextStride = getPtrStride(PSE, Ptr, L);
2051         if (NextStride == 1) {
          // TODO: for now only allow consecutive strides of 1. We could support
          // other strides as long as they are uniform, but let's keep it simple
          // for now.
2055           continue;
2056         } else if (NextStride == -1 ||
2057                    (NextStride == 2 && MVEMaxSupportedInterleaveFactor >= 2) ||
2058                    (NextStride == 4 && MVEMaxSupportedInterleaveFactor >= 4)) {
          LLVM_DEBUG(dbgs()
                     << "Consecutive strides of 2 found, vld2/vst2 can't "
                        "be tail-predicated.\n");
2062           return false;
2063           // TODO: don't tail predicate if there is a reversed load?
2064         } else if (EnableMaskedGatherScatters) {
2065           // Gather/scatters do allow loading from arbitrary strides, at
2066           // least if they are loop invariant.
2067           // TODO: Loop variant strides should in theory work, too, but
2068           // this requires further testing.
2069           const SCEV *PtrScev =
2070               replaceSymbolicStrideSCEV(PSE, llvm::ValueToValueMap(), Ptr);
2071           if (auto AR = dyn_cast<SCEVAddRecExpr>(PtrScev)) {
2072             const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());
2073             if (PSE.getSE()->isLoopInvariant(Step, L))
2074               continue;
2075           }
2076         }
        LLVM_DEBUG(dbgs() << "Bad stride found, can't "
                             "tail-predicate.\n");
2079         return false;
2080       }
2081     }
2082   }
2083 
2084   LLVM_DEBUG(dbgs() << "tail-predication: all instructions allowed!\n");
2085   return true;
2086 }
2087 
2088 bool ARMTTIImpl::preferPredicateOverEpilogue(Loop *L, LoopInfo *LI,
2089                                              ScalarEvolution &SE,
2090                                              AssumptionCache &AC,
2091                                              TargetLibraryInfo *TLI,
2092                                              DominatorTree *DT,
2093                                              const LoopAccessInfo *LAI) {
2094   if (!EnableTailPredication) {
2095     LLVM_DEBUG(dbgs() << "Tail-predication not enabled.\n");
2096     return false;
2097   }
2098 
  // Creating a predicated vector loop is the first step for generating a
  // tail-predicated hardware loop, for which we need the MVE masked
  // load/store instructions:
2102   if (!ST->hasMVEIntegerOps())
2103     return false;
2104 
2105   // For now, restrict this to single block loops.
2106   if (L->getNumBlocks() > 1) {
2107     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: not a single block "
2108                          "loop.\n");
2109     return false;
2110   }
2111 
2112   assert(L->isInnermost() && "preferPredicateOverEpilogue: inner-loop expected");
2113 
2114   HardwareLoopInfo HWLoopInfo(L);
2115   if (!HWLoopInfo.canAnalyze(*LI)) {
2116     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
2117                          "analyzable.\n");
2118     return false;
2119   }
2120 
2121   // This checks if we have the low-overhead branch architecture
2122   // extension, and if we will create a hardware-loop:
2123   if (!isHardwareLoopProfitable(L, SE, AC, TLI, HWLoopInfo)) {
2124     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
2125                          "profitable.\n");
2126     return false;
2127   }
2128 
2129   if (!HWLoopInfo.isHardwareLoopCandidate(SE, *LI, *DT)) {
2130     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
2131                          "a candidate.\n");
2132     return false;
2133   }
2134 
2135   return canTailPredicateLoop(L, LI, SE, DL, LAI);
2136 }
2137 
2138 bool ARMTTIImpl::emitGetActiveLaneMask() const {
2139   if (!ST->hasMVEIntegerOps() || !EnableTailPredication)
2140     return false;
2141 
2142   // Intrinsic @llvm.get.active.lane.mask is supported.
2143   // It is used in the MVETailPredication pass, which requires the number of
2144   // elements processed by this vector loop to setup the tail-predicated
2145   // loop.
2146   return true;
}

2148 void ARMTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
2149                                          TTI::UnrollingPreferences &UP,
2150                                          OptimizationRemarkEmitter *ORE) {
  // Enable upper-bound unrolling universally; it is not dependent on the
  // conditions below.
2153   UP.UpperBound = true;
2154 
2155   // Only currently enable these preferences for M-Class cores.
2156   if (!ST->isMClass())
2157     return BasicTTIImplBase::getUnrollingPreferences(L, SE, UP, ORE);
2158 
2159   // Disable loop unrolling for Oz and Os.
2160   UP.OptSizeThreshold = 0;
2161   UP.PartialOptSizeThreshold = 0;
2162   if (L->getHeader()->getParent()->hasOptSize())
2163     return;
2164 
2165   SmallVector<BasicBlock*, 4> ExitingBlocks;
2166   L->getExitingBlocks(ExitingBlocks);
2167   LLVM_DEBUG(dbgs() << "Loop has:\n"
2168                     << "Blocks: " << L->getNumBlocks() << "\n"
2169                     << "Exit blocks: " << ExitingBlocks.size() << "\n");
2170 
  // Only allow one exiting block other than the latch. This acts as an early
  // exit as it mirrors the profitability calculation of the runtime unroller.
2173   if (ExitingBlocks.size() > 2)
2174     return;
2175 
2176   // Limit the CFG of the loop body for targets with a branch predictor.
2177   // Allowing 4 blocks permits if-then-else diamonds in the body.
2178   if (ST->hasBranchPredictor() && L->getNumBlocks() > 4)
2179     return;
2180 
2181   // Don't unroll vectorized loops, including the remainder loop
2182   if (getBooleanLoopAttribute(L, "llvm.loop.isvectorized"))
2183     return;
2184 
2185   // Scan the loop: don't unroll loops with calls as this could prevent
2186   // inlining.
2187   InstructionCost Cost = 0;
2188   for (auto *BB : L->getBlocks()) {
2189     for (auto &I : *BB) {
      // Don't unroll vectorised loops. MVE does not benefit from unrolling as
      // much as scalar code does.
2192       if (I.getType()->isVectorTy())
2193         return;
2194 
2195       if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
2196         if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
2197           if (!isLoweredToCall(F))
2198             continue;
2199         }
2200         return;
2201       }
2202 
2203       SmallVector<const Value*, 4> Operands(I.operand_values());
2204       Cost +=
2205         getUserCost(&I, Operands, TargetTransformInfo::TCK_SizeAndLatency);
2206     }
2207   }
2208 
  // On v6m cores, there are very few registers available. We can easily end up
  // spilling and reloading more registers in an unrolled loop. Look at the
  // number of LCSSA phis as a rough measure of how many registers will need to
  // be live out of the loop, reducing the default unroll count if more than 1
  // value is needed. In the long run, all of this should be learnt by a
  // machine.
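  // For example, with the default count of 4 and two live-out values, the
  // runtime unroll count becomes 2.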
2215   unsigned UnrollCount = 4;
2216   if (ST->isThumb1Only()) {
2217     unsigned ExitingValues = 0;
2218     SmallVector<BasicBlock *, 4> ExitBlocks;
2219     L->getExitBlocks(ExitBlocks);
2220     for (auto *Exit : ExitBlocks) {
2221       // Count the number of LCSSA phis. Exclude values coming from GEP's as
2222       // only the last is expected to be needed for address operands.
2223       unsigned LiveOuts = count_if(Exit->phis(), [](auto &PH) {
2224         return PH.getNumOperands() != 1 ||
2225                !isa<GetElementPtrInst>(PH.getOperand(0));
2226       });
2227       ExitingValues = ExitingValues < LiveOuts ? LiveOuts : ExitingValues;
2228     }
2229     if (ExitingValues)
2230       UnrollCount /= ExitingValues;
2231     if (UnrollCount <= 1)
2232       return;
2233   }
2234 
2235   LLVM_DEBUG(dbgs() << "Cost of loop: " << Cost << "\n");
2236   LLVM_DEBUG(dbgs() << "Default Runtime Unroll Count: " << UnrollCount << "\n");
2237 
2238   UP.Partial = true;
2239   UP.Runtime = true;
2240   UP.UnrollRemainder = true;
2241   UP.DefaultUnrollRuntimeCount = UnrollCount;
2242   UP.UnrollAndJam = true;
2243   UP.UnrollAndJamInnerLoopThreshold = 60;
2244 
  // Forcing the unrolling of small loops can be very useful because of the
  // branch-taken cost of the backedge.
2247   if (Cost < 12)
2248     UP.Force = true;
2249 }
2250 
2251 void ARMTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
2252                                        TTI::PeelingPreferences &PP) {
2253   BaseT::getPeelingPreferences(L, SE, PP);
2254 }
2255 
2256 bool ARMTTIImpl::preferInLoopReduction(unsigned Opcode, Type *Ty,
2257                                        TTI::ReductionFlags Flags) const {
2258   if (!ST->hasMVEIntegerOps())
2259     return false;
2260 
2261   unsigned ScalarBits = Ty->getScalarSizeInBits();
2262   switch (Opcode) {
2263   case Instruction::Add:
2264     return ScalarBits <= 64;
2265   default:
2266     return false;
2267   }
2268 }
2269 
2270 bool ARMTTIImpl::preferPredicatedReductionSelect(
2271     unsigned Opcode, Type *Ty, TTI::ReductionFlags Flags) const {
  return ST->hasMVEIntegerOps();
2275 }
2276