1 //===- ARMTargetTransformInfo.cpp - ARM specific TTI ----------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "ARMTargetTransformInfo.h"
10 #include "ARMSubtarget.h"
11 #include "MCTargetDesc/ARMAddressingModes.h"
12 #include "llvm/ADT/APInt.h"
13 #include "llvm/ADT/SmallVector.h"
14 #include "llvm/Analysis/LoopInfo.h"
15 #include "llvm/CodeGen/CostTable.h"
16 #include "llvm/CodeGen/ISDOpcodes.h"
17 #include "llvm/CodeGen/ValueTypes.h"
18 #include "llvm/IR/BasicBlock.h"
19 #include "llvm/IR/DataLayout.h"
20 #include "llvm/IR/DerivedTypes.h"
21 #include "llvm/IR/Instruction.h"
22 #include "llvm/IR/Instructions.h"
23 #include "llvm/IR/IntrinsicInst.h"
24 #include "llvm/IR/IntrinsicsARM.h"
25 #include "llvm/IR/PatternMatch.h"
26 #include "llvm/IR/Type.h"
27 #include "llvm/MC/SubtargetFeature.h"
28 #include "llvm/Support/Casting.h"
29 #include "llvm/Support/MachineValueType.h"
30 #include "llvm/Target/TargetMachine.h"
31 #include "llvm/Transforms/InstCombine/InstCombiner.h"
32 #include "llvm/Transforms/Utils/Local.h"
33 #include "llvm/Transforms/Utils/LoopUtils.h"
34 #include <algorithm>
35 #include <cassert>
36 #include <cstdint>
37 #include <utility>
38 
39 using namespace llvm;
40 
41 #define DEBUG_TYPE "armtti"
42 
43 static cl::opt<bool> EnableMaskedLoadStores(
44   "enable-arm-maskedldst", cl::Hidden, cl::init(true),
45   cl::desc("Enable the generation of masked loads and stores"));
46 
47 static cl::opt<bool> DisableLowOverheadLoops(
48   "disable-arm-loloops", cl::Hidden, cl::init(false),
49   cl::desc("Disable the generation of low-overhead loops"));
50 
51 extern cl::opt<TailPredication::Mode> EnableTailPredication;
52 
53 extern cl::opt<bool> EnableMaskedGatherScatters;
54 
55 /// Convert a vector load intrinsic into a simple llvm load instruction.
56 /// This is beneficial when the underlying object being addressed comes
57 /// from a constant, since we get constant-folding for free.
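/// For example (schematically), a call such as
///   %v = call <4 x i32> @llvm.arm.neon.vld1.v4i32(i8* %p, i32 4)
/// becomes a bitcast of %p to <4 x i32>* followed by an aligned
/// 'load <4 x i32>', which ordinary load folding can then simplify.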
58 static Value *simplifyNeonVld1(const IntrinsicInst &II, unsigned MemAlign,
59                                InstCombiner::BuilderTy &Builder) {
60   auto *IntrAlign = dyn_cast<ConstantInt>(II.getArgOperand(1));
61 
62   if (!IntrAlign)
63     return nullptr;
64 
65   unsigned Alignment = IntrAlign->getLimitedValue() < MemAlign
66                            ? MemAlign
67                            : IntrAlign->getLimitedValue();
68 
69   if (!isPowerOf2_32(Alignment))
70     return nullptr;
71 
72   auto *BCastInst = Builder.CreateBitCast(II.getArgOperand(0),
73                                           PointerType::get(II.getType(), 0));
74   return Builder.CreateAlignedLoad(II.getType(), BCastInst, Align(Alignment));
75 }
76 
77 bool ARMTTIImpl::areInlineCompatible(const Function *Caller,
78                                      const Function *Callee) const {
79   const TargetMachine &TM = getTLI()->getTargetMachine();
80   const FeatureBitset &CallerBits =
81       TM.getSubtargetImpl(*Caller)->getFeatureBits();
82   const FeatureBitset &CalleeBits =
83       TM.getSubtargetImpl(*Callee)->getFeatureBits();
84 
85   // To inline a callee, all features not in the allowed list must match exactly.
86   bool MatchExact = (CallerBits & ~InlineFeaturesAllowed) ==
87                     (CalleeBits & ~InlineFeaturesAllowed);
  // For features in the allowed list, the callee's features must be a subset
  // of the caller's.
90   bool MatchSubset = ((CallerBits & CalleeBits) & InlineFeaturesAllowed) ==
91                      (CalleeBits & InlineFeaturesAllowed);
92   return MatchExact && MatchSubset;
93 }
94 
95 bool ARMTTIImpl::shouldFavorBackedgeIndex(const Loop *L) const {
96   if (L->getHeader()->getParent()->hasOptSize())
97     return false;
98   if (ST->hasMVEIntegerOps())
99     return false;
100   return ST->isMClass() && ST->isThumb2() && L->getNumBlocks() == 1;
101 }
102 
103 bool ARMTTIImpl::shouldFavorPostInc() const {
104   if (ST->hasMVEIntegerOps())
105     return true;
106   return false;
107 }
108 
109 Optional<Instruction *>
110 ARMTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
111   Intrinsic::ID IID = II.getIntrinsicID();
112   switch (IID) {
113   default:
114     break;
115   case Intrinsic::arm_neon_vld1: {
116     Align MemAlign =
117         getKnownAlignment(II.getArgOperand(0), IC.getDataLayout(), &II,
118                           &IC.getAssumptionCache(), &IC.getDominatorTree());
119     if (Value *V = simplifyNeonVld1(II, MemAlign.value(), IC.Builder)) {
120       return IC.replaceInstUsesWith(II, V);
121     }
122     break;
123   }
124 
125   case Intrinsic::arm_neon_vld2:
126   case Intrinsic::arm_neon_vld3:
127   case Intrinsic::arm_neon_vld4:
128   case Intrinsic::arm_neon_vld2lane:
129   case Intrinsic::arm_neon_vld3lane:
130   case Intrinsic::arm_neon_vld4lane:
131   case Intrinsic::arm_neon_vst1:
132   case Intrinsic::arm_neon_vst2:
133   case Intrinsic::arm_neon_vst3:
134   case Intrinsic::arm_neon_vst4:
135   case Intrinsic::arm_neon_vst2lane:
136   case Intrinsic::arm_neon_vst3lane:
137   case Intrinsic::arm_neon_vst4lane: {
138     Align MemAlign =
139         getKnownAlignment(II.getArgOperand(0), IC.getDataLayout(), &II,
140                           &IC.getAssumptionCache(), &IC.getDominatorTree());
141     unsigned AlignArg = II.getNumArgOperands() - 1;
142     Value *AlignArgOp = II.getArgOperand(AlignArg);
143     MaybeAlign Align = cast<ConstantInt>(AlignArgOp)->getMaybeAlignValue();
144     if (Align && *Align < MemAlign) {
145       return IC.replaceOperand(
146           II, AlignArg,
147           ConstantInt::get(Type::getInt32Ty(II.getContext()), MemAlign.value(),
148                            false));
149     }
150     break;
151   }
152 
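  // MVE predicate conversions: fold an i2v/v2i round trip back to the
  // original value, e.g. i2v(v2i(x)) --> x, and recognise an all-ones XOR
  // mask on the scalar form as a vector 'not'.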
153   case Intrinsic::arm_mve_pred_i2v: {
154     Value *Arg = II.getArgOperand(0);
155     Value *ArgArg;
156     if (match(Arg, PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_v2i>(
157                        PatternMatch::m_Value(ArgArg))) &&
158         II.getType() == ArgArg->getType()) {
159       return IC.replaceInstUsesWith(II, ArgArg);
160     }
161     Constant *XorMask;
162     if (match(Arg, m_Xor(PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_v2i>(
163                              PatternMatch::m_Value(ArgArg)),
164                          PatternMatch::m_Constant(XorMask))) &&
165         II.getType() == ArgArg->getType()) {
166       if (auto *CI = dyn_cast<ConstantInt>(XorMask)) {
167         if (CI->getValue().trunc(16).isAllOnesValue()) {
168           auto TrueVector = IC.Builder.CreateVectorSplat(
169               cast<FixedVectorType>(II.getType())->getNumElements(),
170               IC.Builder.getTrue());
171           return BinaryOperator::Create(Instruction::Xor, ArgArg, TrueVector);
172         }
173       }
174     }
175     KnownBits ScalarKnown(32);
176     if (IC.SimplifyDemandedBits(&II, 0, APInt::getLowBitsSet(32, 16),
177                                 ScalarKnown, 0)) {
178       return &II;
179     }
180     break;
181   }
182   case Intrinsic::arm_mve_pred_v2i: {
183     Value *Arg = II.getArgOperand(0);
184     Value *ArgArg;
185     if (match(Arg, PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_i2v>(
186                        PatternMatch::m_Value(ArgArg)))) {
187       return IC.replaceInstUsesWith(II, ArgArg);
188     }
189     if (!II.getMetadata(LLVMContext::MD_range)) {
190       Type *IntTy32 = Type::getInt32Ty(II.getContext());
191       Metadata *M[] = {
192           ConstantAsMetadata::get(ConstantInt::get(IntTy32, 0)),
193           ConstantAsMetadata::get(ConstantInt::get(IntTy32, 0xFFFF))};
194       II.setMetadata(LLVMContext::MD_range, MDNode::get(II.getContext(), M));
195       return &II;
196     }
197     break;
198   }
199   case Intrinsic::arm_mve_vadc:
200   case Intrinsic::arm_mve_vadc_predicated: {
201     unsigned CarryOp =
202         (II.getIntrinsicID() == Intrinsic::arm_mve_vadc_predicated) ? 3 : 2;
203     assert(II.getArgOperand(CarryOp)->getType()->getScalarSizeInBits() == 32 &&
204            "Bad type for intrinsic!");
205 
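    // Only bit 29 of the carry operand is actually consumed (it mirrors the
    // carry flag's position in FPSCR), so the other bits are not demanded.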
206     KnownBits CarryKnown(32);
207     if (IC.SimplifyDemandedBits(&II, CarryOp, APInt::getOneBitSet(32, 29),
208                                 CarryKnown)) {
209       return &II;
210     }
211     break;
212   }
213   }
214   return None;
215 }
216 
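// Return the expected cost, in instructions, of materializing the immediate
// Imm. For example, on Thumb2 a value such as 0x00FF00FF is a valid modified
// immediate and costs 1 (a single MOV), an arbitrary 32-bit constant costs 2
// (MOVW/MOVT pair), and older cores without those instructions fall back to
// a constant-pool load (cost 3).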
217 int ARMTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
218                               TTI::TargetCostKind CostKind) {
219   assert(Ty->isIntegerTy());
220 
  unsigned Bits = Ty->getPrimitiveSizeInBits();
  if (Bits == 0 || Imm.getActiveBits() >= 64)
    return 4;
224 
225   int64_t SImmVal = Imm.getSExtValue();
226   uint64_t ZImmVal = Imm.getZExtValue();
227   if (!ST->isThumb()) {
228     if ((SImmVal >= 0 && SImmVal < 65536) ||
229         (ARM_AM::getSOImmVal(ZImmVal) != -1) ||
230         (ARM_AM::getSOImmVal(~ZImmVal) != -1))
231       return 1;
232     return ST->hasV6T2Ops() ? 2 : 3;
233   }
234   if (ST->isThumb2()) {
235     if ((SImmVal >= 0 && SImmVal < 65536) ||
236         (ARM_AM::getT2SOImmVal(ZImmVal) != -1) ||
237         (ARM_AM::getT2SOImmVal(~ZImmVal) != -1))
238       return 1;
239     return ST->hasV6T2Ops() ? 2 : 3;
240   }
  // Thumb1: any i8 immediate costs 1.
242   if (Bits == 8 || (SImmVal >= 0 && SImmVal < 256))
243     return 1;
244   if ((~SImmVal < 256) || ARM_AM::isThumbImmShiftedVal(ZImmVal))
245     return 2;
246   // Load from constantpool.
247   return 3;
248 }
249 
// Constants smaller than 256 fit in the immediate field of Thumb1
// instructions, so we return a cost of zero for them and 1 otherwise.
252 int ARMTTIImpl::getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
253                                       const APInt &Imm, Type *Ty) {
254   if (Imm.isNonNegative() && Imm.getLimitedValue() < 256)
255     return 0;
256 
257   return 1;
258 }
259 
260 int ARMTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx, const APInt &Imm,
261                                   Type *Ty, TTI::TargetCostKind CostKind) {
262   // Division by a constant can be turned into multiplication, but only if we
263   // know it's constant. So it's not so much that the immediate is cheap (it's
264   // not), but that the alternative is worse.
265   // FIXME: this is probably unneeded with GlobalISel.
266   if ((Opcode == Instruction::SDiv || Opcode == Instruction::UDiv ||
267        Opcode == Instruction::SRem || Opcode == Instruction::URem) &&
268       Idx == 1)
269     return 0;
270 
271   if (Opcode == Instruction::And) {
272     // UXTB/UXTH
273     if (Imm == 255 || Imm == 65535)
274       return 0;
275     // Conversion to BIC is free, and means we can use ~Imm instead.
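    // For example, 'and r0, r0, #0xffffff00' can instead be encoded as
    // 'bic r0, r0, #0xff', so whichever of Imm and ~Imm is cheaper wins.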
276     return std::min(getIntImmCost(Imm, Ty, CostKind),
277                     getIntImmCost(~Imm, Ty, CostKind));
278   }
279 
280   if (Opcode == Instruction::Add)
281     // Conversion to SUB is free, and means we can use -Imm instead.
282     return std::min(getIntImmCost(Imm, Ty, CostKind),
283                     getIntImmCost(-Imm, Ty, CostKind));
284 
285   if (Opcode == Instruction::ICmp && Imm.isNegative() &&
286       Ty->getIntegerBitWidth() == 32) {
287     int64_t NegImm = -Imm.getSExtValue();
288     if (ST->isThumb2() && NegImm < 1<<12)
289       // icmp X, #-C -> cmn X, #C
290       return 0;
291     if (ST->isThumb() && NegImm < 1<<8)
292       // icmp X, #-C -> adds X, #C
293       return 0;
294   }
295 
296   // xor a, -1 can always be folded to MVN
297   if (Opcode == Instruction::Xor && Imm.isAllOnesValue())
298     return 0;
299 
300   return getIntImmCost(Imm, Ty, CostKind);
301 }
302 
303 int ARMTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
304                                  TTI::CastContextHint CCH,
305                                  TTI::TargetCostKind CostKind,
306                                  const Instruction *I) {
307   int ISD = TLI->InstructionOpcodeToISD(Opcode);
308   assert(ISD && "Invalid opcode");
309 
310   // TODO: Allow non-throughput costs that aren't binary.
311   auto AdjustCost = [&CostKind](int Cost) {
312     if (CostKind != TTI::TCK_RecipThroughput)
313       return Cost == 0 ? 0 : 1;
314     return Cost;
315   };
316   auto IsLegalFPType = [this](EVT VT) {
317     EVT EltVT = VT.getScalarType();
318     return (EltVT == MVT::f32 && ST->hasVFP2Base()) ||
319             (EltVT == MVT::f64 && ST->hasFP64()) ||
320             (EltVT == MVT::f16 && ST->hasFullFP16());
321   };
322 
323   EVT SrcTy = TLI->getValueType(DL, Src);
324   EVT DstTy = TLI->getValueType(DL, Dst);
325 
326   if (!SrcTy.isSimple() || !DstTy.isSimple())
327     return AdjustCost(
328         BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
329 
  // Extending masked loads and truncating masked stores are expensive because
  // we currently don't split them. This means that we'll likely end up
  // loading/storing each element individually (hence the high cost).
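  // For example, a masked load of <8 x i16> zero-extended to <8 x i32> (a
  // 256-bit result) is costed per element here rather than as one widening
  // load.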
333   if ((ST->hasMVEIntegerOps() &&
334        (Opcode == Instruction::Trunc || Opcode == Instruction::ZExt ||
335         Opcode == Instruction::SExt)) ||
336       (ST->hasMVEFloatOps() &&
337        (Opcode == Instruction::FPExt || Opcode == Instruction::FPTrunc) &&
338        IsLegalFPType(SrcTy) && IsLegalFPType(DstTy)))
339     if (CCH == TTI::CastContextHint::Masked && DstTy.getSizeInBits() > 128)
340       return 2 * DstTy.getVectorNumElements() * ST->getMVEVectorCostFactor();
341 
  // The extend of other kinds of loads is free.
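  // (The extending load instructions, e.g. LDRB/LDRSB and LDRH/LDRSH, perform
  // the zero/sign extension as part of the load itself.)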
343   if (CCH == TTI::CastContextHint::Normal ||
344       CCH == TTI::CastContextHint::Masked) {
345     static const TypeConversionCostTblEntry LoadConversionTbl[] = {
346         {ISD::SIGN_EXTEND, MVT::i32, MVT::i16, 0},
347         {ISD::ZERO_EXTEND, MVT::i32, MVT::i16, 0},
348         {ISD::SIGN_EXTEND, MVT::i32, MVT::i8, 0},
349         {ISD::ZERO_EXTEND, MVT::i32, MVT::i8, 0},
350         {ISD::SIGN_EXTEND, MVT::i16, MVT::i8, 0},
351         {ISD::ZERO_EXTEND, MVT::i16, MVT::i8, 0},
352         {ISD::SIGN_EXTEND, MVT::i64, MVT::i32, 1},
353         {ISD::ZERO_EXTEND, MVT::i64, MVT::i32, 1},
354         {ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 1},
355         {ISD::ZERO_EXTEND, MVT::i64, MVT::i16, 1},
356         {ISD::SIGN_EXTEND, MVT::i64, MVT::i8, 1},
357         {ISD::ZERO_EXTEND, MVT::i64, MVT::i8, 1},
358     };
359     if (const auto *Entry = ConvertCostTableLookup(
360             LoadConversionTbl, ISD, DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
361       return AdjustCost(Entry->Cost);
362 
363     static const TypeConversionCostTblEntry MVELoadConversionTbl[] = {
364         {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 0},
365         {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 0},
366         {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 0},
367         {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 0},
368         {ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 0},
369         {ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 0},
        // The following extend from a legal type to an illegal type, so they
        // need to split the load. This introduces an extra load operation, but
        // the extend is still "free".
373         {ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1},
374         {ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1},
375         {ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 3},
376         {ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 3},
377         {ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1},
378         {ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1},
379     };
380     if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
381       if (const auto *Entry =
382               ConvertCostTableLookup(MVELoadConversionTbl, ISD,
383                                      DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
384         return AdjustCost(Entry->Cost * ST->getMVEVectorCostFactor());
385     }
386 
387     static const TypeConversionCostTblEntry MVEFLoadConversionTbl[] = {
388         // FPExtends are similar but also require the VCVT instructions.
389         {ISD::FP_EXTEND, MVT::v4f32, MVT::v4f16, 1},
390         {ISD::FP_EXTEND, MVT::v8f32, MVT::v8f16, 3},
391     };
392     if (SrcTy.isVector() && ST->hasMVEFloatOps()) {
393       if (const auto *Entry =
394               ConvertCostTableLookup(MVEFLoadConversionTbl, ISD,
395                                      DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
396         return AdjustCost(Entry->Cost * ST->getMVEVectorCostFactor());
397     }
398 
399     // The truncate of a store is free. This is the mirror of extends above.
400     static const TypeConversionCostTblEntry MVEStoreConversionTbl[] = {
401         {ISD::TRUNCATE, MVT::v4i32, MVT::v4i16, 0},
402         {ISD::TRUNCATE, MVT::v4i32, MVT::v4i8, 0},
403         {ISD::TRUNCATE, MVT::v8i16, MVT::v8i8, 0},
404         {ISD::TRUNCATE, MVT::v8i32, MVT::v8i16, 1},
405         {ISD::TRUNCATE, MVT::v16i32, MVT::v16i8, 3},
406         {ISD::TRUNCATE, MVT::v16i16, MVT::v16i8, 1},
407     };
408     if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
409       if (const auto *Entry =
410               ConvertCostTableLookup(MVEStoreConversionTbl, ISD,
411                                      SrcTy.getSimpleVT(), DstTy.getSimpleVT()))
412         return AdjustCost(Entry->Cost * ST->getMVEVectorCostFactor());
413     }
414 
415     static const TypeConversionCostTblEntry MVEFStoreConversionTbl[] = {
416         {ISD::FP_ROUND, MVT::v4f32, MVT::v4f16, 1},
417         {ISD::FP_ROUND, MVT::v8f32, MVT::v8f16, 3},
418     };
419     if (SrcTy.isVector() && ST->hasMVEFloatOps()) {
420       if (const auto *Entry =
421               ConvertCostTableLookup(MVEFStoreConversionTbl, ISD,
422                                      SrcTy.getSimpleVT(), DstTy.getSimpleVT()))
423         return AdjustCost(Entry->Cost * ST->getMVEVectorCostFactor());
424     }
425   }
426 
427   // NEON vector operations that can extend their inputs.
428   if ((ISD == ISD::SIGN_EXTEND || ISD == ISD::ZERO_EXTEND) &&
429       I && I->hasOneUse() && ST->hasNEON() && SrcTy.isVector()) {
430     static const TypeConversionCostTblEntry NEONDoubleWidthTbl[] = {
431       // vaddl
432       { ISD::ADD, MVT::v4i32, MVT::v4i16, 0 },
433       { ISD::ADD, MVT::v8i16, MVT::v8i8,  0 },
434       // vsubl
435       { ISD::SUB, MVT::v4i32, MVT::v4i16, 0 },
436       { ISD::SUB, MVT::v8i16, MVT::v8i8,  0 },
437       // vmull
438       { ISD::MUL, MVT::v4i32, MVT::v4i16, 0 },
439       { ISD::MUL, MVT::v8i16, MVT::v8i8,  0 },
440       // vshll
441       { ISD::SHL, MVT::v4i32, MVT::v4i16, 0 },
442       { ISD::SHL, MVT::v8i16, MVT::v8i8,  0 },
443     };
444 
445     auto *User = cast<Instruction>(*I->user_begin());
446     int UserISD = TLI->InstructionOpcodeToISD(User->getOpcode());
447     if (auto *Entry = ConvertCostTableLookup(NEONDoubleWidthTbl, UserISD,
448                                              DstTy.getSimpleVT(),
449                                              SrcTy.getSimpleVT())) {
450       return AdjustCost(Entry->Cost);
451     }
452   }
453 
454   // Single to/from double precision conversions.
455   if (Src->isVectorTy() && ST->hasNEON() &&
456       ((ISD == ISD::FP_ROUND && SrcTy.getScalarType() == MVT::f64 &&
457         DstTy.getScalarType() == MVT::f32) ||
458        (ISD == ISD::FP_EXTEND && SrcTy.getScalarType() == MVT::f32 &&
459         DstTy.getScalarType() == MVT::f64))) {
460     static const CostTblEntry NEONFltDblTbl[] = {
461         // Vector fptrunc/fpext conversions.
462         {ISD::FP_ROUND, MVT::v2f64, 2},
463         {ISD::FP_EXTEND, MVT::v2f32, 2},
464         {ISD::FP_EXTEND, MVT::v4f32, 4}};
465 
466     std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
467     if (const auto *Entry = CostTableLookup(NEONFltDblTbl, ISD, LT.second))
468       return AdjustCost(LT.first * Entry->Cost);
469   }
470 
471   // Some arithmetic, load and store operations have specific instructions
472   // to cast up/down their types automatically at no extra cost.
473   // TODO: Get these tables to know at least what the related operations are.
474   static const TypeConversionCostTblEntry NEONVectorConversionTbl[] = {
475     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
476     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
477     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
478     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
479     { ISD::TRUNCATE,    MVT::v4i32, MVT::v4i64, 0 },
480     { ISD::TRUNCATE,    MVT::v4i16, MVT::v4i32, 1 },
481 
482     // The number of vmovl instructions for the extension.
483     { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8,  1 },
484     { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8,  1 },
485     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8,  2 },
486     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8,  2 },
487     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8,  3 },
488     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8,  3 },
489     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
490     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
491     { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
492     { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
493     { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
494     { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
495     { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
496     { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
497     { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
498     { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
499     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
500     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
501 
502     // Operations that we legalize using splitting.
503     { ISD::TRUNCATE,    MVT::v16i8, MVT::v16i32, 6 },
504     { ISD::TRUNCATE,    MVT::v8i8, MVT::v8i32, 3 },
505 
506     // Vector float <-> i32 conversions.
507     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
508     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
509 
510     { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
511     { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
512     { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
513     { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
514     { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
515     { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
516     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
517     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
518     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
519     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
520     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
521     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
522     { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
523     { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
524     { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
525     { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
526     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
527     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
528     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },
529     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },
530 
531     { ISD::FP_TO_SINT,  MVT::v4i32, MVT::v4f32, 1 },
532     { ISD::FP_TO_UINT,  MVT::v4i32, MVT::v4f32, 1 },
533     { ISD::FP_TO_SINT,  MVT::v4i8, MVT::v4f32, 3 },
534     { ISD::FP_TO_UINT,  MVT::v4i8, MVT::v4f32, 3 },
535     { ISD::FP_TO_SINT,  MVT::v4i16, MVT::v4f32, 2 },
536     { ISD::FP_TO_UINT,  MVT::v4i16, MVT::v4f32, 2 },
537 
538     // Vector double <-> i32 conversions.
539     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
540     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
541 
542     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
543     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
544     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
545     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
546     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
547     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
548 
549     { ISD::FP_TO_SINT,  MVT::v2i32, MVT::v2f64, 2 },
550     { ISD::FP_TO_UINT,  MVT::v2i32, MVT::v2f64, 2 },
551     { ISD::FP_TO_SINT,  MVT::v8i16, MVT::v8f32, 4 },
552     { ISD::FP_TO_UINT,  MVT::v8i16, MVT::v8f32, 4 },
553     { ISD::FP_TO_SINT,  MVT::v16i16, MVT::v16f32, 8 },
554     { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v16f32, 8 }
555   };
556 
557   if (SrcTy.isVector() && ST->hasNEON()) {
558     if (const auto *Entry = ConvertCostTableLookup(NEONVectorConversionTbl, ISD,
559                                                    DstTy.getSimpleVT(),
560                                                    SrcTy.getSimpleVT()))
561       return AdjustCost(Entry->Cost);
562   }
563 
564   // Scalar float to integer conversions.
565   static const TypeConversionCostTblEntry NEONFloatConversionTbl[] = {
566     { ISD::FP_TO_SINT,  MVT::i1, MVT::f32, 2 },
567     { ISD::FP_TO_UINT,  MVT::i1, MVT::f32, 2 },
568     { ISD::FP_TO_SINT,  MVT::i1, MVT::f64, 2 },
569     { ISD::FP_TO_UINT,  MVT::i1, MVT::f64, 2 },
570     { ISD::FP_TO_SINT,  MVT::i8, MVT::f32, 2 },
571     { ISD::FP_TO_UINT,  MVT::i8, MVT::f32, 2 },
572     { ISD::FP_TO_SINT,  MVT::i8, MVT::f64, 2 },
573     { ISD::FP_TO_UINT,  MVT::i8, MVT::f64, 2 },
574     { ISD::FP_TO_SINT,  MVT::i16, MVT::f32, 2 },
575     { ISD::FP_TO_UINT,  MVT::i16, MVT::f32, 2 },
576     { ISD::FP_TO_SINT,  MVT::i16, MVT::f64, 2 },
577     { ISD::FP_TO_UINT,  MVT::i16, MVT::f64, 2 },
578     { ISD::FP_TO_SINT,  MVT::i32, MVT::f32, 2 },
579     { ISD::FP_TO_UINT,  MVT::i32, MVT::f32, 2 },
580     { ISD::FP_TO_SINT,  MVT::i32, MVT::f64, 2 },
581     { ISD::FP_TO_UINT,  MVT::i32, MVT::f64, 2 },
582     { ISD::FP_TO_SINT,  MVT::i64, MVT::f32, 10 },
583     { ISD::FP_TO_UINT,  MVT::i64, MVT::f32, 10 },
584     { ISD::FP_TO_SINT,  MVT::i64, MVT::f64, 10 },
585     { ISD::FP_TO_UINT,  MVT::i64, MVT::f64, 10 }
586   };
587   if (SrcTy.isFloatingPoint() && ST->hasNEON()) {
588     if (const auto *Entry = ConvertCostTableLookup(NEONFloatConversionTbl, ISD,
589                                                    DstTy.getSimpleVT(),
590                                                    SrcTy.getSimpleVT()))
591       return AdjustCost(Entry->Cost);
592   }
593 
594   // Scalar integer to float conversions.
595   static const TypeConversionCostTblEntry NEONIntegerConversionTbl[] = {
596     { ISD::SINT_TO_FP,  MVT::f32, MVT::i1, 2 },
597     { ISD::UINT_TO_FP,  MVT::f32, MVT::i1, 2 },
598     { ISD::SINT_TO_FP,  MVT::f64, MVT::i1, 2 },
599     { ISD::UINT_TO_FP,  MVT::f64, MVT::i1, 2 },
600     { ISD::SINT_TO_FP,  MVT::f32, MVT::i8, 2 },
601     { ISD::UINT_TO_FP,  MVT::f32, MVT::i8, 2 },
602     { ISD::SINT_TO_FP,  MVT::f64, MVT::i8, 2 },
603     { ISD::UINT_TO_FP,  MVT::f64, MVT::i8, 2 },
604     { ISD::SINT_TO_FP,  MVT::f32, MVT::i16, 2 },
605     { ISD::UINT_TO_FP,  MVT::f32, MVT::i16, 2 },
606     { ISD::SINT_TO_FP,  MVT::f64, MVT::i16, 2 },
607     { ISD::UINT_TO_FP,  MVT::f64, MVT::i16, 2 },
608     { ISD::SINT_TO_FP,  MVT::f32, MVT::i32, 2 },
609     { ISD::UINT_TO_FP,  MVT::f32, MVT::i32, 2 },
610     { ISD::SINT_TO_FP,  MVT::f64, MVT::i32, 2 },
611     { ISD::UINT_TO_FP,  MVT::f64, MVT::i32, 2 },
612     { ISD::SINT_TO_FP,  MVT::f32, MVT::i64, 10 },
613     { ISD::UINT_TO_FP,  MVT::f32, MVT::i64, 10 },
614     { ISD::SINT_TO_FP,  MVT::f64, MVT::i64, 10 },
615     { ISD::UINT_TO_FP,  MVT::f64, MVT::i64, 10 }
616   };
617 
618   if (SrcTy.isInteger() && ST->hasNEON()) {
619     if (const auto *Entry = ConvertCostTableLookup(NEONIntegerConversionTbl,
620                                                    ISD, DstTy.getSimpleVT(),
621                                                    SrcTy.getSimpleVT()))
622       return AdjustCost(Entry->Cost);
623   }
624 
  // MVE extend costs, taken from codegen tests. i8->i16 or i16->i32 is one
  // instruction, i8->i32 is two. i64 zexts are a VAND with a constant; sexts
  // are linearised so take more.
628   static const TypeConversionCostTblEntry MVEVectorConversionTbl[] = {
629     { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
630     { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
631     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
632     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
633     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8, 10 },
634     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8, 2 },
635     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
636     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
637     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 10 },
638     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
639     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 8 },
640     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 2 },
641   };
642 
643   if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
644     if (const auto *Entry = ConvertCostTableLookup(MVEVectorConversionTbl,
645                                                    ISD, DstTy.getSimpleVT(),
646                                                    SrcTy.getSimpleVT()))
647       return AdjustCost(Entry->Cost * ST->getMVEVectorCostFactor());
648   }
649 
650   if (ISD == ISD::FP_ROUND || ISD == ISD::FP_EXTEND) {
    // As a general rule, fp conversions that were not matched above are
    // scalarized and cost 1 vcvt per lane, so long as the instruction is
    // available. If not, they become a series of function calls.
654     const int CallCost = getCallInstrCost(nullptr, Dst, {Src}, CostKind);
655     int Lanes = 1;
656     if (SrcTy.isFixedLengthVector())
657       Lanes = SrcTy.getVectorNumElements();
658 
659     if (IsLegalFPType(SrcTy) && IsLegalFPType(DstTy))
660       return Lanes;
661     else
662       return Lanes * CallCost;
663   }
664 
665   // Scalar integer conversion costs.
666   static const TypeConversionCostTblEntry ARMIntegerConversionTbl[] = {
667     // i16 -> i64 requires two dependent operations.
668     { ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 2 },
669 
670     // Truncates on i64 are assumed to be free.
671     { ISD::TRUNCATE,    MVT::i32, MVT::i64, 0 },
672     { ISD::TRUNCATE,    MVT::i16, MVT::i64, 0 },
673     { ISD::TRUNCATE,    MVT::i8,  MVT::i64, 0 },
674     { ISD::TRUNCATE,    MVT::i1,  MVT::i64, 0 }
675   };
676 
677   if (SrcTy.isInteger()) {
678     if (const auto *Entry = ConvertCostTableLookup(ARMIntegerConversionTbl, ISD,
679                                                    DstTy.getSimpleVT(),
680                                                    SrcTy.getSimpleVT()))
681       return AdjustCost(Entry->Cost);
682   }
683 
684   int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
685                      ? ST->getMVEVectorCostFactor()
686                      : 1;
687   return AdjustCost(
688       BaseCost * BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
689 }
690 
691 int ARMTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
692                                    unsigned Index) {
  // Penalize inserting into a D-subregister. We end up with a three times
  // lower estimated throughput on Swift.
695   if (ST->hasSlowLoadDSubregister() && Opcode == Instruction::InsertElement &&
696       ValTy->isVectorTy() && ValTy->getScalarSizeInBits() <= 32)
697     return 3;
698 
699   if (ST->hasNEON() && (Opcode == Instruction::InsertElement ||
700                         Opcode == Instruction::ExtractElement)) {
701     // Cross-class copies are expensive on many microarchitectures,
702     // so assume they are expensive by default.
703     if (cast<VectorType>(ValTy)->getElementType()->isIntegerTy())
704       return 3;
705 
    // Even if it's not a cross-class copy, this likely leads to mixing
    // of NEON and VFP code and should therefore be penalized.
708     if (ValTy->isVectorTy() &&
709         ValTy->getScalarSizeInBits() <= 32)
710       return std::max(BaseT::getVectorInstrCost(Opcode, ValTy, Index), 2U);
711   }
712 
713   if (ST->hasMVEIntegerOps() && (Opcode == Instruction::InsertElement ||
714                                  Opcode == Instruction::ExtractElement)) {
    // We say MVE moves cost at least the MVEVectorCostFactor, even though
    // they are scalar instructions. This helps prevent mixing scalar and
    // vector code, to avoid vectorising where we end up just scalarising the
    // result anyway.
719     return std::max(BaseT::getVectorInstrCost(Opcode, ValTy, Index),
720                     ST->getMVEVectorCostFactor()) *
721            cast<FixedVectorType>(ValTy)->getNumElements() / 2;
722   }
723 
724   return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
725 }
726 
727 int ARMTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
728                                    TTI::TargetCostKind CostKind,
729                                    const Instruction *I) {
730   // TODO: Handle other cost kinds.
731   if (CostKind != TTI::TCK_RecipThroughput)
732     return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, CostKind, I);
733 
734   int ISD = TLI->InstructionOpcodeToISD(Opcode);
735   // On NEON a vector select gets lowered to vbsl.
736   if (ST->hasNEON() && ValTy->isVectorTy() && ISD == ISD::SELECT) {
737     // Lowering of some vector selects is currently far from perfect.
738     static const TypeConversionCostTblEntry NEONVectorSelectTbl[] = {
739       { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4*4 + 1*2 + 1 },
740       { ISD::SELECT, MVT::v8i1, MVT::v8i64, 50 },
741       { ISD::SELECT, MVT::v16i1, MVT::v16i64, 100 }
742     };
743 
744     EVT SelCondTy = TLI->getValueType(DL, CondTy);
745     EVT SelValTy = TLI->getValueType(DL, ValTy);
746     if (SelCondTy.isSimple() && SelValTy.isSimple()) {
747       if (const auto *Entry = ConvertCostTableLookup(NEONVectorSelectTbl, ISD,
748                                                      SelCondTy.getSimpleVT(),
749                                                      SelValTy.getSimpleVT()))
750         return Entry->Cost;
751     }
752 
753     std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
754     return LT.first;
755   }
756 
757   int BaseCost = ST->hasMVEIntegerOps() && ValTy->isVectorTy()
758                      ? ST->getMVEVectorCostFactor()
759                      : 1;
760   return BaseCost * BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, CostKind,
761                                               I);
762 }
763 
764 int ARMTTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
765                                           const SCEV *Ptr) {
766   // Address computations in vectorized code with non-consecutive addresses will
767   // likely result in more instructions compared to scalar code where the
768   // computation can more often be merged into the index mode. The resulting
769   // extra micro-ops can significantly decrease throughput.
770   unsigned NumVectorInstToHideOverhead = 10;
771   int MaxMergeDistance = 64;
772 
773   if (ST->hasNEON()) {
774     if (Ty->isVectorTy() && SE &&
775         !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
776       return NumVectorInstToHideOverhead;
777 
778     // In many cases the address computation is not merged into the instruction
779     // addressing mode.
780     return 1;
781   }
782   return BaseT::getAddressComputationCost(Ty, SE, Ptr);
783 }
784 
785 bool ARMTTIImpl::isProfitableLSRChainElement(Instruction *I) {
786   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
787     // If a VCTP is part of a chain, it's already profitable and shouldn't be
788     // optimized, else LSR may block tail-predication.
789     switch (II->getIntrinsicID()) {
790     case Intrinsic::arm_mve_vctp8:
791     case Intrinsic::arm_mve_vctp16:
792     case Intrinsic::arm_mve_vctp32:
793     case Intrinsic::arm_mve_vctp64:
794       return true;
795     default:
796       break;
797     }
798   }
799   return false;
800 }
801 
802 bool ARMTTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) {
803   if (!EnableMaskedLoadStores || !ST->hasMVEIntegerOps())
804     return false;
805 
806   if (auto *VecTy = dyn_cast<FixedVectorType>(DataTy)) {
807     // Don't support v2i1 yet.
808     if (VecTy->getNumElements() == 2)
809       return false;
810 
811     // We don't support extending fp types.
    unsigned VecWidth = DataTy->getPrimitiveSizeInBits();
813     if (VecWidth != 128 && VecTy->getElementType()->isFloatingPointTy())
814       return false;
815   }
816 
817   unsigned EltWidth = DataTy->getScalarSizeInBits();
818   return (EltWidth == 32 && Alignment >= 4) ||
819          (EltWidth == 16 && Alignment >= 2) || (EltWidth == 8);
820 }
821 
822 bool ARMTTIImpl::isLegalMaskedGather(Type *Ty, Align Alignment) {
823   if (!EnableMaskedGatherScatters || !ST->hasMVEIntegerOps())
824     return false;
825 
826   // This method is called in 2 places:
827   //  - from the vectorizer with a scalar type, in which case we need to get
828   //  this as good as we can with the limited info we have (and rely on the cost
829   //  model for the rest).
830   //  - from the masked intrinsic lowering pass with the actual vector type.
831   // For MVE, we have a custom lowering pass that will already have custom
832   // legalised any gathers that we can to MVE intrinsics, and want to expand all
833   // the rest. The pass runs before the masked intrinsic lowering pass, so if we
834   // are here, we know we want to expand.
835   if (isa<VectorType>(Ty))
836     return false;
837 
838   unsigned EltWidth = Ty->getScalarSizeInBits();
839   return ((EltWidth == 32 && Alignment >= 4) ||
840           (EltWidth == 16 && Alignment >= 2) || EltWidth == 8);
841 }
842 
843 int ARMTTIImpl::getMemcpyCost(const Instruction *I) {
844   const MemCpyInst *MI = dyn_cast<MemCpyInst>(I);
845   assert(MI && "MemcpyInst expected");
846   ConstantInt *C = dyn_cast<ConstantInt>(MI->getLength());
847 
848   // To model the cost of a library call, we assume 1 for the call, and
849   // 3 for the argument setup.
850   const unsigned LibCallCost = 4;
851 
852   // If 'size' is not a constant, a library call will be generated.
853   if (!C)
854     return LibCallCost;
855 
856   const unsigned Size = C->getValue().getZExtValue();
857   const Align DstAlign = *MI->getDestAlign();
858   const Align SrcAlign = *MI->getSourceAlign();
859   const Function *F = I->getParent()->getParent();
860   const unsigned Limit = TLI->getMaxStoresPerMemmove(F->hasMinSize());
861   std::vector<EVT> MemOps;
862 
  // MemOps will be populated with a list of data types that need to be
  // loaded and stored. That's why we multiply the number of elements by 2 to
  // get the cost for this memcpy.
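  // For example, if the lowering below chose two i64 operations for a 16-byte
  // copy, MemOps would hold two entries and the returned cost would be 4 (two
  // loads plus two stores).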
866   if (getTLI()->findOptimalMemOpLowering(
867           MemOps, Limit,
868           MemOp::Copy(Size, /*DstAlignCanChange*/ false, DstAlign, SrcAlign,
869                       /*IsVolatile*/ true),
870           MI->getDestAddressSpace(), MI->getSourceAddressSpace(),
871           F->getAttributes()))
872     return MemOps.size() * 2;
873 
874   // If we can't find an optimal memop lowering, return the default cost
875   return LibCallCost;
876 }
877 
878 int ARMTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp,
879                                int Index, VectorType *SubTp) {
880   if (ST->hasNEON()) {
881     if (Kind == TTI::SK_Broadcast) {
882       static const CostTblEntry NEONDupTbl[] = {
883           // VDUP handles these cases.
884           {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
885           {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
886           {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
887           {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
888           {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
889           {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},
890 
891           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
892           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
893           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
894           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1}};
895 
896       std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
897 
898       if (const auto *Entry =
899               CostTableLookup(NEONDupTbl, ISD::VECTOR_SHUFFLE, LT.second))
900         return LT.first * Entry->Cost;
901     }
902     if (Kind == TTI::SK_Reverse) {
903       static const CostTblEntry NEONShuffleTbl[] = {
          // Reverse shuffle costs one instruction if we are shuffling within a
905           // double word (vrev) or two if we shuffle a quad word (vrev, vext).
906           {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
907           {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
908           {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
909           {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
910           {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
911           {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},
912 
913           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
914           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
915           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 2},
916           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 2}};
917 
918       std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
919 
920       if (const auto *Entry =
921               CostTableLookup(NEONShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second))
922         return LT.first * Entry->Cost;
923     }
924     if (Kind == TTI::SK_Select) {
925       static const CostTblEntry NEONSelShuffleTbl[] = {
          // Select shuffle cost table for ARM. Cost is the number of
          // instructions required to create the shuffled vector.
929 
930           {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
931           {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
932           {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
933           {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
934 
935           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
936           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
937           {ISD::VECTOR_SHUFFLE, MVT::v4i16, 2},
938 
939           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 16},
940 
941           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 32}};
942 
943       std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
944       if (const auto *Entry = CostTableLookup(NEONSelShuffleTbl,
945                                               ISD::VECTOR_SHUFFLE, LT.second))
946         return LT.first * Entry->Cost;
947     }
948   }
949   if (ST->hasMVEIntegerOps()) {
950     if (Kind == TTI::SK_Broadcast) {
951       static const CostTblEntry MVEDupTbl[] = {
952           // VDUP handles these cases.
953           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
954           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
955           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1},
956           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
957           {ISD::VECTOR_SHUFFLE, MVT::v8f16, 1}};
958 
959       std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
960 
961       if (const auto *Entry = CostTableLookup(MVEDupTbl, ISD::VECTOR_SHUFFLE,
962                                               LT.second))
963         return LT.first * Entry->Cost * ST->getMVEVectorCostFactor();
964     }
965   }
966   int BaseCost = ST->hasMVEIntegerOps() && Tp->isVectorTy()
967                      ? ST->getMVEVectorCostFactor()
968                      : 1;
969   return BaseCost * BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
970 }
971 
972 int ARMTTIImpl::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
973                                        TTI::TargetCostKind CostKind,
974                                        TTI::OperandValueKind Op1Info,
975                                        TTI::OperandValueKind Op2Info,
976                                        TTI::OperandValueProperties Opd1PropInfo,
977                                        TTI::OperandValueProperties Opd2PropInfo,
978                                        ArrayRef<const Value *> Args,
979                                        const Instruction *CxtI) {
980   // TODO: Handle more cost kinds.
981   if (CostKind != TTI::TCK_RecipThroughput)
982     return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
983                                          Op2Info, Opd1PropInfo,
984                                          Opd2PropInfo, Args, CxtI);
985 
986   int ISDOpcode = TLI->InstructionOpcodeToISD(Opcode);
987   std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
988 
989   if (ST->hasNEON()) {
990     const unsigned FunctionCallDivCost = 20;
991     const unsigned ReciprocalDivCost = 10;
992     static const CostTblEntry CostTbl[] = {
993       // Division.
      // These costs are somewhat random. Choose a cost of 20 to indicate that
      // vectorizing division (with the added function call) is going to be
      // very expensive.
996       // Double registers types.
997       { ISD::SDIV, MVT::v1i64, 1 * FunctionCallDivCost},
998       { ISD::UDIV, MVT::v1i64, 1 * FunctionCallDivCost},
999       { ISD::SREM, MVT::v1i64, 1 * FunctionCallDivCost},
1000       { ISD::UREM, MVT::v1i64, 1 * FunctionCallDivCost},
1001       { ISD::SDIV, MVT::v2i32, 2 * FunctionCallDivCost},
1002       { ISD::UDIV, MVT::v2i32, 2 * FunctionCallDivCost},
1003       { ISD::SREM, MVT::v2i32, 2 * FunctionCallDivCost},
1004       { ISD::UREM, MVT::v2i32, 2 * FunctionCallDivCost},
1005       { ISD::SDIV, MVT::v4i16,     ReciprocalDivCost},
1006       { ISD::UDIV, MVT::v4i16,     ReciprocalDivCost},
1007       { ISD::SREM, MVT::v4i16, 4 * FunctionCallDivCost},
1008       { ISD::UREM, MVT::v4i16, 4 * FunctionCallDivCost},
1009       { ISD::SDIV, MVT::v8i8,      ReciprocalDivCost},
1010       { ISD::UDIV, MVT::v8i8,      ReciprocalDivCost},
1011       { ISD::SREM, MVT::v8i8,  8 * FunctionCallDivCost},
1012       { ISD::UREM, MVT::v8i8,  8 * FunctionCallDivCost},
1013       // Quad register types.
1014       { ISD::SDIV, MVT::v2i64, 2 * FunctionCallDivCost},
1015       { ISD::UDIV, MVT::v2i64, 2 * FunctionCallDivCost},
1016       { ISD::SREM, MVT::v2i64, 2 * FunctionCallDivCost},
1017       { ISD::UREM, MVT::v2i64, 2 * FunctionCallDivCost},
1018       { ISD::SDIV, MVT::v4i32, 4 * FunctionCallDivCost},
1019       { ISD::UDIV, MVT::v4i32, 4 * FunctionCallDivCost},
1020       { ISD::SREM, MVT::v4i32, 4 * FunctionCallDivCost},
1021       { ISD::UREM, MVT::v4i32, 4 * FunctionCallDivCost},
1022       { ISD::SDIV, MVT::v8i16, 8 * FunctionCallDivCost},
1023       { ISD::UDIV, MVT::v8i16, 8 * FunctionCallDivCost},
1024       { ISD::SREM, MVT::v8i16, 8 * FunctionCallDivCost},
1025       { ISD::UREM, MVT::v8i16, 8 * FunctionCallDivCost},
1026       { ISD::SDIV, MVT::v16i8, 16 * FunctionCallDivCost},
1027       { ISD::UDIV, MVT::v16i8, 16 * FunctionCallDivCost},
1028       { ISD::SREM, MVT::v16i8, 16 * FunctionCallDivCost},
1029       { ISD::UREM, MVT::v16i8, 16 * FunctionCallDivCost},
1030       // Multiplication.
1031     };
1032 
1033     if (const auto *Entry = CostTableLookup(CostTbl, ISDOpcode, LT.second))
1034       return LT.first * Entry->Cost;
1035 
1036     int Cost = BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
1037                                              Op2Info,
1038                                              Opd1PropInfo, Opd2PropInfo);
1039 
1040     // This is somewhat of a hack. The problem that we are facing is that SROA
1041     // creates a sequence of shift, and, or instructions to construct values.
1042     // These sequences are recognized by the ISel and have zero-cost. Not so for
1043     // the vectorized code. Because we have support for v2i64 but not i64 those
1044     // sequences look particularly beneficial to vectorize.
1045     // To work around this we increase the cost of v2i64 operations to make them
1046     // seem less beneficial.
1047     if (LT.second == MVT::v2i64 &&
1048         Op2Info == TargetTransformInfo::OK_UniformConstantValue)
1049       Cost += 4;
1050 
1051     return Cost;
1052   }
1053 
1054   // If this operation is a shift on arm/thumb2, it might well be folded into
1055   // the following instruction, hence having a cost of 0.
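  // For example, the shift in 'x << 2' feeding an add can become the shifted
  // operand of the add itself, as in 'add r0, r1, r2, lsl #2'.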
1056   auto LooksLikeAFreeShift = [&]() {
1057     if (ST->isThumb1Only() || Ty->isVectorTy())
1058       return false;
1059 
1060     if (!CxtI || !CxtI->hasOneUse() || !CxtI->isShift())
1061       return false;
1062     if (Op2Info != TargetTransformInfo::OK_UniformConstantValue)
1063       return false;
1064 
    // Folded into an ADC/ADD/AND/BIC/CMP/EOR/MVN/ORR/ORN/RSB/SBC/SUB
1066     switch (cast<Instruction>(CxtI->user_back())->getOpcode()) {
1067     case Instruction::Add:
1068     case Instruction::Sub:
1069     case Instruction::And:
1070     case Instruction::Xor:
1071     case Instruction::Or:
1072     case Instruction::ICmp:
1073       return true;
1074     default:
1075       return false;
1076     }
1077   };
1078   if (LooksLikeAFreeShift())
1079     return 0;
1080 
1081   int BaseCost = ST->hasMVEIntegerOps() && Ty->isVectorTy()
1082                      ? ST->getMVEVectorCostFactor()
1083                      : 1;
1084 
  // The rest of this mostly follows what is done in
  // BaseT::getArithmeticInstrCost, without treating floats as more expensive
  // than scalars or increasing the costs for custom operations. The result is
  // also multiplied by the MVEVectorCostFactor where appropriate.
1089   if (TLI->isOperationLegalOrCustomOrPromote(ISDOpcode, LT.second))
1090     return LT.first * BaseCost;
1091 
1092   // Else this is expand, assume that we need to scalarize this op.
1093   if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
1094     unsigned Num = VTy->getNumElements();
1095     unsigned Cost = getArithmeticInstrCost(Opcode, Ty->getScalarType(),
1096                                            CostKind);
    // Return the cost of multiple scalar invocations plus the cost of
1098     // inserting and extracting the values.
1099     return BaseT::getScalarizationOverhead(VTy, Args) + Num * Cost;
1100   }
1101 
1102   return BaseCost;
1103 }
1104 
1105 int ARMTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
1106                                 MaybeAlign Alignment, unsigned AddressSpace,
1107                                 TTI::TargetCostKind CostKind,
1108                                 const Instruction *I) {
1109   // TODO: Handle other cost kinds.
1110   if (CostKind != TTI::TCK_RecipThroughput)
1111     return 1;
1112 
1113   // Type legalization can't handle structs
1114   if (TLI->getValueType(DL, Src, true) == MVT::Other)
1115     return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
1116                                   CostKind);
1117 
1118   if (ST->hasNEON() && Src->isVectorTy() &&
1119       (Alignment && *Alignment != Align(16)) &&
1120       cast<VectorType>(Src)->getElementType()->isDoubleTy()) {
1121     // Unaligned loads/stores are extremely inefficient.
    // We need 4 uops for vst.1/vld.1 vs 1 uop for vldr/vstr.
1123     std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
1124     return LT.first * 4;
1125   }
1126 
  // MVE can optimize an fpext(load(4xhalf)) using an extending integer load.
1128   // Same for stores.
1129   if (ST->hasMVEFloatOps() && isa<FixedVectorType>(Src) && I &&
1130       ((Opcode == Instruction::Load && I->hasOneUse() &&
1131         isa<FPExtInst>(*I->user_begin())) ||
1132        (Opcode == Instruction::Store && isa<FPTruncInst>(I->getOperand(0))))) {
1133     FixedVectorType *SrcVTy = cast<FixedVectorType>(Src);
1134     Type *DstTy =
1135         Opcode == Instruction::Load
1136             ? (*I->user_begin())->getType()
1137             : cast<Instruction>(I->getOperand(0))->getOperand(0)->getType();
1138     if (SrcVTy->getNumElements() == 4 && SrcVTy->getScalarType()->isHalfTy() &&
1139         DstTy->getScalarType()->isFloatTy())
1140       return ST->getMVEVectorCostFactor();
1141   }
1142 
1143   int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
1144                      ? ST->getMVEVectorCostFactor()
1145                      : 1;
1146   return BaseCost * BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
1147                                            CostKind, I);
1148 }
1149 
1150 int ARMTTIImpl::getInterleavedMemoryOpCost(
1151     unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1152     Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
1153     bool UseMaskForCond, bool UseMaskForGaps) {
1154   assert(Factor >= 2 && "Invalid interleave factor");
1155   assert(isa<VectorType>(VecTy) && "Expect a vector type");
1156 
  // vldN/vstN doesn't support vector types with i64/f64 elements.
1158   bool EltIs64Bits = DL.getTypeSizeInBits(VecTy->getScalarType()) == 64;
1159 
1160   if (Factor <= TLI->getMaxSupportedInterleaveFactor() && !EltIs64Bits &&
1161       !UseMaskForCond && !UseMaskForGaps) {
1162     unsigned NumElts = cast<FixedVectorType>(VecTy)->getNumElements();
1163     auto *SubVecTy =
1164         FixedVectorType::get(VecTy->getScalarType(), NumElts / Factor);
1165 
1166     // vldN/vstN only support legal vector types of size 64 or 128 in bits.
1167     // Accesses having vector types that are a multiple of 128 bits can be
1168     // matched to more than one vldN/vstN instruction.
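    // For example, a de-interleaving load of <8 x i32> with Factor == 2 uses
    // a legal <4 x i32> sub-vector type, giving a cost of 2 * BaseCost below.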
1169     int BaseCost = ST->hasMVEIntegerOps() ? ST->getMVEVectorCostFactor() : 1;
1170     if (NumElts % Factor == 0 &&
1171         TLI->isLegalInterleavedAccessType(Factor, SubVecTy, DL))
1172       return Factor * BaseCost * TLI->getNumInterleavedAccesses(SubVecTy, DL);
1173 
    // Some smaller-than-legal interleaved patterns are cheap as we can make
1175     // use of the vmovn or vrev patterns to interleave a standard load. This is
1176     // true for v4i8, v8i8 and v4i16 at least (but not for v4f16 as it is
1177     // promoted differently). The cost of 2 here is then a load and vrev or
1178     // vmovn.
1179     if (ST->hasMVEIntegerOps() && Factor == 2 && NumElts / Factor > 2 &&
1180         VecTy->isIntOrIntVectorTy() && DL.getTypeSizeInBits(SubVecTy) <= 64)
1181       return 2 * BaseCost;
1182   }
1183 
1184   return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
1185                                            Alignment, AddressSpace, CostKind,
1186                                            UseMaskForCond, UseMaskForGaps);
1187 }
1188 
1189 unsigned ARMTTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
1190                                             const Value *Ptr, bool VariableMask,
1191                                             Align Alignment,
1192                                             TTI::TargetCostKind CostKind,
1193                                             const Instruction *I) {
1194   using namespace PatternMatch;
1195   if (!ST->hasMVEIntegerOps() || !EnableMaskedGatherScatters)
1196     return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
1197                                          Alignment, CostKind, I);
1198 
1199   assert(DataTy->isVectorTy() && "Can't do gather/scatters on scalar!");
1200   auto *VTy = cast<FixedVectorType>(DataTy);
1201 
1202   // TODO: Splitting, once we do that.
1203 
1204   unsigned NumElems = VTy->getNumElements();
1205   unsigned EltSize = VTy->getScalarSizeInBits();
1206   std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, DataTy);
1207 
1208   // For now, it is assumed that for the MVE gather instructions the loads are
1209   // all effectively serialised. This means the cost is the scalar cost
1210   // multiplied by the number of elements being loaded. This is possibly very
1211   // conservative, but even so we still end up vectorising loops because the
1212   // cost per iteration for many loops is lower than for scalar loops.
1213   unsigned VectorCost = NumElems * LT.first;
1214   // The scalarization cost should be a lot higher. We use the number of vector
1215   // elements plus the scalarization overhead.
1216   unsigned ScalarCost =
1217       NumElems * LT.first + BaseT::getScalarizationOverhead(VTy, {});
1218 
1219   if (Alignment < EltSize / 8)
1220     return ScalarCost;
1221 
1222   unsigned ExtSize = EltSize;
1223   // Check whether there's a single user that asks for an extended type
1224   if (I != nullptr) {
    // Depending on the caller of this function, a gather instruction will
    // either have opcode Instruction::Load or be a call to the masked_gather
    // intrinsic.
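    // For example, a gather of <4 x i16> whose only use is a zext to
    // <4 x i32> can (in principle) use a widening gather such as VLDRH.U32,
    // so we cost it using the extended element size instead.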
1228     if ((I->getOpcode() == Instruction::Load ||
1229          match(I, m_Intrinsic<Intrinsic::masked_gather>())) &&
1230         I->hasOneUse()) {
1231       const User *Us = *I->users().begin();
1232       if (isa<ZExtInst>(Us) || isa<SExtInst>(Us)) {
1233         // Only allow valid type combinations.
1234         unsigned TypeSize =
1235             cast<Instruction>(Us)->getType()->getScalarSizeInBits();
1236         if (((TypeSize == 32 && (EltSize == 8 || EltSize == 16)) ||
1237              (TypeSize == 16 && EltSize == 8)) &&
1238             TypeSize * NumElems == 128) {
1239           ExtSize = TypeSize;
1240         }
1241       }
1242     }
1243     // Check whether the input data needs to be truncated
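         // Similarly (illustrative), a scatter of <4 x i16> values truncated
         // from <4 x i32> can use a truncating MVE scatter (a vstrh.32 style
         // store), so the pre-truncation element size is the one checked here.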
1244     TruncInst *T;
1245     if ((I->getOpcode() == Instruction::Store ||
1246          match(I, m_Intrinsic<Intrinsic::masked_scatter>())) &&
1247         (T = dyn_cast<TruncInst>(I->getOperand(0)))) {
1248       // Only allow valid type combinations
1249       unsigned TypeSize = T->getOperand(0)->getType()->getScalarSizeInBits();
1250       if (((EltSize == 16 && TypeSize == 32) ||
1251            (EltSize == 8 && (TypeSize == 32 || TypeSize == 16))) &&
1252           TypeSize * NumElems == 128)
1253         ExtSize = TypeSize;
1254     }
1255   }
1256 
1257   if (ExtSize * NumElems != 128 || NumElems < 4)
1258     return ScalarCost;
1259 
1260   // Any (aligned) i32 gather will not need to be scalarised.
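       // (On MVE this is typically a single vldrw.u32 style gather; alignment
       // has already been checked against the element size above.)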
1261   if (ExtSize == 32)
1262     return VectorCost;
1263   // For smaller types, we need to ensure that the gep's inputs are correctly
1264   // extended from a small enough value. Other sizes (including i64) are
1265   // scalarized for now.
1266   if (ExtSize != 8 && ExtSize != 16)
1267     return ScalarCost;
1268 
1269   if (const auto *BC = dyn_cast<BitCastInst>(Ptr))
1270     Ptr = BC->getOperand(0);
1271   if (const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
1272     if (GEP->getNumOperands() != 2)
1273       return ScalarCost;
1274     unsigned Scale = DL.getTypeAllocSize(GEP->getResultElementType());
1275     // Scale needs to be correct (which is only relevant for i16s).
1276     if (Scale != 1 && Scale * 8 != ExtSize)
1277       return ScalarCost;
1278     // And we need to zext (not sext) the indexes from a small enough type.
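         // For example (illustrative), a gep index defined as
         //   %ext = zext <8 x i16> %offs to <8 x i32>
         // is extended from a type no wider than ExtSize (here 16 bits), so the
         // gather below is costed as a vector operation rather than scalarised.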
1279     if (const auto *ZExt = dyn_cast<ZExtInst>(GEP->getOperand(1))) {
1280       if (ZExt->getOperand(0)->getType()->getScalarSizeInBits() <= ExtSize)
1281         return VectorCost;
1282     }
1283     return ScalarCost;
1284   }
1285   return ScalarCost;
1286 }
1287 
1288 bool ARMTTIImpl::isLoweredToCall(const Function *F) {
1289   if (!F->isIntrinsic())
1290     return BaseT::isLoweredToCall(F);
1291 
1292   // Assume all Arm-specific intrinsics map to an instruction.
1293   if (F->getName().startswith("llvm.arm"))
1294     return false;
1295 
1296   switch (F->getIntrinsicID()) {
1297   default: break;
1298   case Intrinsic::powi:
1299   case Intrinsic::sin:
1300   case Intrinsic::cos:
1301   case Intrinsic::pow:
1302   case Intrinsic::log:
1303   case Intrinsic::log10:
1304   case Intrinsic::log2:
1305   case Intrinsic::exp:
1306   case Intrinsic::exp2:
1307     return true;
1308   case Intrinsic::sqrt:
1309   case Intrinsic::fabs:
1310   case Intrinsic::copysign:
1311   case Intrinsic::floor:
1312   case Intrinsic::ceil:
1313   case Intrinsic::trunc:
1314   case Intrinsic::rint:
1315   case Intrinsic::nearbyint:
1316   case Intrinsic::round:
1317   case Intrinsic::canonicalize:
1318   case Intrinsic::lround:
1319   case Intrinsic::llround:
1320   case Intrinsic::lrint:
1321   case Intrinsic::llrint:
1322     if (F->getReturnType()->isDoubleTy() && !ST->hasFP64())
1323       return true;
1324     if (F->getReturnType()->isHalfTy() && !ST->hasFullFP16())
1325       return true;
1326     // Some operations can be handled by vector instructions; assume that
1327     // unsupported vectors will be expanded into supported scalar ones.
1328     // TODO: Handle scalar operations properly.
1329     return !ST->hasFPARMv8Base() && !ST->hasVFP2Base();
1330   case Intrinsic::masked_store:
1331   case Intrinsic::masked_load:
1332   case Intrinsic::masked_gather:
1333   case Intrinsic::masked_scatter:
1334     return !ST->hasMVEIntegerOps();
1335   case Intrinsic::sadd_with_overflow:
1336   case Intrinsic::uadd_with_overflow:
1337   case Intrinsic::ssub_with_overflow:
1338   case Intrinsic::usub_with_overflow:
1339   case Intrinsic::sadd_sat:
1340   case Intrinsic::uadd_sat:
1341   case Intrinsic::ssub_sat:
1342   case Intrinsic::usub_sat:
1343     return false;
1344   }
1345 
1346   return BaseT::isLoweredToCall(F);
1347 }
1348 
1349 bool ARMTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
1350                                           AssumptionCache &AC,
1351                                           TargetLibraryInfo *LibInfo,
1352                                           HardwareLoopInfo &HWLoopInfo) {
1353   // Low-overhead branches are only supported in the 'low-overhead branch'
1354   // extension of v8.1-m.
1355   if (!ST->hasLOB() || DisableLowOverheadLoops) {
1356     LLVM_DEBUG(dbgs() << "ARMHWLoops: Disabled\n");
1357     return false;
1358   }
1359 
1360   if (!SE.hasLoopInvariantBackedgeTakenCount(L)) {
1361     LLVM_DEBUG(dbgs() << "ARMHWLoops: No BETC\n");
1362     return false;
1363   }
1364 
1365   const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
1366   if (isa<SCEVCouldNotCompute>(BackedgeTakenCount)) {
1367     LLVM_DEBUG(dbgs() << "ARMHWLoops: Uncomputable BETC\n");
1368     return false;
1369   }
1370 
1371   const SCEV *TripCountSCEV =
1372     SE.getAddExpr(BackedgeTakenCount,
1373                   SE.getOne(BackedgeTakenCount->getType()));
1374 
1375   // We need to store the trip count in LR, a 32-bit register.
1376   if (SE.getUnsignedRangeMax(TripCountSCEV).getBitWidth() > 32) {
1377     LLVM_DEBUG(dbgs() << "ARMHWLoops: Trip count does not fit into 32 bits\n");
1378     return false;
1379   }
1380 
1381   // Making a call will trash LR and clear LO_BRANCH_INFO, so there's little
1382   // point in generating a hardware loop if that's going to happen.
1383   auto MaybeCall = [this](Instruction &I) {
1384     const ARMTargetLowering *TLI = getTLI();
1385     unsigned ISD = TLI->InstructionOpcodeToISD(I.getOpcode());
1386     EVT VT = TLI->getValueType(DL, I.getType(), true);
1387     if (TLI->getOperationAction(ISD, VT) == TargetLowering::LibCall)
1388       return true;
1389 
1390     // Check if an intrinsic will be lowered to a call and assume that any
1391     // other CallInst will generate a bl.
1392     if (auto *Call = dyn_cast<CallInst>(&I)) {
1393       if (isa<IntrinsicInst>(Call)) {
1394         if (const Function *F = Call->getCalledFunction())
1395           return isLoweredToCall(F);
1396       }
1397       return true;
1398     }
1399 
1400     // FPv5 provides conversions between integer, double-precision,
1401     // single-precision, and half-precision formats.
1402     switch (I.getOpcode()) {
1403     default:
1404       break;
1405     case Instruction::FPToSI:
1406     case Instruction::FPToUI:
1407     case Instruction::SIToFP:
1408     case Instruction::UIToFP:
1409     case Instruction::FPTrunc:
1410     case Instruction::FPExt:
1411       return !ST->hasFPARMv8Base();
1412     }
1413 
1414     // FIXME: Unfortunately the approach of checking the Operation Action does
1415     // not catch all cases of Legalization that use library calls. Our
1416     // Legalization step can turn operations marked Custom, Expand or even
1417     // Legal into library calls during type legalization. So for now we have
1418     // to special-case, for instance, the division of 64-bit integers and the
1419     // use of floating point emulation.
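         // For instance, a 64-bit sdiv or udiv is typically lowered to a call
         // to an __aeabi_ldivmod style runtime routine on Arm, even though its
         // Operation Action is not marked as LibCall.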
1420     if (VT.isInteger() && VT.getSizeInBits() >= 64) {
1421       switch (ISD) {
1422       default:
1423         break;
1424       case ISD::SDIV:
1425       case ISD::UDIV:
1426       case ISD::SREM:
1427       case ISD::UREM:
1428       case ISD::SDIVREM:
1429       case ISD::UDIVREM:
1430         return true;
1431       }
1432     }
1433 
1434     // Assume all other non-float operations are supported.
1435     if (!VT.isFloatingPoint())
1436       return false;
1437 
1438     // We'll need a library call to handle most floats when using soft-float.
1439     if (TLI->useSoftFloat()) {
1440       switch (I.getOpcode()) {
1441       default:
1442         return true;
1443       case Instruction::Alloca:
1444       case Instruction::Load:
1445       case Instruction::Store:
1446       case Instruction::Select:
1447       case Instruction::PHI:
1448         return false;
1449       }
1450     }
1451 
1452     // We'll need a libcall to perform double precision operations on a single
1453     // precision only FPU.
1454     if (I.getType()->isDoubleTy() && !ST->hasFP64())
1455       return true;
1456 
1457     // Likewise for half precision arithmetic.
1458     if (I.getType()->isHalfTy() && !ST->hasFullFP16())
1459       return true;
1460 
1461     return false;
1462   };
1463 
1464   auto IsHardwareLoopIntrinsic = [](Instruction &I) {
1465     if (auto *Call = dyn_cast<IntrinsicInst>(&I)) {
1466       switch (Call->getIntrinsicID()) {
1467       default:
1468         break;
1469       case Intrinsic::set_loop_iterations:
1470       case Intrinsic::test_set_loop_iterations:
1471       case Intrinsic::loop_decrement:
1472       case Intrinsic::loop_decrement_reg:
1473         return true;
1474       }
1475     }
1476     return false;
1477   };
1478 
1479   // Scan the instructions to see if there are any that we know will turn into
1480   // a call, or if this loop is already a low-overhead loop.
1481   auto ScanLoop = [&](Loop *L) {
1482     for (auto *BB : L->getBlocks()) {
1483       for (auto &I : *BB) {
1484         if (MaybeCall(I) || IsHardwareLoopIntrinsic(I)) {
1485           LLVM_DEBUG(dbgs() << "ARMHWLoops: Bad instruction: " << I << "\n");
1486           return false;
1487         }
1488       }
1489     }
1490     return true;
1491   };
1492 
1493   // Visit inner loops.
1494   for (auto Inner : *L)
1495     if (!ScanLoop(Inner))
1496       return false;
1497 
1498   if (!ScanLoop(L))
1499     return false;
1500 
1501   // TODO: Check whether the trip count calculation is expensive. If L is the
1502   // inner loop but we know it has a low trip count, calculating that trip
1503   // count (in the parent loop) may be detrimental.
1504 
1505   LLVMContext &C = L->getHeader()->getContext();
1506   HWLoopInfo.CounterInReg = true;
1507   HWLoopInfo.IsNestingLegal = false;
1508   HWLoopInfo.PerformEntryTest = true;
1509   HWLoopInfo.CountType = Type::getInt32Ty(C);
1510   HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1);
1511   return true;
1512 }
1513 
1514 static bool canTailPredicateInstruction(Instruction &I, int &ICmpCount) {
1515   // We only allow a single icmp: the one that forms the backedge condition.
1516   // Because we only look at single-block loops, we simply count the icmps.
1517   if (isa<ICmpInst>(&I) && ++ICmpCount > 1)
1518     return false;
1519 
1520   if (isa<FCmpInst>(&I))
1521     return false;
1522 
1523   // We could allow extending/narrowing FP loads/stores, but codegen is
1524   // too inefficient so reject this for now.
1525   if (isa<FPExtInst>(&I) || isa<FPTruncInst>(&I))
1526     return false;
1527 
1528   // Extends have to be extending loads.
1529   if (isa<SExtInst>(&I) || isa<ZExtInst>(&I))
1530     if (!I.getOperand(0)->hasOneUse() || !isa<LoadInst>(I.getOperand(0)))
1531       return false;
1532 
1533   // Truncs have to be narrowing stores.
1534   if (isa<TruncInst>(&I))
1535     if (!I.hasOneUse() || !isa<StoreInst>(*I.user_begin()))
1536       return false;
1537 
1538   return true;
1539 }
1540 
1541 // To set up a tail-predicated loop, we need to know the total number of
1542 // elements processed by that loop. Thus, we need to determine the element
1543 // size and:
1544 // 1) it should be uniform for all operations in the vector loop, so we
1545 //    e.g. don't want any widening/narrowing operations.
1546 // 2) it should be smaller than i64s because we don't have vector operations
1547 //    that work on i64s.
1548 // 3) we don't want elements to be reversed or shuffled, to make sure the
1549 //    tail-predication masks/predicates the right lanes.
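     // For example (illustrative), a loop body containing
     //   %w = zext <8 x i16> %v to <8 x i32>
     // violates 1): the widening changes the number of lanes covered by one
     // vector, so there is no single uniform element count to predicate on
     // (unless the extend is folded into an extending load, which
     // canTailPredicateInstruction allows).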
1550 //
1551 static bool canTailPredicateLoop(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
1552                                  const DataLayout &DL,
1553                                  const LoopAccessInfo *LAI) {
1554   LLVM_DEBUG(dbgs() << "Tail-predication: checking allowed instructions\n");
1555 
1556   // If there are live-out values, it is probably a reduction, which needs a
1557   // final reduction step after the loop. MVE has a VADDV instruction to reduce
1558   // integer vectors, but doesn't have an equivalent one for float vectors. A
1559   // live-out value that is not recognised as a reduction will result in the
1560   // tail-predicated loop being reverted to a non-predicated loop, and this is
1561   // very expensive, i.e. it has a significant performance impact. So, in this
1562   // case it's better not to tail-predicate the loop, which is what we check
1563   // here. Thus, we allow only 1 live-out value, which has to be an integer
1564   // reduction, which matches the loops supported by ARMLowOverheadLoops.
1565   // It is important to keep ARMLowOverheadLoops and canTailPredicateLoop in
1566   // sync with each other.
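       // As an illustration, a loop whose only live-out is an integer
       // accumulator such as
       //   %acc.next = add i32 %acc, %x
       // can be kept, since the add reduction can be completed with VADDV after
       // the loop, whereas a float accumulator in the same position makes us
       // bail out.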
1567   SmallVector<Instruction *, 8> LiveOuts =
1568       llvm::findDefsUsedOutsideOfLoop(L);
1569   bool IntReductionsDisabled =
1570       EnableTailPredication == TailPredication::EnabledNoReductions ||
1571       EnableTailPredication == TailPredication::ForceEnabledNoReductions;
1572 
1573   for (auto *I : LiveOuts) {
1574     if (!I->getType()->isIntegerTy()) {
1575       LLVM_DEBUG(dbgs() << "Don't tail-predicate loop with non-integer "
1576                            "live-out value\n");
1577       return false;
1578     }
1579     if (I->getOpcode() != Instruction::Add) {
1580       LLVM_DEBUG(dbgs() << "Only add reductions supported\n");
1581       return false;
1582     }
1583     if (IntReductionsDisabled) {
1584       LLVM_DEBUG(dbgs() << "Integer add reductions not enabled\n");
1585       return false;
1586     }
1587   }
1588 
1589   // Next, check that all instructions can be tail-predicated.
1590   PredicatedScalarEvolution PSE = LAI->getPSE();
1591   SmallVector<Instruction *, 16> LoadStores;
1592   int ICmpCount = 0;
1593   int Stride = 0;
1594 
1595   for (BasicBlock *BB : L->blocks()) {
1596     for (Instruction &I : BB->instructionsWithoutDebug()) {
1597       if (isa<PHINode>(&I))
1598         continue;
1599       if (!canTailPredicateInstruction(I, ICmpCount)) {
1600         LLVM_DEBUG(dbgs() << "Instruction not allowed: "; I.dump());
1601         return false;
1602       }
1603 
1604       Type *T = I.getType();
1605       if (T->isPointerTy())
1606         T = T->getPointerElementType();
1607 
1608       if (T->getScalarSizeInBits() > 32) {
1609         LLVM_DEBUG(dbgs() << "Unsupported Type: "; T->dump());
1610         return false;
1611       }
1612 
1613       if (isa<StoreInst>(I) || isa<LoadInst>(I)) {
1614         Value *Ptr = isa<LoadInst>(I) ? I.getOperand(0) : I.getOperand(1);
1615         int64_t NextStride = getPtrStride(PSE, Ptr, L);
1616         // TODO: for now only allow consecutive strides of 1. We could support
1617         // other strides as long as it is uniform, but let's keep it simple for
1618         // now.
1619         if (Stride == 0 && NextStride == 1) {
1620           Stride = NextStride;
1621           continue;
1622         }
1623         if (Stride != NextStride) {
1624           LLVM_DEBUG(dbgs() << "Different strides found, can't "
1625                                "tail-predicate.\n");
1626           return false;
1627         }
1628       }
1629     }
1630   }
1631 
1632   LLVM_DEBUG(dbgs() << "tail-predication: all instructions allowed!\n");
1633   return true;
1634 }
1635 
1636 bool ARMTTIImpl::preferPredicateOverEpilogue(Loop *L, LoopInfo *LI,
1637                                              ScalarEvolution &SE,
1638                                              AssumptionCache &AC,
1639                                              TargetLibraryInfo *TLI,
1640                                              DominatorTree *DT,
1641                                              const LoopAccessInfo *LAI) {
1642   if (!EnableTailPredication) {
1643     LLVM_DEBUG(dbgs() << "Tail-predication not enabled.\n");
1644     return false;
1645   }
1646 
1647   // Creating a predicated vector loop is the first step for generating a
1648   // tail-predicated hardware loop, for which we need the MVE masked
1649   // load/store instructions:
1650   if (!ST->hasMVEIntegerOps())
1651     return false;
1652 
1653   // For now, restrict this to single block loops.
1654   if (L->getNumBlocks() > 1) {
1655     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: not a single block "
1656                          "loop.\n");
1657     return false;
1658   }
1659 
1660   assert(L->empty() && "preferPredicateOverEpilogue: inner-loop expected");
1661 
1662   HardwareLoopInfo HWLoopInfo(L);
1663   if (!HWLoopInfo.canAnalyze(*LI)) {
1664     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
1665                          "analyzable.\n");
1666     return false;
1667   }
1668 
1669   // This checks if we have the low-overhead branch architecture
1670   // extension, and if we will create a hardware-loop:
1671   if (!isHardwareLoopProfitable(L, SE, AC, TLI, HWLoopInfo)) {
1672     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
1673                          "profitable.\n");
1674     return false;
1675   }
1676 
1677   if (!HWLoopInfo.isHardwareLoopCandidate(SE, *LI, *DT)) {
1678     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
1679                          "a candidate.\n");
1680     return false;
1681   }
1682 
1683   return canTailPredicateLoop(L, LI, SE, DL, LAI);
1684 }
1685 
1686 bool ARMTTIImpl::emitGetActiveLaneMask() const {
1687   if (!ST->hasMVEIntegerOps() || !EnableTailPredication)
1688     return false;
1689 
1690   // Intrinsic @llvm.get.active.lane.mask is supported.
1691   // It is used in the MVETailPredication pass, which requires the number of
1692   // elements processed by this vector loop to set up the tail-predicated
1693   // loop.
1694   return true;
1695 }

1696 void ARMTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
1697                                          TTI::UnrollingPreferences &UP) {
1698   // Currently, only enable these preferences for M-Class cores.
1699   if (!ST->isMClass())
1700     return BasicTTIImplBase::getUnrollingPreferences(L, SE, UP);
1701 
1702   // Disable loop unrolling for Oz and Os.
1703   UP.OptSizeThreshold = 0;
1704   UP.PartialOptSizeThreshold = 0;
1705   if (L->getHeader()->getParent()->hasOptSize())
1706     return;
1707 
1708   // Only enable on Thumb-2 targets.
1709   if (!ST->isThumb2())
1710     return;
1711 
1712   SmallVector<BasicBlock*, 4> ExitingBlocks;
1713   L->getExitingBlocks(ExitingBlocks);
1714   LLVM_DEBUG(dbgs() << "Loop has:\n"
1715                     << "Blocks: " << L->getNumBlocks() << "\n"
1716                     << "Exit blocks: " << ExitingBlocks.size() << "\n");
1717 
1718   // Allow at most one exit other than the latch. This acts as an early exit
1719   // as it mirrors the profitability calculation of the runtime unroller.
1720   if (ExitingBlocks.size() > 2)
1721     return;
1722 
1723   // Limit the CFG of the loop body for targets with a branch predictor.
1724   // Allowing 4 blocks permits if-then-else diamonds in the body.
1725   if (ST->hasBranchPredictor() && L->getNumBlocks() > 4)
1726     return;
1727 
1728   // Scan the loop: don't unroll loops with calls as this could prevent
1729   // inlining.
1730   unsigned Cost = 0;
1731   for (auto *BB : L->getBlocks()) {
1732     for (auto &I : *BB) {
1733       // Don't unroll vectorised loops. MVE does not benefit from it as much
1734       // as scalar code does.
1735       if (I.getType()->isVectorTy())
1736         return;
1737 
1738       if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
1739         if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
1740           if (!isLoweredToCall(F))
1741             continue;
1742         }
1743         return;
1744       }
1745 
1746       SmallVector<const Value*, 4> Operands(I.value_op_begin(),
1747                                             I.value_op_end());
1748       Cost += getUserCost(&I, Operands, TargetTransformInfo::TCK_CodeSize);
1749     }
1750   }
1751 
1752   LLVM_DEBUG(dbgs() << "Cost of loop: " << Cost << "\n");
1753 
1754   UP.Partial = true;
1755   UP.Runtime = true;
1756   UP.UpperBound = true;
1757   UP.UnrollRemainder = true;
1758   UP.DefaultUnrollRuntimeCount = 4;
1759   UP.UnrollAndJam = true;
1760   UP.UnrollAndJamInnerLoopThreshold = 60;
1761 
1762   // Force-unrolling small loops can be very useful because of the branch-taken
1763   // cost of the backedge.
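       // (With TCK_CodeSize user costs, a Cost below 12 roughly corresponds to
       // a body of only a handful of instructions, where the backedge branch is
       // a significant fraction of the per-iteration work.)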
1764   if (Cost < 12)
1765     UP.Force = true;
1766 }
1767 
1768 void ARMTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
1769                                        TTI::PeelingPreferences &PP) {
1770   BaseT::getPeelingPreferences(L, SE, PP);
1771 }
1772 
1773 bool ARMTTIImpl::useReductionIntrinsic(unsigned Opcode, Type *Ty,
1774                                        TTI::ReductionFlags Flags) const {
1775   return ST->hasMVEIntegerOps();
1776 }
1777