1 //===- ARMTargetTransformInfo.cpp - ARM specific TTI ----------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "ARMTargetTransformInfo.h"
10 #include "ARMSubtarget.h"
11 #include "MCTargetDesc/ARMAddressingModes.h"
12 #include "llvm/ADT/APInt.h"
13 #include "llvm/ADT/SmallVector.h"
14 #include "llvm/Analysis/LoopInfo.h"
15 #include "llvm/CodeGen/CostTable.h"
16 #include "llvm/CodeGen/ISDOpcodes.h"
17 #include "llvm/CodeGen/ValueTypes.h"
18 #include "llvm/IR/BasicBlock.h"
19 #include "llvm/IR/DataLayout.h"
20 #include "llvm/IR/DerivedTypes.h"
21 #include "llvm/IR/Instruction.h"
22 #include "llvm/IR/Instructions.h"
23 #include "llvm/IR/Intrinsics.h"
24 #include "llvm/IR/IntrinsicInst.h"
25 #include "llvm/IR/IntrinsicsARM.h"
26 #include "llvm/IR/PatternMatch.h"
27 #include "llvm/IR/Type.h"
28 #include "llvm/MC/SubtargetFeature.h"
29 #include "llvm/Support/Casting.h"
30 #include "llvm/Support/KnownBits.h"
31 #include "llvm/Support/MachineValueType.h"
32 #include "llvm/Target/TargetMachine.h"
33 #include "llvm/Transforms/InstCombine/InstCombiner.h"
34 #include "llvm/Transforms/Utils/Local.h"
35 #include "llvm/Transforms/Utils/LoopUtils.h"
36 #include <algorithm>
37 #include <cassert>
38 #include <cstdint>
39 #include <utility>
40 
41 using namespace llvm;
42 
43 #define DEBUG_TYPE "armtti"
44 
45 static cl::opt<bool> EnableMaskedLoadStores(
46   "enable-arm-maskedldst", cl::Hidden, cl::init(true),
47   cl::desc("Enable the generation of masked loads and stores"));
48 
49 static cl::opt<bool> DisableLowOverheadLoops(
50   "disable-arm-loloops", cl::Hidden, cl::init(false),
51   cl::desc("Disable the generation of low-overhead loops"));
52 
53 extern cl::opt<TailPredication::Mode> EnableTailPredication;
54 
55 extern cl::opt<bool> EnableMaskedGatherScatters;
56 
57 extern cl::opt<unsigned> MVEMaxSupportedInterleaveFactor;
58 
59 /// Convert a vector load intrinsic into a simple llvm load instruction.
60 /// This is beneficial when the underlying object being addressed comes
61 /// from a constant, since we get constant-folding for free.
62 static Value *simplifyNeonVld1(const IntrinsicInst &II, unsigned MemAlign,
63                                InstCombiner::BuilderTy &Builder) {
64   auto *IntrAlign = dyn_cast<ConstantInt>(II.getArgOperand(1));
65 
66   if (!IntrAlign)
67     return nullptr;
68 
69   unsigned Alignment = IntrAlign->getLimitedValue() < MemAlign
70                            ? MemAlign
71                            : IntrAlign->getLimitedValue();
72 
73   if (!isPowerOf2_32(Alignment))
74     return nullptr;
75 
76   auto *BCastInst = Builder.CreateBitCast(II.getArgOperand(0),
77                                           PointerType::get(II.getType(), 0));
78   return Builder.CreateAlignedLoad(II.getType(), BCastInst, Align(Alignment));
79 }
80 
81 bool ARMTTIImpl::areInlineCompatible(const Function *Caller,
82                                      const Function *Callee) const {
83   const TargetMachine &TM = getTLI()->getTargetMachine();
84   const FeatureBitset &CallerBits =
85       TM.getSubtargetImpl(*Caller)->getFeatureBits();
86   const FeatureBitset &CalleeBits =
87       TM.getSubtargetImpl(*Callee)->getFeatureBits();
88 
89   // To inline a callee, all features not in the allowed list must match exactly.
90   bool MatchExact = (CallerBits & ~InlineFeaturesAllowed) ==
91                     (CalleeBits & ~InlineFeaturesAllowed);
  // For features in the allowed list, the callee's features must be a subset of
  // the caller's.
94   bool MatchSubset = ((CallerBits & CalleeBits) & InlineFeaturesAllowed) ==
95                      (CalleeBits & InlineFeaturesAllowed);
96   return MatchExact && MatchSubset;
97 }
98 
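// Only favor the backedge index for single-block Thumb2 M-class loops that
// are not optimized for size; with MVE we prefer post-increment addressing
// instead (see shouldFavorPostInc below).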
99 bool ARMTTIImpl::shouldFavorBackedgeIndex(const Loop *L) const {
100   if (L->getHeader()->getParent()->hasOptSize())
101     return false;
102   if (ST->hasMVEIntegerOps())
103     return false;
104   return ST->isMClass() && ST->isThumb2() && L->getNumBlocks() == 1;
105 }
106 
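// MVE loads and stores have post-incrementing forms, so post-increment
// addressing is profitable whenever MVE is available.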
107 bool ARMTTIImpl::shouldFavorPostInc() const {
108   if (ST->hasMVEIntegerOps())
109     return true;
110   return false;
111 }
112 
113 Optional<Instruction *>
114 ARMTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
115   using namespace PatternMatch;
116   Intrinsic::ID IID = II.getIntrinsicID();
117   switch (IID) {
118   default:
119     break;
120   case Intrinsic::arm_neon_vld1: {
121     Align MemAlign =
122         getKnownAlignment(II.getArgOperand(0), IC.getDataLayout(), &II,
123                           &IC.getAssumptionCache(), &IC.getDominatorTree());
124     if (Value *V = simplifyNeonVld1(II, MemAlign.value(), IC.Builder)) {
125       return IC.replaceInstUsesWith(II, V);
126     }
127     break;
128   }
129 
130   case Intrinsic::arm_neon_vld2:
131   case Intrinsic::arm_neon_vld3:
132   case Intrinsic::arm_neon_vld4:
133   case Intrinsic::arm_neon_vld2lane:
134   case Intrinsic::arm_neon_vld3lane:
135   case Intrinsic::arm_neon_vld4lane:
136   case Intrinsic::arm_neon_vst1:
137   case Intrinsic::arm_neon_vst2:
138   case Intrinsic::arm_neon_vst3:
139   case Intrinsic::arm_neon_vst4:
140   case Intrinsic::arm_neon_vst2lane:
141   case Intrinsic::arm_neon_vst3lane:
142   case Intrinsic::arm_neon_vst4lane: {
143     Align MemAlign =
144         getKnownAlignment(II.getArgOperand(0), IC.getDataLayout(), &II,
145                           &IC.getAssumptionCache(), &IC.getDominatorTree());
146     unsigned AlignArg = II.getNumArgOperands() - 1;
147     Value *AlignArgOp = II.getArgOperand(AlignArg);
148     MaybeAlign Align = cast<ConstantInt>(AlignArgOp)->getMaybeAlignValue();
149     if (Align && *Align < MemAlign) {
150       return IC.replaceOperand(
151           II, AlignArg,
152           ConstantInt::get(Type::getInt32Ty(II.getContext()), MemAlign.value(),
153                            false));
154     }
155     break;
156   }
157 
158   case Intrinsic::arm_mve_pred_i2v: {
159     Value *Arg = II.getArgOperand(0);
160     Value *ArgArg;
161     if (match(Arg, PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_v2i>(
162                        PatternMatch::m_Value(ArgArg))) &&
163         II.getType() == ArgArg->getType()) {
164       return IC.replaceInstUsesWith(II, ArgArg);
165     }
166     Constant *XorMask;
167     if (match(Arg, m_Xor(PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_v2i>(
168                              PatternMatch::m_Value(ArgArg)),
169                          PatternMatch::m_Constant(XorMask))) &&
170         II.getType() == ArgArg->getType()) {
171       if (auto *CI = dyn_cast<ConstantInt>(XorMask)) {
172         if (CI->getValue().trunc(16).isAllOnesValue()) {
173           auto TrueVector = IC.Builder.CreateVectorSplat(
174               cast<FixedVectorType>(II.getType())->getNumElements(),
175               IC.Builder.getTrue());
176           return BinaryOperator::Create(Instruction::Xor, ArgArg, TrueVector);
177         }
178       }
179     }
180     KnownBits ScalarKnown(32);
181     if (IC.SimplifyDemandedBits(&II, 0, APInt::getLowBitsSet(32, 16),
182                                 ScalarKnown, 0)) {
183       return &II;
184     }
185     break;
186   }
187   case Intrinsic::arm_mve_pred_v2i: {
188     Value *Arg = II.getArgOperand(0);
189     Value *ArgArg;
190     if (match(Arg, PatternMatch::m_Intrinsic<Intrinsic::arm_mve_pred_i2v>(
191                        PatternMatch::m_Value(ArgArg)))) {
192       return IC.replaceInstUsesWith(II, ArgArg);
193     }
194     if (!II.getMetadata(LLVMContext::MD_range)) {
195       Type *IntTy32 = Type::getInt32Ty(II.getContext());
196       Metadata *M[] = {
197           ConstantAsMetadata::get(ConstantInt::get(IntTy32, 0)),
198           ConstantAsMetadata::get(ConstantInt::get(IntTy32, 0xFFFF))};
199       II.setMetadata(LLVMContext::MD_range, MDNode::get(II.getContext(), M));
200       return &II;
201     }
202     break;
203   }
204   case Intrinsic::arm_mve_vadc:
205   case Intrinsic::arm_mve_vadc_predicated: {
206     unsigned CarryOp =
207         (II.getIntrinsicID() == Intrinsic::arm_mve_vadc_predicated) ? 3 : 2;
208     assert(II.getArgOperand(CarryOp)->getType()->getScalarSizeInBits() == 32 &&
209            "Bad type for intrinsic!");
210 
211     KnownBits CarryKnown(32);
212     if (IC.SimplifyDemandedBits(&II, CarryOp, APInt::getOneBitSet(32, 29),
213                                 CarryKnown)) {
214       return &II;
215     }
216     break;
217   }
218   case Intrinsic::arm_mve_vmldava: {
219     Instruction *I = cast<Instruction>(&II);
220     if (I->hasOneUse()) {
221       auto *User = cast<Instruction>(*I->user_begin());
222       Value *OpZ;
223       if (match(User, m_c_Add(m_Specific(I), m_Value(OpZ))) &&
224           match(I->getOperand(3), m_Zero())) {
225         Value *OpX = I->getOperand(4);
226         Value *OpY = I->getOperand(5);
227         Type *OpTy = OpX->getType();
228 
229         IC.Builder.SetInsertPoint(User);
230         Value *V =
231             IC.Builder.CreateIntrinsic(Intrinsic::arm_mve_vmldava, {OpTy},
232                                        {I->getOperand(0), I->getOperand(1),
233                                         I->getOperand(2), OpZ, OpX, OpY});
234 
235         IC.replaceInstUsesWith(*User, V);
236         return IC.eraseInstFromFunction(*User);
237       }
238     }
239     return None;
240   }
241   }
242   return None;
243 }
244 
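// Estimate the cost (roughly in instructions) of materializing the integer
// immediate Imm, based on whether it fits the ARM, Thumb2 or Thumb1 immediate
// encodings, possibly after inversion or shifting.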
245 int ARMTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
246                               TTI::TargetCostKind CostKind) {
247   assert(Ty->isIntegerTy());
248 
  unsigned Bits = Ty->getPrimitiveSizeInBits();
  if (Bits == 0 || Imm.getActiveBits() >= 64)
    return 4;
252 
253   int64_t SImmVal = Imm.getSExtValue();
254   uint64_t ZImmVal = Imm.getZExtValue();
255   if (!ST->isThumb()) {
256     if ((SImmVal >= 0 && SImmVal < 65536) ||
257         (ARM_AM::getSOImmVal(ZImmVal) != -1) ||
258         (ARM_AM::getSOImmVal(~ZImmVal) != -1))
259       return 1;
260     return ST->hasV6T2Ops() ? 2 : 3;
261   }
262   if (ST->isThumb2()) {
263     if ((SImmVal >= 0 && SImmVal < 65536) ||
264         (ARM_AM::getT2SOImmVal(ZImmVal) != -1) ||
265         (ARM_AM::getT2SOImmVal(~ZImmVal) != -1))
266       return 1;
267     return ST->hasV6T2Ops() ? 2 : 3;
268   }
  // Thumb1: any i8 immediate costs 1.
270   if (Bits == 8 || (SImmVal >= 0 && SImmVal < 256))
271     return 1;
272   if ((~SImmVal < 256) || ARM_AM::isThumbImmShiftedVal(ZImmVal))
273     return 2;
274   // Load from constantpool.
275   return 3;
276 }
277 
// Constants smaller than 256 fit in the immediate field of
// Thumb1 instructions, so we return a cost of zero for them and 1 otherwise.
280 int ARMTTIImpl::getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
281                                       const APInt &Imm, Type *Ty) {
282   if (Imm.isNonNegative() && Imm.getLimitedValue() < 256)
283     return 0;
284 
285   return 1;
286 }
287 
// Checks whether Inst is part of a min(max()) or max(min()) pattern
// that will match to an SSAT instruction.
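// For example a clamp to [-128, 127], i.e. smax(smin(x, 127), -128): Imm is
// the negative bound, -Imm must be a power of two, and the smin constant must
// equal -Imm - 1.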
290 static bool isSSATMinMaxPattern(Instruction *Inst, const APInt &Imm) {
291   Value *LHS, *RHS;
292   ConstantInt *C;
293   SelectPatternFlavor InstSPF = matchSelectPattern(Inst, LHS, RHS).Flavor;
294 
295   if (InstSPF == SPF_SMAX &&
296       PatternMatch::match(RHS, PatternMatch::m_ConstantInt(C)) &&
297       C->getValue() == Imm && Imm.isNegative() && (-Imm).isPowerOf2()) {
298 
299     auto isSSatMin = [&](Value *MinInst) {
300       if (isa<SelectInst>(MinInst)) {
301         Value *MinLHS, *MinRHS;
302         ConstantInt *MinC;
303         SelectPatternFlavor MinSPF =
304             matchSelectPattern(MinInst, MinLHS, MinRHS).Flavor;
305         if (MinSPF == SPF_SMIN &&
306             PatternMatch::match(MinRHS, PatternMatch::m_ConstantInt(MinC)) &&
307             MinC->getValue() == ((-Imm) - 1))
308           return true;
309       }
310       return false;
311     };
312 
313     if (isSSatMin(Inst->getOperand(1)) ||
314         (Inst->hasNUses(2) && (isSSatMin(*Inst->user_begin()) ||
315                                isSSatMin(*(++Inst->user_begin())))))
316       return true;
317   }
318   return false;
319 }
320 
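// Return the cost of Imm when it appears as operand Idx of an instruction
// with the given Opcode. Immediates that can be folded into the instruction
// (for example via BIC, SUB, CMN or MVN forms, or as part of an SSAT pattern)
// are reported as free so that constant hoisting does not pull them away.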
321 int ARMTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
322                                   const APInt &Imm, Type *Ty,
323                                   TTI::TargetCostKind CostKind,
324                                   Instruction *Inst) {
325   // Division by a constant can be turned into multiplication, but only if we
326   // know it's constant. So it's not so much that the immediate is cheap (it's
327   // not), but that the alternative is worse.
328   // FIXME: this is probably unneeded with GlobalISel.
329   if ((Opcode == Instruction::SDiv || Opcode == Instruction::UDiv ||
330        Opcode == Instruction::SRem || Opcode == Instruction::URem) &&
331       Idx == 1)
332     return 0;
333 
334   if (Opcode == Instruction::And) {
335     // UXTB/UXTH
336     if (Imm == 255 || Imm == 65535)
337       return 0;
338     // Conversion to BIC is free, and means we can use ~Imm instead.
339     return std::min(getIntImmCost(Imm, Ty, CostKind),
340                     getIntImmCost(~Imm, Ty, CostKind));
341   }
342 
343   if (Opcode == Instruction::Add)
344     // Conversion to SUB is free, and means we can use -Imm instead.
345     return std::min(getIntImmCost(Imm, Ty, CostKind),
346                     getIntImmCost(-Imm, Ty, CostKind));
347 
348   if (Opcode == Instruction::ICmp && Imm.isNegative() &&
349       Ty->getIntegerBitWidth() == 32) {
350     int64_t NegImm = -Imm.getSExtValue();
351     if (ST->isThumb2() && NegImm < 1<<12)
352       // icmp X, #-C -> cmn X, #C
353       return 0;
354     if (ST->isThumb() && NegImm < 1<<8)
355       // icmp X, #-C -> adds X, #C
356       return 0;
357   }
358 
359   // xor a, -1 can always be folded to MVN
360   if (Opcode == Instruction::Xor && Imm.isAllOnesValue())
361     return 0;
362 
363   // Ensures negative constant of min(max()) or max(min()) patterns that
364   // match to SSAT instructions don't get hoisted
365   if (Inst && ((ST->hasV6Ops() && !ST->isThumb()) || ST->isThumb2()) &&
366       Ty->getIntegerBitWidth() <= 32) {
367     if (isSSATMinMaxPattern(Inst, Imm) ||
368         (isa<ICmpInst>(Inst) && Inst->hasOneUse() &&
369          isSSATMinMaxPattern(cast<Instruction>(*Inst->user_begin()), Imm)))
370       return 0;
371   }
372 
373   return getIntImmCost(Imm, Ty, CostKind);
374 }
375 
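// Control-flow instructions are currently reported as free for vector targets
// under TCK_RecipThroughput; see the FIXME below.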
376 int ARMTTIImpl::getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind) {
377   if (CostKind == TTI::TCK_RecipThroughput &&
378       (ST->hasNEON() || ST->hasMVEIntegerOps())) {
    // FIXME: The vectorizer is highly sensitive to the cost of these
380     // instructions, which suggests that it may be using the costs incorrectly.
381     // But, for now, just make them free to avoid performance regressions for
382     // vector targets.
383     return 0;
384   }
385   return BaseT::getCFInstrCost(Opcode, CostKind);
386 }
387 
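// Cast costs are mostly table driven: extending loads and truncating stores
// are often free, while the NEON and MVE tables below cover the conversions
// with dedicated instructions. Anything else falls back to the base
// implementation, scaled by the MVE cost factor for vector types.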
388 int ARMTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
389                                  TTI::CastContextHint CCH,
390                                  TTI::TargetCostKind CostKind,
391                                  const Instruction *I) {
392   int ISD = TLI->InstructionOpcodeToISD(Opcode);
393   assert(ISD && "Invalid opcode");
394 
395   // TODO: Allow non-throughput costs that aren't binary.
396   auto AdjustCost = [&CostKind](int Cost) {
397     if (CostKind != TTI::TCK_RecipThroughput)
398       return Cost == 0 ? 0 : 1;
399     return Cost;
400   };
401   auto IsLegalFPType = [this](EVT VT) {
402     EVT EltVT = VT.getScalarType();
403     return (EltVT == MVT::f32 && ST->hasVFP2Base()) ||
           (EltVT == MVT::f64 && ST->hasFP64()) ||
           (EltVT == MVT::f16 && ST->hasFullFP16());
406   };
407 
408   EVT SrcTy = TLI->getValueType(DL, Src);
409   EVT DstTy = TLI->getValueType(DL, Dst);
410 
411   if (!SrcTy.isSimple() || !DstTy.isSimple())
412     return AdjustCost(
413         BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
414 
  // Extending masked loads and truncating masked stores are expensive because
  // we currently don't split them. This means that we'll likely end up
  // loading/storing each element individually (hence the high cost).
418   if ((ST->hasMVEIntegerOps() &&
419        (Opcode == Instruction::Trunc || Opcode == Instruction::ZExt ||
420         Opcode == Instruction::SExt)) ||
421       (ST->hasMVEFloatOps() &&
422        (Opcode == Instruction::FPExt || Opcode == Instruction::FPTrunc) &&
423        IsLegalFPType(SrcTy) && IsLegalFPType(DstTy)))
424     if (CCH == TTI::CastContextHint::Masked && DstTy.getSizeInBits() > 128)
425       return 2 * DstTy.getVectorNumElements() * ST->getMVEVectorCostFactor();
426 
  // The extend of other kinds of loads is free.
428   if (CCH == TTI::CastContextHint::Normal ||
429       CCH == TTI::CastContextHint::Masked) {
430     static const TypeConversionCostTblEntry LoadConversionTbl[] = {
431         {ISD::SIGN_EXTEND, MVT::i32, MVT::i16, 0},
432         {ISD::ZERO_EXTEND, MVT::i32, MVT::i16, 0},
433         {ISD::SIGN_EXTEND, MVT::i32, MVT::i8, 0},
434         {ISD::ZERO_EXTEND, MVT::i32, MVT::i8, 0},
435         {ISD::SIGN_EXTEND, MVT::i16, MVT::i8, 0},
436         {ISD::ZERO_EXTEND, MVT::i16, MVT::i8, 0},
437         {ISD::SIGN_EXTEND, MVT::i64, MVT::i32, 1},
438         {ISD::ZERO_EXTEND, MVT::i64, MVT::i32, 1},
439         {ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 1},
440         {ISD::ZERO_EXTEND, MVT::i64, MVT::i16, 1},
441         {ISD::SIGN_EXTEND, MVT::i64, MVT::i8, 1},
442         {ISD::ZERO_EXTEND, MVT::i64, MVT::i8, 1},
443     };
444     if (const auto *Entry = ConvertCostTableLookup(
445             LoadConversionTbl, ISD, DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
446       return AdjustCost(Entry->Cost);
447 
448     static const TypeConversionCostTblEntry MVELoadConversionTbl[] = {
449         {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 0},
450         {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 0},
451         {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 0},
452         {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 0},
453         {ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 0},
454         {ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 0},
        // The following extend from a legal type to an illegal type, so we
        // need to split the load. This introduces an extra load operation, but
        // the extend is still "free".
458         {ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1},
459         {ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1},
460         {ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 3},
461         {ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 3},
462         {ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1},
463         {ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1},
464     };
465     if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
466       if (const auto *Entry =
467               ConvertCostTableLookup(MVELoadConversionTbl, ISD,
468                                      DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
469         return AdjustCost(Entry->Cost * ST->getMVEVectorCostFactor());
470     }
471 
472     static const TypeConversionCostTblEntry MVEFLoadConversionTbl[] = {
473         // FPExtends are similar but also require the VCVT instructions.
474         {ISD::FP_EXTEND, MVT::v4f32, MVT::v4f16, 1},
475         {ISD::FP_EXTEND, MVT::v8f32, MVT::v8f16, 3},
476     };
477     if (SrcTy.isVector() && ST->hasMVEFloatOps()) {
478       if (const auto *Entry =
479               ConvertCostTableLookup(MVEFLoadConversionTbl, ISD,
480                                      DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
481         return AdjustCost(Entry->Cost * ST->getMVEVectorCostFactor());
482     }
483 
484     // The truncate of a store is free. This is the mirror of extends above.
485     static const TypeConversionCostTblEntry MVEStoreConversionTbl[] = {
486         {ISD::TRUNCATE, MVT::v4i32, MVT::v4i16, 0},
487         {ISD::TRUNCATE, MVT::v4i32, MVT::v4i8, 0},
488         {ISD::TRUNCATE, MVT::v8i16, MVT::v8i8, 0},
489         {ISD::TRUNCATE, MVT::v8i32, MVT::v8i16, 1},
490         {ISD::TRUNCATE, MVT::v16i32, MVT::v16i8, 3},
491         {ISD::TRUNCATE, MVT::v16i16, MVT::v16i8, 1},
492     };
493     if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
494       if (const auto *Entry =
495               ConvertCostTableLookup(MVEStoreConversionTbl, ISD,
496                                      SrcTy.getSimpleVT(), DstTy.getSimpleVT()))
497         return AdjustCost(Entry->Cost * ST->getMVEVectorCostFactor());
498     }
499 
500     static const TypeConversionCostTblEntry MVEFStoreConversionTbl[] = {
501         {ISD::FP_ROUND, MVT::v4f32, MVT::v4f16, 1},
502         {ISD::FP_ROUND, MVT::v8f32, MVT::v8f16, 3},
503     };
504     if (SrcTy.isVector() && ST->hasMVEFloatOps()) {
505       if (const auto *Entry =
506               ConvertCostTableLookup(MVEFStoreConversionTbl, ISD,
507                                      SrcTy.getSimpleVT(), DstTy.getSimpleVT()))
508         return AdjustCost(Entry->Cost * ST->getMVEVectorCostFactor());
509     }
510   }
511 
512   // NEON vector operations that can extend their inputs.
513   if ((ISD == ISD::SIGN_EXTEND || ISD == ISD::ZERO_EXTEND) &&
514       I && I->hasOneUse() && ST->hasNEON() && SrcTy.isVector()) {
515     static const TypeConversionCostTblEntry NEONDoubleWidthTbl[] = {
516       // vaddl
517       { ISD::ADD, MVT::v4i32, MVT::v4i16, 0 },
518       { ISD::ADD, MVT::v8i16, MVT::v8i8,  0 },
519       // vsubl
520       { ISD::SUB, MVT::v4i32, MVT::v4i16, 0 },
521       { ISD::SUB, MVT::v8i16, MVT::v8i8,  0 },
522       // vmull
523       { ISD::MUL, MVT::v4i32, MVT::v4i16, 0 },
524       { ISD::MUL, MVT::v8i16, MVT::v8i8,  0 },
525       // vshll
526       { ISD::SHL, MVT::v4i32, MVT::v4i16, 0 },
527       { ISD::SHL, MVT::v8i16, MVT::v8i8,  0 },
528     };
529 
530     auto *User = cast<Instruction>(*I->user_begin());
531     int UserISD = TLI->InstructionOpcodeToISD(User->getOpcode());
532     if (auto *Entry = ConvertCostTableLookup(NEONDoubleWidthTbl, UserISD,
533                                              DstTy.getSimpleVT(),
534                                              SrcTy.getSimpleVT())) {
535       return AdjustCost(Entry->Cost);
536     }
537   }
538 
539   // Single to/from double precision conversions.
540   if (Src->isVectorTy() && ST->hasNEON() &&
541       ((ISD == ISD::FP_ROUND && SrcTy.getScalarType() == MVT::f64 &&
542         DstTy.getScalarType() == MVT::f32) ||
543        (ISD == ISD::FP_EXTEND && SrcTy.getScalarType() == MVT::f32 &&
544         DstTy.getScalarType() == MVT::f64))) {
545     static const CostTblEntry NEONFltDblTbl[] = {
546         // Vector fptrunc/fpext conversions.
547         {ISD::FP_ROUND, MVT::v2f64, 2},
548         {ISD::FP_EXTEND, MVT::v2f32, 2},
549         {ISD::FP_EXTEND, MVT::v4f32, 4}};
550 
551     std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
552     if (const auto *Entry = CostTableLookup(NEONFltDblTbl, ISD, LT.second))
553       return AdjustCost(LT.first * Entry->Cost);
554   }
555 
556   // Some arithmetic, load and store operations have specific instructions
557   // to cast up/down their types automatically at no extra cost.
558   // TODO: Get these tables to know at least what the related operations are.
559   static const TypeConversionCostTblEntry NEONVectorConversionTbl[] = {
560     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
561     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
562     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
563     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
564     { ISD::TRUNCATE,    MVT::v4i32, MVT::v4i64, 0 },
565     { ISD::TRUNCATE,    MVT::v4i16, MVT::v4i32, 1 },
566 
567     // The number of vmovl instructions for the extension.
568     { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8,  1 },
569     { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8,  1 },
570     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8,  2 },
571     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8,  2 },
572     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8,  3 },
573     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8,  3 },
574     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
575     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
576     { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
577     { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
578     { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
579     { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
580     { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
581     { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
582     { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
583     { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
584     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
585     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
586 
587     // Operations that we legalize using splitting.
588     { ISD::TRUNCATE,    MVT::v16i8, MVT::v16i32, 6 },
589     { ISD::TRUNCATE,    MVT::v8i8, MVT::v8i32, 3 },
590 
591     // Vector float <-> i32 conversions.
592     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
593     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
594 
595     { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
596     { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
597     { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
598     { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
599     { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
600     { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
601     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
602     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
603     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
604     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
605     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
606     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
607     { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
608     { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
609     { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
610     { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
611     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
612     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
613     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },
614     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },
615 
616     { ISD::FP_TO_SINT,  MVT::v4i32, MVT::v4f32, 1 },
617     { ISD::FP_TO_UINT,  MVT::v4i32, MVT::v4f32, 1 },
618     { ISD::FP_TO_SINT,  MVT::v4i8, MVT::v4f32, 3 },
619     { ISD::FP_TO_UINT,  MVT::v4i8, MVT::v4f32, 3 },
620     { ISD::FP_TO_SINT,  MVT::v4i16, MVT::v4f32, 2 },
621     { ISD::FP_TO_UINT,  MVT::v4i16, MVT::v4f32, 2 },
622 
623     // Vector double <-> i32 conversions.
624     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
625     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
626 
627     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
628     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
629     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
630     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
631     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
632     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
633 
634     { ISD::FP_TO_SINT,  MVT::v2i32, MVT::v2f64, 2 },
635     { ISD::FP_TO_UINT,  MVT::v2i32, MVT::v2f64, 2 },
636     { ISD::FP_TO_SINT,  MVT::v8i16, MVT::v8f32, 4 },
637     { ISD::FP_TO_UINT,  MVT::v8i16, MVT::v8f32, 4 },
638     { ISD::FP_TO_SINT,  MVT::v16i16, MVT::v16f32, 8 },
639     { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v16f32, 8 }
640   };
641 
642   if (SrcTy.isVector() && ST->hasNEON()) {
643     if (const auto *Entry = ConvertCostTableLookup(NEONVectorConversionTbl, ISD,
644                                                    DstTy.getSimpleVT(),
645                                                    SrcTy.getSimpleVT()))
646       return AdjustCost(Entry->Cost);
647   }
648 
649   // Scalar float to integer conversions.
650   static const TypeConversionCostTblEntry NEONFloatConversionTbl[] = {
651     { ISD::FP_TO_SINT,  MVT::i1, MVT::f32, 2 },
652     { ISD::FP_TO_UINT,  MVT::i1, MVT::f32, 2 },
653     { ISD::FP_TO_SINT,  MVT::i1, MVT::f64, 2 },
654     { ISD::FP_TO_UINT,  MVT::i1, MVT::f64, 2 },
655     { ISD::FP_TO_SINT,  MVT::i8, MVT::f32, 2 },
656     { ISD::FP_TO_UINT,  MVT::i8, MVT::f32, 2 },
657     { ISD::FP_TO_SINT,  MVT::i8, MVT::f64, 2 },
658     { ISD::FP_TO_UINT,  MVT::i8, MVT::f64, 2 },
659     { ISD::FP_TO_SINT,  MVT::i16, MVT::f32, 2 },
660     { ISD::FP_TO_UINT,  MVT::i16, MVT::f32, 2 },
661     { ISD::FP_TO_SINT,  MVT::i16, MVT::f64, 2 },
662     { ISD::FP_TO_UINT,  MVT::i16, MVT::f64, 2 },
663     { ISD::FP_TO_SINT,  MVT::i32, MVT::f32, 2 },
664     { ISD::FP_TO_UINT,  MVT::i32, MVT::f32, 2 },
665     { ISD::FP_TO_SINT,  MVT::i32, MVT::f64, 2 },
666     { ISD::FP_TO_UINT,  MVT::i32, MVT::f64, 2 },
667     { ISD::FP_TO_SINT,  MVT::i64, MVT::f32, 10 },
668     { ISD::FP_TO_UINT,  MVT::i64, MVT::f32, 10 },
669     { ISD::FP_TO_SINT,  MVT::i64, MVT::f64, 10 },
670     { ISD::FP_TO_UINT,  MVT::i64, MVT::f64, 10 }
671   };
672   if (SrcTy.isFloatingPoint() && ST->hasNEON()) {
673     if (const auto *Entry = ConvertCostTableLookup(NEONFloatConversionTbl, ISD,
674                                                    DstTy.getSimpleVT(),
675                                                    SrcTy.getSimpleVT()))
676       return AdjustCost(Entry->Cost);
677   }
678 
679   // Scalar integer to float conversions.
680   static const TypeConversionCostTblEntry NEONIntegerConversionTbl[] = {
681     { ISD::SINT_TO_FP,  MVT::f32, MVT::i1, 2 },
682     { ISD::UINT_TO_FP,  MVT::f32, MVT::i1, 2 },
683     { ISD::SINT_TO_FP,  MVT::f64, MVT::i1, 2 },
684     { ISD::UINT_TO_FP,  MVT::f64, MVT::i1, 2 },
685     { ISD::SINT_TO_FP,  MVT::f32, MVT::i8, 2 },
686     { ISD::UINT_TO_FP,  MVT::f32, MVT::i8, 2 },
687     { ISD::SINT_TO_FP,  MVT::f64, MVT::i8, 2 },
688     { ISD::UINT_TO_FP,  MVT::f64, MVT::i8, 2 },
689     { ISD::SINT_TO_FP,  MVT::f32, MVT::i16, 2 },
690     { ISD::UINT_TO_FP,  MVT::f32, MVT::i16, 2 },
691     { ISD::SINT_TO_FP,  MVT::f64, MVT::i16, 2 },
692     { ISD::UINT_TO_FP,  MVT::f64, MVT::i16, 2 },
693     { ISD::SINT_TO_FP,  MVT::f32, MVT::i32, 2 },
694     { ISD::UINT_TO_FP,  MVT::f32, MVT::i32, 2 },
695     { ISD::SINT_TO_FP,  MVT::f64, MVT::i32, 2 },
696     { ISD::UINT_TO_FP,  MVT::f64, MVT::i32, 2 },
697     { ISD::SINT_TO_FP,  MVT::f32, MVT::i64, 10 },
698     { ISD::UINT_TO_FP,  MVT::f32, MVT::i64, 10 },
699     { ISD::SINT_TO_FP,  MVT::f64, MVT::i64, 10 },
700     { ISD::UINT_TO_FP,  MVT::f64, MVT::i64, 10 }
701   };
702 
703   if (SrcTy.isInteger() && ST->hasNEON()) {
704     if (const auto *Entry = ConvertCostTableLookup(NEONIntegerConversionTbl,
705                                                    ISD, DstTy.getSimpleVT(),
706                                                    SrcTy.getSimpleVT()))
707       return AdjustCost(Entry->Cost);
708   }
709 
  // MVE extend costs, taken from codegen tests. i8->i16 or i16->i32 is one
  // instruction, i8->i32 is two. i64 zexts are a VAND with a constant; sexts
  // are linearised so take more.
713   static const TypeConversionCostTblEntry MVEVectorConversionTbl[] = {
714     { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
715     { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
716     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
717     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
718     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8, 10 },
719     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8, 2 },
720     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
721     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
722     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 10 },
723     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
724     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 8 },
725     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 2 },
726   };
727 
728   if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
729     if (const auto *Entry = ConvertCostTableLookup(MVEVectorConversionTbl,
730                                                    ISD, DstTy.getSimpleVT(),
731                                                    SrcTy.getSimpleVT()))
732       return AdjustCost(Entry->Cost * ST->getMVEVectorCostFactor());
733   }
734 
735   if (ISD == ISD::FP_ROUND || ISD == ISD::FP_EXTEND) {
    // As a general rule, fp converts that were not matched above are
    // scalarized and cost 1 vcvt for each lane, so long as the instruction is
    // available. If not, they will become a series of function calls.
739     const int CallCost = getCallInstrCost(nullptr, Dst, {Src}, CostKind);
740     int Lanes = 1;
741     if (SrcTy.isFixedLengthVector())
742       Lanes = SrcTy.getVectorNumElements();
743 
744     if (IsLegalFPType(SrcTy) && IsLegalFPType(DstTy))
745       return Lanes;
746     else
747       return Lanes * CallCost;
748   }
749 
750   // Scalar integer conversion costs.
751   static const TypeConversionCostTblEntry ARMIntegerConversionTbl[] = {
752     // i16 -> i64 requires two dependent operations.
753     { ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 2 },
754 
755     // Truncates on i64 are assumed to be free.
756     { ISD::TRUNCATE,    MVT::i32, MVT::i64, 0 },
757     { ISD::TRUNCATE,    MVT::i16, MVT::i64, 0 },
758     { ISD::TRUNCATE,    MVT::i8,  MVT::i64, 0 },
759     { ISD::TRUNCATE,    MVT::i1,  MVT::i64, 0 }
760   };
761 
762   if (SrcTy.isInteger()) {
763     if (const auto *Entry = ConvertCostTableLookup(ARMIntegerConversionTbl, ISD,
764                                                    DstTy.getSimpleVT(),
765                                                    SrcTy.getSimpleVT()))
766       return AdjustCost(Entry->Cost);
767   }
768 
769   int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
770                      ? ST->getMVEVectorCostFactor()
771                      : 1;
772   return AdjustCost(
773       BaseCost * BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
774 }
775 
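// Cost of a single lane insert/extract. Moves between general purpose and
// vector registers are penalized to discourage mixing scalar and vector code.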
776 int ARMTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
777                                    unsigned Index) {
  // Penalize inserting into a D-subregister. We end up with a three times
  // lower estimated throughput on Swift.
780   if (ST->hasSlowLoadDSubregister() && Opcode == Instruction::InsertElement &&
781       ValTy->isVectorTy() && ValTy->getScalarSizeInBits() <= 32)
782     return 3;
783 
784   if (ST->hasNEON() && (Opcode == Instruction::InsertElement ||
785                         Opcode == Instruction::ExtractElement)) {
786     // Cross-class copies are expensive on many microarchitectures,
787     // so assume they are expensive by default.
788     if (cast<VectorType>(ValTy)->getElementType()->isIntegerTy())
789       return 3;
790 
    // Even if it's not a cross-class copy, this likely leads to mixing
    // of NEON and VFP code and should therefore be penalized.
793     if (ValTy->isVectorTy() &&
794         ValTy->getScalarSizeInBits() <= 32)
795       return std::max(BaseT::getVectorInstrCost(Opcode, ValTy, Index), 2U);
796   }
797 
798   if (ST->hasMVEIntegerOps() && (Opcode == Instruction::InsertElement ||
799                                  Opcode == Instruction::ExtractElement)) {
    // We say MVE moves cost at least the MVEVectorCostFactor, even though
    // they are scalar instructions. This helps prevent mixing scalar and
    // vector code, so that we do not vectorise where we end up just
    // scalarising the result anyway.
804     return std::max(BaseT::getVectorInstrCost(Opcode, ValTy, Index),
805                     ST->getMVEVectorCostFactor()) *
806            cast<FixedVectorType>(ValTy)->getNumElements() / 2;
807   }
808 
809   return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
810 }
811 
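// Compare/select costs, including a Thumb code-size estimate for scalar
// selects and a table of particularly expensive NEON vector selects.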
812 int ARMTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
813                                    TTI::TargetCostKind CostKind,
814                                    const Instruction *I) {
815   int ISD = TLI->InstructionOpcodeToISD(Opcode);
816 
817   // Thumb scalar code size cost for select.
818   if (CostKind == TTI::TCK_CodeSize && ISD == ISD::SELECT &&
819       ST->isThumb() && !ValTy->isVectorTy()) {
820     // Assume expensive structs.
821     if (TLI->getValueType(DL, ValTy, true) == MVT::Other)
822       return TTI::TCC_Expensive;
823 
824     // Select costs can vary because they:
825     // - may require one or more conditional mov (including an IT),
826     // - can't operate directly on immediates,
827     // - require live flags, which we can't copy around easily.
828     int Cost = TLI->getTypeLegalizationCost(DL, ValTy).first;
829 
830     // Possible IT instruction for Thumb2, or more for Thumb1.
831     ++Cost;
832 
833     // i1 values may need rematerialising by using mov immediates and/or
834     // flag setting instructions.
835     if (ValTy->isIntegerTy(1))
836       ++Cost;
837 
838     return Cost;
839   }
840 
841   if (CostKind != TTI::TCK_RecipThroughput)
842     return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, CostKind, I);
843 
844   // On NEON a vector select gets lowered to vbsl.
845   if (ST->hasNEON() && ValTy->isVectorTy() && ISD == ISD::SELECT) {
846     // Lowering of some vector selects is currently far from perfect.
847     static const TypeConversionCostTblEntry NEONVectorSelectTbl[] = {
848       { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4*4 + 1*2 + 1 },
849       { ISD::SELECT, MVT::v8i1, MVT::v8i64, 50 },
850       { ISD::SELECT, MVT::v16i1, MVT::v16i64, 100 }
851     };
852 
853     EVT SelCondTy = TLI->getValueType(DL, CondTy);
854     EVT SelValTy = TLI->getValueType(DL, ValTy);
855     if (SelCondTy.isSimple() && SelValTy.isSimple()) {
856       if (const auto *Entry = ConvertCostTableLookup(NEONVectorSelectTbl, ISD,
857                                                      SelCondTy.getSimpleVT(),
858                                                      SelValTy.getSimpleVT()))
859         return Entry->Cost;
860     }
861 
862     std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
863     return LT.first;
864   }
865 
866   int BaseCost = ST->hasMVEIntegerOps() && ValTy->isVectorTy()
867                      ? ST->getMVEVectorCostFactor()
868                      : 1;
869   return BaseCost * BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, CostKind,
870                                               I);
871 }
872 
873 int ARMTTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
874                                           const SCEV *Ptr) {
875   // Address computations in vectorized code with non-consecutive addresses will
876   // likely result in more instructions compared to scalar code where the
877   // computation can more often be merged into the index mode. The resulting
878   // extra micro-ops can significantly decrease throughput.
879   unsigned NumVectorInstToHideOverhead = 10;
880   int MaxMergeDistance = 64;
881 
882   if (ST->hasNEON()) {
883     if (Ty->isVectorTy() && SE &&
884         !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
885       return NumVectorInstToHideOverhead;
886 
887     // In many cases the address computation is not merged into the instruction
888     // addressing mode.
889     return 1;
890   }
891   return BaseT::getAddressComputationCost(Ty, SE, Ptr);
892 }
893 
894 bool ARMTTIImpl::isProfitableLSRChainElement(Instruction *I) {
895   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
896     // If a VCTP is part of a chain, it's already profitable and shouldn't be
897     // optimized, else LSR may block tail-predication.
898     switch (II->getIntrinsicID()) {
899     case Intrinsic::arm_mve_vctp8:
900     case Intrinsic::arm_mve_vctp16:
901     case Intrinsic::arm_mve_vctp32:
902     case Intrinsic::arm_mve_vctp64:
903       return true;
904     default:
905       break;
906     }
907   }
908   return false;
909 }
910 
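// MVE masked loads are legal for 8-bit elements at any alignment and for
// 16/32-bit elements when at least naturally aligned, but not yet for
// 2-element vectors or for floating-point vectors that would need extending.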
911 bool ARMTTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) {
912   if (!EnableMaskedLoadStores || !ST->hasMVEIntegerOps())
913     return false;
914 
915   if (auto *VecTy = dyn_cast<FixedVectorType>(DataTy)) {
916     // Don't support v2i1 yet.
917     if (VecTy->getNumElements() == 2)
918       return false;
919 
920     // We don't support extending fp types.
    unsigned VecWidth = DataTy->getPrimitiveSizeInBits();
922     if (VecWidth != 128 && VecTy->getElementType()->isFloatingPointTy())
923       return false;
924   }
925 
926   unsigned EltWidth = DataTy->getScalarSizeInBits();
927   return (EltWidth == 32 && Alignment >= 4) ||
928          (EltWidth == 16 && Alignment >= 2) || (EltWidth == 8);
929 }
930 
931 bool ARMTTIImpl::isLegalMaskedGather(Type *Ty, Align Alignment) {
932   if (!EnableMaskedGatherScatters || !ST->hasMVEIntegerOps())
933     return false;
934 
935   // This method is called in 2 places:
936   //  - from the vectorizer with a scalar type, in which case we need to get
937   //  this as good as we can with the limited info we have (and rely on the cost
938   //  model for the rest).
939   //  - from the masked intrinsic lowering pass with the actual vector type.
940   // For MVE, we have a custom lowering pass that will already have custom
941   // legalised any gathers that we can to MVE intrinsics, and want to expand all
942   // the rest. The pass runs before the masked intrinsic lowering pass, so if we
943   // are here, we know we want to expand.
944   if (isa<VectorType>(Ty))
945     return false;
946 
947   unsigned EltWidth = Ty->getScalarSizeInBits();
948   return ((EltWidth == 32 && Alignment >= 4) ||
949           (EltWidth == 16 && Alignment >= 2) || EltWidth == 8);
950 }
951 
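// Estimate a memcpy as the number of load and store operations the backend
// would expand it to, falling back to a library-call cost when the size is
// unknown or no good lowering is found.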
952 int ARMTTIImpl::getMemcpyCost(const Instruction *I) {
953   const MemCpyInst *MI = dyn_cast<MemCpyInst>(I);
954   assert(MI && "MemcpyInst expected");
955   ConstantInt *C = dyn_cast<ConstantInt>(MI->getLength());
956 
957   // To model the cost of a library call, we assume 1 for the call, and
958   // 3 for the argument setup.
959   const unsigned LibCallCost = 4;
960 
961   // If 'size' is not a constant, a library call will be generated.
962   if (!C)
963     return LibCallCost;
964 
965   const unsigned Size = C->getValue().getZExtValue();
966   const Align DstAlign = *MI->getDestAlign();
967   const Align SrcAlign = *MI->getSourceAlign();
968   const Function *F = I->getParent()->getParent();
969   const unsigned Limit = TLI->getMaxStoresPerMemmove(F->hasMinSize());
970   std::vector<EVT> MemOps;
971 
  // MemOps will be populated with a list of data types that need to be
  // loaded and stored. That's why we multiply the number of elements by 2 to
  // get the cost for this memcpy.
975   if (getTLI()->findOptimalMemOpLowering(
976           MemOps, Limit,
977           MemOp::Copy(Size, /*DstAlignCanChange*/ false, DstAlign, SrcAlign,
978                       /*IsVolatile*/ true),
979           MI->getDestAddressSpace(), MI->getSourceAddressSpace(),
980           F->getAttributes()))
981     return MemOps.size() * 2;
982 
  // If we can't find an optimal memop lowering, return the default cost.
984   return LibCallCost;
985 }
986 
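// Shuffle costs: broadcast, reverse and select shuffles that map directly
// onto NEON or MVE instructions are costed from the tables below; everything
// else falls back to the base implementation.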
987 int ARMTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp,
988                                int Index, VectorType *SubTp) {
989   if (ST->hasNEON()) {
990     if (Kind == TTI::SK_Broadcast) {
991       static const CostTblEntry NEONDupTbl[] = {
992           // VDUP handles these cases.
993           {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
994           {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
995           {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
996           {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
997           {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
998           {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},
999 
1000           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
1001           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
1002           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
1003           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1}};
1004 
1005       std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
1006 
1007       if (const auto *Entry =
1008               CostTableLookup(NEONDupTbl, ISD::VECTOR_SHUFFLE, LT.second))
1009         return LT.first * Entry->Cost;
1010     }
1011     if (Kind == TTI::SK_Reverse) {
1012       static const CostTblEntry NEONShuffleTbl[] = {
1013           // Reverse shuffle cost one instruction if we are shuffling within a
1014           // double word (vrev) or two if we shuffle a quad word (vrev, vext).
1015           {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
1016           {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
1017           {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
1018           {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
1019           {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
1020           {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},
1021 
1022           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
1023           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
1024           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 2},
1025           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 2}};
1026 
1027       std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
1028 
1029       if (const auto *Entry =
1030               CostTableLookup(NEONShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second))
1031         return LT.first * Entry->Cost;
1032     }
1033     if (Kind == TTI::SK_Select) {
1034       static const CostTblEntry NEONSelShuffleTbl[] = {
          // Select shuffle cost table for ARM. Cost is the number of
          // instructions required to create the shuffled vector.
1038 
1039           {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
1040           {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
1041           {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
1042           {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
1043 
1044           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
1045           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
1046           {ISD::VECTOR_SHUFFLE, MVT::v4i16, 2},
1047 
1048           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 16},
1049 
1050           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 32}};
1051 
1052       std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
1053       if (const auto *Entry = CostTableLookup(NEONSelShuffleTbl,
1054                                               ISD::VECTOR_SHUFFLE, LT.second))
1055         return LT.first * Entry->Cost;
1056     }
1057   }
1058   if (ST->hasMVEIntegerOps()) {
1059     if (Kind == TTI::SK_Broadcast) {
1060       static const CostTblEntry MVEDupTbl[] = {
1061           // VDUP handles these cases.
1062           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
1063           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
1064           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1},
1065           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
1066           {ISD::VECTOR_SHUFFLE, MVT::v8f16, 1}};
1067 
1068       std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
1069 
1070       if (const auto *Entry = CostTableLookup(MVEDupTbl, ISD::VECTOR_SHUFFLE,
1071                                               LT.second))
1072         return LT.first * Entry->Cost * ST->getMVEVectorCostFactor();
1073     }
1074   }
1075   int BaseCost = ST->hasMVEIntegerOps() && Tp->isVectorTy()
1076                      ? ST->getMVEVectorCostFactor()
1077                      : 1;
1078   return BaseCost * BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
1079 }
1080 
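// Arithmetic costs. Vector division on NEON is modelled as roughly a libcall
// per lane, shifts that fold into their user are free, and MVE vector
// operations are scaled by the MVE cost factor.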
1081 int ARMTTIImpl::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
1082                                        TTI::TargetCostKind CostKind,
1083                                        TTI::OperandValueKind Op1Info,
1084                                        TTI::OperandValueKind Op2Info,
1085                                        TTI::OperandValueProperties Opd1PropInfo,
1086                                        TTI::OperandValueProperties Opd2PropInfo,
1087                                        ArrayRef<const Value *> Args,
1088                                        const Instruction *CxtI) {
1089   int ISDOpcode = TLI->InstructionOpcodeToISD(Opcode);
1090   if (ST->isThumb() && CostKind == TTI::TCK_CodeSize && Ty->isIntegerTy(1)) {
1091     // Make operations on i1 relatively expensive as this often involves
1092     // combining predicates. AND and XOR should be easier to handle with IT
1093     // blocks.
1094     switch (ISDOpcode) {
1095     default:
1096       break;
1097     case ISD::AND:
1098     case ISD::XOR:
1099       return 2;
1100     case ISD::OR:
1101       return 3;
1102     }
1103   }
1104 
1105   // TODO: Handle more cost kinds.
1106   if (CostKind != TTI::TCK_RecipThroughput)
1107     return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
1108                                          Op2Info, Opd1PropInfo,
1109                                          Opd2PropInfo, Args, CxtI);
1110 
1111   std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
1112 
1113   if (ST->hasNEON()) {
1114     const unsigned FunctionCallDivCost = 20;
1115     const unsigned ReciprocalDivCost = 10;
1116     static const CostTblEntry CostTbl[] = {
1117       // Division.
      // These costs are somewhat random. Choose a cost of 20 to indicate that
      // vectorizing division (added function call) is going to be very
      // expensive.
1120       // Double registers types.
1121       { ISD::SDIV, MVT::v1i64, 1 * FunctionCallDivCost},
1122       { ISD::UDIV, MVT::v1i64, 1 * FunctionCallDivCost},
1123       { ISD::SREM, MVT::v1i64, 1 * FunctionCallDivCost},
1124       { ISD::UREM, MVT::v1i64, 1 * FunctionCallDivCost},
1125       { ISD::SDIV, MVT::v2i32, 2 * FunctionCallDivCost},
1126       { ISD::UDIV, MVT::v2i32, 2 * FunctionCallDivCost},
1127       { ISD::SREM, MVT::v2i32, 2 * FunctionCallDivCost},
1128       { ISD::UREM, MVT::v2i32, 2 * FunctionCallDivCost},
1129       { ISD::SDIV, MVT::v4i16,     ReciprocalDivCost},
1130       { ISD::UDIV, MVT::v4i16,     ReciprocalDivCost},
1131       { ISD::SREM, MVT::v4i16, 4 * FunctionCallDivCost},
1132       { ISD::UREM, MVT::v4i16, 4 * FunctionCallDivCost},
1133       { ISD::SDIV, MVT::v8i8,      ReciprocalDivCost},
1134       { ISD::UDIV, MVT::v8i8,      ReciprocalDivCost},
1135       { ISD::SREM, MVT::v8i8,  8 * FunctionCallDivCost},
1136       { ISD::UREM, MVT::v8i8,  8 * FunctionCallDivCost},
1137       // Quad register types.
1138       { ISD::SDIV, MVT::v2i64, 2 * FunctionCallDivCost},
1139       { ISD::UDIV, MVT::v2i64, 2 * FunctionCallDivCost},
1140       { ISD::SREM, MVT::v2i64, 2 * FunctionCallDivCost},
1141       { ISD::UREM, MVT::v2i64, 2 * FunctionCallDivCost},
1142       { ISD::SDIV, MVT::v4i32, 4 * FunctionCallDivCost},
1143       { ISD::UDIV, MVT::v4i32, 4 * FunctionCallDivCost},
1144       { ISD::SREM, MVT::v4i32, 4 * FunctionCallDivCost},
1145       { ISD::UREM, MVT::v4i32, 4 * FunctionCallDivCost},
1146       { ISD::SDIV, MVT::v8i16, 8 * FunctionCallDivCost},
1147       { ISD::UDIV, MVT::v8i16, 8 * FunctionCallDivCost},
1148       { ISD::SREM, MVT::v8i16, 8 * FunctionCallDivCost},
1149       { ISD::UREM, MVT::v8i16, 8 * FunctionCallDivCost},
1150       { ISD::SDIV, MVT::v16i8, 16 * FunctionCallDivCost},
1151       { ISD::UDIV, MVT::v16i8, 16 * FunctionCallDivCost},
1152       { ISD::SREM, MVT::v16i8, 16 * FunctionCallDivCost},
1153       { ISD::UREM, MVT::v16i8, 16 * FunctionCallDivCost},
1154       // Multiplication.
1155     };
1156 
1157     if (const auto *Entry = CostTableLookup(CostTbl, ISDOpcode, LT.second))
1158       return LT.first * Entry->Cost;
1159 
1160     int Cost = BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
1161                                              Op2Info,
1162                                              Opd1PropInfo, Opd2PropInfo);
1163 
1164     // This is somewhat of a hack. The problem that we are facing is that SROA
1165     // creates a sequence of shift, and, or instructions to construct values.
1166     // These sequences are recognized by the ISel and have zero-cost. Not so for
1167     // the vectorized code. Because we have support for v2i64 but not i64 those
1168     // sequences look particularly beneficial to vectorize.
1169     // To work around this we increase the cost of v2i64 operations to make them
1170     // seem less beneficial.
1171     if (LT.second == MVT::v2i64 &&
1172         Op2Info == TargetTransformInfo::OK_UniformConstantValue)
1173       Cost += 4;
1174 
1175     return Cost;
1176   }
1177 
1178   // If this operation is a shift on arm/thumb2, it might well be folded into
1179   // the following instruction, hence having a cost of 0.
1180   auto LooksLikeAFreeShift = [&]() {
1181     if (ST->isThumb1Only() || Ty->isVectorTy())
1182       return false;
1183 
1184     if (!CxtI || !CxtI->hasOneUse() || !CxtI->isShift())
1185       return false;
1186     if (Op2Info != TargetTransformInfo::OK_UniformConstantValue)
1187       return false;
1188 
    // Folded into an ADC/ADD/AND/BIC/CMP/EOR/MVN/ORR/ORN/RSB/SBC/SUB
1190     switch (cast<Instruction>(CxtI->user_back())->getOpcode()) {
1191     case Instruction::Add:
1192     case Instruction::Sub:
1193     case Instruction::And:
1194     case Instruction::Xor:
1195     case Instruction::Or:
1196     case Instruction::ICmp:
1197       return true;
1198     default:
1199       return false;
1200     }
1201   };
1202   if (LooksLikeAFreeShift())
1203     return 0;
1204 
1205   int BaseCost = ST->hasMVEIntegerOps() && Ty->isVectorTy()
1206                      ? ST->getMVEVectorCostFactor()
1207                      : 1;
1208 
  // The rest of this mostly follows what is done in
  // BaseT::getArithmeticInstrCost, without treating floats as more expensive
  // than scalars or increasing the costs for custom operations. The result is
  // also multiplied by the MVEVectorCostFactor where appropriate.
1213   if (TLI->isOperationLegalOrCustomOrPromote(ISDOpcode, LT.second))
1214     return LT.first * BaseCost;
1215 
1216   // Else this is expand, assume that we need to scalarize this op.
1217   if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
1218     unsigned Num = VTy->getNumElements();
1219     unsigned Cost = getArithmeticInstrCost(Opcode, Ty->getScalarType(),
1220                                            CostKind);
    // Return the cost of multiple scalar invocations plus the cost of
    // inserting and extracting the values.
1223     return BaseT::getScalarizationOverhead(VTy, Args) + Num * Cost;
1224   }
1225 
1226   return BaseCost;
1227 }
1228 
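// Load/store costs, accounting for slow unaligned NEON f64 accesses and for
// the MVE extending loads and truncating stores of half-precision vectors.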
1229 int ARMTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
1230                                 MaybeAlign Alignment, unsigned AddressSpace,
1231                                 TTI::TargetCostKind CostKind,
1232                                 const Instruction *I) {
1233   // TODO: Handle other cost kinds.
1234   if (CostKind != TTI::TCK_RecipThroughput)
1235     return 1;
1236 
1237   // Type legalization can't handle structs
1238   if (TLI->getValueType(DL, Src, true) == MVT::Other)
1239     return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
1240                                   CostKind);
1241 
1242   if (ST->hasNEON() && Src->isVectorTy() &&
1243       (Alignment && *Alignment != Align(16)) &&
1244       cast<VectorType>(Src)->getElementType()->isDoubleTy()) {
1245     // Unaligned loads/stores are extremely inefficient.
  // We need 4 uops for vst.1/vld.1 vs 1 uop for vldr/vstr.
1247     std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
1248     return LT.first * 4;
1249   }
1250 
1251   // MVE can optimize a fpext(load(4xhalf)) using an extending integer load.
1252   // Same for stores.
1253   if (ST->hasMVEFloatOps() && isa<FixedVectorType>(Src) && I &&
1254       ((Opcode == Instruction::Load && I->hasOneUse() &&
1255         isa<FPExtInst>(*I->user_begin())) ||
1256        (Opcode == Instruction::Store && isa<FPTruncInst>(I->getOperand(0))))) {
1257     FixedVectorType *SrcVTy = cast<FixedVectorType>(Src);
1258     Type *DstTy =
1259         Opcode == Instruction::Load
1260             ? (*I->user_begin())->getType()
1261             : cast<Instruction>(I->getOperand(0))->getOperand(0)->getType();
1262     if (SrcVTy->getNumElements() == 4 && SrcVTy->getScalarType()->isHalfTy() &&
1263         DstTy->getScalarType()->isFloatTy())
1264       return ST->getMVEVectorCostFactor();
1265   }
1266 
1267   int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
1268                      ? ST->getMVEVectorCostFactor()
1269                      : 1;
1270   return BaseCost * BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
1271                                            CostKind, I);
1272 }
1273 
1274 int ARMTTIImpl::getInterleavedMemoryOpCost(
1275     unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
1276     Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
1277     bool UseMaskForCond, bool UseMaskForGaps) {
1278   assert(Factor >= 2 && "Invalid interleave factor");
1279   assert(isa<VectorType>(VecTy) && "Expect a vector type");
1280 
  // vldN/vstN don't support vector types with i64/f64 elements.
1282   bool EltIs64Bits = DL.getTypeSizeInBits(VecTy->getScalarType()) == 64;
1283 
1284   if (Factor <= TLI->getMaxSupportedInterleaveFactor() && !EltIs64Bits &&
1285       !UseMaskForCond && !UseMaskForGaps) {
1286     unsigned NumElts = cast<FixedVectorType>(VecTy)->getNumElements();
1287     auto *SubVecTy =
1288         FixedVectorType::get(VecTy->getScalarType(), NumElts / Factor);
1289 
1290     // vldN/vstN only support legal vector types of size 64 or 128 in bits.
1291     // Accesses having vector types that are a multiple of 128 bits can be
1292     // matched to more than one vldN/vstN instruction.
1293     int BaseCost = ST->hasMVEIntegerOps() ? ST->getMVEVectorCostFactor() : 1;
1294     if (NumElts % Factor == 0 &&
1295         TLI->isLegalInterleavedAccessType(Factor, SubVecTy, DL))
1296       return Factor * BaseCost * TLI->getNumInterleavedAccesses(SubVecTy, DL);
1297 
1298     // Some smaller than legal interleaved patterns are cheap as we can make
1299     // use of the vmovn or vrev patterns to interleave a standard load. This is
1300     // true for v4i8, v8i8 and v4i16 at least (but not for v4f16 as it is
1301     // promoted differently). The cost of 2 here is then a load and vrev or
1302     // vmovn.
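    // For example, a factor-2 interleaved access with v4i8 sub-vectors (a
    // v8i8 load de-interleaved into two v4i8 values) is costed at
    // 2 * BaseCost: one load plus one vrev/vmovn style shuffle.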
1303     if (ST->hasMVEIntegerOps() && Factor == 2 && NumElts / Factor > 2 &&
1304         VecTy->isIntOrIntVectorTy() &&
1305         DL.getTypeSizeInBits(SubVecTy).getFixedSize() <= 64)
1306       return 2 * BaseCost;
1307   }
1308 
1309   return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
1310                                            Alignment, AddressSpace, CostKind,
1311                                            UseMaskForCond, UseMaskForGaps);
1312 }
1313 
1314 unsigned ARMTTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
1315                                             const Value *Ptr, bool VariableMask,
1316                                             Align Alignment,
1317                                             TTI::TargetCostKind CostKind,
1318                                             const Instruction *I) {
1319   using namespace PatternMatch;
1320   if (!ST->hasMVEIntegerOps() || !EnableMaskedGatherScatters)
1321     return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
1322                                          Alignment, CostKind, I);
1323 
1324   assert(DataTy->isVectorTy() && "Can't do gather/scatters on scalar!");
1325   auto *VTy = cast<FixedVectorType>(DataTy);
1326 
1327   // TODO: Splitting, once we do that.
1328 
1329   unsigned NumElems = VTy->getNumElements();
1330   unsigned EltSize = VTy->getScalarSizeInBits();
1331   std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, DataTy);
1332 
1333   // For now, it is assumed that for the MVE gather instructions the loads are
1334   // all effectively serialised. This means the cost is the scalar cost
1335   // multiplied by the number of elements being loaded. This is possibly very
1336   // conservative, but even so we still end up vectorising loops because the
1337   // cost per iteration for many loops is lower than for scalar loops.
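  // For example, assuming a <4 x i32> gather that legalizes in one step
  // (LT.first == 1), VectorCost below is 4 * MVEVectorCostFactor.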
1338   unsigned VectorCost = NumElems * LT.first * ST->getMVEVectorCostFactor();
1339   // The scalarization cost should be a lot higher. We use the number of vector
1340   // elements plus the scalarization overhead.
1341   unsigned ScalarCost =
1342       NumElems * LT.first + BaseT::getScalarizationOverhead(VTy, {});
1343 
1344   if (Alignment < EltSize / 8)
1345     return ScalarCost;
1346 
1347   unsigned ExtSize = EltSize;
1348   // Check whether there's a single user that asks for an extended type
1349   if (I != nullptr) {
    // Depending on the caller of this function, a gather instruction will
    // either have opcode Instruction::Load or be a call to the masked_gather
    // intrinsic.
1353     if ((I->getOpcode() == Instruction::Load ||
1354          match(I, m_Intrinsic<Intrinsic::masked_gather>())) &&
1355         I->hasOneUse()) {
1356       const User *Us = *I->users().begin();
1357       if (isa<ZExtInst>(Us) || isa<SExtInst>(Us)) {
        // Only allow valid type combinations.
1359         unsigned TypeSize =
1360             cast<Instruction>(Us)->getType()->getScalarSizeInBits();
1361         if (((TypeSize == 32 && (EltSize == 8 || EltSize == 16)) ||
1362              (TypeSize == 16 && EltSize == 8)) &&
1363             TypeSize * NumElems == 128) {
1364           ExtSize = TypeSize;
1365         }
1366       }
1367     }
1368     // Check whether the input data needs to be truncated
1369     TruncInst *T;
1370     if ((I->getOpcode() == Instruction::Store ||
1371          match(I, m_Intrinsic<Intrinsic::masked_scatter>())) &&
1372         (T = dyn_cast<TruncInst>(I->getOperand(0)))) {
1373       // Only allow valid type combinations
1374       unsigned TypeSize = T->getOperand(0)->getType()->getScalarSizeInBits();
1375       if (((EltSize == 16 && TypeSize == 32) ||
1376            (EltSize == 8 && (TypeSize == 32 || TypeSize == 16))) &&
1377           TypeSize * NumElems == 128)
1378         ExtSize = TypeSize;
1379     }
1380   }
1381 
1382   if (ExtSize * NumElems != 128 || NumElems < 4)
1383     return ScalarCost;
1384 
1385   // Any (aligned) i32 gather will not need to be scalarised.
1386   if (ExtSize == 32)
1387     return VectorCost;
1388   // For smaller types, we need to ensure that the gep's inputs are correctly
1389   // extended from a small enough value. Other sizes (including i64) are
1390   // scalarized for now.
1391   if (ExtSize != 8 && ExtSize != 16)
1392     return ScalarCost;
1393 
1394   if (const auto *BC = dyn_cast<BitCastInst>(Ptr))
1395     Ptr = BC->getOperand(0);
1396   if (const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
1397     if (GEP->getNumOperands() != 2)
1398       return ScalarCost;
1399     unsigned Scale = DL.getTypeAllocSize(GEP->getResultElementType());
1400     // Scale needs to be correct (which is only relevant for i16s).
1401     if (Scale != 1 && Scale * 8 != ExtSize)
1402       return ScalarCost;
1403     // And we need to zext (not sext) the indexes from a small enough type.
1404     if (const auto *ZExt = dyn_cast<ZExtInst>(GEP->getOperand(1))) {
1405       if (ZExt->getOperand(0)->getType()->getScalarSizeInBits() <= ExtSize)
1406         return VectorCost;
1407     }
1408     return ScalarCost;
1409   }
1410   return ScalarCost;
1411 }
1412 
1413 int ARMTTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
1414                                            bool IsPairwiseForm,
1415                                            TTI::TargetCostKind CostKind) {
1416   EVT ValVT = TLI->getValueType(DL, ValTy);
1417   int ISD = TLI->InstructionOpcodeToISD(Opcode);
1418   if (!ST->hasMVEIntegerOps() || !ValVT.isSimple() || ISD != ISD::ADD)
1419     return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwiseForm,
1420                                              CostKind);
1421 
1422   std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
1423 
1424   static const CostTblEntry CostTblAdd[]{
1425       {ISD::ADD, MVT::v16i8, 1},
1426       {ISD::ADD, MVT::v8i16, 1},
1427       {ISD::ADD, MVT::v4i32, 1},
1428   };
1429   if (const auto *Entry = CostTableLookup(CostTblAdd, ISD, LT.second))
1430     return Entry->Cost * ST->getMVEVectorCostFactor() * LT.first;
1431 
1432   return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwiseForm,
1433                                            CostKind);
1434 }
1435 
1436 int ARMTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
1437                                       TTI::TargetCostKind CostKind) {
  // Currently we make a somewhat optimistic assumption that
  // active_lane_mask's are always free. In reality one may be freely folded
  // into a tail-predicated loop, expanded into a VCTP or expanded into a lot
  // of add/icmp code. We may need to improve this in the future, but being
  // able to detect whether it is free or not involves looking at a lot of
  // other code. We currently assume that the vectorizer inserted these, and
  // knew what it was doing in adding one.
1445   if (ST->hasMVEIntegerOps() && ICA.getID() == Intrinsic::get_active_lane_mask)
1446     return 0;
1447 
1448   return BaseT::getIntrinsicInstrCost(ICA, CostKind);
1449 }
1450 
1451 bool ARMTTIImpl::isLoweredToCall(const Function *F) {
1452   if (!F->isIntrinsic())
    return BaseT::isLoweredToCall(F);
1454 
1455   // Assume all Arm-specific intrinsics map to an instruction.
1456   if (F->getName().startswith("llvm.arm"))
1457     return false;
1458 
1459   switch (F->getIntrinsicID()) {
1460   default: break;
1461   case Intrinsic::powi:
1462   case Intrinsic::sin:
1463   case Intrinsic::cos:
1464   case Intrinsic::pow:
1465   case Intrinsic::log:
1466   case Intrinsic::log10:
1467   case Intrinsic::log2:
1468   case Intrinsic::exp:
1469   case Intrinsic::exp2:
1470     return true;
1471   case Intrinsic::sqrt:
1472   case Intrinsic::fabs:
1473   case Intrinsic::copysign:
1474   case Intrinsic::floor:
1475   case Intrinsic::ceil:
1476   case Intrinsic::trunc:
1477   case Intrinsic::rint:
1478   case Intrinsic::nearbyint:
1479   case Intrinsic::round:
1480   case Intrinsic::canonicalize:
1481   case Intrinsic::lround:
1482   case Intrinsic::llround:
1483   case Intrinsic::lrint:
1484   case Intrinsic::llrint:
1485     if (F->getReturnType()->isDoubleTy() && !ST->hasFP64())
1486       return true;
1487     if (F->getReturnType()->isHalfTy() && !ST->hasFullFP16())
1488       return true;
    // Some operations can be handled by vector instructions; assume
    // unsupported vector operations will be expanded into supported scalar
    // ones.
    // TODO: Handle scalar operations properly.
1492     return !ST->hasFPARMv8Base() && !ST->hasVFP2Base();
1493   case Intrinsic::masked_store:
1494   case Intrinsic::masked_load:
1495   case Intrinsic::masked_gather:
1496   case Intrinsic::masked_scatter:
1497     return !ST->hasMVEIntegerOps();
1498   case Intrinsic::sadd_with_overflow:
1499   case Intrinsic::uadd_with_overflow:
1500   case Intrinsic::ssub_with_overflow:
1501   case Intrinsic::usub_with_overflow:
1502   case Intrinsic::sadd_sat:
1503   case Intrinsic::uadd_sat:
1504   case Intrinsic::ssub_sat:
1505   case Intrinsic::usub_sat:
1506     return false;
1507   }
1508 
1509   return BaseT::isLoweredToCall(F);
1510 }
1511 
1512 bool ARMTTIImpl::maybeLoweredToCall(Instruction &I) {
1513   unsigned ISD = TLI->InstructionOpcodeToISD(I.getOpcode());
1514   EVT VT = TLI->getValueType(DL, I.getType(), true);
1515   if (TLI->getOperationAction(ISD, VT) == TargetLowering::LibCall)
1516     return true;
1517 
1518   // Check if an intrinsic will be lowered to a call and assume that any
1519   // other CallInst will generate a bl.
1520   if (auto *Call = dyn_cast<CallInst>(&I)) {
1521     if (isa<IntrinsicInst>(Call)) {
1522       if (const Function *F = Call->getCalledFunction())
1523         return isLoweredToCall(F);
1524     }
1525     return true;
1526   }
1527 
1528   // FPv5 provides conversions between integer, double-precision,
1529   // single-precision, and half-precision formats.
1530   switch (I.getOpcode()) {
1531   default:
1532     break;
1533   case Instruction::FPToSI:
1534   case Instruction::FPToUI:
1535   case Instruction::SIToFP:
1536   case Instruction::UIToFP:
1537   case Instruction::FPTrunc:
1538   case Instruction::FPExt:
1539     return !ST->hasFPARMv8Base();
1540   }
1541 
  // FIXME: Unfortunately the approach of checking the Operation Action does
  // not catch all cases of Legalization that use library calls. Our
  // Legalization step categorizes some transformations into library calls as
  // Custom, Expand or even Legal when doing type legalization. So for now we
  // have to special-case, for instance, the SDIV of 64-bit integers and the
  // use of floating point emulation.
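  // For example, on AEABI targets a 64-bit signed division is lowered to a
  // call to __aeabi_ldivmod rather than being reported via the Operation
  // Action check above.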
1548   if (VT.isInteger() && VT.getSizeInBits() >= 64) {
1549     switch (ISD) {
1550     default:
1551       break;
1552     case ISD::SDIV:
1553     case ISD::UDIV:
1554     case ISD::SREM:
1555     case ISD::UREM:
1556     case ISD::SDIVREM:
1557     case ISD::UDIVREM:
1558       return true;
1559     }
1560   }
1561 
1562   // Assume all other non-float operations are supported.
1563   if (!VT.isFloatingPoint())
1564     return false;
1565 
  // We'll need a library call to handle most floats when using soft-float.
1567   if (TLI->useSoftFloat()) {
1568     switch (I.getOpcode()) {
1569     default:
1570       return true;
1571     case Instruction::Alloca:
1572     case Instruction::Load:
1573     case Instruction::Store:
1574     case Instruction::Select:
1575     case Instruction::PHI:
1576       return false;
1577     }
1578   }
1579 
1580   // We'll need a libcall to perform double precision operations on a single
1581   // precision only FPU.
1582   if (I.getType()->isDoubleTy() && !ST->hasFP64())
1583     return true;
1584 
1585   // Likewise for half precision arithmetic.
1586   if (I.getType()->isHalfTy() && !ST->hasFullFP16())
1587     return true;
1588 
1589   return false;
1590 }
1591 
1592 bool ARMTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
1593                                           AssumptionCache &AC,
1594                                           TargetLibraryInfo *LibInfo,
1595                                           HardwareLoopInfo &HWLoopInfo) {
1596   // Low-overhead branches are only supported in the 'low-overhead branch'
1597   // extension of v8.1-m.
1598   if (!ST->hasLOB() || DisableLowOverheadLoops) {
1599     LLVM_DEBUG(dbgs() << "ARMHWLoops: Disabled\n");
1600     return false;
1601   }
1602 
1603   if (!SE.hasLoopInvariantBackedgeTakenCount(L)) {
1604     LLVM_DEBUG(dbgs() << "ARMHWLoops: No BETC\n");
1605     return false;
1606   }
1607 
1608   const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
1609   if (isa<SCEVCouldNotCompute>(BackedgeTakenCount)) {
1610     LLVM_DEBUG(dbgs() << "ARMHWLoops: Uncomputable BETC\n");
1611     return false;
1612   }
1613 
1614   const SCEV *TripCountSCEV =
1615     SE.getAddExpr(BackedgeTakenCount,
1616                   SE.getOne(BackedgeTakenCount->getType()));
1617 
1618   // We need to store the trip count in LR, a 32-bit register.
1619   if (SE.getUnsignedRangeMax(TripCountSCEV).getBitWidth() > 32) {
1620     LLVM_DEBUG(dbgs() << "ARMHWLoops: Trip count does not fit into 32bits\n");
1621     return false;
1622   }
1623 
1624   // Making a call will trash LR and clear LO_BRANCH_INFO, so there's little
1625   // point in generating a hardware loop if that's going to happen.
1626 
1627   auto IsHardwareLoopIntrinsic = [](Instruction &I) {
1628     if (auto *Call = dyn_cast<IntrinsicInst>(&I)) {
1629       switch (Call->getIntrinsicID()) {
1630       default:
1631         break;
1632       case Intrinsic::set_loop_iterations:
1633       case Intrinsic::test_set_loop_iterations:
1634       case Intrinsic::loop_decrement:
1635       case Intrinsic::loop_decrement_reg:
1636         return true;
1637       }
1638     }
1639     return false;
1640   };
1641 
1642   // Scan the instructions to see if there's any that we know will turn into a
1643   // call or if this loop is already a low-overhead loop.
1644   auto ScanLoop = [&](Loop *L) {
1645     for (auto *BB : L->getBlocks()) {
1646       for (auto &I : *BB) {
1647         if (maybeLoweredToCall(I) || IsHardwareLoopIntrinsic(I)) {
1648           LLVM_DEBUG(dbgs() << "ARMHWLoops: Bad instruction: " << I << "\n");
1649           return false;
1650         }
1651       }
1652     }
1653     return true;
1654   };
1655 
1656   // Visit inner loops.
  for (auto *Inner : *L)
1658     if (!ScanLoop(Inner))
1659       return false;
1660 
1661   if (!ScanLoop(L))
1662     return false;
1663 
1664   // TODO: Check whether the trip count calculation is expensive. If L is the
1665   // inner loop but we know it has a low trip count, calculating that trip
1666   // count (in the parent loop) may be detrimental.
1667 
1668   LLVMContext &C = L->getHeader()->getContext();
1669   HWLoopInfo.CounterInReg = true;
1670   HWLoopInfo.IsNestingLegal = false;
1671   HWLoopInfo.PerformEntryTest = true;
1672   HWLoopInfo.CountType = Type::getInt32Ty(C);
1673   HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1);
1674   return true;
1675 }
1676 
1677 static bool canTailPredicateInstruction(Instruction &I, int &ICmpCount) {
  // Only allow a single icmp: because we only look at single-block loops,
  // we simply count the icmps, and there should only be 1 (for the backedge).
1680   if (isa<ICmpInst>(&I) && ++ICmpCount > 1)
1681     return false;
1682 
1683   if (isa<FCmpInst>(&I))
1684     return false;
1685 
1686   // We could allow extending/narrowing FP loads/stores, but codegen is
1687   // too inefficient so reject this for now.
1688   if (isa<FPExtInst>(&I) || isa<FPTruncInst>(&I))
1689     return false;
1690 
1691   // Extends have to be extending-loads
  if (isa<SExtInst>(&I) || isa<ZExtInst>(&I))
1693     if (!I.getOperand(0)->hasOneUse() || !isa<LoadInst>(I.getOperand(0)))
1694       return false;
1695 
1696   // Truncs have to be narrowing-stores
  if (isa<TruncInst>(&I))
1698     if (!I.hasOneUse() || !isa<StoreInst>(*I.user_begin()))
1699       return false;
1700 
1701   return true;
1702 }
1703 
1704 // To set up a tail-predicated loop, we need to know the total number of
1705 // elements processed by that loop. Thus, we need to determine the element
1706 // size and:
1707 // 1) it should be uniform for all operations in the vector loop, so we
1708 //    e.g. don't want any widening/narrowing operations.
1709 // 2) it should be smaller than i64s because we don't have vector operations
1710 //    that work on i64s.
1711 // 3) we don't want elements to be reversed or shuffled, to make sure the
1712 //    tail-predication masks/predicates the right lanes.
1713 //
1714 static bool canTailPredicateLoop(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
1715                                  const DataLayout &DL,
1716                                  const LoopAccessInfo *LAI) {
1717   LLVM_DEBUG(dbgs() << "Tail-predication: checking allowed instructions\n");
1718 
1719   // If there are live-out values, it is probably a reduction. We can predicate
1720   // most reduction operations freely under MVE using a combination of
1721   // prefer-predicated-reduction-select and inloop reductions. We limit this to
1722   // floating point and integer reductions, but don't check for operators
1723   // specifically here. If the value ends up not being a reduction (and so the
1724   // vectorizer cannot tailfold the loop), we should fall back to standard
1725   // vectorization automatically.
  SmallVector<Instruction *, 8> LiveOuts = llvm::findDefsUsedOutsideOfLoop(L);
1728   bool ReductionsDisabled =
1729       EnableTailPredication == TailPredication::EnabledNoReductions ||
1730       EnableTailPredication == TailPredication::ForceEnabledNoReductions;
1731 
1732   for (auto *I : LiveOuts) {
1733     if (!I->getType()->isIntegerTy() && !I->getType()->isFloatTy() &&
1734         !I->getType()->isHalfTy()) {
1735       LLVM_DEBUG(dbgs() << "Don't tail-predicate loop with non-integer/float "
1736                            "live-out value\n");
1737       return false;
1738     }
1739     if (ReductionsDisabled) {
1740       LLVM_DEBUG(dbgs() << "Reductions not enabled\n");
1741       return false;
1742     }
1743   }
1744 
1745   // Next, check that all instructions can be tail-predicated.
1746   PredicatedScalarEvolution PSE = LAI->getPSE();
1747   SmallVector<Instruction *, 16> LoadStores;
1748   int ICmpCount = 0;
1749 
1750   for (BasicBlock *BB : L->blocks()) {
1751     for (Instruction &I : BB->instructionsWithoutDebug()) {
1752       if (isa<PHINode>(&I))
1753         continue;
1754       if (!canTailPredicateInstruction(I, ICmpCount)) {
1755         LLVM_DEBUG(dbgs() << "Instruction not allowed: "; I.dump());
1756         return false;
1757       }
1758 
      Type *T = I.getType();
1760       if (T->isPointerTy())
1761         T = T->getPointerElementType();
1762 
1763       if (T->getScalarSizeInBits() > 32) {
1764         LLVM_DEBUG(dbgs() << "Unsupported Type: "; T->dump());
1765         return false;
1766       }
1767       if (isa<StoreInst>(I) || isa<LoadInst>(I)) {
1768         Value *Ptr = isa<LoadInst>(I) ? I.getOperand(0) : I.getOperand(1);
1769         int64_t NextStride = getPtrStride(PSE, Ptr, L);
1770         if (NextStride == 1) {
          // TODO: for now only allow consecutive strides of 1. We could
          // support other strides as long as they are uniform, but let's keep
          // it simple for now.
1774           continue;
1775         } else if (NextStride == -1 ||
1776                    (NextStride == 2 && MVEMaxSupportedInterleaveFactor >= 2) ||
1777                    (NextStride == 4 && MVEMaxSupportedInterleaveFactor >= 4)) {
          LLVM_DEBUG(dbgs()
                     << "Reversed or interleaved access found, vld2/vst2 "
                        "can't be tail-predicated.\n");
1781           return false;
1782           // TODO: don't tail predicate if there is a reversed load?
1783         } else if (EnableMaskedGatherScatters) {
1784           // Gather/scatters do allow loading from arbitrary strides, at
1785           // least if they are loop invariant.
1786           // TODO: Loop variant strides should in theory work, too, but
1787           // this requires further testing.
1788           const SCEV *PtrScev =
1789               replaceSymbolicStrideSCEV(PSE, llvm::ValueToValueMap(), Ptr);
1790           if (auto AR = dyn_cast<SCEVAddRecExpr>(PtrScev)) {
1791             const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());
1792             if (PSE.getSE()->isLoopInvariant(Step, L))
1793               continue;
1794           }
1795         }
        LLVM_DEBUG(dbgs() << "Bad stride found, can't tail-predicate.\n");
1798         return false;
1799       }
1800     }
1801   }
1802 
1803   LLVM_DEBUG(dbgs() << "tail-predication: all instructions allowed!\n");
1804   return true;
1805 }
1806 
1807 bool ARMTTIImpl::preferPredicateOverEpilogue(Loop *L, LoopInfo *LI,
1808                                              ScalarEvolution &SE,
1809                                              AssumptionCache &AC,
1810                                              TargetLibraryInfo *TLI,
1811                                              DominatorTree *DT,
1812                                              const LoopAccessInfo *LAI) {
1813   if (!EnableTailPredication) {
1814     LLVM_DEBUG(dbgs() << "Tail-predication not enabled.\n");
1815     return false;
1816   }
1817 
  // Creating a predicated vector loop is the first step for generating a
  // tail-predicated hardware loop, for which we need the MVE masked
  // load/store instructions:
1821   if (!ST->hasMVEIntegerOps())
1822     return false;
1823 
1824   // For now, restrict this to single block loops.
1825   if (L->getNumBlocks() > 1) {
1826     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: not a single block "
1827                          "loop.\n");
1828     return false;
1829   }
1830 
  assert(L->isInnermost() &&
         "preferPredicateOverEpilogue: inner-loop expected");
1832 
1833   HardwareLoopInfo HWLoopInfo(L);
1834   if (!HWLoopInfo.canAnalyze(*LI)) {
1835     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
1836                          "analyzable.\n");
1837     return false;
1838   }
1839 
1840   // This checks if we have the low-overhead branch architecture
1841   // extension, and if we will create a hardware-loop:
1842   if (!isHardwareLoopProfitable(L, SE, AC, TLI, HWLoopInfo)) {
1843     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
1844                          "profitable.\n");
1845     return false;
1846   }
1847 
1848   if (!HWLoopInfo.isHardwareLoopCandidate(SE, *LI, *DT)) {
1849     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
1850                          "a candidate.\n");
1851     return false;
1852   }
1853 
1854   return canTailPredicateLoop(L, LI, SE, DL, LAI);
1855 }
1856 
1857 bool ARMTTIImpl::emitGetActiveLaneMask() const {
1858   if (!ST->hasMVEIntegerOps() || !EnableTailPredication)
1859     return false;
1860 
1861   // Intrinsic @llvm.get.active.lane.mask is supported.
  // It is used in the MVETailPredication pass, which requires the number of
  // elements processed by this vector loop to set up the tail-predicated
  // loop.
1865   return true;
1866 }

void ARMTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
1868                                          TTI::UnrollingPreferences &UP) {
  // Currently, only enable these preferences for M-class cores.
1870   if (!ST->isMClass())
1871     return BasicTTIImplBase::getUnrollingPreferences(L, SE, UP);
1872 
1873   // Disable loop unrolling for Oz and Os.
1874   UP.OptSizeThreshold = 0;
1875   UP.PartialOptSizeThreshold = 0;
1876   if (L->getHeader()->getParent()->hasOptSize())
1877     return;
1878 
1879   // Only enable on Thumb-2 targets.
1880   if (!ST->isThumb2())
1881     return;
1882 
1883   SmallVector<BasicBlock*, 4> ExitingBlocks;
1884   L->getExitingBlocks(ExitingBlocks);
1885   LLVM_DEBUG(dbgs() << "Loop has:\n"
1886                     << "Blocks: " << L->getNumBlocks() << "\n"
1887                     << "Exit blocks: " << ExitingBlocks.size() << "\n");
1888 
  // Allow at most one exiting block other than the latch. This acts as an
  // early exit as it mirrors the profitability calculation of the runtime
  // unroller.
1891   if (ExitingBlocks.size() > 2)
1892     return;
1893 
1894   // Limit the CFG of the loop body for targets with a branch predictor.
1895   // Allowing 4 blocks permits if-then-else diamonds in the body.
1896   if (ST->hasBranchPredictor() && L->getNumBlocks() > 4)
1897     return;
1898 
1899   // Scan the loop: don't unroll loops with calls as this could prevent
1900   // inlining.
1901   unsigned Cost = 0;
1902   for (auto *BB : L->getBlocks()) {
1903     for (auto &I : *BB) {
      // Don't unroll vectorised loops. MVE does not benefit from unrolling as
      // much as scalar code does.
1906       if (I.getType()->isVectorTy())
1907         return;
1908 
1909       if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
1910         if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
1911           if (!isLoweredToCall(F))
1912             continue;
1913         }
1914         return;
1915       }
1916 
1917       SmallVector<const Value*, 4> Operands(I.value_op_begin(),
1918                                             I.value_op_end());
1919       Cost +=
1920         getUserCost(&I, Operands, TargetTransformInfo::TCK_SizeAndLatency);
1921     }
1922   }
1923 
1924   LLVM_DEBUG(dbgs() << "Cost of loop: " << Cost << "\n");
1925 
1926   UP.Partial = true;
1927   UP.Runtime = true;
1928   UP.UpperBound = true;
1929   UP.UnrollRemainder = true;
1930   UP.DefaultUnrollRuntimeCount = 4;
1931   UP.UnrollAndJam = true;
1932   UP.UnrollAndJamInnerLoopThreshold = 60;
1933 
  // Force-unrolling small loops can be very useful because of the
  // branch-taken cost of the backedge.
1936   if (Cost < 12)
1937     UP.Force = true;
1938 }
1939 
1940 void ARMTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
1941                                        TTI::PeelingPreferences &PP) {
1942   BaseT::getPeelingPreferences(L, SE, PP);
1943 }
1944 
1945 bool ARMTTIImpl::useReductionIntrinsic(unsigned Opcode, Type *Ty,
1946                                        TTI::ReductionFlags Flags) const {
1947   return ST->hasMVEIntegerOps();
1948 }
1949 
1950 bool ARMTTIImpl::preferInLoopReduction(unsigned Opcode, Type *Ty,
1951                                        TTI::ReductionFlags Flags) const {
1952   if (!ST->hasMVEIntegerOps())
1953     return false;
1954 
1955   unsigned ScalarBits = Ty->getScalarSizeInBits();
1956   switch (Opcode) {
1957   case Instruction::Add:
1958     return ScalarBits <= 32;
1959   default:
1960     return false;
1961   }
1962 }
1963 
1964 bool ARMTTIImpl::preferPredicatedReductionSelect(
1965     unsigned Opcode, Type *Ty, TTI::ReductionFlags Flags) const {
1966   if (!ST->hasMVEIntegerOps())
1967     return false;
1968   return true;
1969 }
1970