1 //===- ARMTargetTransformInfo.cpp - ARM specific TTI ----------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "ARMTargetTransformInfo.h"
10 #include "ARMSubtarget.h"
11 #include "MCTargetDesc/ARMAddressingModes.h"
12 #include "llvm/ADT/APInt.h"
13 #include "llvm/ADT/SmallVector.h"
14 #include "llvm/Analysis/LoopInfo.h"
15 #include "llvm/CodeGen/CostTable.h"
16 #include "llvm/CodeGen/ISDOpcodes.h"
17 #include "llvm/CodeGen/ValueTypes.h"
18 #include "llvm/IR/BasicBlock.h"
19 #include "llvm/IR/DataLayout.h"
20 #include "llvm/IR/DerivedTypes.h"
21 #include "llvm/IR/Instruction.h"
22 #include "llvm/IR/Instructions.h"
23 #include "llvm/IR/IntrinsicInst.h"
24 #include "llvm/IR/IntrinsicsARM.h"
25 #include "llvm/IR/PatternMatch.h"
26 #include "llvm/IR/Type.h"
27 #include "llvm/MC/SubtargetFeature.h"
28 #include "llvm/Support/Casting.h"
29 #include "llvm/Support/MachineValueType.h"
30 #include "llvm/Target/TargetMachine.h"
31 #include <algorithm>
32 #include <cassert>
33 #include <cstdint>
34 #include <utility>
35 
36 using namespace llvm;
37 
38 #define DEBUG_TYPE "armtti"
39 
// Debug toggle for masked load/store generation; on by default and further
// gated by subtarget checks in isLegalMaskedLoad below.
static cl::opt<bool> EnableMaskedLoadStores(
  "enable-arm-maskedldst", cl::Hidden, cl::init(true),
  cl::desc("Enable the generation of masked loads and stores"));

// Escape hatch to turn off low-overhead loop generation.
static cl::opt<bool> DisableLowOverheadLoops(
  "disable-arm-loloops", cl::Hidden, cl::init(false),
  cl::desc("Disable the generation of low-overhead loops"));

// Defined elsewhere in the ARM backend; referenced here for cost decisions.
extern cl::opt<bool> DisableTailPredication;

// Defined elsewhere; gates isLegalMaskedGather below.
extern cl::opt<bool> EnableMaskedGatherScatters;
51 
52 bool ARMTTIImpl::areInlineCompatible(const Function *Caller,
53                                      const Function *Callee) const {
54   const TargetMachine &TM = getTLI()->getTargetMachine();
55   const FeatureBitset &CallerBits =
56       TM.getSubtargetImpl(*Caller)->getFeatureBits();
57   const FeatureBitset &CalleeBits =
58       TM.getSubtargetImpl(*Callee)->getFeatureBits();
59 
60   // To inline a callee, all features not in the whitelist must match exactly.
61   bool MatchExact = (CallerBits & ~InlineFeatureWhitelist) ==
62                     (CalleeBits & ~InlineFeatureWhitelist);
63   // For features in the whitelist, the callee's features must be a subset of
64   // the callers'.
65   bool MatchSubset = ((CallerBits & CalleeBits) & InlineFeatureWhitelist) ==
66                      (CalleeBits & InlineFeatureWhitelist);
67   return MatchExact && MatchSubset;
68 }
69 
70 bool ARMTTIImpl::shouldFavorBackedgeIndex(const Loop *L) const {
71   if (L->getHeader()->getParent()->hasOptSize())
72     return false;
73   if (ST->hasMVEIntegerOps())
74     return false;
75   return ST->isMClass() && ST->isThumb2() && L->getNumBlocks() == 1;
76 }
77 
78 bool ARMTTIImpl::shouldFavorPostInc() const {
79   if (ST->hasMVEIntegerOps())
80     return true;
81   return false;
82 }
83 
84 int ARMTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
85                               TTI::TargetCostKind CostKind) {
86   assert(Ty->isIntegerTy());
87 
88  unsigned Bits = Ty->getPrimitiveSizeInBits();
89  if (Bits == 0 || Imm.getActiveBits() >= 64)
90    return 4;
91 
92   int64_t SImmVal = Imm.getSExtValue();
93   uint64_t ZImmVal = Imm.getZExtValue();
94   if (!ST->isThumb()) {
95     if ((SImmVal >= 0 && SImmVal < 65536) ||
96         (ARM_AM::getSOImmVal(ZImmVal) != -1) ||
97         (ARM_AM::getSOImmVal(~ZImmVal) != -1))
98       return 1;
99     return ST->hasV6T2Ops() ? 2 : 3;
100   }
101   if (ST->isThumb2()) {
102     if ((SImmVal >= 0 && SImmVal < 65536) ||
103         (ARM_AM::getT2SOImmVal(ZImmVal) != -1) ||
104         (ARM_AM::getT2SOImmVal(~ZImmVal) != -1))
105       return 1;
106     return ST->hasV6T2Ops() ? 2 : 3;
107   }
108   // Thumb1, any i8 imm cost 1.
109   if (Bits == 8 || (SImmVal >= 0 && SImmVal < 256))
110     return 1;
111   if ((~SImmVal < 256) || ARM_AM::isThumbImmShiftedVal(ZImmVal))
112     return 2;
113   // Load from constantpool.
114   return 3;
115 }
116 
117 // Constants smaller than 256 fit in the immediate field of
118 // Thumb1 instructions so we return a zero cost and 1 otherwise.
119 int ARMTTIImpl::getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
120                                       const APInt &Imm, Type *Ty) {
121   if (Imm.isNonNegative() && Imm.getLimitedValue() < 256)
122     return 0;
123 
124   return 1;
125 }
126 
127 int ARMTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx, const APInt &Imm,
128                                   Type *Ty, TTI::TargetCostKind CostKind) {
129   // Division by a constant can be turned into multiplication, but only if we
130   // know it's constant. So it's not so much that the immediate is cheap (it's
131   // not), but that the alternative is worse.
132   // FIXME: this is probably unneeded with GlobalISel.
133   if ((Opcode == Instruction::SDiv || Opcode == Instruction::UDiv ||
134        Opcode == Instruction::SRem || Opcode == Instruction::URem) &&
135       Idx == 1)
136     return 0;
137 
138   if (Opcode == Instruction::And) {
139     // UXTB/UXTH
140     if (Imm == 255 || Imm == 65535)
141       return 0;
142     // Conversion to BIC is free, and means we can use ~Imm instead.
143     return std::min(getIntImmCost(Imm, Ty, CostKind),
144                     getIntImmCost(~Imm, Ty, CostKind));
145   }
146 
147   if (Opcode == Instruction::Add)
148     // Conversion to SUB is free, and means we can use -Imm instead.
149     return std::min(getIntImmCost(Imm, Ty, CostKind),
150                     getIntImmCost(-Imm, Ty, CostKind));
151 
152   if (Opcode == Instruction::ICmp && Imm.isNegative() &&
153       Ty->getIntegerBitWidth() == 32) {
154     int64_t NegImm = -Imm.getSExtValue();
155     if (ST->isThumb2() && NegImm < 1<<12)
156       // icmp X, #-C -> cmn X, #C
157       return 0;
158     if (ST->isThumb() && NegImm < 1<<8)
159       // icmp X, #-C -> adds X, #C
160       return 0;
161   }
162 
163   // xor a, -1 can always be folded to MVN
164   if (Opcode == Instruction::Xor && Imm.isAllOnesValue())
165     return 0;
166 
167   return getIntImmCost(Imm, Ty, CostKind);
168 }
169 
// Return the cost of a cast (trunc/ext/fptrunc/fpext/int<->fp) by consulting
// per-subtarget cost tables (NEON float, load-extension, NEON vector/scalar,
// MVE, generic integer) before falling back to the base implementation. For
// non-throughput cost kinds the result is clamped to 0/1 by AdjustCost.
int ARMTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                 TTI::TargetCostKind CostKind,
                                 const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // TODO: Allow non-throughput costs that aren't binary.
  auto AdjustCost = [&CostKind](int Cost) {
    if (CostKind != TTI::TCK_RecipThroughput)
      return Cost == 0 ? 0 : 1;
    return Cost;
  };

  // Single to/from double precision conversions.
  static const CostTblEntry NEONFltDblTbl[] = {
    // Vector fptrunc/fpext conversions.
    { ISD::FP_ROUND,   MVT::v2f64, 2 },
    { ISD::FP_EXTEND,  MVT::v2f32, 2 },
    { ISD::FP_EXTEND,  MVT::v4f32, 4 }
  };

  if (Src->isVectorTy() && ST->hasNEON() && (ISD == ISD::FP_ROUND ||
                                          ISD == ISD::FP_EXTEND)) {
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
    if (const auto *Entry = CostTableLookup(NEONFltDblTbl, ISD, LT.second))
      return AdjustCost(LT.first * Entry->Cost);
  }

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  // Types that don't legalize to a simple MVT are handled by the base class.
  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return AdjustCost(BaseT::getCastInstrCost(Opcode, Dst, Src, CostKind, I));

  // The extend of a load is free
  if (I && isa<LoadInst>(I->getOperand(0))) {
    static const TypeConversionCostTblEntry LoadConversionTbl[] = {
        {ISD::SIGN_EXTEND, MVT::i32, MVT::i16, 0},
        {ISD::ZERO_EXTEND, MVT::i32, MVT::i16, 0},
        {ISD::SIGN_EXTEND, MVT::i32, MVT::i8, 0},
        {ISD::ZERO_EXTEND, MVT::i32, MVT::i8, 0},
        {ISD::SIGN_EXTEND, MVT::i16, MVT::i8, 0},
        {ISD::ZERO_EXTEND, MVT::i16, MVT::i8, 0},
        {ISD::SIGN_EXTEND, MVT::i64, MVT::i32, 1},
        {ISD::ZERO_EXTEND, MVT::i64, MVT::i32, 1},
        {ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 1},
        {ISD::ZERO_EXTEND, MVT::i64, MVT::i16, 1},
        {ISD::SIGN_EXTEND, MVT::i64, MVT::i8, 1},
        {ISD::ZERO_EXTEND, MVT::i64, MVT::i8, 1},
    };
    if (const auto *Entry = ConvertCostTableLookup(
            LoadConversionTbl, ISD, DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
      return AdjustCost(Entry->Cost);

    // MVE widening loads fold the extension in for free.
    static const TypeConversionCostTblEntry MVELoadConversionTbl[] = {
        {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 0},
        {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 0},
        {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 0},
        {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 0},
        {ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 0},
        {ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 0},
    };
    if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
      if (const auto *Entry =
              ConvertCostTableLookup(MVELoadConversionTbl, ISD,
                                     DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
        return AdjustCost(Entry->Cost);
    }
  }

  // NEON vector operations that can extend their inputs.
  if ((ISD == ISD::SIGN_EXTEND || ISD == ISD::ZERO_EXTEND) &&
      I && I->hasOneUse() && ST->hasNEON() && SrcTy.isVector()) {
    static const TypeConversionCostTblEntry NEONDoubleWidthTbl[] = {
      // vaddl
      { ISD::ADD, MVT::v4i32, MVT::v4i16, 0 },
      { ISD::ADD, MVT::v8i16, MVT::v8i8,  0 },
      // vsubl
      { ISD::SUB, MVT::v4i32, MVT::v4i16, 0 },
      { ISD::SUB, MVT::v8i16, MVT::v8i8,  0 },
      // vmull
      { ISD::MUL, MVT::v4i32, MVT::v4i16, 0 },
      { ISD::MUL, MVT::v8i16, MVT::v8i8,  0 },
      // vshll
      { ISD::SHL, MVT::v4i32, MVT::v4i16, 0 },
      { ISD::SHL, MVT::v8i16, MVT::v8i8,  0 },
    };

    // The table is keyed on the single user's opcode: the extend is folded
    // into that widening instruction, so the extend itself is free.
    auto *User = cast<Instruction>(*I->user_begin());
    int UserISD = TLI->InstructionOpcodeToISD(User->getOpcode());
    if (auto *Entry = ConvertCostTableLookup(NEONDoubleWidthTbl, UserISD,
                                             DstTy.getSimpleVT(),
                                             SrcTy.getSimpleVT())) {
      return AdjustCost(Entry->Cost);
    }
  }

  // Some arithmetic, load and store operations have specific instructions
  // to cast up/down their types automatically at no extra cost.
  // TODO: Get these tables to know at least what the related operations are.
  static const TypeConversionCostTblEntry NEONVectorConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
    { ISD::TRUNCATE,    MVT::v4i32, MVT::v4i64, 0 },
    { ISD::TRUNCATE,    MVT::v4i16, MVT::v4i32, 1 },

    // The number of vmovl instructions for the extension.
    { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8,  2 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8,  2 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8,  3 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8,  3 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
    { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
    { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
    { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },

    // Operations that we legalize using splitting.
    { ISD::TRUNCATE,    MVT::v16i8, MVT::v16i32, 6 },
    { ISD::TRUNCATE,    MVT::v8i8, MVT::v8i32, 3 },

    // Vector float <-> i32 conversions.
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },

    { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
    { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
    { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
    { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },
    { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },

    { ISD::FP_TO_SINT,  MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT,  MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT,  MVT::v4i8, MVT::v4f32, 3 },
    { ISD::FP_TO_UINT,  MVT::v4i8, MVT::v4f32, 3 },
    { ISD::FP_TO_SINT,  MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_UINT,  MVT::v4i16, MVT::v4f32, 2 },

    // Vector double <-> i32 conversions.
    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },

    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
    { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },

    { ISD::FP_TO_SINT,  MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT,  MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT,  MVT::v8i16, MVT::v8f32, 4 },
    { ISD::FP_TO_UINT,  MVT::v8i16, MVT::v8f32, 4 },
    { ISD::FP_TO_SINT,  MVT::v16i16, MVT::v16f32, 8 },
    { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v16f32, 8 }
  };

  if (SrcTy.isVector() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONVectorConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return AdjustCost(Entry->Cost);
  }

  // Scalar float to integer conversions.
  static const TypeConversionCostTblEntry NEONFloatConversionTbl[] = {
    { ISD::FP_TO_SINT,  MVT::i1, MVT::f32, 2 },
    { ISD::FP_TO_UINT,  MVT::i1, MVT::f32, 2 },
    { ISD::FP_TO_SINT,  MVT::i1, MVT::f64, 2 },
    { ISD::FP_TO_UINT,  MVT::i1, MVT::f64, 2 },
    { ISD::FP_TO_SINT,  MVT::i8, MVT::f32, 2 },
    { ISD::FP_TO_UINT,  MVT::i8, MVT::f32, 2 },
    { ISD::FP_TO_SINT,  MVT::i8, MVT::f64, 2 },
    { ISD::FP_TO_UINT,  MVT::i8, MVT::f64, 2 },
    { ISD::FP_TO_SINT,  MVT::i16, MVT::f32, 2 },
    { ISD::FP_TO_UINT,  MVT::i16, MVT::f32, 2 },
    { ISD::FP_TO_SINT,  MVT::i16, MVT::f64, 2 },
    { ISD::FP_TO_UINT,  MVT::i16, MVT::f64, 2 },
    { ISD::FP_TO_SINT,  MVT::i32, MVT::f32, 2 },
    { ISD::FP_TO_UINT,  MVT::i32, MVT::f32, 2 },
    { ISD::FP_TO_SINT,  MVT::i32, MVT::f64, 2 },
    { ISD::FP_TO_UINT,  MVT::i32, MVT::f64, 2 },
    { ISD::FP_TO_SINT,  MVT::i64, MVT::f32, 10 },
    { ISD::FP_TO_UINT,  MVT::i64, MVT::f32, 10 },
    { ISD::FP_TO_SINT,  MVT::i64, MVT::f64, 10 },
    { ISD::FP_TO_UINT,  MVT::i64, MVT::f64, 10 }
  };
  if (SrcTy.isFloatingPoint() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONFloatConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return AdjustCost(Entry->Cost);
  }

  // Scalar integer to float conversions.
  static const TypeConversionCostTblEntry NEONIntegerConversionTbl[] = {
    { ISD::SINT_TO_FP,  MVT::f32, MVT::i1, 2 },
    { ISD::UINT_TO_FP,  MVT::f32, MVT::i1, 2 },
    { ISD::SINT_TO_FP,  MVT::f64, MVT::i1, 2 },
    { ISD::UINT_TO_FP,  MVT::f64, MVT::i1, 2 },
    { ISD::SINT_TO_FP,  MVT::f32, MVT::i8, 2 },
    { ISD::UINT_TO_FP,  MVT::f32, MVT::i8, 2 },
    { ISD::SINT_TO_FP,  MVT::f64, MVT::i8, 2 },
    { ISD::UINT_TO_FP,  MVT::f64, MVT::i8, 2 },
    { ISD::SINT_TO_FP,  MVT::f32, MVT::i16, 2 },
    { ISD::UINT_TO_FP,  MVT::f32, MVT::i16, 2 },
    { ISD::SINT_TO_FP,  MVT::f64, MVT::i16, 2 },
    { ISD::UINT_TO_FP,  MVT::f64, MVT::i16, 2 },
    { ISD::SINT_TO_FP,  MVT::f32, MVT::i32, 2 },
    { ISD::UINT_TO_FP,  MVT::f32, MVT::i32, 2 },
    { ISD::SINT_TO_FP,  MVT::f64, MVT::i32, 2 },
    { ISD::UINT_TO_FP,  MVT::f64, MVT::i32, 2 },
    { ISD::SINT_TO_FP,  MVT::f32, MVT::i64, 10 },
    { ISD::UINT_TO_FP,  MVT::f32, MVT::i64, 10 },
    { ISD::SINT_TO_FP,  MVT::f64, MVT::i64, 10 },
    { ISD::UINT_TO_FP,  MVT::f64, MVT::i64, 10 }
  };

  if (SrcTy.isInteger() && ST->hasNEON()) {
    if (const auto *Entry = ConvertCostTableLookup(NEONIntegerConversionTbl,
                                                   ISD, DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return AdjustCost(Entry->Cost);
  }

  // MVE extend costs, taken from codegen tests. i8->i16 or i16->i32 is one
  // instruction, i8->i32 is two. i64 zexts are an VAND with a constant, sext
  // are linearised so take more.
  static const TypeConversionCostTblEntry MVEVectorConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8, 10 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 10 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
    { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 8 },
    { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 2 },
  };

  if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
    if (const auto *Entry = ConvertCostTableLookup(MVEVectorConversionTbl,
                                                   ISD, DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return AdjustCost(Entry->Cost * ST->getMVEVectorCostFactor());
  }

  // Scalar integer conversion costs.
  static const TypeConversionCostTblEntry ARMIntegerConversionTbl[] = {
    // i16 -> i64 requires two dependent operations.
    { ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 2 },

    // Truncates on i64 are assumed to be free.
    { ISD::TRUNCATE,    MVT::i32, MVT::i64, 0 },
    { ISD::TRUNCATE,    MVT::i16, MVT::i64, 0 },
    { ISD::TRUNCATE,    MVT::i8,  MVT::i64, 0 },
    { ISD::TRUNCATE,    MVT::i1,  MVT::i64, 0 }
  };

  if (SrcTy.isInteger()) {
    if (const auto *Entry = ConvertCostTableLookup(ARMIntegerConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return AdjustCost(Entry->Cost);
  }

  // No table entry matched: use the base estimate, scaled by the MVE cost
  // factor for MVE vector sources.
  int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
                     ? ST->getMVEVectorCostFactor()
                     : 1;
  return AdjustCost(
    BaseCost * BaseT::getCastInstrCost(Opcode, Dst, Src, CostKind, I));
}
471 
472 int ARMTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
473                                    unsigned Index) {
474   // Penalize inserting into an D-subregister. We end up with a three times
475   // lower estimated throughput on swift.
476   if (ST->hasSlowLoadDSubregister() && Opcode == Instruction::InsertElement &&
477       ValTy->isVectorTy() && ValTy->getScalarSizeInBits() <= 32)
478     return 3;
479 
480   if (ST->hasNEON() && (Opcode == Instruction::InsertElement ||
481                         Opcode == Instruction::ExtractElement)) {
482     // Cross-class copies are expensive on many microarchitectures,
483     // so assume they are expensive by default.
484     if (cast<VectorType>(ValTy)->getElementType()->isIntegerTy())
485       return 3;
486 
487     // Even if it's not a cross class copy, this likely leads to mixing
488     // of NEON and VFP code and should be therefore penalized.
489     if (ValTy->isVectorTy() &&
490         ValTy->getScalarSizeInBits() <= 32)
491       return std::max(BaseT::getVectorInstrCost(Opcode, ValTy, Index), 2U);
492   }
493 
494   if (ST->hasMVEIntegerOps() && (Opcode == Instruction::InsertElement ||
495                                  Opcode == Instruction::ExtractElement)) {
496     // We say MVE moves costs at least the MVEVectorCostFactor, even though
497     // they are scalar instructions. This helps prevent mixing scalar and
498     // vector, to prevent vectorising where we end up just scalarising the
499     // result anyway.
500     return std::max(BaseT::getVectorInstrCost(Opcode, ValTy, Index),
501                     ST->getMVEVectorCostFactor()) *
502            cast<FixedVectorType>(ValTy)->getNumElements() / 2;
503   }
504 
505   return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
506 }
507 
508 int ARMTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
509                                    TTI::TargetCostKind CostKind,
510                                    const Instruction *I) {
511   // TODO: Handle other cost kinds.
512   if (CostKind != TTI::TCK_RecipThroughput)
513     return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, CostKind, I);
514 
515   int ISD = TLI->InstructionOpcodeToISD(Opcode);
516   // On NEON a vector select gets lowered to vbsl.
517   if (ST->hasNEON() && ValTy->isVectorTy() && ISD == ISD::SELECT) {
518     // Lowering of some vector selects is currently far from perfect.
519     static const TypeConversionCostTblEntry NEONVectorSelectTbl[] = {
520       { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4*4 + 1*2 + 1 },
521       { ISD::SELECT, MVT::v8i1, MVT::v8i64, 50 },
522       { ISD::SELECT, MVT::v16i1, MVT::v16i64, 100 }
523     };
524 
525     EVT SelCondTy = TLI->getValueType(DL, CondTy);
526     EVT SelValTy = TLI->getValueType(DL, ValTy);
527     if (SelCondTy.isSimple() && SelValTy.isSimple()) {
528       if (const auto *Entry = ConvertCostTableLookup(NEONVectorSelectTbl, ISD,
529                                                      SelCondTy.getSimpleVT(),
530                                                      SelValTy.getSimpleVT()))
531         return Entry->Cost;
532     }
533 
534     std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
535     return LT.first;
536   }
537 
538   int BaseCost = ST->hasMVEIntegerOps() && ValTy->isVectorTy()
539                      ? ST->getMVEVectorCostFactor()
540                      : 1;
541   return BaseCost * BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, CostKind,
542                                               I);
543 }
544 
545 int ARMTTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
546                                           const SCEV *Ptr) {
547   // Address computations in vectorized code with non-consecutive addresses will
548   // likely result in more instructions compared to scalar code where the
549   // computation can more often be merged into the index mode. The resulting
550   // extra micro-ops can significantly decrease throughput.
551   unsigned NumVectorInstToHideOverhead = 10;
552   int MaxMergeDistance = 64;
553 
554   if (ST->hasNEON()) {
555     if (Ty->isVectorTy() && SE &&
556         !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
557       return NumVectorInstToHideOverhead;
558 
559     // In many cases the address computation is not merged into the instruction
560     // addressing mode.
561     return 1;
562   }
563   return BaseT::getAddressComputationCost(Ty, SE, Ptr);
564 }
565 
566 bool ARMTTIImpl::isProfitableLSRChainElement(Instruction *I) {
567   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
568     // If a VCTP is part of a chain, it's already profitable and shouldn't be
569     // optimized, else LSR may block tail-predication.
570     switch (II->getIntrinsicID()) {
571     case Intrinsic::arm_mve_vctp8:
572     case Intrinsic::arm_mve_vctp16:
573     case Intrinsic::arm_mve_vctp32:
574     case Intrinsic::arm_mve_vctp64:
575       return true;
576     default:
577       break;
578     }
579   }
580   return false;
581 }
582 
583 bool ARMTTIImpl::isLegalMaskedLoad(Type *DataTy, MaybeAlign Alignment) {
584   if (!EnableMaskedLoadStores || !ST->hasMVEIntegerOps())
585     return false;
586 
587   if (auto *VecTy = dyn_cast<FixedVectorType>(DataTy)) {
588     // Don't support v2i1 yet.
589     if (VecTy->getNumElements() == 2)
590       return false;
591 
592     // We don't support extending fp types.
593      unsigned VecWidth = DataTy->getPrimitiveSizeInBits();
594     if (VecWidth != 128 && VecTy->getElementType()->isFloatingPointTy())
595       return false;
596   }
597 
598   unsigned EltWidth = DataTy->getScalarSizeInBits();
599   return (EltWidth == 32 && (!Alignment || *Alignment >= 4)) ||
600          (EltWidth == 16 && (!Alignment || *Alignment >= 2)) ||
601          (EltWidth == 8);
602 }
603 
604 bool ARMTTIImpl::isLegalMaskedGather(Type *Ty, MaybeAlign Alignment) {
605   if (!EnableMaskedGatherScatters || !ST->hasMVEIntegerOps())
606     return false;
607 
608   // This method is called in 2 places:
609   //  - from the vectorizer with a scalar type, in which case we need to get
610   //  this as good as we can with the limited info we have (and rely on the cost
611   //  model for the rest).
612   //  - from the masked intrinsic lowering pass with the actual vector type.
613   // For MVE, we have a custom lowering pass that will already have custom
614   // legalised any gathers that we can to MVE intrinsics, and want to expand all
615   // the rest. The pass runs before the masked intrinsic lowering pass, so if we
616   // are here, we know we want to expand.
617   if (isa<VectorType>(Ty))
618     return false;
619 
620   unsigned EltWidth = Ty->getScalarSizeInBits();
621   return ((EltWidth == 32 && (!Alignment || *Alignment >= 4)) ||
622           (EltWidth == 16 && (!Alignment || *Alignment >= 2)) || EltWidth == 8);
623 }
624 
625 int ARMTTIImpl::getMemcpyCost(const Instruction *I) {
626   const MemCpyInst *MI = dyn_cast<MemCpyInst>(I);
627   assert(MI && "MemcpyInst expected");
628   ConstantInt *C = dyn_cast<ConstantInt>(MI->getLength());
629 
630   // To model the cost of a library call, we assume 1 for the call, and
631   // 3 for the argument setup.
632   const unsigned LibCallCost = 4;
633 
634   // If 'size' is not a constant, a library call will be generated.
635   if (!C)
636     return LibCallCost;
637 
638   const unsigned Size = C->getValue().getZExtValue();
639   const Align DstAlign = *MI->getDestAlign();
640   const Align SrcAlign = *MI->getSourceAlign();
641   const Function *F = I->getParent()->getParent();
642   const unsigned Limit = TLI->getMaxStoresPerMemmove(F->hasMinSize());
643   std::vector<EVT> MemOps;
644 
645   // MemOps will be poplulated with a list of data types that needs to be
646   // loaded and stored. That's why we multiply the number of elements by 2 to
647   // get the cost for this memcpy.
648   if (getTLI()->findOptimalMemOpLowering(
649           MemOps, Limit,
650           MemOp::Copy(Size, /*DstAlignCanChange*/ false, DstAlign, SrcAlign,
651                       /*IsVolatile*/ true),
652           MI->getDestAddressSpace(), MI->getSourceAddressSpace(),
653           F->getAttributes()))
654     return MemOps.size() * 2;
655 
656   // If we can't find an optimal memop lowering, return the default cost
657   return LibCallCost;
658 }
659 
// Cost of vector shuffles: broadcast/reverse/select kinds have per-type cost
// tables for NEON, and broadcast has one for MVE (scaled by the MVE cost
// factor). Anything not covered falls back to the base implementation.
int ARMTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp,
                               int Index, VectorType *SubTp) {
  if (ST->hasNEON()) {
    if (Kind == TTI::SK_Broadcast) {
      static const CostTblEntry NEONDupTbl[] = {
          // VDUP handles these cases.
          {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},

          {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1}};

      std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

      if (const auto *Entry =
              CostTableLookup(NEONDupTbl, ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;
    }
    if (Kind == TTI::SK_Reverse) {
      static const CostTblEntry NEONShuffleTbl[] = {
          // Reverse shuffle cost one instruction if we are shuffling within a
          // double word (vrev) or two if we shuffle a quad word (vrev, vext).
          {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},

          {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
          {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
          {ISD::VECTOR_SHUFFLE, MVT::v8i16, 2},
          {ISD::VECTOR_SHUFFLE, MVT::v16i8, 2}};

      std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

      if (const auto *Entry =
              CostTableLookup(NEONShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;
    }
    if (Kind == TTI::SK_Select) {
      static const CostTblEntry NEONSelShuffleTbl[] = {
          // Select shuffle cost table for ARM. Cost is the number of
          // instructions
          // required to create the shuffled vector.

          {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},

          {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
          {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
          {ISD::VECTOR_SHUFFLE, MVT::v4i16, 2},

          {ISD::VECTOR_SHUFFLE, MVT::v8i16, 16},

          {ISD::VECTOR_SHUFFLE, MVT::v16i8, 32}};

      std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
      if (const auto *Entry = CostTableLookup(NEONSelShuffleTbl,
                                              ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;
    }
  }
  if (ST->hasMVEIntegerOps()) {
    if (Kind == TTI::SK_Broadcast) {
      static const CostTblEntry MVEDupTbl[] = {
          // VDUP handles these cases.
          {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
          {ISD::VECTOR_SHUFFLE, MVT::v8f16, 1}};

      std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

      if (const auto *Entry = CostTableLookup(MVEDupTbl, ISD::VECTOR_SHUFFLE,
                                              LT.second))
        return LT.first * Entry->Cost * ST->getMVEVectorCostFactor();
    }
  }
  // No table entry matched: base estimate, scaled by the MVE cost factor for
  // MVE vector types.
  int BaseCost = ST->hasMVEIntegerOps() && Tp->isVectorTy()
                     ? ST->getMVEVectorCostFactor()
                     : 1;
  return BaseCost * BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}
753 
// Return the reciprocal-throughput cost of a (possibly vector) arithmetic
// instruction, accounting for NEON division libcalls, free shifts folded
// into a following ALU instruction, and the MVE vector cost factor.
int ARMTTIImpl::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                       TTI::TargetCostKind CostKind,
                                       TTI::OperandValueKind Op1Info,
                                       TTI::OperandValueKind Op2Info,
                                       TTI::OperandValueProperties Opd1PropInfo,
                                       TTI::OperandValueProperties Opd2PropInfo,
                                       ArrayRef<const Value *> Args,
                                       const Instruction *CxtI) {
  // TODO: Handle more cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                         Op2Info, Opd1PropInfo,
                                         Opd2PropInfo, Args, CxtI);

  int ISDOpcode = TLI->InstructionOpcodeToISD(Opcode);
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  if (ST->hasNEON()) {
    const unsigned FunctionCallDivCost = 20;
    const unsigned ReciprocalDivCost = 10;
    static const CostTblEntry CostTbl[] = {
      // Division.
      // These costs are somewhat random. Choose a cost of 20 to indicate that
      // vectorizing division (added function call) is going to be very expensive.
      // Double registers types.
      { ISD::SDIV, MVT::v1i64, 1 * FunctionCallDivCost},
      { ISD::UDIV, MVT::v1i64, 1 * FunctionCallDivCost},
      { ISD::SREM, MVT::v1i64, 1 * FunctionCallDivCost},
      { ISD::UREM, MVT::v1i64, 1 * FunctionCallDivCost},
      { ISD::SDIV, MVT::v2i32, 2 * FunctionCallDivCost},
      { ISD::UDIV, MVT::v2i32, 2 * FunctionCallDivCost},
      { ISD::SREM, MVT::v2i32, 2 * FunctionCallDivCost},
      { ISD::UREM, MVT::v2i32, 2 * FunctionCallDivCost},
      // i16/i8 divides can be lowered using a reciprocal estimate sequence,
      // which is cheaper than a libcall but still not free.
      { ISD::SDIV, MVT::v4i16,     ReciprocalDivCost},
      { ISD::UDIV, MVT::v4i16,     ReciprocalDivCost},
      { ISD::SREM, MVT::v4i16, 4 * FunctionCallDivCost},
      { ISD::UREM, MVT::v4i16, 4 * FunctionCallDivCost},
      { ISD::SDIV, MVT::v8i8,      ReciprocalDivCost},
      { ISD::UDIV, MVT::v8i8,      ReciprocalDivCost},
      { ISD::SREM, MVT::v8i8,  8 * FunctionCallDivCost},
      { ISD::UREM, MVT::v8i8,  8 * FunctionCallDivCost},
      // Quad register types.
      { ISD::SDIV, MVT::v2i64, 2 * FunctionCallDivCost},
      { ISD::UDIV, MVT::v2i64, 2 * FunctionCallDivCost},
      { ISD::SREM, MVT::v2i64, 2 * FunctionCallDivCost},
      { ISD::UREM, MVT::v2i64, 2 * FunctionCallDivCost},
      { ISD::SDIV, MVT::v4i32, 4 * FunctionCallDivCost},
      { ISD::UDIV, MVT::v4i32, 4 * FunctionCallDivCost},
      { ISD::SREM, MVT::v4i32, 4 * FunctionCallDivCost},
      { ISD::UREM, MVT::v4i32, 4 * FunctionCallDivCost},
      { ISD::SDIV, MVT::v8i16, 8 * FunctionCallDivCost},
      { ISD::UDIV, MVT::v8i16, 8 * FunctionCallDivCost},
      { ISD::SREM, MVT::v8i16, 8 * FunctionCallDivCost},
      { ISD::UREM, MVT::v8i16, 8 * FunctionCallDivCost},
      { ISD::SDIV, MVT::v16i8, 16 * FunctionCallDivCost},
      { ISD::UDIV, MVT::v16i8, 16 * FunctionCallDivCost},
      { ISD::SREM, MVT::v16i8, 16 * FunctionCallDivCost},
      { ISD::UREM, MVT::v16i8, 16 * FunctionCallDivCost},
      // Multiplication.
    };

    if (const auto *Entry = CostTableLookup(CostTbl, ISDOpcode, LT.second))
      return LT.first * Entry->Cost;

    // Not in the table: fall back to the generic cost, then apply the v2i64
    // adjustment below.
    int Cost = BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
                                             Op2Info,
                                             Opd1PropInfo, Opd2PropInfo);

    // This is somewhat of a hack. The problem that we are facing is that SROA
    // creates a sequence of shift, and, or instructions to construct values.
    // These sequences are recognized by the ISel and have zero-cost. Not so for
    // the vectorized code. Because we have support for v2i64 but not i64 those
    // sequences look particularly beneficial to vectorize.
    // To work around this we increase the cost of v2i64 operations to make them
    // seem less beneficial.
    if (LT.second == MVT::v2i64 &&
        Op2Info == TargetTransformInfo::OK_UniformConstantValue)
      Cost += 4;

    return Cost;
  }

  // If this operation is a shift on arm/thumb2, it might well be folded into
  // the following instruction, hence having a cost of 0.
  auto LooksLikeAFreeShift = [&]() {
    // Thumb1 has no flexible second operand, and vector shifts are never free.
    if (ST->isThumb1Only() || Ty->isVectorTy())
      return false;

    // Only a single-use shift by a uniform constant can be folded.
    if (!CxtI || !CxtI->hasOneUse() || !CxtI->isShift())
      return false;
    if (Op2Info != TargetTransformInfo::OK_UniformConstantValue)
      return false;

    // Folded into a ADC/ADD/AND/BIC/CMP/EOR/MVN/ORR/ORN/RSB/SBC/SUB
    switch (cast<Instruction>(CxtI->user_back())->getOpcode()) {
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::And:
    case Instruction::Xor:
    case Instruction::Or:
    case Instruction::ICmp:
      return true;
    default:
      return false;
    }
  };
  if (LooksLikeAFreeShift())
    return 0;

  // MVE vector operations are costed at the MVE vector cost factor.
  int BaseCost = ST->hasMVEIntegerOps() && Ty->isVectorTy()
                     ? ST->getMVEVectorCostFactor()
                     : 1;

  // The rest of this mostly follows what is done in
  // BaseT::getArithmeticInstrCost, without treating floats as more expensive
  // than scalars or increasing the costs for custom operations. The result is
  // also multiplied by the MVEVectorCostFactor where appropriate.
  if (TLI->isOperationLegalOrCustomOrPromote(ISDOpcode, LT.second))
    return LT.first * BaseCost;

  // Else this is expand, assume that we need to scalarize this op.
  if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
    unsigned Num = VTy->getNumElements();
    unsigned Cost = getArithmeticInstrCost(Opcode, Ty->getScalarType(),
                                           CostKind);
    // Return the cost of multiple scalar invocation plus the cost of
    // inserting and extracting the values.
    return BaseT::getScalarizationOverhead(VTy, Args) + Num * Cost;
  }

  return BaseCost;
}
886 
887 int ARMTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
888                                 MaybeAlign Alignment, unsigned AddressSpace,
889                                 TTI::TargetCostKind CostKind,
890                                 const Instruction *I) {
891   // TODO: Handle other cost kinds.
892   if (CostKind != TTI::TCK_RecipThroughput)
893     return 1;
894 
895   // Type legalization can't handle structs
896   if (TLI->getValueType(DL, Src,  true) == MVT::Other)
897     return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
898                                   CostKind);
899 
900   std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
901 
902   if (ST->hasNEON() && Src->isVectorTy() &&
903       (Alignment && *Alignment != Align(16)) &&
904       cast<VectorType>(Src)->getElementType()->isDoubleTy()) {
905     // Unaligned loads/stores are extremely inefficient.
906     // We need 4 uops for vst.1/vld.1 vs 1uop for vldr/vstr.
907     return LT.first * 4;
908   }
909   int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
910                      ? ST->getMVEVectorCostFactor()
911                      : 1;
912   return BaseCost * LT.first;
913 }
914 
// Return the cost of an interleaved load/store group of the given Factor,
// modelling the vldN/vstN (NEON) and cheap deinterleaving (MVE) patterns.
int ARMTTIImpl::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    unsigned Alignment, unsigned AddressSpace,
    TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) {
  assert(Factor >= 2 && "Invalid interleave factor");
  assert(isa<VectorType>(VecTy) && "Expect a vector type");

  // vldN/vstN doesn't support vector types of i64/f64 element.
  bool EltIs64Bits = DL.getTypeSizeInBits(VecTy->getScalarType()) == 64;

  if (Factor <= TLI->getMaxSupportedInterleaveFactor() && !EltIs64Bits &&
      !UseMaskForCond && !UseMaskForGaps) {
    unsigned NumElts = cast<FixedVectorType>(VecTy)->getNumElements();
    // SubVecTy is the type seen by each of the Factor interleaved accesses.
    auto *SubVecTy =
        FixedVectorType::get(VecTy->getScalarType(), NumElts / Factor);

    // vldN/vstN only support legal vector types of size 64 or 128 in bits.
    // Accesses having vector types that are a multiple of 128 bits can be
    // matched to more than one vldN/vstN instruction.
    int BaseCost = ST->hasMVEIntegerOps() ? ST->getMVEVectorCostFactor() : 1;
    if (NumElts % Factor == 0 &&
        TLI->isLegalInterleavedAccessType(Factor, SubVecTy, DL))
      return Factor * BaseCost * TLI->getNumInterleavedAccesses(SubVecTy, DL);

    // Some smaller than legal interleaved patterns are cheap as we can make
    // use of the vmovn or vrev patterns to interleave a standard load. This is
    // true for v4i8, v8i8 and v4i16 at least (but not for v4f16 as it is
    // promoted differently). The cost of 2 here is then a load and vrev or
    // vmovn.
    if (ST->hasMVEIntegerOps() && Factor == 2 && NumElts / Factor > 2 &&
        VecTy->isIntOrIntVectorTy() && DL.getTypeSizeInBits(SubVecTy) <= 64)
      return 2 * BaseCost;
  }

  // Everything else: fall back to the (scalarizing) base implementation.
  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace, CostKind,
                                           UseMaskForCond, UseMaskForGaps);
}
954 
// Return the cost of a masked gather/scatter of DataTy through Ptr,
// distinguishing patterns MVE can lower directly (VectorCost) from those
// that must be scalarized (ScalarCost).
unsigned ARMTTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
                                            Value *Ptr, bool VariableMask,
                                            unsigned Alignment,
                                            TTI::TargetCostKind CostKind,
                                            const Instruction *I) {
  using namespace PatternMatch;
  if (!ST->hasMVEIntegerOps() || !EnableMaskedGatherScatters)
    return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                         Alignment, CostKind, I);

  assert(DataTy->isVectorTy() && "Can't do gather/scatters on scalar!");
  auto *VTy = cast<FixedVectorType>(DataTy);

  // TODO: Splitting, once we do that.

  unsigned NumElems = VTy->getNumElements();
  unsigned EltSize = VTy->getScalarSizeInBits();
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, DataTy);

  // For now, it is assumed that for the MVE gather instructions the loads are
  // all effectively serialised. This means the cost is the scalar cost
  // multiplied by the number of elements being loaded. This is possibly very
  // conservative, but even so we still end up vectorising loops because the
  // cost per iteration for many loops is lower than for scalar loops.
  unsigned VectorCost = NumElems * LT.first;
  // The scalarization cost should be a lot higher. We use the number of vector
  // elements plus the scalarization overhead.
  unsigned ScalarCost =
      NumElems * LT.first + BaseT::getScalarizationOverhead(VTy, {});

  // Under-aligned elements cannot use the gather/scatter instructions.
  if (Alignment < EltSize / 8)
    return ScalarCost;

  // ExtSize is the effective element size after any extending load /
  // truncating store that the gather/scatter can fold in.
  unsigned ExtSize = EltSize;
  // Check whether there's a single user that asks for an extended type
  if (I != nullptr) {
    // Dependent of the caller of this function, a gather instruction will
    // either have opcode Instruction::Load or be a call to the masked_gather
    // intrinsic
    if ((I->getOpcode() == Instruction::Load ||
         match(I, m_Intrinsic<Intrinsic::masked_gather>())) &&
        I->hasOneUse()) {
      const User *Us = *I->users().begin();
      if (isa<ZExtInst>(Us) || isa<SExtInst>(Us)) {
        // only allow valid type combinations
        unsigned TypeSize =
            cast<Instruction>(Us)->getType()->getScalarSizeInBits();
        if (((TypeSize == 32 && (EltSize == 8 || EltSize == 16)) ||
             (TypeSize == 16 && EltSize == 8)) &&
            TypeSize * NumElems == 128) {
          ExtSize = TypeSize;
        }
      }
    }
    // Check whether the input data needs to be truncated
    TruncInst *T;
    if ((I->getOpcode() == Instruction::Store ||
         match(I, m_Intrinsic<Intrinsic::masked_scatter>())) &&
        (T = dyn_cast<TruncInst>(I->getOperand(0)))) {
      // Only allow valid type combinations
      unsigned TypeSize = T->getOperand(0)->getType()->getScalarSizeInBits();
      if (((EltSize == 16 && TypeSize == 32) ||
           (EltSize == 8 && (TypeSize == 32 || TypeSize == 16))) &&
          TypeSize * NumElems == 128)
        ExtSize = TypeSize;
    }
  }

  // MVE gathers/scatters need a full 128-bit vector of at least 4 elements.
  if (ExtSize * NumElems != 128 || NumElems < 4)
    return ScalarCost;

  // Any (aligned) i32 gather will not need to be scalarised.
  if (ExtSize == 32)
    return VectorCost;
  // For smaller types, we need to ensure that the gep's inputs are correctly
  // extended from a small enough value. Other sizes (including i64) are
  // scalarized for now.
  if (ExtSize != 8 && ExtSize != 16)
    return ScalarCost;

  // Look through a bitcast of the pointer to the underlying GEP.
  if (auto BC = dyn_cast<BitCastInst>(Ptr))
    Ptr = BC->getOperand(0);
  if (auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
    if (GEP->getNumOperands() != 2)
      return ScalarCost;
    unsigned Scale = DL.getTypeAllocSize(GEP->getResultElementType());
    // Scale needs to be correct (which is only relevant for i16s).
    if (Scale != 1 && Scale * 8 != ExtSize)
      return ScalarCost;
    // And we need to zext (not sext) the indexes from a small enough type.
    if (auto ZExt = dyn_cast<ZExtInst>(GEP->getOperand(1))) {
      if (ZExt->getOperand(0)->getType()->getScalarSizeInBits() <= ExtSize)
        return VectorCost;
    }
    return ScalarCost;
  }
  // Not a simple GEP-based access pattern: assume scalarization.
  return ScalarCost;
}
1053 
1054 bool ARMTTIImpl::isLoweredToCall(const Function *F) {
1055   if (!F->isIntrinsic())
1056     BaseT::isLoweredToCall(F);
1057 
1058   // Assume all Arm-specific intrinsics map to an instruction.
1059   if (F->getName().startswith("llvm.arm"))
1060     return false;
1061 
1062   switch (F->getIntrinsicID()) {
1063   default: break;
1064   case Intrinsic::powi:
1065   case Intrinsic::sin:
1066   case Intrinsic::cos:
1067   case Intrinsic::pow:
1068   case Intrinsic::log:
1069   case Intrinsic::log10:
1070   case Intrinsic::log2:
1071   case Intrinsic::exp:
1072   case Intrinsic::exp2:
1073     return true;
1074   case Intrinsic::sqrt:
1075   case Intrinsic::fabs:
1076   case Intrinsic::copysign:
1077   case Intrinsic::floor:
1078   case Intrinsic::ceil:
1079   case Intrinsic::trunc:
1080   case Intrinsic::rint:
1081   case Intrinsic::nearbyint:
1082   case Intrinsic::round:
1083   case Intrinsic::canonicalize:
1084   case Intrinsic::lround:
1085   case Intrinsic::llround:
1086   case Intrinsic::lrint:
1087   case Intrinsic::llrint:
1088     if (F->getReturnType()->isDoubleTy() && !ST->hasFP64())
1089       return true;
1090     if (F->getReturnType()->isHalfTy() && !ST->hasFullFP16())
1091       return true;
1092     // Some operations can be handled by vector instructions and assume
1093     // unsupported vectors will be expanded into supported scalar ones.
1094     // TODO Handle scalar operations properly.
1095     return !ST->hasFPARMv8Base() && !ST->hasVFP2Base();
1096   case Intrinsic::masked_store:
1097   case Intrinsic::masked_load:
1098   case Intrinsic::masked_gather:
1099   case Intrinsic::masked_scatter:
1100     return !ST->hasMVEIntegerOps();
1101   case Intrinsic::sadd_with_overflow:
1102   case Intrinsic::uadd_with_overflow:
1103   case Intrinsic::ssub_with_overflow:
1104   case Intrinsic::usub_with_overflow:
1105   case Intrinsic::sadd_sat:
1106   case Intrinsic::uadd_sat:
1107   case Intrinsic::ssub_sat:
1108   case Intrinsic::usub_sat:
1109     return false;
1110   }
1111 
1112   return BaseT::isLoweredToCall(F);
1113 }
1114 
// Decide whether L can and should be converted into a v8.1-m low-overhead
// hardware loop, filling in HWLoopInfo with the loop-counter configuration.
bool ARMTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
                                          AssumptionCache &AC,
                                          TargetLibraryInfo *LibInfo,
                                          HardwareLoopInfo &HWLoopInfo) {
  // Low-overhead branches are only supported in the 'low-overhead branch'
  // extension of v8.1-m.
  if (!ST->hasLOB() || DisableLowOverheadLoops) {
    LLVM_DEBUG(dbgs() << "ARMHWLoops: Disabled\n");
    return false;
  }

  // The trip count must be known up-front to seed LR.
  if (!SE.hasLoopInvariantBackedgeTakenCount(L)) {
    LLVM_DEBUG(dbgs() << "ARMHWLoops: No BETC\n");
    return false;
  }

  const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount)) {
    LLVM_DEBUG(dbgs() << "ARMHWLoops: Uncomputable BETC\n");
    return false;
  }

  // Trip count = backedge-taken count + 1.
  const SCEV *TripCountSCEV =
    SE.getAddExpr(BackedgeTakenCount,
                  SE.getOne(BackedgeTakenCount->getType()));

  // We need to store the trip count in LR, a 32-bit register.
  if (SE.getUnsignedRangeMax(TripCountSCEV).getBitWidth() > 32) {
    LLVM_DEBUG(dbgs() << "ARMHWLoops: Trip count does not fit into 32bits\n");
    return false;
  }

  // Making a call will trash LR and clear LO_BRANCH_INFO, so there's little
  // point in generating a hardware loop if that's going to happen.
  auto MaybeCall = [this](Instruction &I) {
    const ARMTargetLowering *TLI = getTLI();
    unsigned ISD = TLI->InstructionOpcodeToISD(I.getOpcode());
    EVT VT = TLI->getValueType(DL, I.getType(), true);
    if (TLI->getOperationAction(ISD, VT) == TargetLowering::LibCall)
      return true;

    // Check if an intrinsic will be lowered to a call and assume that any
    // other CallInst will generate a bl.
    if (auto *Call = dyn_cast<CallInst>(&I)) {
      if (isa<IntrinsicInst>(Call)) {
        if (const Function *F = Call->getCalledFunction())
          return isLoweredToCall(F);
      }
      return true;
    }

    // FPv5 provides conversions between integer, double-precision,
    // single-precision, and half-precision formats.
    switch (I.getOpcode()) {
    default:
      break;
    case Instruction::FPToSI:
    case Instruction::FPToUI:
    case Instruction::SIToFP:
    case Instruction::UIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
      return !ST->hasFPARMv8Base();
    }

    // FIXME: Unfortunately the approach of checking the Operation Action does
    // not catch all cases of Legalization that use library calls. Our
    // Legalization step categorizes some transformations into library calls as
    // Custom, Expand or even Legal when doing type legalization. So for now
    // we have to special case for instance the SDIV of 64bit integers and the
    // use of floating point emulation.
    if (VT.isInteger() && VT.getSizeInBits() >= 64) {
      switch (ISD) {
      default:
        break;
      case ISD::SDIV:
      case ISD::UDIV:
      case ISD::SREM:
      case ISD::UREM:
      case ISD::SDIVREM:
      case ISD::UDIVREM:
        return true;
      }
    }

    // Assume all other non-float operations are supported.
    if (!VT.isFloatingPoint())
      return false;

    // We'll need a library call to handle most floats when using soft.
    if (TLI->useSoftFloat()) {
      switch (I.getOpcode()) {
      default:
        return true;
      case Instruction::Alloca:
      case Instruction::Load:
      case Instruction::Store:
      case Instruction::Select:
      case Instruction::PHI:
        return false;
      }
    }

    // We'll need a libcall to perform double precision operations on a single
    // precision only FPU.
    if (I.getType()->isDoubleTy() && !ST->hasFP64())
      return true;

    // Likewise for half precision arithmetic.
    if (I.getType()->isHalfTy() && !ST->hasFullFP16())
      return true;

    return false;
  };

  // Reject loops that already contain hardware-loop intrinsics; nesting
  // low-overhead loops is not legal.
  auto IsHardwareLoopIntrinsic = [](Instruction &I) {
    if (auto *Call = dyn_cast<IntrinsicInst>(&I)) {
      switch (Call->getIntrinsicID()) {
      default:
        break;
      case Intrinsic::set_loop_iterations:
      case Intrinsic::test_set_loop_iterations:
      case Intrinsic::loop_decrement:
      case Intrinsic::loop_decrement_reg:
        return true;
      }
    }
    return false;
  };

  // Scan the instructions to see if there's any that we know will turn into a
  // call or if this loop is already a low-overhead loop.
  auto ScanLoop = [&](Loop *L) {
    for (auto *BB : L->getBlocks()) {
      for (auto &I : *BB) {
        if (MaybeCall(I) || IsHardwareLoopIntrinsic(I)) {
          LLVM_DEBUG(dbgs() << "ARMHWLoops: Bad instruction: " << I << "\n");
          return false;
        }
      }
    }
    return true;
  };

  // Visit inner loops.
  for (auto Inner : *L)
    if (!ScanLoop(Inner))
      return false;

  if (!ScanLoop(L))
    return false;

  // TODO: Check whether the trip count calculation is expensive. If L is the
  // inner loop but we know it has a low trip count, calculating that trip
  // count (in the parent loop) may be detrimental.

  // Profitable: configure a 32-bit, entry-tested counter decremented by 1.
  LLVMContext &C = L->getHeader()->getContext();
  HWLoopInfo.CounterInReg = true;
  HWLoopInfo.IsNestingLegal = false;
  HWLoopInfo.PerformEntryTest = true;
  HWLoopInfo.CountType = Type::getInt32Ty(C);
  HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1);
  return true;
}
1279 
1280 static bool canTailPredicateInstruction(Instruction &I, int &ICmpCount) {
1281   // We don't allow icmp's, and because we only look at single block loops,
1282   // we simply count the icmps, i.e. there should only be 1 for the backedge.
1283   if (isa<ICmpInst>(&I) && ++ICmpCount > 1)
1284     return false;
1285 
1286   if (isa<FCmpInst>(&I))
1287     return false;
1288 
1289   // We could allow extending/narrowing FP loads/stores, but codegen is
1290   // too inefficient so reject this for now.
1291   if (isa<FPExtInst>(&I) || isa<FPTruncInst>(&I))
1292     return false;
1293 
1294   // Extends have to be extending-loads
1295   if (isa<SExtInst>(&I) || isa<ZExtInst>(&I) )
1296     if (!I.getOperand(0)->hasOneUse() || !isa<LoadInst>(I.getOperand(0)))
1297       return false;
1298 
1299   // Truncs have to be narrowing-stores
1300   if (isa<TruncInst>(&I) )
1301     if (!I.hasOneUse() || !isa<StoreInst>(*I.user_begin()))
1302       return false;
1303 
1304   return true;
1305 }
1306 
1307 // To set up a tail-predicated loop, we need to know the total number of
1308 // elements processed by that loop. Thus, we need to determine the element
1309 // size and:
1310 // 1) it should be uniform for all operations in the vector loop, so we
1311 //    e.g. don't want any widening/narrowing operations.
1312 // 2) it should be smaller than i64s because we don't have vector operations
1313 //    that work on i64s.
1314 // 3) we don't want elements to be reversed or shuffled, to make sure the
1315 //    tail-predication masks/predicates the right lanes.
1316 //
1317 static bool canTailPredicateLoop(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
1318                                  const DataLayout &DL,
1319                                  const LoopAccessInfo *LAI) {
1320   PredicatedScalarEvolution PSE = LAI->getPSE();
1321   int ICmpCount = 0;
1322   int Stride = 0;
1323 
1324   LLVM_DEBUG(dbgs() << "tail-predication: checking allowed instructions\n");
1325   SmallVector<Instruction *, 16> LoadStores;
1326   for (BasicBlock *BB : L->blocks()) {
1327     for (Instruction &I : BB->instructionsWithoutDebug()) {
1328       if (isa<PHINode>(&I))
1329         continue;
1330       if (!canTailPredicateInstruction(I, ICmpCount)) {
1331         LLVM_DEBUG(dbgs() << "Instruction not allowed: "; I.dump());
1332         return false;
1333       }
1334 
1335       Type *T  = I.getType();
1336       if (T->isPointerTy())
1337         T = T->getPointerElementType();
1338 
1339       if (T->getScalarSizeInBits() > 32) {
1340         LLVM_DEBUG(dbgs() << "Unsupported Type: "; T->dump());
1341         return false;
1342       }
1343 
1344       if (isa<StoreInst>(I) || isa<LoadInst>(I)) {
1345         Value *Ptr = isa<LoadInst>(I) ? I.getOperand(0) : I.getOperand(1);
1346         int64_t NextStride = getPtrStride(PSE, Ptr, L);
1347         // TODO: for now only allow consecutive strides of 1. We could support
1348         // other strides as long as it is uniform, but let's keep it simple for
1349         // now.
1350         if (Stride == 0 && NextStride == 1) {
1351           Stride = NextStride;
1352           continue;
1353         }
1354         if (Stride != NextStride) {
1355           LLVM_DEBUG(dbgs() << "Different strides found, can't "
1356                                "tail-predicate\n.");
1357           return false;
1358         }
1359       }
1360     }
1361   }
1362 
1363   LLVM_DEBUG(dbgs() << "tail-predication: all instructions allowed!\n");
1364   return true;
1365 }
1366 
// Return true if vectorizing L with tail-folding (predication) is preferred
// over emitting a scalar epilogue. Requires MVE, a single-block inner loop,
// and that a hardware loop will actually be generated for it.
bool ARMTTIImpl::preferPredicateOverEpilogue(Loop *L, LoopInfo *LI,
                                             ScalarEvolution &SE,
                                             AssumptionCache &AC,
                                             TargetLibraryInfo *TLI,
                                             DominatorTree *DT,
                                             const LoopAccessInfo *LAI) {
  if (DisableTailPredication)
    return false;

  // Creating a predicated vector loop is the first step for generating a
  // tail-predicated hardware loop, for which we need the MVE masked
  // load/stores instructions:
  if (!ST->hasMVEIntegerOps())
    return false;

  // For now, restrict this to single block loops.
  if (L->getNumBlocks() > 1) {
    LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: not a single block "
                         "loop.\n");
    return false;
  }

  assert(L->empty() && "preferPredicateOverEpilogue: inner-loop expected");

  HardwareLoopInfo HWLoopInfo(L);
  if (!HWLoopInfo.canAnalyze(*LI)) {
    LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
                         "analyzable.\n");
    return false;
  }

  // This checks if we have the low-overhead branch architecture
  // extension, and if we will create a hardware-loop:
  if (!isHardwareLoopProfitable(L, SE, AC, TLI, HWLoopInfo)) {
    LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
                         "profitable.\n");
    return false;
  }

  if (!HWLoopInfo.isHardwareLoopCandidate(SE, *LI, *DT)) {
    LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
                         "a candidate.\n");
    return false;
  }

  // Finally, check the individual instructions and memory strides.
  return canTailPredicateLoop(L, LI, SE, DL, LAI);
}
1414 
1415 bool ARMTTIImpl::emitGetActiveLaneMask(Loop *L, LoopInfo *LI,
1416     ScalarEvolution &SE, bool TailFolded) const {
1417   // TODO: if this loop is tail-folded, we want to emit the
1418   // llvm.get.active.lane.mask intrinsic so that this can be picked up in the
1419   // MVETailPredication pass that needs to know the number of elements
1420   // processed by this vector loop.
1421   return false;
1422 }
// Tune loop-unrolling preferences for ARM. On Thumb-2 M-class cores, enable
// aggressive runtime unrolling for small, call-free, scalar loops; otherwise
// defer to the generic base-class preferences.
void ARMTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::UnrollingPreferences &UP) {
  // Only currently enable these preferences for M-Class cores.
  if (!ST->isMClass())
    return BasicTTIImplBase::getUnrollingPreferences(L, SE, UP);

  // Disable loop unrolling for Oz and Os.
  UP.OptSizeThreshold = 0;
  UP.PartialOptSizeThreshold = 0;
  if (L->getHeader()->getParent()->hasOptSize())
    return;

  // Only enable on Thumb-2 targets.
  if (!ST->isThumb2())
    return;

  SmallVector<BasicBlock*, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);
  LLVM_DEBUG(dbgs() << "Loop has:\n"
                    << "Blocks: " << L->getNumBlocks() << "\n"
                    << "Exit blocks: " << ExitingBlocks.size() << "\n");

  // Only allow another exit other than the latch. This acts as an early exit
  // as it mirrors the profitability calculation of the runtime unroller.
  if (ExitingBlocks.size() > 2)
    return;

  // Limit the CFG of the loop body for targets with a branch predictor.
  // Allowing 4 blocks permits if-then-else diamonds in the body.
  if (ST->hasBranchPredictor() && L->getNumBlocks() > 4)
    return;

  // Scan the loop: don't unroll loops with calls as this could prevent
  // inlining.
  unsigned Cost = 0;
  for (auto *BB : L->getBlocks()) {
    for (auto &I : *BB) {
      // Don't unroll vectorised loop. MVE does not benefit from it as much as
      // scalar code.
      if (I.getType()->isVectorTy())
        return;

      if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
        if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
          // Calls that lower to an inline sequence (intrinsics) are fine.
          if (!isLoweredToCall(F))
            continue;
        }
        return;
      }

      // Accumulate a code-size estimate for the loop body.
      SmallVector<const Value*, 4> Operands(I.value_op_begin(),
                                            I.value_op_end());
      Cost += getUserCost(&I, Operands, TargetTransformInfo::TCK_CodeSize);
    }
  }

  LLVM_DEBUG(dbgs() << "Cost of loop: " << Cost << "\n");

  UP.Partial = true;
  UP.Runtime = true;
  UP.UpperBound = true;
  UP.UnrollRemainder = true;
  UP.DefaultUnrollRuntimeCount = 4;
  UP.UnrollAndJam = true;
  UP.UnrollAndJamInnerLoopThreshold = 60;

  // Force unrolling small loops can be very useful because of the branch
  // taken cost of the backedge.
  if (Cost < 12)
    UP.Force = true;
}
1494 
1495 bool ARMTTIImpl::useReductionIntrinsic(unsigned Opcode, Type *Ty,
1496                                        TTI::ReductionFlags Flags) const {
1497   return ST->hasMVEIntegerOps();
1498 }
1499