1 //===- ARMTargetTransformInfo.cpp - ARM specific TTI ----------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "ARMTargetTransformInfo.h"
10 #include "ARMSubtarget.h"
11 #include "MCTargetDesc/ARMAddressingModes.h"
12 #include "llvm/ADT/APInt.h"
13 #include "llvm/ADT/SmallVector.h"
14 #include "llvm/Analysis/LoopInfo.h"
15 #include "llvm/CodeGen/CostTable.h"
16 #include "llvm/CodeGen/ISDOpcodes.h"
17 #include "llvm/CodeGen/ValueTypes.h"
18 #include "llvm/IR/BasicBlock.h"
19 #include "llvm/IR/DataLayout.h"
20 #include "llvm/IR/DerivedTypes.h"
21 #include "llvm/IR/Instruction.h"
22 #include "llvm/IR/Instructions.h"
23 #include "llvm/IR/IntrinsicInst.h"
24 #include "llvm/IR/IntrinsicsARM.h"
25 #include "llvm/IR/PatternMatch.h"
26 #include "llvm/IR/Type.h"
27 #include "llvm/MC/SubtargetFeature.h"
28 #include "llvm/Support/Casting.h"
29 #include "llvm/Support/MachineValueType.h"
30 #include "llvm/Target/TargetMachine.h"
31 #include <algorithm>
32 #include <cassert>
33 #include <cstdint>
34 #include <utility>
35 
36 using namespace llvm;
37 
38 #define DEBUG_TYPE "armtti"
39 
40 static cl::opt<bool> EnableMaskedLoadStores(
41   "enable-arm-maskedldst", cl::Hidden, cl::init(true),
42   cl::desc("Enable the generation of masked loads and stores"));
43 
44 static cl::opt<bool> DisableLowOverheadLoops(
45   "disable-arm-loloops", cl::Hidden, cl::init(false),
46   cl::desc("Disable the generation of low-overhead loops"));
47 
48 extern cl::opt<bool> DisableTailPredication;
49 
50 extern cl::opt<bool> EnableMaskedGatherScatters;
51 
52 bool ARMTTIImpl::areInlineCompatible(const Function *Caller,
53                                      const Function *Callee) const {
54   const TargetMachine &TM = getTLI()->getTargetMachine();
55   const FeatureBitset &CallerBits =
56       TM.getSubtargetImpl(*Caller)->getFeatureBits();
57   const FeatureBitset &CalleeBits =
58       TM.getSubtargetImpl(*Callee)->getFeatureBits();
59 
60   // To inline a callee, all features not in the whitelist must match exactly.
61   bool MatchExact = (CallerBits & ~InlineFeatureWhitelist) ==
62                     (CalleeBits & ~InlineFeatureWhitelist);
63   // For features in the whitelist, the callee's features must be a subset of
64   // the callers'.
65   bool MatchSubset = ((CallerBits & CalleeBits) & InlineFeatureWhitelist) ==
66                      (CalleeBits & InlineFeatureWhitelist);
67   return MatchExact && MatchSubset;
68 }
69 
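// Prefer addressing based on the loop backedge index only for single-block
// Thumb2 M-class loops that are not optimised for size and do not use MVE
// (which prefers plain post-increment addressing instead).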
70 bool ARMTTIImpl::shouldFavorBackedgeIndex(const Loop *L) const {
71   if (L->getHeader()->getParent()->hasOptSize())
72     return false;
73   if (ST->hasMVEIntegerOps())
74     return false;
75   return ST->isMClass() && ST->isThumb2() && L->getNumBlocks() == 1;
76 }
77 
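// MVE has post-incrementing loads and stores, so favour post-increment
// addressing whenever MVE integer operations are available.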
78 bool ARMTTIImpl::shouldFavorPostInc() const {
79   if (ST->hasMVEIntegerOps())
80     return true;
81   return false;
82 }
83 
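// Estimate the cost of materialising an integer immediate: 1 if it can be
// encoded directly, more if it needs extra instructions or a load from the
// constant pool.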
84 int ARMTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
85                               TTI::TargetCostKind CostKind) {
86   assert(Ty->isIntegerTy());
87 
  unsigned Bits = Ty->getPrimitiveSizeInBits();
  if (Bits == 0 || Imm.getActiveBits() >= 64)
    return 4;
91 
92   int64_t SImmVal = Imm.getSExtValue();
93   uint64_t ZImmVal = Imm.getZExtValue();
94   if (!ST->isThumb()) {
95     if ((SImmVal >= 0 && SImmVal < 65536) ||
96         (ARM_AM::getSOImmVal(ZImmVal) != -1) ||
97         (ARM_AM::getSOImmVal(~ZImmVal) != -1))
98       return 1;
99     return ST->hasV6T2Ops() ? 2 : 3;
100   }
101   if (ST->isThumb2()) {
102     if ((SImmVal >= 0 && SImmVal < 65536) ||
103         (ARM_AM::getT2SOImmVal(ZImmVal) != -1) ||
104         (ARM_AM::getT2SOImmVal(~ZImmVal) != -1))
105       return 1;
106     return ST->hasV6T2Ops() ? 2 : 3;
107   }
  // Thumb1: any i8 immediate costs 1.
109   if (Bits == 8 || (SImmVal >= 0 && SImmVal < 256))
110     return 1;
111   if ((~SImmVal < 256) || ARM_AM::isThumbImmShiftedVal(ZImmVal))
112     return 2;
113   // Load from constantpool.
114   return 3;
115 }
116 
// Constants smaller than 256 fit in the immediate field of Thumb1
// instructions, so we return a cost of zero for them and 1 otherwise.
119 int ARMTTIImpl::getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx,
120                                       const APInt &Imm, Type *Ty) {
121   if (Imm.isNonNegative() && Imm.getLimitedValue() < 256)
122     return 0;
123 
124   return 1;
125 }
126 
127 int ARMTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx, const APInt &Imm,
128                                   Type *Ty, TTI::TargetCostKind CostKind) {
129   // Division by a constant can be turned into multiplication, but only if we
130   // know it's constant. So it's not so much that the immediate is cheap (it's
131   // not), but that the alternative is worse.
132   // FIXME: this is probably unneeded with GlobalISel.
133   if ((Opcode == Instruction::SDiv || Opcode == Instruction::UDiv ||
134        Opcode == Instruction::SRem || Opcode == Instruction::URem) &&
135       Idx == 1)
136     return 0;
137 
138   if (Opcode == Instruction::And) {
139     // UXTB/UXTH
140     if (Imm == 255 || Imm == 65535)
141       return 0;
142     // Conversion to BIC is free, and means we can use ~Imm instead.
143     return std::min(getIntImmCost(Imm, Ty, CostKind),
144                     getIntImmCost(~Imm, Ty, CostKind));
145   }
146 
147   if (Opcode == Instruction::Add)
148     // Conversion to SUB is free, and means we can use -Imm instead.
149     return std::min(getIntImmCost(Imm, Ty, CostKind),
150                     getIntImmCost(-Imm, Ty, CostKind));
151 
152   if (Opcode == Instruction::ICmp && Imm.isNegative() &&
153       Ty->getIntegerBitWidth() == 32) {
154     int64_t NegImm = -Imm.getSExtValue();
155     if (ST->isThumb2() && NegImm < 1<<12)
156       // icmp X, #-C -> cmn X, #C
157       return 0;
158     if (ST->isThumb() && NegImm < 1<<8)
159       // icmp X, #-C -> adds X, #C
160       return 0;
161   }
162 
163   // xor a, -1 can always be folded to MVN
164   if (Opcode == Instruction::Xor && Imm.isAllOnesValue())
165     return 0;
166 
167   return getIntImmCost(Imm, Ty, CostKind);
168 }
169 
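// Model the cost of a cast, using the NEON and MVE conversion tables below
// where they apply and falling back to the target-independent cost otherwise.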
170 int ARMTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
171                                  TTI::TargetCostKind CostKind,
172                                  const Instruction *I) {
173   int ISD = TLI->InstructionOpcodeToISD(Opcode);
174   assert(ISD && "Invalid opcode");
175 
176   // TODO: Allow non-throughput costs that aren't binary.
177   auto AdjustCost = [&CostKind](int Cost) {
178     if (CostKind != TTI::TCK_RecipThroughput)
179       return Cost == 0 ? 0 : 1;
180     return Cost;
181   };
182 
183   // Single to/from double precision conversions.
184   static const CostTblEntry NEONFltDblTbl[] = {
185     // Vector fptrunc/fpext conversions.
186     { ISD::FP_ROUND,   MVT::v2f64, 2 },
187     { ISD::FP_EXTEND,  MVT::v2f32, 2 },
188     { ISD::FP_EXTEND,  MVT::v4f32, 4 }
189   };
190 
191   if (Src->isVectorTy() && ST->hasNEON() && (ISD == ISD::FP_ROUND ||
192                                           ISD == ISD::FP_EXTEND)) {
193     std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
194     if (const auto *Entry = CostTableLookup(NEONFltDblTbl, ISD, LT.second))
195       return AdjustCost(LT.first * Entry->Cost);
196   }
197 
198   EVT SrcTy = TLI->getValueType(DL, Src);
199   EVT DstTy = TLI->getValueType(DL, Dst);
200 
201   if (!SrcTy.isSimple() || !DstTy.isSimple())
202     return AdjustCost(BaseT::getCastInstrCost(Opcode, Dst, Src, CostKind, I));
203 
204   // The extend of a load is free
205   if (I && isa<LoadInst>(I->getOperand(0))) {
206     static const TypeConversionCostTblEntry LoadConversionTbl[] = {
207         {ISD::SIGN_EXTEND, MVT::i32, MVT::i16, 0},
208         {ISD::ZERO_EXTEND, MVT::i32, MVT::i16, 0},
209         {ISD::SIGN_EXTEND, MVT::i32, MVT::i8, 0},
210         {ISD::ZERO_EXTEND, MVT::i32, MVT::i8, 0},
211         {ISD::SIGN_EXTEND, MVT::i16, MVT::i8, 0},
212         {ISD::ZERO_EXTEND, MVT::i16, MVT::i8, 0},
213         {ISD::SIGN_EXTEND, MVT::i64, MVT::i32, 1},
214         {ISD::ZERO_EXTEND, MVT::i64, MVT::i32, 1},
215         {ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 1},
216         {ISD::ZERO_EXTEND, MVT::i64, MVT::i16, 1},
217         {ISD::SIGN_EXTEND, MVT::i64, MVT::i8, 1},
218         {ISD::ZERO_EXTEND, MVT::i64, MVT::i8, 1},
219     };
220     if (const auto *Entry = ConvertCostTableLookup(
221             LoadConversionTbl, ISD, DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
222       return AdjustCost(Entry->Cost);
223 
224     static const TypeConversionCostTblEntry MVELoadConversionTbl[] = {
225         {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 0},
226         {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 0},
227         {ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 0},
228         {ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 0},
229         {ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 0},
230         {ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 0},
231     };
232     if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
233       if (const auto *Entry =
234               ConvertCostTableLookup(MVELoadConversionTbl, ISD,
235                                      DstTy.getSimpleVT(), SrcTy.getSimpleVT()))
236         return AdjustCost(Entry->Cost);
237     }
238   }
239 
240   // NEON vector operations that can extend their inputs.
241   if ((ISD == ISD::SIGN_EXTEND || ISD == ISD::ZERO_EXTEND) &&
242       I && I->hasOneUse() && ST->hasNEON() && SrcTy.isVector()) {
243     static const TypeConversionCostTblEntry NEONDoubleWidthTbl[] = {
244       // vaddl
245       { ISD::ADD, MVT::v4i32, MVT::v4i16, 0 },
246       { ISD::ADD, MVT::v8i16, MVT::v8i8,  0 },
247       // vsubl
248       { ISD::SUB, MVT::v4i32, MVT::v4i16, 0 },
249       { ISD::SUB, MVT::v8i16, MVT::v8i8,  0 },
250       // vmull
251       { ISD::MUL, MVT::v4i32, MVT::v4i16, 0 },
252       { ISD::MUL, MVT::v8i16, MVT::v8i8,  0 },
253       // vshll
254       { ISD::SHL, MVT::v4i32, MVT::v4i16, 0 },
255       { ISD::SHL, MVT::v8i16, MVT::v8i8,  0 },
256     };
257 
258     auto *User = cast<Instruction>(*I->user_begin());
259     int UserISD = TLI->InstructionOpcodeToISD(User->getOpcode());
260     if (auto *Entry = ConvertCostTableLookup(NEONDoubleWidthTbl, UserISD,
261                                              DstTy.getSimpleVT(),
262                                              SrcTy.getSimpleVT())) {
263       return AdjustCost(Entry->Cost);
264     }
265   }
266 
267   // Some arithmetic, load and store operations have specific instructions
268   // to cast up/down their types automatically at no extra cost.
269   // TODO: Get these tables to know at least what the related operations are.
270   static const TypeConversionCostTblEntry NEONVectorConversionTbl[] = {
271     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
272     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
273     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
274     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
275     { ISD::TRUNCATE,    MVT::v4i32, MVT::v4i64, 0 },
276     { ISD::TRUNCATE,    MVT::v4i16, MVT::v4i32, 1 },
277 
278     // The number of vmovl instructions for the extension.
279     { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8,  1 },
280     { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8,  1 },
281     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8,  2 },
282     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8,  2 },
283     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8,  3 },
284     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8,  3 },
285     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
286     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
287     { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
288     { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
289     { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
290     { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
291     { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
292     { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 },
293     { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
294     { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 },
295     { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
296     { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
297 
298     // Operations that we legalize using splitting.
299     { ISD::TRUNCATE,    MVT::v16i8, MVT::v16i32, 6 },
300     { ISD::TRUNCATE,    MVT::v8i8, MVT::v8i32, 3 },
301 
302     // Vector float <-> i32 conversions.
303     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
304     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
305 
306     { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
307     { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i8, 3 },
308     { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
309     { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i16, 2 },
310     { ISD::SINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
311     { ISD::UINT_TO_FP,  MVT::v2f32, MVT::v2i32, 1 },
312     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
313     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i1, 3 },
314     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
315     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i8, 3 },
316     { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
317     { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
318     { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
319     { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i16, 4 },
320     { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
321     { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i32, 2 },
322     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
323     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i16, 8 },
324     { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },
325     { ISD::UINT_TO_FP,  MVT::v16f32, MVT::v16i32, 4 },
326 
327     { ISD::FP_TO_SINT,  MVT::v4i32, MVT::v4f32, 1 },
328     { ISD::FP_TO_UINT,  MVT::v4i32, MVT::v4f32, 1 },
329     { ISD::FP_TO_SINT,  MVT::v4i8, MVT::v4f32, 3 },
330     { ISD::FP_TO_UINT,  MVT::v4i8, MVT::v4f32, 3 },
331     { ISD::FP_TO_SINT,  MVT::v4i16, MVT::v4f32, 2 },
332     { ISD::FP_TO_UINT,  MVT::v4i16, MVT::v4f32, 2 },
333 
334     // Vector double <-> i32 conversions.
335     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
336     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
337 
338     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
339     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i8, 4 },
340     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
341     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i16, 3 },
342     { ISD::SINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
343     { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i32, 2 },
344 
345     { ISD::FP_TO_SINT,  MVT::v2i32, MVT::v2f64, 2 },
346     { ISD::FP_TO_UINT,  MVT::v2i32, MVT::v2f64, 2 },
347     { ISD::FP_TO_SINT,  MVT::v8i16, MVT::v8f32, 4 },
348     { ISD::FP_TO_UINT,  MVT::v8i16, MVT::v8f32, 4 },
349     { ISD::FP_TO_SINT,  MVT::v16i16, MVT::v16f32, 8 },
350     { ISD::FP_TO_UINT,  MVT::v16i16, MVT::v16f32, 8 }
351   };
352 
353   if (SrcTy.isVector() && ST->hasNEON()) {
354     if (const auto *Entry = ConvertCostTableLookup(NEONVectorConversionTbl, ISD,
355                                                    DstTy.getSimpleVT(),
356                                                    SrcTy.getSimpleVT()))
357       return AdjustCost(Entry->Cost);
358   }
359 
360   // Scalar float to integer conversions.
361   static const TypeConversionCostTblEntry NEONFloatConversionTbl[] = {
362     { ISD::FP_TO_SINT,  MVT::i1, MVT::f32, 2 },
363     { ISD::FP_TO_UINT,  MVT::i1, MVT::f32, 2 },
364     { ISD::FP_TO_SINT,  MVT::i1, MVT::f64, 2 },
365     { ISD::FP_TO_UINT,  MVT::i1, MVT::f64, 2 },
366     { ISD::FP_TO_SINT,  MVT::i8, MVT::f32, 2 },
367     { ISD::FP_TO_UINT,  MVT::i8, MVT::f32, 2 },
368     { ISD::FP_TO_SINT,  MVT::i8, MVT::f64, 2 },
369     { ISD::FP_TO_UINT,  MVT::i8, MVT::f64, 2 },
370     { ISD::FP_TO_SINT,  MVT::i16, MVT::f32, 2 },
371     { ISD::FP_TO_UINT,  MVT::i16, MVT::f32, 2 },
372     { ISD::FP_TO_SINT,  MVT::i16, MVT::f64, 2 },
373     { ISD::FP_TO_UINT,  MVT::i16, MVT::f64, 2 },
374     { ISD::FP_TO_SINT,  MVT::i32, MVT::f32, 2 },
375     { ISD::FP_TO_UINT,  MVT::i32, MVT::f32, 2 },
376     { ISD::FP_TO_SINT,  MVT::i32, MVT::f64, 2 },
377     { ISD::FP_TO_UINT,  MVT::i32, MVT::f64, 2 },
378     { ISD::FP_TO_SINT,  MVT::i64, MVT::f32, 10 },
379     { ISD::FP_TO_UINT,  MVT::i64, MVT::f32, 10 },
380     { ISD::FP_TO_SINT,  MVT::i64, MVT::f64, 10 },
381     { ISD::FP_TO_UINT,  MVT::i64, MVT::f64, 10 }
382   };
383   if (SrcTy.isFloatingPoint() && ST->hasNEON()) {
384     if (const auto *Entry = ConvertCostTableLookup(NEONFloatConversionTbl, ISD,
385                                                    DstTy.getSimpleVT(),
386                                                    SrcTy.getSimpleVT()))
387       return AdjustCost(Entry->Cost);
388   }
389 
390   // Scalar integer to float conversions.
391   static const TypeConversionCostTblEntry NEONIntegerConversionTbl[] = {
392     { ISD::SINT_TO_FP,  MVT::f32, MVT::i1, 2 },
393     { ISD::UINT_TO_FP,  MVT::f32, MVT::i1, 2 },
394     { ISD::SINT_TO_FP,  MVT::f64, MVT::i1, 2 },
395     { ISD::UINT_TO_FP,  MVT::f64, MVT::i1, 2 },
396     { ISD::SINT_TO_FP,  MVT::f32, MVT::i8, 2 },
397     { ISD::UINT_TO_FP,  MVT::f32, MVT::i8, 2 },
398     { ISD::SINT_TO_FP,  MVT::f64, MVT::i8, 2 },
399     { ISD::UINT_TO_FP,  MVT::f64, MVT::i8, 2 },
400     { ISD::SINT_TO_FP,  MVT::f32, MVT::i16, 2 },
401     { ISD::UINT_TO_FP,  MVT::f32, MVT::i16, 2 },
402     { ISD::SINT_TO_FP,  MVT::f64, MVT::i16, 2 },
403     { ISD::UINT_TO_FP,  MVT::f64, MVT::i16, 2 },
404     { ISD::SINT_TO_FP,  MVT::f32, MVT::i32, 2 },
405     { ISD::UINT_TO_FP,  MVT::f32, MVT::i32, 2 },
406     { ISD::SINT_TO_FP,  MVT::f64, MVT::i32, 2 },
407     { ISD::UINT_TO_FP,  MVT::f64, MVT::i32, 2 },
408     { ISD::SINT_TO_FP,  MVT::f32, MVT::i64, 10 },
409     { ISD::UINT_TO_FP,  MVT::f32, MVT::i64, 10 },
410     { ISD::SINT_TO_FP,  MVT::f64, MVT::i64, 10 },
411     { ISD::UINT_TO_FP,  MVT::f64, MVT::i64, 10 }
412   };
413 
414   if (SrcTy.isInteger() && ST->hasNEON()) {
415     if (const auto *Entry = ConvertCostTableLookup(NEONIntegerConversionTbl,
416                                                    ISD, DstTy.getSimpleVT(),
417                                                    SrcTy.getSimpleVT()))
418       return AdjustCost(Entry->Cost);
419   }
420 
  // MVE extend costs, taken from codegen tests. i8->i16 or i16->i32 is one
  // instruction, i8->i32 is two. i64 zexts are a VAND with a constant; sexts
  // are linearised so take more.
424   static const TypeConversionCostTblEntry MVEVectorConversionTbl[] = {
425     { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
426     { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
427     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
428     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
429     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i8, 10 },
430     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i8, 2 },
431     { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
432     { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
433     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i16, 10 },
434     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i16, 2 },
435     { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 8 },
436     { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 2 },
437   };
438 
439   if (SrcTy.isVector() && ST->hasMVEIntegerOps()) {
440     if (const auto *Entry = ConvertCostTableLookup(MVEVectorConversionTbl,
441                                                    ISD, DstTy.getSimpleVT(),
442                                                    SrcTy.getSimpleVT()))
443       return AdjustCost(Entry->Cost * ST->getMVEVectorCostFactor());
444   }
445 
446   // Scalar integer conversion costs.
447   static const TypeConversionCostTblEntry ARMIntegerConversionTbl[] = {
448     // i16 -> i64 requires two dependent operations.
449     { ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 2 },
450 
451     // Truncates on i64 are assumed to be free.
452     { ISD::TRUNCATE,    MVT::i32, MVT::i64, 0 },
453     { ISD::TRUNCATE,    MVT::i16, MVT::i64, 0 },
454     { ISD::TRUNCATE,    MVT::i8,  MVT::i64, 0 },
455     { ISD::TRUNCATE,    MVT::i1,  MVT::i64, 0 }
456   };
457 
458   if (SrcTy.isInteger()) {
459     if (const auto *Entry = ConvertCostTableLookup(ARMIntegerConversionTbl, ISD,
460                                                    DstTy.getSimpleVT(),
461                                                    SrcTy.getSimpleVT()))
462       return AdjustCost(Entry->Cost);
463   }
464 
465   int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
466                      ? ST->getMVEVectorCostFactor()
467                      : 1;
468   return AdjustCost(
469     BaseCost * BaseT::getCastInstrCost(Opcode, Dst, Src, CostKind, I));
470 }
471 
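// Cost of inserting or extracting a single vector lane. Lane moves between
// the core and vector register banks are expensive, so they are given a
// higher cost on NEON and MVE.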
472 int ARMTTIImpl::getVectorInstrCost(unsigned Opcode, Type *ValTy,
473                                    unsigned Index) {
  // Penalize inserting into a D-subregister. We end up with a three times
  // lower estimated throughput on Swift.
476   if (ST->hasSlowLoadDSubregister() && Opcode == Instruction::InsertElement &&
477       ValTy->isVectorTy() && ValTy->getScalarSizeInBits() <= 32)
478     return 3;
479 
480   if (ST->hasNEON() && (Opcode == Instruction::InsertElement ||
481                         Opcode == Instruction::ExtractElement)) {
482     // Cross-class copies are expensive on many microarchitectures,
483     // so assume they are expensive by default.
484     if (cast<VectorType>(ValTy)->getElementType()->isIntegerTy())
485       return 3;
486 
487     // Even if it's not a cross class copy, this likely leads to mixing
488     // of NEON and VFP code and should be therefore penalized.
489     if (ValTy->isVectorTy() &&
490         ValTy->getScalarSizeInBits() <= 32)
491       return std::max(BaseT::getVectorInstrCost(Opcode, ValTy, Index), 2U);
492   }
493 
494   if (ST->hasMVEIntegerOps() && (Opcode == Instruction::InsertElement ||
495                                  Opcode == Instruction::ExtractElement)) {
    // We say MVE moves cost at least the MVEVectorCostFactor, even though
    // they are scalar instructions. This helps discourage mixing scalar and
    // vector code, so that we do not vectorise where we would end up just
    // scalarising the result anyway.
500     return std::max(BaseT::getVectorInstrCost(Opcode, ValTy, Index),
501                     ST->getMVEVectorCostFactor()) *
502            cast<FixedVectorType>(ValTy)->getNumElements() / 2;
503   }
504 
505   return BaseT::getVectorInstrCost(Opcode, ValTy, Index);
506 }
507 
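// Cost of compares and selects. On NEON a vector select becomes a vbsl; a
// few wide select cases are special-cased because they legalise badly.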
508 int ARMTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
509                                    TTI::TargetCostKind CostKind,
510                                    const Instruction *I) {
511   int ISD = TLI->InstructionOpcodeToISD(Opcode);
512   // On NEON a vector select gets lowered to vbsl.
513   if (ST->hasNEON() && ValTy->isVectorTy() && ISD == ISD::SELECT) {
514     // Lowering of some vector selects is currently far from perfect.
515     static const TypeConversionCostTblEntry NEONVectorSelectTbl[] = {
516       { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4*4 + 1*2 + 1 },
517       { ISD::SELECT, MVT::v8i1, MVT::v8i64, 50 },
518       { ISD::SELECT, MVT::v16i1, MVT::v16i64, 100 }
519     };
520 
521     EVT SelCondTy = TLI->getValueType(DL, CondTy);
522     EVT SelValTy = TLI->getValueType(DL, ValTy);
523     if (SelCondTy.isSimple() && SelValTy.isSimple()) {
524       if (const auto *Entry = ConvertCostTableLookup(NEONVectorSelectTbl, ISD,
525                                                      SelCondTy.getSimpleVT(),
526                                                      SelValTy.getSimpleVT()))
527         return Entry->Cost;
528     }
529 
530     std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
531     return LT.first;
532   }
533 
534   int BaseCost = ST->hasMVEIntegerOps() && ValTy->isVectorTy()
535                      ? ST->getMVEVectorCostFactor()
536                      : 1;
537   return BaseCost * BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, CostKind,
538                                               I);
539 }
540 
541 int ARMTTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
542                                           const SCEV *Ptr) {
543   // Address computations in vectorized code with non-consecutive addresses will
544   // likely result in more instructions compared to scalar code where the
545   // computation can more often be merged into the index mode. The resulting
546   // extra micro-ops can significantly decrease throughput.
547   unsigned NumVectorInstToHideOverhead = 10;
548   int MaxMergeDistance = 64;
549 
550   if (ST->hasNEON()) {
551     if (Ty->isVectorTy() && SE &&
552         !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
553       return NumVectorInstToHideOverhead;
554 
555     // In many cases the address computation is not merged into the instruction
556     // addressing mode.
557     return 1;
558   }
559   return BaseT::getAddressComputationCost(Ty, SE, Ptr);
560 }
561 
562 bool ARMTTIImpl::isProfitableLSRChainElement(Instruction *I) {
563   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
564     // If a VCTP is part of a chain, it's already profitable and shouldn't be
565     // optimized, else LSR may block tail-predication.
566     switch (II->getIntrinsicID()) {
567     case Intrinsic::arm_mve_vctp8:
568     case Intrinsic::arm_mve_vctp16:
569     case Intrinsic::arm_mve_vctp32:
570     case Intrinsic::arm_mve_vctp64:
571       return true;
572     default:
573       break;
574     }
575   }
576   return false;
577 }
578 
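// Masked loads and stores are only legal with MVE: 8-bit elements are always
// fine, 16/32-bit elements need sufficient alignment, and 2-element vectors
// are not supported yet.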
579 bool ARMTTIImpl::isLegalMaskedLoad(Type *DataTy, MaybeAlign Alignment) {
580   if (!EnableMaskedLoadStores || !ST->hasMVEIntegerOps())
581     return false;
582 
583   if (auto *VecTy = dyn_cast<FixedVectorType>(DataTy)) {
584     // Don't support v2i1 yet.
585     if (VecTy->getNumElements() == 2)
586       return false;
587 
588     // We don't support extending fp types.
    unsigned VecWidth = DataTy->getPrimitiveSizeInBits();
590     if (VecWidth != 128 && VecTy->getElementType()->isFloatingPointTy())
591       return false;
592   }
593 
594   unsigned EltWidth = DataTy->getScalarSizeInBits();
595   return (EltWidth == 32 && (!Alignment || *Alignment >= 4)) ||
596          (EltWidth == 16 && (!Alignment || *Alignment >= 2)) ||
597          (EltWidth == 8);
598 }
599 
600 bool ARMTTIImpl::isLegalMaskedGather(Type *Ty, MaybeAlign Alignment) {
601   if (!EnableMaskedGatherScatters || !ST->hasMVEIntegerOps())
602     return false;
603 
604   // This method is called in 2 places:
605   //  - from the vectorizer with a scalar type, in which case we need to get
606   //  this as good as we can with the limited info we have (and rely on the cost
607   //  model for the rest).
608   //  - from the masked intrinsic lowering pass with the actual vector type.
609   // For MVE, we have a custom lowering pass that will already have custom
610   // legalised any gathers that we can to MVE intrinsics, and want to expand all
611   // the rest. The pass runs before the masked intrinsic lowering pass, so if we
612   // are here, we know we want to expand.
613   if (isa<VectorType>(Ty))
614     return false;
615 
616   unsigned EltWidth = Ty->getScalarSizeInBits();
617   return ((EltWidth == 32 && (!Alignment || *Alignment >= 4)) ||
618           (EltWidth == 16 && (!Alignment || *Alignment >= 2)) || EltWidth == 8);
619 }
620 
621 int ARMTTIImpl::getMemcpyCost(const Instruction *I) {
622   const MemCpyInst *MI = dyn_cast<MemCpyInst>(I);
623   assert(MI && "MemcpyInst expected");
624   ConstantInt *C = dyn_cast<ConstantInt>(MI->getLength());
625 
626   // To model the cost of a library call, we assume 1 for the call, and
627   // 3 for the argument setup.
628   const unsigned LibCallCost = 4;
629 
630   // If 'size' is not a constant, a library call will be generated.
631   if (!C)
632     return LibCallCost;
633 
634   const unsigned Size = C->getValue().getZExtValue();
635   const Align DstAlign = *MI->getDestAlign();
636   const Align SrcAlign = *MI->getSourceAlign();
637   const Function *F = I->getParent()->getParent();
638   const unsigned Limit = TLI->getMaxStoresPerMemmove(F->hasMinSize());
639   std::vector<EVT> MemOps;
640 
  // MemOps will be populated with a list of data types that need to be
  // loaded and stored. That's why we multiply the number of elements by 2 to
  // get the cost for this memcpy.
644   if (getTLI()->findOptimalMemOpLowering(
645           MemOps, Limit,
646           MemOp::Copy(Size, /*DstAlignCanChange*/ false, DstAlign, SrcAlign,
647                       /*IsVolatile*/ true),
648           MI->getDestAddressSpace(), MI->getSourceAddressSpace(),
649           F->getAttributes()))
650     return MemOps.size() * 2;
651 
652   // If we can't find an optimal memop lowering, return the default cost
653   return LibCallCost;
654 }
655 
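// Shuffle costs. Broadcast, reverse and select shuffles of the common NEON
// and MVE types map onto a few vdup/vrev/vext-style instructions per the
// tables below; everything else uses the base implementation.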
656 int ARMTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp,
657                                int Index, VectorType *SubTp) {
658   if (ST->hasNEON()) {
659     if (Kind == TTI::SK_Broadcast) {
660       static const CostTblEntry NEONDupTbl[] = {
661           // VDUP handles these cases.
662           {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
663           {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
664           {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
665           {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
666           {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
667           {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},
668 
669           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
670           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
671           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
672           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1}};
673 
674       std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
675 
676       if (const auto *Entry =
677               CostTableLookup(NEONDupTbl, ISD::VECTOR_SHUFFLE, LT.second))
678         return LT.first * Entry->Cost;
679     }
680     if (Kind == TTI::SK_Reverse) {
681       static const CostTblEntry NEONShuffleTbl[] = {
          // Reverse shuffles cost one instruction if we are shuffling within a
          // double word (vrev) or two if we shuffle a quad word (vrev, vext).
684           {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
685           {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
686           {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
687           {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
688           {ISD::VECTOR_SHUFFLE, MVT::v4i16, 1},
689           {ISD::VECTOR_SHUFFLE, MVT::v8i8, 1},
690 
691           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
692           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
693           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 2},
694           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 2}};
695 
696       std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
697 
698       if (const auto *Entry =
699               CostTableLookup(NEONShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second))
700         return LT.first * Entry->Cost;
701     }
702     if (Kind == TTI::SK_Select) {
703       static const CostTblEntry NEONSelShuffleTbl[] = {
          // Select shuffle cost table for ARM. Cost is the number of
          // instructions required to create the shuffled vector.
707 
708           {ISD::VECTOR_SHUFFLE, MVT::v2f32, 1},
709           {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
710           {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
711           {ISD::VECTOR_SHUFFLE, MVT::v2i32, 1},
712 
713           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
714           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
715           {ISD::VECTOR_SHUFFLE, MVT::v4i16, 2},
716 
717           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 16},
718 
719           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 32}};
720 
721       std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
722       if (const auto *Entry = CostTableLookup(NEONSelShuffleTbl,
723                                               ISD::VECTOR_SHUFFLE, LT.second))
724         return LT.first * Entry->Cost;
725     }
726   }
727   if (ST->hasMVEIntegerOps()) {
728     if (Kind == TTI::SK_Broadcast) {
729       static const CostTblEntry MVEDupTbl[] = {
730           // VDUP handles these cases.
731           {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
732           {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
733           {ISD::VECTOR_SHUFFLE, MVT::v16i8, 1},
734           {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
735           {ISD::VECTOR_SHUFFLE, MVT::v8f16, 1}};
736 
737       std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
738 
739       if (const auto *Entry = CostTableLookup(MVEDupTbl, ISD::VECTOR_SHUFFLE,
740                                               LT.second))
741         return LT.first * Entry->Cost * ST->getMVEVectorCostFactor();
742     }
743   }
744   int BaseCost = ST->hasMVEIntegerOps() && Tp->isVectorTy()
745                      ? ST->getMVEVectorCostFactor()
746                      : 1;
747   return BaseCost * BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
748 }
749 
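// Arithmetic costs. Vector division on NEON is costed as a library call (or
// a cheaper reciprocal-based sequence for some types), shifts that fold into
// a following ALU instruction are free, and MVE vector operations are scaled
// by the MVE cost factor.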
750 int ARMTTIImpl::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
751                                        TTI::TargetCostKind CostKind,
752                                        TTI::OperandValueKind Op1Info,
753                                        TTI::OperandValueKind Op2Info,
754                                        TTI::OperandValueProperties Opd1PropInfo,
755                                        TTI::OperandValueProperties Opd2PropInfo,
756                                        ArrayRef<const Value *> Args,
757                                        const Instruction *CxtI) {
758   int ISDOpcode = TLI->InstructionOpcodeToISD(Opcode);
759   std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
760 
761   if (ST->hasNEON()) {
762     const unsigned FunctionCallDivCost = 20;
763     const unsigned ReciprocalDivCost = 10;
764     static const CostTblEntry CostTbl[] = {
765       // Division.
      // These costs are somewhat random. Choose a cost of 20 to indicate that
      // vectorizing division (added function call) is going to be very
      // expensive.
768       // Double registers types.
769       { ISD::SDIV, MVT::v1i64, 1 * FunctionCallDivCost},
770       { ISD::UDIV, MVT::v1i64, 1 * FunctionCallDivCost},
771       { ISD::SREM, MVT::v1i64, 1 * FunctionCallDivCost},
772       { ISD::UREM, MVT::v1i64, 1 * FunctionCallDivCost},
773       { ISD::SDIV, MVT::v2i32, 2 * FunctionCallDivCost},
774       { ISD::UDIV, MVT::v2i32, 2 * FunctionCallDivCost},
775       { ISD::SREM, MVT::v2i32, 2 * FunctionCallDivCost},
776       { ISD::UREM, MVT::v2i32, 2 * FunctionCallDivCost},
777       { ISD::SDIV, MVT::v4i16,     ReciprocalDivCost},
778       { ISD::UDIV, MVT::v4i16,     ReciprocalDivCost},
779       { ISD::SREM, MVT::v4i16, 4 * FunctionCallDivCost},
780       { ISD::UREM, MVT::v4i16, 4 * FunctionCallDivCost},
781       { ISD::SDIV, MVT::v8i8,      ReciprocalDivCost},
782       { ISD::UDIV, MVT::v8i8,      ReciprocalDivCost},
783       { ISD::SREM, MVT::v8i8,  8 * FunctionCallDivCost},
784       { ISD::UREM, MVT::v8i8,  8 * FunctionCallDivCost},
785       // Quad register types.
786       { ISD::SDIV, MVT::v2i64, 2 * FunctionCallDivCost},
787       { ISD::UDIV, MVT::v2i64, 2 * FunctionCallDivCost},
788       { ISD::SREM, MVT::v2i64, 2 * FunctionCallDivCost},
789       { ISD::UREM, MVT::v2i64, 2 * FunctionCallDivCost},
790       { ISD::SDIV, MVT::v4i32, 4 * FunctionCallDivCost},
791       { ISD::UDIV, MVT::v4i32, 4 * FunctionCallDivCost},
792       { ISD::SREM, MVT::v4i32, 4 * FunctionCallDivCost},
793       { ISD::UREM, MVT::v4i32, 4 * FunctionCallDivCost},
794       { ISD::SDIV, MVT::v8i16, 8 * FunctionCallDivCost},
795       { ISD::UDIV, MVT::v8i16, 8 * FunctionCallDivCost},
796       { ISD::SREM, MVT::v8i16, 8 * FunctionCallDivCost},
797       { ISD::UREM, MVT::v8i16, 8 * FunctionCallDivCost},
798       { ISD::SDIV, MVT::v16i8, 16 * FunctionCallDivCost},
799       { ISD::UDIV, MVT::v16i8, 16 * FunctionCallDivCost},
800       { ISD::SREM, MVT::v16i8, 16 * FunctionCallDivCost},
801       { ISD::UREM, MVT::v16i8, 16 * FunctionCallDivCost},
802       // Multiplication.
803     };
804 
805     if (const auto *Entry = CostTableLookup(CostTbl, ISDOpcode, LT.second))
806       return LT.first * Entry->Cost;
807 
808     int Cost = BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
809                                              Op2Info,
810                                              Opd1PropInfo, Opd2PropInfo);
811 
812     // This is somewhat of a hack. The problem that we are facing is that SROA
813     // creates a sequence of shift, and, or instructions to construct values.
    // These sequences are recognized by the ISel and have zero cost. Not so for
815     // the vectorized code. Because we have support for v2i64 but not i64 those
816     // sequences look particularly beneficial to vectorize.
817     // To work around this we increase the cost of v2i64 operations to make them
818     // seem less beneficial.
819     if (LT.second == MVT::v2i64 &&
820         Op2Info == TargetTransformInfo::OK_UniformConstantValue)
821       Cost += 4;
822 
823     return Cost;
824   }
825 
826   // If this operation is a shift on arm/thumb2, it might well be folded into
827   // the following instruction, hence having a cost of 0.
828   auto LooksLikeAFreeShift = [&]() {
829     if (ST->isThumb1Only() || Ty->isVectorTy())
830       return false;
831 
832     if (!CxtI || !CxtI->hasOneUse() || !CxtI->isShift())
833       return false;
834     if (Op2Info != TargetTransformInfo::OK_UniformConstantValue)
835       return false;
836 
    // Folded into an ADC/ADD/AND/BIC/CMP/EOR/MVN/ORR/ORN/RSB/SBC/SUB
838     switch (cast<Instruction>(CxtI->user_back())->getOpcode()) {
839     case Instruction::Add:
840     case Instruction::Sub:
841     case Instruction::And:
842     case Instruction::Xor:
843     case Instruction::Or:
844     case Instruction::ICmp:
845       return true;
846     default:
847       return false;
848     }
849   };
850   if (LooksLikeAFreeShift())
851     return 0;
852 
853   int BaseCost = ST->hasMVEIntegerOps() && Ty->isVectorTy()
854                      ? ST->getMVEVectorCostFactor()
855                      : 1;
856 
  // The rest of this mostly follows what is done in
  // BaseT::getArithmeticInstrCost, without treating floats as more expensive
  // than scalars or increasing the costs for custom operations. The result is
  // also multiplied by the MVEVectorCostFactor where appropriate.
861   if (TLI->isOperationLegalOrCustomOrPromote(ISDOpcode, LT.second))
862     return LT.first * BaseCost;
863 
864   // Else this is expand, assume that we need to scalarize this op.
865   if (auto *VTy = dyn_cast<FixedVectorType>(Ty)) {
866     unsigned Num = VTy->getNumElements();
867     unsigned Cost = getArithmeticInstrCost(Opcode, Ty->getScalarType(),
868                                            CostKind);
869     // Return the cost of multiple scalar invocation plus the cost of
870     // inserting and extracting the values.
871     return BaseT::getScalarizationOverhead(VTy, Args) + Num * Cost;
872   }
873 
874   return BaseCost;
875 }
876 
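// Loads and stores. NEON vector-of-double accesses that are not 16-byte
// aligned are penalised, and MVE vector accesses are scaled by the MVE cost
// factor.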
877 int ARMTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
878                                 MaybeAlign Alignment, unsigned AddressSpace,
879                                 TTI::TargetCostKind CostKind,
880                                 const Instruction *I) {
881   std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
882 
883   if (ST->hasNEON() && Src->isVectorTy() &&
884       (Alignment && *Alignment != Align(16)) &&
885       cast<VectorType>(Src)->getElementType()->isDoubleTy()) {
886     // Unaligned loads/stores are extremely inefficient.
    // We need 4 uops for vst.1/vld.1 vs 1 uop for vldr/vstr.
888     return LT.first * 4;
889   }
890   int BaseCost = ST->hasMVEIntegerOps() && Src->isVectorTy()
891                      ? ST->getMVEVectorCostFactor()
892                      : 1;
893   return BaseCost * LT.first;
894 }
895 
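// Interleaved accesses that can be matched to vldN/vstN, or to the cheaper
// MVE vmovn/vrev patterns, are costed per legal 64/128-bit access; the rest
// fall back to the scalarising base implementation.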
896 int ARMTTIImpl::getInterleavedMemoryOpCost(
897     unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
898     unsigned Alignment, unsigned AddressSpace,
899     TTI::TargetCostKind CostKind,
900     bool UseMaskForCond, bool UseMaskForGaps) {
901   assert(Factor >= 2 && "Invalid interleave factor");
902   assert(isa<VectorType>(VecTy) && "Expect a vector type");
903 
  // vldN/vstN don't support vector types with i64/f64 elements.
905   bool EltIs64Bits = DL.getTypeSizeInBits(VecTy->getScalarType()) == 64;
906 
907   if (Factor <= TLI->getMaxSupportedInterleaveFactor() && !EltIs64Bits &&
908       !UseMaskForCond && !UseMaskForGaps) {
909     unsigned NumElts = cast<FixedVectorType>(VecTy)->getNumElements();
910     auto *SubVecTy =
911         FixedVectorType::get(VecTy->getScalarType(), NumElts / Factor);
912 
913     // vldN/vstN only support legal vector types of size 64 or 128 in bits.
914     // Accesses having vector types that are a multiple of 128 bits can be
915     // matched to more than one vldN/vstN instruction.
916     int BaseCost = ST->hasMVEIntegerOps() ? ST->getMVEVectorCostFactor() : 1;
917     if (NumElts % Factor == 0 &&
918         TLI->isLegalInterleavedAccessType(Factor, SubVecTy, DL))
919       return Factor * BaseCost * TLI->getNumInterleavedAccesses(SubVecTy, DL);
920 
921     // Some smaller than legal interleaved patterns are cheap as we can make
922     // use of the vmovn or vrev patterns to interleave a standard load. This is
923     // true for v4i8, v8i8 and v4i16 at least (but not for v4f16 as it is
924     // promoted differently). The cost of 2 here is then a load and vrev or
925     // vmovn.
926     if (ST->hasMVEIntegerOps() && Factor == 2 && NumElts / Factor > 2 &&
927         VecTy->isIntOrIntVectorTy() && DL.getTypeSizeInBits(SubVecTy) <= 64)
928       return 2 * BaseCost;
929   }
930 
931   return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
932                                            Alignment, AddressSpace, CostKind,
933                                            UseMaskForCond, UseMaskForGaps);
934 }
935 
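// Gathers and scatters. MVE gathers are modelled as serialised element
// accesses; patterns that cannot be matched to the MVE gather/scatter
// instructions get the higher scalarisation cost instead.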
936 unsigned ARMTTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *DataTy,
937                                             Value *Ptr, bool VariableMask,
938                                             unsigned Alignment,
939                                             TTI::TargetCostKind CostKind,
940                                             const Instruction *I) {
941   using namespace PatternMatch;
942   if (!ST->hasMVEIntegerOps() || !EnableMaskedGatherScatters)
943     return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
944                                          Alignment, CostKind, I);
945 
946   assert(DataTy->isVectorTy() && "Can't do gather/scatters on scalar!");
947   auto *VTy = cast<FixedVectorType>(DataTy);
948 
949   // TODO: Splitting, once we do that.
950 
951   unsigned NumElems = VTy->getNumElements();
952   unsigned EltSize = VTy->getScalarSizeInBits();
953   std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, DataTy);
954 
955   // For now, it is assumed that for the MVE gather instructions the loads are
956   // all effectively serialised. This means the cost is the scalar cost
957   // multiplied by the number of elements being loaded. This is possibly very
958   // conservative, but even so we still end up vectorising loops because the
959   // cost per iteration for many loops is lower than for scalar loops.
960   unsigned VectorCost = NumElems * LT.first;
961   // The scalarization cost should be a lot higher. We use the number of vector
962   // elements plus the scalarization overhead.
963   unsigned ScalarCost =
964       NumElems * LT.first + BaseT::getScalarizationOverhead(VTy, {});
965 
966   if (Alignment < EltSize / 8)
967     return ScalarCost;
968 
969   unsigned ExtSize = EltSize;
970   // Check whether there's a single user that asks for an extended type
971   if (I != nullptr) {
    // Depending on the caller of this function, a gather instruction will
    // either have opcode Instruction::Load or be a call to the masked_gather
    // intrinsic.
975     if ((I->getOpcode() == Instruction::Load ||
976          match(I, m_Intrinsic<Intrinsic::masked_gather>())) &&
977         I->hasOneUse()) {
978       const User *Us = *I->users().begin();
979       if (isa<ZExtInst>(Us) || isa<SExtInst>(Us)) {
        // Only allow valid type combinations.
981         unsigned TypeSize =
982             cast<Instruction>(Us)->getType()->getScalarSizeInBits();
983         if (((TypeSize == 32 && (EltSize == 8 || EltSize == 16)) ||
984              (TypeSize == 16 && EltSize == 8)) &&
985             TypeSize * NumElems == 128) {
986           ExtSize = TypeSize;
987         }
988       }
989     }
990     // Check whether the input data needs to be truncated
991     TruncInst *T;
992     if ((I->getOpcode() == Instruction::Store ||
993          match(I, m_Intrinsic<Intrinsic::masked_scatter>())) &&
994         (T = dyn_cast<TruncInst>(I->getOperand(0)))) {
995       // Only allow valid type combinations
996       unsigned TypeSize = T->getOperand(0)->getType()->getScalarSizeInBits();
997       if (((EltSize == 16 && TypeSize == 32) ||
998            (EltSize == 8 && (TypeSize == 32 || TypeSize == 16))) &&
999           TypeSize * NumElems == 128)
1000         ExtSize = TypeSize;
1001     }
1002   }
1003 
1004   if (ExtSize * NumElems != 128 || NumElems < 4)
1005     return ScalarCost;
1006 
1007   // Any (aligned) i32 gather will not need to be scalarised.
1008   if (ExtSize == 32)
1009     return VectorCost;
1010   // For smaller types, we need to ensure that the gep's inputs are correctly
1011   // extended from a small enough value. Other sizes (including i64) are
1012   // scalarized for now.
1013   if (ExtSize != 8 && ExtSize != 16)
1014     return ScalarCost;
1015 
1016   if (auto BC = dyn_cast<BitCastInst>(Ptr))
1017     Ptr = BC->getOperand(0);
1018   if (auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
1019     if (GEP->getNumOperands() != 2)
1020       return ScalarCost;
1021     unsigned Scale = DL.getTypeAllocSize(GEP->getResultElementType());
1022     // Scale needs to be correct (which is only relevant for i16s).
1023     if (Scale != 1 && Scale * 8 != ExtSize)
1024       return ScalarCost;
1025     // And we need to zext (not sext) the indexes from a small enough type.
1026     if (auto ZExt = dyn_cast<ZExtInst>(GEP->getOperand(1))) {
1027       if (ZExt->getOperand(0)->getType()->getScalarSizeInBits() <= ExtSize)
1028         return VectorCost;
1029     }
1030     return ScalarCost;
1031   }
1032   return ScalarCost;
1033 }
1034 
1035 bool ARMTTIImpl::isLoweredToCall(const Function *F) {
  if (!F->isIntrinsic())
    return BaseT::isLoweredToCall(F);
1038 
1039   // Assume all Arm-specific intrinsics map to an instruction.
1040   if (F->getName().startswith("llvm.arm"))
1041     return false;
1042 
1043   switch (F->getIntrinsicID()) {
1044   default: break;
1045   case Intrinsic::powi:
1046   case Intrinsic::sin:
1047   case Intrinsic::cos:
1048   case Intrinsic::pow:
1049   case Intrinsic::log:
1050   case Intrinsic::log10:
1051   case Intrinsic::log2:
1052   case Intrinsic::exp:
1053   case Intrinsic::exp2:
1054     return true;
1055   case Intrinsic::sqrt:
1056   case Intrinsic::fabs:
1057   case Intrinsic::copysign:
1058   case Intrinsic::floor:
1059   case Intrinsic::ceil:
1060   case Intrinsic::trunc:
1061   case Intrinsic::rint:
1062   case Intrinsic::nearbyint:
1063   case Intrinsic::round:
1064   case Intrinsic::canonicalize:
1065   case Intrinsic::lround:
1066   case Intrinsic::llround:
1067   case Intrinsic::lrint:
1068   case Intrinsic::llrint:
1069     if (F->getReturnType()->isDoubleTy() && !ST->hasFP64())
1070       return true;
1071     if (F->getReturnType()->isHalfTy() && !ST->hasFullFP16())
1072       return true;
    // Some operations can be handled by vector instructions; assume that
    // unsupported vectors will be expanded into supported scalar ones.
    // TODO: Handle scalar operations properly.
1076     return !ST->hasFPARMv8Base() && !ST->hasVFP2Base();
1077   case Intrinsic::masked_store:
1078   case Intrinsic::masked_load:
1079   case Intrinsic::masked_gather:
1080   case Intrinsic::masked_scatter:
1081     return !ST->hasMVEIntegerOps();
1082   case Intrinsic::sadd_with_overflow:
1083   case Intrinsic::uadd_with_overflow:
1084   case Intrinsic::ssub_with_overflow:
1085   case Intrinsic::usub_with_overflow:
1086   case Intrinsic::sadd_sat:
1087   case Intrinsic::uadd_sat:
1088   case Intrinsic::ssub_sat:
1089   case Intrinsic::usub_sat:
1090     return false;
1091   }
1092 
1093   return BaseT::isLoweredToCall(F);
1094 }
1095 
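// Decide whether a loop is a good candidate for a v8.1-M low-overhead
// hardware loop: the trip count must be computable and fit in 32 bits, and
// the body must not contain anything that may turn into a call, which would
// clobber LR.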
1096 bool ARMTTIImpl::isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE,
1097                                           AssumptionCache &AC,
1098                                           TargetLibraryInfo *LibInfo,
1099                                           HardwareLoopInfo &HWLoopInfo) {
1100   // Low-overhead branches are only supported in the 'low-overhead branch'
1101   // extension of v8.1-m.
1102   if (!ST->hasLOB() || DisableLowOverheadLoops) {
1103     LLVM_DEBUG(dbgs() << "ARMHWLoops: Disabled\n");
1104     return false;
1105   }
1106 
1107   if (!SE.hasLoopInvariantBackedgeTakenCount(L)) {
1108     LLVM_DEBUG(dbgs() << "ARMHWLoops: No BETC\n");
1109     return false;
1110   }
1111 
1112   const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
1113   if (isa<SCEVCouldNotCompute>(BackedgeTakenCount)) {
1114     LLVM_DEBUG(dbgs() << "ARMHWLoops: Uncomputable BETC\n");
1115     return false;
1116   }
1117 
1118   const SCEV *TripCountSCEV =
1119     SE.getAddExpr(BackedgeTakenCount,
1120                   SE.getOne(BackedgeTakenCount->getType()));
1121 
1122   // We need to store the trip count in LR, a 32-bit register.
1123   if (SE.getUnsignedRangeMax(TripCountSCEV).getBitWidth() > 32) {
1124     LLVM_DEBUG(dbgs() << "ARMHWLoops: Trip count does not fit into 32bits\n");
1125     return false;
1126   }
1127 
1128   // Making a call will trash LR and clear LO_BRANCH_INFO, so there's little
1129   // point in generating a hardware loop if that's going to happen.
1130   auto MaybeCall = [this](Instruction &I) {
1131     const ARMTargetLowering *TLI = getTLI();
1132     unsigned ISD = TLI->InstructionOpcodeToISD(I.getOpcode());
1133     EVT VT = TLI->getValueType(DL, I.getType(), true);
1134     if (TLI->getOperationAction(ISD, VT) == TargetLowering::LibCall)
1135       return true;
1136 
1137     // Check if an intrinsic will be lowered to a call and assume that any
1138     // other CallInst will generate a bl.
1139     if (auto *Call = dyn_cast<CallInst>(&I)) {
1140       if (isa<IntrinsicInst>(Call)) {
1141         if (const Function *F = Call->getCalledFunction())
1142           return isLoweredToCall(F);
1143       }
1144       return true;
1145     }
1146 
1147     // FPv5 provides conversions between integer, double-precision,
1148     // single-precision, and half-precision formats.
1149     switch (I.getOpcode()) {
1150     default:
1151       break;
1152     case Instruction::FPToSI:
1153     case Instruction::FPToUI:
1154     case Instruction::SIToFP:
1155     case Instruction::UIToFP:
1156     case Instruction::FPTrunc:
1157     case Instruction::FPExt:
1158       return !ST->hasFPARMv8Base();
1159     }
1160 
1161     // FIXME: Unfortunately the approach of checking the Operation Action does
1162     // not catch all cases of Legalization that use library calls. Our
1163     // Legalization step categorizes some transformations into library calls as
1164     // Custom, Expand or even Legal when doing type legalization. So for now
    // we have to special-case, for instance, the SDIV of 64-bit integers and
    // the use of floating-point emulation.
1167     if (VT.isInteger() && VT.getSizeInBits() >= 64) {
1168       switch (ISD) {
1169       default:
1170         break;
1171       case ISD::SDIV:
1172       case ISD::UDIV:
1173       case ISD::SREM:
1174       case ISD::UREM:
1175       case ISD::SDIVREM:
1176       case ISD::UDIVREM:
1177         return true;
1178       }
1179     }
1180 
1181     // Assume all other non-float operations are supported.
1182     if (!VT.isFloatingPoint())
1183       return false;
1184 
    // We'll need a library call to handle most floats when using soft-float.
1186     if (TLI->useSoftFloat()) {
1187       switch (I.getOpcode()) {
1188       default:
1189         return true;
1190       case Instruction::Alloca:
1191       case Instruction::Load:
1192       case Instruction::Store:
1193       case Instruction::Select:
1194       case Instruction::PHI:
1195         return false;
1196       }
1197     }
1198 
1199     // We'll need a libcall to perform double precision operations on a single
1200     // precision only FPU.
1201     if (I.getType()->isDoubleTy() && !ST->hasFP64())
1202       return true;
1203 
1204     // Likewise for half precision arithmetic.
1205     if (I.getType()->isHalfTy() && !ST->hasFullFP16())
1206       return true;
1207 
1208     return false;
1209   };
1210 
1211   auto IsHardwareLoopIntrinsic = [](Instruction &I) {
1212     if (auto *Call = dyn_cast<IntrinsicInst>(&I)) {
1213       switch (Call->getIntrinsicID()) {
1214       default:
1215         break;
1216       case Intrinsic::set_loop_iterations:
1217       case Intrinsic::test_set_loop_iterations:
1218       case Intrinsic::loop_decrement:
1219       case Intrinsic::loop_decrement_reg:
1220         return true;
1221       }
1222     }
1223     return false;
1224   };
1225 
1226   // Scan the instructions to see if there's any that we know will turn into a
1227   // call or if this loop is already a low-overhead loop.
1228   auto ScanLoop = [&](Loop *L) {
1229     for (auto *BB : L->getBlocks()) {
1230       for (auto &I : *BB) {
1231         if (MaybeCall(I) || IsHardwareLoopIntrinsic(I)) {
1232           LLVM_DEBUG(dbgs() << "ARMHWLoops: Bad instruction: " << I << "\n");
1233           return false;
1234         }
1235       }
1236     }
1237     return true;
1238   };
1239 
1240   // Visit inner loops.
1241   for (auto Inner : *L)
1242     if (!ScanLoop(Inner))
1243       return false;
1244 
1245   if (!ScanLoop(L))
1246     return false;
1247 
1248   // TODO: Check whether the trip count calculation is expensive. If L is the
1249   // inner loop but we know it has a low trip count, calculating that trip
1250   // count (in the parent loop) may be detrimental.
1251 
1252   LLVMContext &C = L->getHeader()->getContext();
1253   HWLoopInfo.CounterInReg = true;
1254   HWLoopInfo.IsNestingLegal = false;
1255   HWLoopInfo.PerformEntryTest = true;
1256   HWLoopInfo.CountType = Type::getInt32Ty(C);
1257   HWLoopInfo.LoopDecrement = ConstantInt::get(HWLoopInfo.CountType, 1);
1258   return true;
1259 }
1260 
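// Check whether a single instruction is allowed in a tail-predicated loop:
// no extra compares, no FP extends/truncs, and integer extends/truncs only
// as part of an extending load or a narrowing store.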
1261 static bool canTailPredicateInstruction(Instruction &I, int &ICmpCount) {
  // We don't allow icmps, and because we only look at single-block loops,
  // we simply count the icmps, i.e. there should only be 1 for the backedge.
1264   if (isa<ICmpInst>(&I) && ++ICmpCount > 1)
1265     return false;
1266 
1267   if (isa<FCmpInst>(&I))
1268     return false;
1269 
1270   // We could allow extending/narrowing FP loads/stores, but codegen is
1271   // too inefficient so reject this for now.
1272   if (isa<FPExtInst>(&I) || isa<FPTruncInst>(&I))
1273     return false;
1274 
1275   // Extends have to be extending-loads
1276   if (isa<SExtInst>(&I) || isa<ZExtInst>(&I) )
1277     if (!I.getOperand(0)->hasOneUse() || !isa<LoadInst>(I.getOperand(0)))
1278       return false;
1279 
1280   // Truncs have to be narrowing-stores
1281   if (isa<TruncInst>(&I) )
1282     if (!I.hasOneUse() || !isa<StoreInst>(*I.user_begin()))
1283       return false;
1284 
1285   return true;
1286 }
1287 
// To set up a tail-predicated loop, we need to know the total number of
// elements processed by that loop. Thus, we need to determine the element
// size and:
// 1) it should be uniform for all operations in the vector loop, so we
//    don't want any widening/narrowing operations, for example.
// 2) it should be smaller than 64 bits, because we don't have vector
//    operations that work on i64 elements.
// 3) elements should not be reversed or shuffled, to make sure the
//    tail-predication masks/predicates the right lanes.
1297 //
1298 static bool canTailPredicateLoop(Loop *L, LoopInfo *LI, ScalarEvolution &SE,
1299                                  const DataLayout &DL,
1300                                  const LoopAccessInfo *LAI) {
1301   PredicatedScalarEvolution PSE = LAI->getPSE();
1302   int ICmpCount = 0;
1303   int Stride = 0;
1304 
1305   LLVM_DEBUG(dbgs() << "tail-predication: checking allowed instructions\n");
1307   for (BasicBlock *BB : L->blocks()) {
1308     for (Instruction &I : BB->instructionsWithoutDebug()) {
1309       if (isa<PHINode>(&I))
1310         continue;
1311       if (!canTailPredicateInstruction(I, ICmpCount)) {
1312         LLVM_DEBUG(dbgs() << "Instruction not allowed: "; I.dump());
1313         return false;
1314       }
1315 
      Type *T = I.getType();
1317       if (T->isPointerTy())
1318         T = T->getPointerElementType();
1319 
1320       if (T->getScalarSizeInBits() > 32) {
1321         LLVM_DEBUG(dbgs() << "Unsupported Type: "; T->dump());
1322         return false;
1323       }
1324 
1325       if (isa<StoreInst>(I) || isa<LoadInst>(I)) {
1326         Value *Ptr = isa<LoadInst>(I) ? I.getOperand(0) : I.getOperand(1);
1327         int64_t NextStride = getPtrStride(PSE, Ptr, L);
1328         // TODO: for now only allow consecutive strides of 1. We could support
1329         // other strides as long as it is uniform, but let's keep it simple for
1330         // now.
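        // getPtrStride returns the stride in elements: 1 for consecutive
        // forward accesses, -1 for reversed accesses, and 0 if no constant
        // stride could be determined.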
1331         if (Stride == 0 && NextStride == 1) {
1332           Stride = NextStride;
1333           continue;
1334         }
1335         if (Stride != NextStride) {
1336           LLVM_DEBUG(dbgs() << "Different strides found, can't "
1337                                "tail-predicate\n.");
1338           return false;
1339         }
1340       }
1341     }
1342   }
1343 
1344   LLVM_DEBUG(dbgs() << "tail-predication: all instructions allowed!\n");
1345   return true;
1346 }
1347 
1348 bool ARMTTIImpl::preferPredicateOverEpilogue(Loop *L, LoopInfo *LI,
1349                                              ScalarEvolution &SE,
1350                                              AssumptionCache &AC,
1351                                              TargetLibraryInfo *TLI,
1352                                              DominatorTree *DT,
1353                                              const LoopAccessInfo *LAI) {
1354   if (DisableTailPredication)
1355     return false;
1356 
  // Creating a predicated vector loop is the first step for generating a
  // tail-predicated hardware loop, for which we need the MVE masked
  // load/store instructions:
1360   if (!ST->hasMVEIntegerOps())
1361     return false;
1362 
1363   // For now, restrict this to single block loops.
1364   if (L->getNumBlocks() > 1) {
1365     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: not a single block "
1366                          "loop.\n");
1367     return false;
1368   }
1369 
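  // L->empty() means L has no sub-loops, i.e. it is an innermost loop, which
  // is the only kind of loop we tail-fold.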
1370   assert(L->empty() && "preferPredicateOverEpilogue: inner-loop expected");
1371 
1372   HardwareLoopInfo HWLoopInfo(L);
1373   if (!HWLoopInfo.canAnalyze(*LI)) {
1374     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
1375                          "analyzable.\n");
1376     return false;
1377   }
1378 
  // This checks whether we have the low-overhead branch architecture
  // extension, and whether we will create a hardware loop:
1381   if (!isHardwareLoopProfitable(L, SE, AC, TLI, HWLoopInfo)) {
1382     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
1383                          "profitable.\n");
1384     return false;
1385   }
1386 
1387   if (!HWLoopInfo.isHardwareLoopCandidate(SE, *LI, *DT)) {
1388     LLVM_DEBUG(dbgs() << "preferPredicateOverEpilogue: hardware-loop is not "
1389                          "a candidate.\n");
1390     return false;
1391   }
1392 
1393   return canTailPredicateLoop(L, LI, SE, DL, LAI);
1394 }
1395 
bool ARMTTIImpl::emitGetActiveLaneMask(Loop *L, LoopInfo *LI,
                                       ScalarEvolution &SE,
                                       bool TailFolded) const {
  // TODO: if this loop is tail-folded, we want to emit the
  // llvm.get.active.lane.mask intrinsic so that it can be picked up by the
  // MVETailPredication pass, which needs to know the number of elements
  // processed by this vector loop.
1402   return false;
1403 }

void ARMTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
1405                                          TTI::UnrollingPreferences &UP) {
  // Currently, only enable these preferences for M-Class cores.
1407   if (!ST->isMClass())
1408     return BasicTTIImplBase::getUnrollingPreferences(L, SE, UP);
1409 
1410   // Disable loop unrolling for Oz and Os.
1411   UP.OptSizeThreshold = 0;
1412   UP.PartialOptSizeThreshold = 0;
1413   if (L->getHeader()->getParent()->hasOptSize())
1414     return;
1415 
1416   // Only enable on Thumb-2 targets.
1417   if (!ST->isThumb2())
1418     return;
1419 
1420   SmallVector<BasicBlock*, 4> ExitingBlocks;
1421   L->getExitingBlocks(ExitingBlocks);
1422   LLVM_DEBUG(dbgs() << "Loop has:\n"
1423                     << "Blocks: " << L->getNumBlocks() << "\n"
1424                     << "Exit blocks: " << ExitingBlocks.size() << "\n");
1425 
  // Allow at most one exit other than the latch. This acts as an early exit,
  // as it mirrors the profitability calculation of the runtime unroller.
1428   if (ExitingBlocks.size() > 2)
1429     return;
1430 
1431   // Limit the CFG of the loop body for targets with a branch predictor.
1432   // Allowing 4 blocks permits if-then-else diamonds in the body.
1433   if (ST->hasBranchPredictor() && L->getNumBlocks() > 4)
1434     return;
1435 
1436   // Scan the loop: don't unroll loops with calls as this could prevent
1437   // inlining.
1438   unsigned Cost = 0;
1439   for (auto *BB : L->getBlocks()) {
1440     for (auto &I : *BB) {
      // Don't unroll vectorised loops; MVE does not benefit from unrolling as
      // much as scalar code does.
1443       if (I.getType()->isVectorTy())
1444         return;
1445 
1446       if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
1447         if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
1448           if (!isLoweredToCall(F))
1449             continue;
1450         }
1451         return;
1452       }
1453 
1454       SmallVector<const Value*, 4> Operands(I.value_op_begin(),
1455                                             I.value_op_end());
1456       Cost += getUserCost(&I, Operands, TargetTransformInfo::TCK_CodeSize);
1457     }
1458   }
1459 
1460   LLVM_DEBUG(dbgs() << "Cost of loop: " << Cost << "\n");
1461 
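  // Enable partial and runtime unrolling, allow unrolling by the trip-count
  // upper bound, keep the remainder loop unrolled, and default the runtime
  // unroll count to 4. Unroll-and-jam is also enabled for small inner loops.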
1462   UP.Partial = true;
1463   UP.Runtime = true;
1464   UP.UpperBound = true;
1465   UP.UnrollRemainder = true;
1466   UP.DefaultUnrollRuntimeCount = 4;
1467   UP.UnrollAndJam = true;
1468   UP.UnrollAndJamInnerLoopThreshold = 60;
1469 
  // Forcing the unrolling of small loops can be very useful because of the
  // branch-taken cost of the backedge.
1472   if (Cost < 12)
1473     UP.Force = true;
1474 }
1475 
1476 bool ARMTTIImpl::useReductionIntrinsic(unsigned Opcode, Type *Ty,
1477                                        TTI::ReductionFlags Flags) const {
1478   assert(isa<VectorType>(Ty) && "Expected Ty to be a vector type");
1479   unsigned ScalarBits = Ty->getScalarSizeInBits();
1480   if (!ST->hasMVEIntegerOps())
1481     return false;
1482 
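  // Only integer add and compare-based (min/max) reductions are supported,
  // and only when the vector fills a whole number of 128-bit MVE registers,
  // e.g. <16 x i8>, <8 x i16> or <4 x i32>.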
1483   switch (Opcode) {
1484   case Instruction::FAdd:
1485   case Instruction::FMul:
1486   case Instruction::And:
1487   case Instruction::Or:
1488   case Instruction::Xor:
1489   case Instruction::Mul:
1490   case Instruction::FCmp:
1491     return false;
1492   case Instruction::ICmp:
1493   case Instruction::Add:
1494     return ScalarBits < 64 &&
1495            (ScalarBits * cast<FixedVectorType>(Ty)->getNumElements()) % 128 ==
1496                0;
1497   default:
1498     llvm_unreachable("Unhandled reduction opcode");
1499   }
1500   return false;
1501 }
1502