//===-- AArch64TargetTransformInfo.cpp - AArch64 specific TTI -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "AArch64TargetTransformInfo.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "aarch64tti"

/// \brief Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
int AArch64TTIImpl::getIntImmCost(int64_t Val) {
  // Check if the immediate can be encoded within an instruction.
  if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, 64))
    return 0;

  if (Val < 0)
    Val = ~Val;

  // Calculate how many moves we will need to materialize this constant.
  unsigned LZ = countLeadingZeros((uint64_t)Val);
  return (64 - LZ + 15) / 16;
}
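
// Rough worked example of the formula above (illustrative only):
// Val = 0x1234567812345678 is neither zero nor a valid logical immediate,
// countLeadingZeros returns 3, and the cost is (64 - 3 + 15) / 16 = 4,
// i.e. a MOVZ plus three MOVKs, one per 16-bit chunk. For Val = -1, ~Val
// is 0 and the result is 0, which is why the comment above allows a zero
// cost (a lone -1 is just a single MOVN).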

/// \brief Calculate the cost of materializing the given constant.
int AArch64TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Sign-extend all constants to a multiple of 64 bits.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1, Cost);
}
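
// Sketch of the chunking above (illustrative only): an i128 immediate whose
// low 64 bits are all ones (chunk cost 0) and whose high 64 bits are 0xFF
// (a logical immediate, chunk cost 0) sums to 0, and the final std::max
// clamps the result to the required minimum of 1.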

int AArch64TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx,
                                  const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    int NumConstants = (BitSize + 63) / 64;
    int Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<int>(TTI::TCC_Free)
               : Cost;
  }
  return AArch64TTIImpl::getIntImmCost(Imm, Ty);
}
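
// Illustrative reading of the logic above: for `add i64 %x, 42`, Idx == 1
// matches ImmIdx and materializing 42 costs a single instruction, so
// Cost <= NumConstants * TCC_Basic holds and the immediate is reported as
// TCC_Free, keeping constant hoisting away; an immediate needing several
// MOVKs would report its real cost and become a hoisting candidate.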

int AArch64TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
                                  const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if (Idx == 1) {
      int NumConstants = (BitSize + 63) / 64;
      int Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty);
      return (Cost <= NumConstants * TTI::TCC_Basic)
                 ? static_cast<int>(TTI::TCC_Free)
                 : Cost;
    }
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return AArch64TTIImpl::getIntImmCost(Imm, Ty);
}

TargetTransformInfo::PopcntSupportKind
AArch64TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (TyWidth == 32 || TyWidth == 64)
    return TTI::PSK_FastHardware;
  // TODO: AArch64TargetLowering::LowerCTPOP() supports 128-bit popcount.
  return TTI::PSK_Software;
}
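
// Background sketch (an assumption about the common lowering, not checked
// per subtarget): scalar CTPOP on AArch64 is typically lowered by moving
// the value into a vector register, counting bits with the byte-wise CNT
// instruction, and summing the bytes with a horizontal add (ADDV/UADDLV),
// which is why the 32- and 64-bit widths report PSK_FastHardware above.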

int AArch64TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                     const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);

  static const TypeConversionCostTblEntry
  ConversionTbl[] = {
    { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32,  1 },
    { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64,  0 },
    { ISD::TRUNCATE, MVT::v8i8,  MVT::v8i32,  3 },
    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 6 },

    // The number of shll instructions for the extension.
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32, 2 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16, 2 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16, 2 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i8,  7 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i8,  7 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i16, 6 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i16, 6 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },

    // LowerVectorINT_TO_FP:
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },

    // Complex: to v2f32
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },

    // Complex: to v4f32
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8,  4 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8,  3 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },

    // Complex: to v8f32
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8,  10 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8,  10 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },

    // Complex: to v16f32
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },

    // Complex: to v2f64
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },

    // LowerVectorFP_TO_INT
    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f32, 1 },
    { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },

    // Complex, from v2f32: legal type is v2i32 (no cost) or v2i64 (1 ext).
    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 2 },
    { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 1 },
    { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 2 },
    { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f32, 1 },

    // Complex, from v4f32: legal type is v4i16, 1 narrowing => ~2.
    { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_SINT, MVT::v4i8,  MVT::v4f32, 2 },
    { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_UINT, MVT::v4i8,  MVT::v4f32, 2 },

    // Complex, from v2f64: legal type is v2i32, 1 narrowing => ~2.
    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f64, 2 },
  };

  if (const auto *Entry = ConvertCostTableLookup(ConversionTbl, ISD,
                                                 DstTy.getSimpleVT(),
                                                 SrcTy.getSimpleVT()))
    return Entry->Cost;

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}
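
// Illustrative table lookups (costs are the table's own estimates): a
// `trunc <4 x i32> to <4 x i16>` maps to the {TRUNCATE, v4i16, v4i32, 1}
// entry, matching a single XTN; a `sext <8 x i8> to <8 x i32>` hits the
// {SIGN_EXTEND, v8i32, v8i8, 3} entry, roughly the SSHLL/SSHLL2 sequence
// needed to widen across two 128-bit registers.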

int AArch64TTIImpl::getExtractWithExtendCost(unsigned Opcode, Type *Dst,
                                             VectorType *VecTy,
                                             unsigned Index) {

  // Make sure we were given a valid extend opcode.
  assert((Opcode == Instruction::SExt || Opcode == Instruction::ZExt) &&
         "Invalid opcode");

  // We are extending an element we extract from a vector, so the source type
  // of the extend is the element type of the vector.
  auto *Src = VecTy->getElementType();

  // Sign- and zero-extends are for integer types only.
  assert(isa<IntegerType>(Dst) && isa<IntegerType>(Src) && "Invalid type");

  // Get the cost for the extract. We compute the cost (if any) for the extend
  // below.
  auto Cost = getVectorInstrCost(Instruction::ExtractElement, VecTy, Index);

  // Legalize the types.
  auto VecLT = TLI->getTypeLegalizationCost(DL, VecTy);
  auto DstVT = TLI->getValueType(DL, Dst);
  auto SrcVT = TLI->getValueType(DL, Src);

  // If the resulting type is still a vector and the destination type is legal,
  // we may get the extension for free. If not, get the default cost for the
  // extend.
  if (!VecLT.second.isVector() || !TLI->isTypeLegal(DstVT))
    return Cost + getCastInstrCost(Opcode, Dst, Src);

  // The destination type should be larger than the element type. If not, get
  // the default cost for the extend.
  if (DstVT.getSizeInBits() < SrcVT.getSizeInBits())
    return Cost + getCastInstrCost(Opcode, Dst, Src);

  switch (Opcode) {
  default:
    llvm_unreachable("Opcode should be either SExt or ZExt");

  // For sign-extends, we only need a smov, which performs the extension
  // automatically.
  case Instruction::SExt:
    return Cost;

  // For zero-extends, the extend is performed automatically by a umov unless
  // the destination type is i64 and the element type is i8 or i16.
  case Instruction::ZExt:
    if (DstVT.getSizeInBits() != 64u || SrcVT.getSizeInBits() == 32u)
      return Cost;
  }

  // If we are unable to perform the extend for free, get the default cost.
  return Cost + getCastInstrCost(Opcode, Dst, Src);
}
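
// Example of the free-extend case above (illustrative): for
//   %e = extractelement <4 x i32> %v, i64 1
//   %x = sext i32 %e to i64
// the SMOV that moves the lane into a GPR sign-extends as it goes, so only
// the extract cost is charged; the unsigned counterpart uses UMOV, which
// implicitly zeroes the upper bits when moving a 32-bit lane.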

int AArch64TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                       unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Index != -1U) {
    // Legalize the type.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // The element at index zero is already inside the vector.
    if (Index == 0)
      return 0;
  }

  // All other insert/extracts cost this much.
  return ST->getVectorInsertExtractBaseCost();
}
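
// Illustrative consequence of the index normalization above: a <4 x i64>
// legalizes to two v2i64 registers, so indices 0 and 2 both normalize to
// lane 0 of their half and cost 0, while indices 1 and 3 normalize to
// lane 1 and pay the subtarget's insert/extract base cost.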

int AArch64TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Opd1Info,
    TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  if (ISD == ISD::SDIV &&
      Opd2Info == TargetTransformInfo::OK_UniformConstantValue &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    // On AArch64, scalar signed division by a power-of-two constant is
    // normally expanded to the sequence ADD + CMP + SELECT + SRA.
    // The OperandValue properties may not be the same as those of the
    // previous operation; conservatively assume OP_None.
    int Cost = getArithmeticInstrCost(Instruction::Add, Ty, Opd1Info, Opd2Info,
                                      TargetTransformInfo::OP_None,
                                      TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Sub, Ty, Opd1Info, Opd2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Select, Ty, Opd1Info, Opd2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::AShr, Ty, Opd1Info, Opd2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    return Cost;
  }

  switch (ISD) {
  default:
    return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                         Opd1PropInfo, Opd2PropInfo);
  case ISD::ADD:
  case ISD::MUL:
  case ISD::XOR:
  case ISD::OR:
  case ISD::AND:
    // These nodes are marked as 'custom' for combining purposes only.
    // We know that they are legal. See LowerAdd in ISelLowering.
    return 1 * LT.first;
  }
}
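
// Worked example of the SDIV special case (illustrative): `sdiv i32 %x, 4`
// is typically selected as
//   add  w8, w0, #3        // bias negative dividends
//   cmp  w0, #0
//   csel w8, w8, w0, lt    // pick the biased value only when x < 0
//   asr  w0, w8, #2        // arithmetic shift completes the division
// so the cost is modeled as the sum of the four component operations
// rather than the (much higher) cost of a real SDIV.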

int AArch64TTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
                                              const SCEV *Ptr) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where
  // the computation can more often be merged into the index mode. The
  // resulting extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;
  int MaxMergeDistance = 64;

  if (Ty->isVectorTy() && SE &&
      !BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
    return NumVectorInstToHideOverhead;

  // In many cases the address computation is not merged into the instruction
  // addressing mode.
  return 1;
}
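
// Illustrative reading: a vector access such as A[3 * i] over an i32 array
// has a constant stride of 12 bytes; constant strides up to 64 bytes keep
// the cheap cost of 1, while unknown or larger strides return 10, so the
// vectorizer only pays off once enough work hides the address overhead.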

int AArch64TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                       Type *CondTy, const Instruction *I) {

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  // We don't lower vector selects well when the value type is wider than the
  // register width.
  if (ValTy->isVectorTy() && ISD == ISD::SELECT) {
    // We would need this many instructions to hide the scalarization happening.
    const int AmortizationCost = 20;
    static const TypeConversionCostTblEntry
    VectorSelectTbl[] = {
      { ISD::SELECT, MVT::v16i1, MVT::v16i16, 16 },
      { ISD::SELECT, MVT::v8i1,  MVT::v8i32,  8 },
      { ISD::SELECT, MVT::v16i1, MVT::v16i32, 16 },
      { ISD::SELECT, MVT::v4i1,  MVT::v4i64,  4 * AmortizationCost },
      { ISD::SELECT, MVT::v8i1,  MVT::v8i64,  8 * AmortizationCost },
      { ISD::SELECT, MVT::v16i1, MVT::v16i64, 16 * AmortizationCost }
    };

    EVT SelCondTy = TLI->getValueType(DL, CondTy);
    EVT SelValTy = TLI->getValueType(DL, ValTy);
    if (SelCondTy.isSimple() && SelValTy.isSimple()) {
      if (const auto *Entry = ConvertCostTableLookup(VectorSelectTbl, ISD,
                                                     SelCondTy.getSimpleVT(),
                                                     SelValTy.getSimpleVT()))
        return Entry->Cost;
    }
  }
  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
}
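
// Illustrative table hit: `select <4 x i1> %c, <4 x i64> %a, <4 x i64> %b`
// has a 256-bit value type that does not fit in one NEON register, so it
// is charged 4 * 20 = 80, steering the vectorizer away unless the
// surrounding code amortizes the scalarized select.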

int AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Ty,
                                    unsigned Alignment, unsigned AddressSpace,
                                    const Instruction *I) {
  auto LT = TLI->getTypeLegalizationCost(DL, Ty);

  if (ST->isMisaligned128StoreSlow() && Opcode == Instruction::Store &&
      LT.second.is128BitVector() && Alignment < 16) {
    // Unaligned stores are extremely inefficient. We don't split all
    // unaligned 128-bit stores because of the negative impact that doing so
    // has shown in practice on inlined block copy code.
    // We make such stores expensive so that we will only vectorize if there
    // are 6 other instructions getting vectorized.
    const int AmortizationCost = 6;

    return LT.first * 2 * AmortizationCost;
  }

  if (Ty->isVectorTy() && Ty->getVectorElementType()->isIntegerTy(8) &&
      Ty->getVectorNumElements() < 8) {
    // We scalarize the loads/stores because there is no v.4b register and we
    // have to promote the elements to v.4h.
    unsigned NumVecElts = Ty->getVectorNumElements();
    unsigned NumVectorizableInstsToAmortize = NumVecElts * 2;
    // We generate 2 instructions per vector element.
    return NumVectorizableInstsToAmortize * NumVecElts * 2;
  }

  return LT.first;
}
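
// Two illustrative evaluations of the model above: on a subtarget where
// misaligned 128-bit stores are slow, a `store <4 x i32>` with 4-byte
// alignment costs 1 * 2 * 6 = 12; and a `load <4 x i8>` is charged
// (4 * 2) * 4 * 2 = 64, reflecting the per-element scalarization since
// there is no 32-bit v.4b vector register.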

int AArch64TTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                               unsigned Factor,
                                               ArrayRef<unsigned> Indices,
                                               unsigned Alignment,
                                               unsigned AddressSpace) {
  assert(Factor >= 2 && "Invalid interleave factor");
  assert(isa<VectorType>(VecTy) && "Expect a vector type");

  if (Factor <= TLI->getMaxSupportedInterleaveFactor()) {
    unsigned NumElts = VecTy->getVectorNumElements();
    auto *SubVecTy = VectorType::get(VecTy->getScalarType(), NumElts / Factor);

    // ldN/stN only support legal vector types that are 64 or 128 bits in
    // size. Accesses having vector types that are a multiple of 128 bits
    // can be matched to more than one ldN/stN instruction.
    if (NumElts % Factor == 0 &&
        TLI->isLegalInterleavedAccessType(SubVecTy, DL))
      return Factor * TLI->getNumInterleavedAccesses(SubVecTy, DL);
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace);
}
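
// Illustrative case: an interleaved load of <8 x i32> with Factor == 2
// (even/odd lanes) has SubVecTy <4 x i32>, a legal 128-bit type, so the
// returned cost is 2 * 1 = 2, modeling a single LD2 that fills two
// 128-bit registers.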

int AArch64TTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) {
  int Cost = 0;
  for (auto *I : Tys) {
    if (!I->isVectorTy())
      continue;
    if (I->getScalarSizeInBits() * I->getVectorNumElements() == 128)
      Cost += getMemoryOpCost(Instruction::Store, I, 128, 0) +
              getMemoryOpCost(Instruction::Load, I, 128, 0);
  }
  return Cost;
}

unsigned AArch64TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  return ST->getMaxInterleaveFactor();
}

void AArch64TTIImpl::getUnrollingPreferences(Loop *L,
                                             TTI::UnrollingPreferences &UP) {
  // Enable partial unrolling and runtime unrolling.
  BaseT::getUnrollingPreferences(L, UP);

  // An inner loop is more likely to be hot, and the runtime check can be
  // hoisted out of it by the LICM pass, so the overhead is lower; use a
  // larger threshold to unroll more inner loops.
  if (L->getLoopDepth() > 1)
    UP.PartialThreshold *= 2;

  // Disable partial & runtime unrolling on -Os.
  UP.PartialOptSizeThreshold = 0;
}

Value *AArch64TTIImpl::getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                                         Type *ExpectedType) {
  switch (Inst->getIntrinsicID()) {
  default:
    return nullptr;
  case Intrinsic::aarch64_neon_st2:
  case Intrinsic::aarch64_neon_st3:
  case Intrinsic::aarch64_neon_st4: {
    // The expected type must be a struct whose elements match the values
    // being stored (all operands except the trailing pointer).
    StructType *ST = dyn_cast<StructType>(ExpectedType);
    if (!ST)
      return nullptr;
    unsigned NumElts = Inst->getNumArgOperands() - 1;
    if (ST->getNumElements() != NumElts)
      return nullptr;
    for (unsigned i = 0, e = NumElts; i != e; ++i) {
      if (Inst->getArgOperand(i)->getType() != ST->getElementType(i))
        return nullptr;
    }
    // Rebuild the stored values as a struct so that a subsequent matching
    // load can reuse them instead of reloading from memory.
    Value *Res = UndefValue::get(ExpectedType);
    IRBuilder<> Builder(Inst);
    for (unsigned i = 0, e = NumElts; i != e; ++i) {
      Value *L = Inst->getArgOperand(i);
      Res = Builder.CreateInsertValue(Res, L, i);
    }
    return Res;
  }
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_ld4:
    if (Inst->getType() == ExpectedType)
      return Inst;
    return nullptr;
  }
}
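
// Illustrative use (IR sketch, type mangling elided): given a call
//   @llvm.aarch64.neon.st2.v4i32(<4 x i32> %a, <4 x i32> %b, i8* %p)
// and an ExpectedType of { <4 x i32>, <4 x i32> }, the code above rebuilds
// the struct from %a and %b, so a later matching ld2 from %p can reuse the
// values instead of reloading them (this hook is driven by EarlyCSE via
// getTgtMemIntrinsic below).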

bool AArch64TTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                        MemIntrinsicInfo &Info) {
  switch (Inst->getIntrinsicID()) {
  default:
    break;
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_ld4:
    Info.ReadMem = true;
    Info.WriteMem = false;
    Info.PtrVal = Inst->getArgOperand(0);
    break;
  case Intrinsic::aarch64_neon_st2:
  case Intrinsic::aarch64_neon_st3:
  case Intrinsic::aarch64_neon_st4:
    Info.ReadMem = false;
    Info.WriteMem = true;
    Info.PtrVal = Inst->getArgOperand(Inst->getNumArgOperands() - 1);
    break;
  }

  switch (Inst->getIntrinsicID()) {
  default:
    return false;
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_st2:
    Info.MatchingId = VECTOR_LDST_TWO_ELEMENTS;
    break;
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_st3:
    Info.MatchingId = VECTOR_LDST_THREE_ELEMENTS;
    break;
  case Intrinsic::aarch64_neon_ld4:
  case Intrinsic::aarch64_neon_st4:
    Info.MatchingId = VECTOR_LDST_FOUR_ELEMENTS;
    break;
  }
  return true;
}

/// See if \p I should be considered for address type promotion. We check if
/// \p I is a sext with the right type that is used in memory accesses. If it
/// is used in a "complex" getelementptr, we allow it to be promoted without
/// finding other sext instructions that sign extended the same initial value.
/// A getelementptr is considered "complex" if it has more than 2 operands.
bool AArch64TTIImpl::shouldConsiderAddressTypePromotion(
    const Instruction &I, bool &AllowPromotionWithoutCommonHeader) {
  bool Considerable = false;
  AllowPromotionWithoutCommonHeader = false;
  if (!isa<SExtInst>(&I))
    return false;
  Type *ConsideredSExtType =
      Type::getInt64Ty(I.getParent()->getParent()->getContext());
  if (I.getType() != ConsideredSExtType)
    return false;
  // See if the sext is the one with the right type and used in at least one
  // GetElementPtrInst.
  for (const User *U : I.users()) {
    if (const GetElementPtrInst *GEPInst = dyn_cast<GetElementPtrInst>(U)) {
      Considerable = true;
      // A getelementptr is considered "complex" if it has more than 2
      // operands. We will promote a SExt used in such a complex GEP, as we
      // expect some of the computation to be merged if it is done on 64 bits.
      if (GEPInst->getNumOperands() > 2) {
        AllowPromotionWithoutCommonHeader = true;
        break;
      }
    }
  }
  return Considerable;
}
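
// Illustrative trigger (IR sketch): for
//   %idx = sext i32 %i to i64
//   %gep = getelementptr [16 x i32], [16 x i32]* %base, i64 0, i64 %idx
// the GEP has more than 2 operands, so the sext is considered promotable
// without requiring a common header sext, on the expectation that the
// 64-bit index arithmetic folds into the address computation.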

unsigned AArch64TTIImpl::getCacheLineSize() {
  return ST->getCacheLineSize();
}

unsigned AArch64TTIImpl::getPrefetchDistance() {
  return ST->getPrefetchDistance();
}

unsigned AArch64TTIImpl::getMinPrefetchStride() {
  return ST->getMinPrefetchStride();
}

unsigned AArch64TTIImpl::getMaxPrefetchIterationsAhead() {
  return ST->getMaxPrefetchIterationsAhead();
}
678