//===-- AArch64TargetTransformInfo.cpp - AArch64 specific TTI -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "AArch64TargetTransformInfo.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "aarch64tti"

/// \brief Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
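///
/// For example (illustrative only): 0x12345678 is not a logical immediate and
/// has 35 leading zeros as a 64-bit value, so the formula below yields
/// (64 - 35 + 15) / 16 = 2, matching a MOVZ + MOVK pair.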
int AArch64TTIImpl::getIntImmCost(int64_t Val) {
  // Check if the immediate can be encoded within an instruction.
  if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, 64))
    return 0;

  if (Val < 0)
    Val = ~Val;

  // Calculate how many moves we will need to materialize this constant.
  unsigned LZ = countLeadingZeros((uint64_t)Val);
  return (64 - LZ + 15) / 16;
}

/// \brief Calculate the cost of materializing the given constant.
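///
/// A constant wider than 64 bits is sign-extended to a multiple of 64 bits,
/// split into 64-bit chunks, and each chunk is costed separately; e.g. an
/// i128 constant whose high chunk is all sign bits costs the same as its low
/// 64 bits alone (with a floor of one instruction).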
int AArch64TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Sign-extend all constants to a multiple of 64 bits.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1, Cost);
}

int AArch64TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx,
                                  const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr.
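    // Reporting a cost above TCC_Free here is what persuades constant
    // hoisting to pull the base address out; all other GEP operands stay free.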
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

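  // If the immediate occupies the operand slot this instruction can typically
  // encode directly, treat it as free when it is no more expensive to
  // materialize than NumConstants basic ops; otherwise report the real cost
  // so constant hoisting can step in.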
  if (Idx == ImmIdx) {
    int NumConstants = (BitSize + 63) / 64;
    int Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<int>(TTI::TCC_Free)
               : Cost;
  }
  return AArch64TTIImpl::getIntImmCost(Imm, Ty);
}

int AArch64TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
                                  const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if (Idx == 1) {
      int NumConstants = (BitSize + 63) / 64;
      int Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty);
      return (Cost <= NumConstants * TTI::TCC_Basic)
                 ? static_cast<int>(TTI::TCC_Free)
                 : Cost;
    }
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return AArch64TTIImpl::getIntImmCost(Imm, Ty);
}

TargetTransformInfo::PopcntSupportKind
AArch64TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (TyWidth == 32 || TyWidth == 64)
    return TTI::PSK_FastHardware;
  // TODO: AArch64TargetLowering::LowerCTPOP() supports 128-bit popcount.
  return TTI::PSK_Software;
}

int AArch64TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);

  static const TypeConversionCostTblEntry
  ConversionTbl[] = {
    { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32,  1 },
    { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64,  0 },
    { ISD::TRUNCATE, MVT::v8i8,  MVT::v8i32,  3 },
    { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 6 },

    // The number of shll instructions for the extension.
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32, 2 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32, 2 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16, 2 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16, 2 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i8,  7 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i8,  7 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i16, 6 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i16, 6 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 6 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 6 },

    // LowerVectorINT_TO_FP:
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },

    // Complex: to v2f32
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },

    // Complex: to v4f32
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8,  4 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8,  3 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },

    // Complex: to v8f32
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8,  10 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8,  10 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },

    // Complex: to v16f32
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 21 },

    // Complex: to v2f64
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },

    // LowerVectorFP_TO_INT
    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f32, 1 },
    { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },

    // Complex, from v2f32: legal type is v2i32 (no cost) or v2i64 (1 ext).
    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 2 },
    { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 1 },
    { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 2 },
    { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f32, 1 },

    // Complex, from v4f32: legal type is v4i16, 1 narrowing => ~2.
    { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_SINT, MVT::v4i8,  MVT::v4f32, 2 },
    { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_UINT, MVT::v4i8,  MVT::v4f32, 2 },

    // Complex, from v2f64: legal type is v2i32, 1 narrowing => ~2.
    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f64, 2 },
  };

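  // A table hit takes precedence over the generic cost; e.g. sign-extending
  // v8i8 to v8i32 is modeled as 3 shll-based instructions above.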
  if (const auto *Entry = ConvertCostTableLookup(ConversionTbl, ISD,
                                                 DstTy.getSimpleVT(),
                                                 SrcTy.getSimpleVT()))
    return Entry->Cost;

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}

int AArch64TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                       unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Index != -1U) {
    // Legalize the type.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;
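    // For example, <8 x i64> legalizes to four <2 x i64> registers, so lane 5
    // becomes lane 5 % 2 == 1 of one of the split parts.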

    // The element at index zero is already inside the vector.
    if (Index == 0)
      return 0;
  }

  // All other inserts/extracts cost this much.
  if (ST->isKryo())
    return 2;
  return 3;
}

int AArch64TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Opd1Info,
    TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  if (ISD == ISD::SDIV &&
      Opd2Info == TargetTransformInfo::OK_UniformConstantValue &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    // On AArch64, scalar signed division by a power-of-two constant is
    // normally expanded to the sequence ADD + CMP + SELECT + SRA.
    // The OperandValue properties may not be the same as those of the
    // previous operation; conservatively assume OP_None.
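    // Note the CMP step is approximated below with a Sub, which is what a
    // compare lowers to on AArch64.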
    int Cost = getArithmeticInstrCost(Instruction::Add, Ty, Opd1Info, Opd2Info,
                                      TargetTransformInfo::OP_None,
                                      TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Sub, Ty, Opd1Info, Opd2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Select, Ty, Opd1Info, Opd2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::AShr, Ty, Opd1Info, Opd2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    return Cost;
  }

  switch (ISD) {
  default:
    return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                         Opd1PropInfo, Opd2PropInfo);
  case ISD::ADD:
  case ISD::MUL:
  case ISD::XOR:
  case ISD::OR:
  case ISD::AND:
    // These nodes are marked as 'custom' for combining purposes only.
    // We know that they are legal. See LowerAdd in ISelLowering.
    return 1 * LT.first;
  }
}

int AArch64TTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
  // Address computations in vectorized code with non-consecutive addresses will
  // likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  if (Ty->isVectorTy() && IsComplex)
    return NumVectorInstToHideOverhead;

  // In many cases the address computation is not merged into the instruction
  // addressing mode.
  return 1;
}

int AArch64TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                       Type *CondTy) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  // We don't lower vector selects well when they are wider than the register
  // width.
  if (ValTy->isVectorTy() && ISD == ISD::SELECT) {
    // We would need this many instructions to hide the scalarization happening.
    const int AmortizationCost = 20;
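    // The per-lane entries below charge roughly one instruction per element;
    // the 64-bit-element cases scalarize so poorly that they also carry the
    // AmortizationCost multiplier.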
    static const TypeConversionCostTblEntry
    VectorSelectTbl[] = {
      { ISD::SELECT, MVT::v16i1, MVT::v16i16, 16 },
      { ISD::SELECT, MVT::v8i1,  MVT::v8i32,  8 },
      { ISD::SELECT, MVT::v16i1, MVT::v16i32, 16 },
      { ISD::SELECT, MVT::v4i1,  MVT::v4i64,  4 * AmortizationCost },
      { ISD::SELECT, MVT::v8i1,  MVT::v8i64,  8 * AmortizationCost },
      { ISD::SELECT, MVT::v16i1, MVT::v16i64, 16 * AmortizationCost }
    };

    EVT SelCondTy = TLI->getValueType(DL, CondTy);
    EVT SelValTy = TLI->getValueType(DL, ValTy);
    if (SelCondTy.isSimple() && SelValTy.isSimple()) {
      if (const auto *Entry = ConvertCostTableLookup(VectorSelectTbl, ISD,
                                                     SelCondTy.getSimpleVT(),
                                                     SelValTy.getSimpleVT()))
        return Entry->Cost;
    }
  }
  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}

int AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                    unsigned Alignment, unsigned AddressSpace) {
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);

  if (Opcode == Instruction::Store && Src->isVectorTy() && Alignment != 16 &&
      Src->getVectorElementType()->isIntegerTy(64)) {
    // Unaligned stores are extremely inefficient. We don't split unaligned
    // v2i64 stores because of the negative impact that would have in practice
    // on inlined memcpy code.
    // We make v2i64 stores expensive so that we will only vectorize if there
    // are 6 other instructions getting vectorized.
    int AmortizationCost = 6;
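    // For example, a misaligned v2i64 store (one legal register, LT.first ==
    // 1) is costed 1 * 2 * 6 = 12 rather than 1.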

    return LT.first * 2 * AmortizationCost;
  }

  if (Src->isVectorTy() && Src->getVectorElementType()->isIntegerTy(8) &&
      Src->getVectorNumElements() < 8) {
    // We scalarize the loads/stores because there is no v.4b register and we
    // have to promote the elements to v.4h.
    unsigned NumVecElts = Src->getVectorNumElements();
    unsigned NumVectorizableInstsToAmortize = NumVecElts * 2;
    // We generate 2 instructions per vector element.
    return NumVectorizableInstsToAmortize * NumVecElts * 2;
  }

  return LT.first;
}

int AArch64TTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                               unsigned Factor,
                                               ArrayRef<unsigned> Indices,
                                               unsigned Alignment,
                                               unsigned AddressSpace) {
  assert(Factor >= 2 && "Invalid interleave factor");
  assert(isa<VectorType>(VecTy) && "Expect a vector type");

  if (Factor <= TLI->getMaxSupportedInterleaveFactor()) {
    unsigned NumElts = VecTy->getVectorNumElements();
    Type *SubVecTy = VectorType::get(VecTy->getScalarType(), NumElts / Factor);
    unsigned SubVecSize = DL.getTypeSizeInBits(SubVecTy);

    // ldN/stN only support legal vector types of size 64 or 128 in bits.
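    // For example, interleaving <8 x i16> with Factor == 2 yields a <4 x i16>
    // (64-bit) subvector per group, so a single ld2/st2 with cost Factor == 2.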
    if (NumElts % Factor == 0 && (SubVecSize == 64 || SubVecSize == 128))
      return Factor;
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace);
}

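// Model each live 128-bit vector value as a spill plus a reload around the
// call, since the AAPCS only preserves the low 64 bits of v8-v15 across calls.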
int AArch64TTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) {
  int Cost = 0;
  for (auto *I : Tys) {
    if (!I->isVectorTy())
      continue;
    if (I->getScalarSizeInBits() * I->getVectorNumElements() == 128)
      Cost += getMemoryOpCost(Instruction::Store, I, 128, 0) +
              getMemoryOpCost(Instruction::Load, I, 128, 0);
  }
  return Cost;
}

unsigned AArch64TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  if (ST->isCortexA57() || ST->isKryo())
    return 4;
  return 2;
}

void AArch64TTIImpl::getUnrollingPreferences(Loop *L,
                                             TTI::UnrollingPreferences &UP) {
  // Enable partial unrolling and runtime unrolling.
  BaseT::getUnrollingPreferences(L, UP);

  // An inner loop is more likely to be hot, and its runtime checks can be
  // hoisted out by the LICM pass, lowering their overhead, so try a larger
  // threshold to unroll more loops.
  if (L->getLoopDepth() > 1)
    UP.PartialThreshold *= 2;

  // Disable partial & runtime unrolling on -Os.
  UP.PartialOptSizeThreshold = 0;
}

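// Try to produce the value a matching ldN of the same memory would yield, so
// a redundant ldN following a stN (or another ldN) can reuse it when the
// struct and operand types line up.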
Value *AArch64TTIImpl::getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                                         Type *ExpectedType) {
  switch (Inst->getIntrinsicID()) {
  default:
    return nullptr;
  case Intrinsic::aarch64_neon_st2:
  case Intrinsic::aarch64_neon_st3:
  case Intrinsic::aarch64_neon_st4: {
    // Check that the expected type is a struct matching the stored operands.
    StructType *ST = dyn_cast<StructType>(ExpectedType);
    if (!ST)
      return nullptr;
    unsigned NumElts = Inst->getNumArgOperands() - 1;
    if (ST->getNumElements() != NumElts)
      return nullptr;
    for (unsigned i = 0, e = NumElts; i != e; ++i) {
      if (Inst->getArgOperand(i)->getType() != ST->getElementType(i))
        return nullptr;
    }
    Value *Res = UndefValue::get(ExpectedType);
    IRBuilder<> Builder(Inst);
    for (unsigned i = 0, e = NumElts; i != e; ++i) {
      Value *L = Inst->getArgOperand(i);
      Res = Builder.CreateInsertValue(Res, L, i);
    }
    return Res;
  }
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_ld4:
    if (Inst->getType() == ExpectedType)
      return Inst;
    return nullptr;
  }
}

bool AArch64TTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                        MemIntrinsicInfo &Info) {
  switch (Inst->getIntrinsicID()) {
  default:
    break;
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_ld4:
    Info.ReadMem = true;
    Info.WriteMem = false;
    Info.IsSimple = true;
    Info.NumMemRefs = 1;
    Info.PtrVal = Inst->getArgOperand(0);
    break;
  case Intrinsic::aarch64_neon_st2:
  case Intrinsic::aarch64_neon_st3:
  case Intrinsic::aarch64_neon_st4:
    Info.ReadMem = false;
    Info.WriteMem = true;
    Info.IsSimple = true;
    Info.NumMemRefs = 1;
    Info.PtrVal = Inst->getArgOperand(Inst->getNumArgOperands() - 1);
    break;
  }

  switch (Inst->getIntrinsicID()) {
  default:
    return false;
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_st2:
    Info.MatchingId = VECTOR_LDST_TWO_ELEMENTS;
    break;
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_st3:
    Info.MatchingId = VECTOR_LDST_THREE_ELEMENTS;
    break;
  case Intrinsic::aarch64_neon_ld4:
  case Intrinsic::aarch64_neon_st4:
    Info.MatchingId = VECTOR_LDST_FOUR_ELEMENTS;
    break;
  }
  return true;
}

unsigned AArch64TTIImpl::getCacheLineSize() {
  if (ST->isCyclone())
    return 64;
  return BaseT::getCacheLineSize();
}

unsigned AArch64TTIImpl::getPrefetchDistance() {
  if (ST->isCyclone())
    return 280;
  return BaseT::getPrefetchDistance();
}

unsigned AArch64TTIImpl::getMinPrefetchStride() {
  if (ST->isCyclone())
    // The HW prefetcher handles accesses with strides up to 2KB.
    return 2048;
  return BaseT::getMinPrefetchStride();
}

unsigned AArch64TTIImpl::getMaxPrefetchIterationsAhead() {
  if (ST->isCyclone())
    // Be conservative for now and don't prefetch ahead too much since the loop
    // may terminate early.
    return 3;
  return BaseT::getMaxPrefetchIterationsAhead();
}