1 //===- HexagonTargetTransformInfo.cpp - Hexagon specific TTI pass ---------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 /// \file
8 /// This file implements a TargetTransformInfo analysis pass specific to the
9 /// Hexagon target machine. It uses the target's detailed information to provide
10 /// more precise answers to certain TTI queries, while letting the target
11 /// independent and default TTI implementations handle the rest.
12 ///
13 //===----------------------------------------------------------------------===//
14 
15 #include "HexagonTargetTransformInfo.h"
16 #include "HexagonSubtarget.h"
17 #include "llvm/Analysis/TargetTransformInfo.h"
18 #include "llvm/CodeGen/ValueTypes.h"
19 #include "llvm/IR/InstrTypes.h"
20 #include "llvm/IR/Instructions.h"
21 #include "llvm/IR/User.h"
22 #include "llvm/Support/Casting.h"
23 #include "llvm/Support/CommandLine.h"
24 #include "llvm/Transforms/Utils/LoopPeel.h"
25 #include "llvm/Transforms/Utils/UnrollLoop.h"
26 
using namespace llvm;

// Debug type used by LLVM_DEBUG/-debug-only output for this pass.
#define DEBUG_TYPE "hexagontti"

// Master switch for HVX auto-vectorization; off by default.
static cl::opt<bool> HexagonAutoHVX("hexagon-autohvx", cl::init(false),
  cl::Hidden, cl::desc("Enable loop vectorizer for HVX"));

// Controls shouldBuildLookupTables(); on by default.
static cl::opt<bool> EmitLookupTables("hexagon-emit-lookup-tables",
  cl::init(true), cl::Hidden,
  cl::desc("Control lookup table emission on Hexagon target"));

// Controls isLegalMaskedLoad/isLegalMaskedStore for HVX types; on by default.
static cl::opt<bool> HexagonMaskedVMem("hexagon-masked-vmem", cl::init(true),
  cl::Hidden, cl::desc("Enable masked loads/stores for HVX"));

// Constant "cost factor" to make floating point operations more expensive
// in terms of vectorization cost. This isn't the best way, but it should
// do. Ultimately, the cost should use cycles.
static const unsigned FloatFactor = 4;
45 
46 bool HexagonTTIImpl::useHVX() const {
47   return ST.useHVXOps() && HexagonAutoHVX;
48 }
49 
50 unsigned HexagonTTIImpl::getTypeNumElements(Type *Ty) const {
51   if (auto *VTy = dyn_cast<FixedVectorType>(Ty))
52     return VTy->getNumElements();
53   assert((Ty->isIntegerTy() || Ty->isFloatingPointTy()) &&
54          "Expecting scalar type");
55   return 1;
56 }
57 
/// Report that population count is always cheap on Hexagon.
TargetTransformInfo::PopcntSupportKind
HexagonTTIImpl::getPopcntSupport(unsigned IntTyWidthInBit) const {
  // Return fast hardware support as every input < 64 bits will be promoted
  // to 64 bits.
  return TargetTransformInfo::PSK_FastHardware;
}
64 
65 // The Hexagon target can unroll loops with run-time trip counts.
66 void HexagonTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
67                                              TTI::UnrollingPreferences &UP) {
68   UP.Runtime = UP.Partial = true;
69 }
70 
71 void HexagonTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
72                                            TTI::PeelingPreferences &PP) {
73   BaseT::getPeelingPreferences(L, SE, PP);
74   // Only try to peel innermost loops with small runtime trip counts.
75   if (L && L->isInnermost() && canPeel(L) &&
76       SE.getSmallConstantTripCount(L) == 0 &&
77       SE.getSmallConstantMaxTripCount(L) > 0 &&
78       SE.getSmallConstantMaxTripCount(L) <= 5) {
79     PP.PeelCount = 2;
80   }
81 }
82 
/// Tell loop strength reduction to prefer post-indexed addressing when
/// forming address expressions on Hexagon.
TTI::AddressingModeKind
HexagonTTIImpl::getPreferredAddressingMode(const Loop *L,
                                           ScalarEvolution *SE) const {
  return TTI::AMK_PostIndexed;
}
88 
89 /// --- Vector TTI begin ---
90 
91 unsigned HexagonTTIImpl::getNumberOfRegisters(bool Vector) const {
92   if (Vector)
93     return useHVX() ? 32 : 0;
94   return 32;
95 }
96 
97 unsigned HexagonTTIImpl::getMaxInterleaveFactor(unsigned VF) {
98   return useHVX() ? 2 : 1;
99 }
100 
101 unsigned HexagonTTIImpl::getRegisterBitWidth(bool Vector) const {
102   return Vector ? getMinVectorRegisterBitWidth() : 32;
103 }
104 
105 unsigned HexagonTTIImpl::getMinVectorRegisterBitWidth() const {
106   return useHVX() ? ST.getVectorLength()*8 : 32;
107 }
108 
109 ElementCount HexagonTTIImpl::getMinimumVF(unsigned ElemWidth,
110                                           bool IsScalable) const {
111   assert(!IsScalable && "Scalable VFs are not supported for Hexagon");
112   return ElementCount::getFixed((8 * ST.getVectorLength()) / ElemWidth);
113 }
114 
/// Cost of inserting/extracting the demanded elements of a vector.
/// Deferred entirely to the default implementation.
unsigned HexagonTTIImpl::getScalarizationOverhead(VectorType *Ty,
                                                  const APInt &DemandedElts,
                                                  bool Insert, bool Extract) {
  return BaseT::getScalarizationOverhead(Ty, DemandedElts, Insert, Extract);
}
120 
/// Scalarization overhead for the operands of a vectorized call/instruction.
/// Deferred entirely to the default implementation.
unsigned
HexagonTTIImpl::getOperandsScalarizationOverhead(ArrayRef<const Value *> Args,
                                                 ArrayRef<Type *> Tys) {
  return BaseT::getOperandsScalarizationOverhead(Args, Tys);
}
126 
/// Cost of a call instruction; no Hexagon-specific adjustment, defer to
/// the default implementation.
unsigned HexagonTTIImpl::getCallInstrCost(Function *F, Type *RetTy,
      ArrayRef<Type*> Tys, TTI::TargetCostKind CostKind) {
  return BaseT::getCallInstrCost(F, RetTy, Tys, CostKind);
}
131 
132 unsigned
133 HexagonTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
134                                       TTI::TargetCostKind CostKind) {
135   if (ICA.getID() == Intrinsic::bswap) {
136     std::pair<int, MVT> LT = TLI.getTypeLegalizationCost(DL, ICA.getReturnType());
137     return LT.first + 2;
138   }
139   return BaseT::getIntrinsicInstrCost(ICA, CostKind);
140 }
141 
/// Address computations are reported as free on Hexagon.
unsigned HexagonTTIImpl::getAddressComputationCost(Type *Tp,
      ScalarEvolution *SE, const SCEV *S) {
  return 0;
}
146 
/// Cost of a load or store of type \p Src. Vector loads get a
/// Hexagon-specific model (HVX vs. non-HVX, alignment-sensitive); stores
/// and scalar operations defer to the default implementation.
unsigned HexagonTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                         MaybeAlign Alignment,
                                         unsigned AddressSpace,
                                         TTI::TargetCostKind CostKind,
                                         const Instruction *I) {
  assert(Opcode == Instruction::Load || Opcode == Instruction::Store);
  // TODO: Handle other cost kinds.
  if (CostKind != TTI::TCK_RecipThroughput)
    return 1;

  // Stores are not modeled specially here.
  if (Opcode == Instruction::Store)
    return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                  CostKind, I);

  if (Src->isVectorTy()) {
    VectorType *VecTy = cast<VectorType>(Src);
    unsigned VecWidth = VecTy->getPrimitiveSizeInBits().getFixedSize();
    if (useHVX() && ST.isTypeForHVX(VecTy)) {
      unsigned RegWidth = getRegisterBitWidth(true);
      assert(RegWidth && "Non-zero vector register width expected");
      // Cost of HVX loads.
      if (VecWidth % RegWidth == 0)
        return VecWidth / RegWidth;
      // Cost of constructing HVX vector from scalar loads
      // Clamp the effective alignment to at most one vector register.
      const Align RegAlign(RegWidth / 8);
      if (!Alignment || *Alignment > RegAlign)
        Alignment = RegAlign;
      assert(Alignment);
      unsigned AlignWidth = 8 * Alignment->value();
      // Number of aligned chunks needed to cover the vector, times a
      // fixed per-load factor of 3.
      unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
      return 3 * NumLoads;
    }

    // Non-HVX vectors.
    // Add extra cost for floating point types.
    unsigned Cost =
        VecTy->getElementType()->isFloatingPointTy() ? FloatFactor : 1;

    // At this point unspecified alignment is considered as Align(1).
    const Align BoundAlignment = std::min(Alignment.valueOrOne(), Align(8));
    unsigned AlignWidth = 8 * BoundAlignment.value();
    unsigned NumLoads = alignTo(VecWidth, AlignWidth) / AlignWidth;
    if (Alignment == Align(4) || Alignment == Align(8))
      return Cost * NumLoads;
    // Loads of less than 32 bits will need extra inserts to compose a vector.
    assert(BoundAlignment <= Align(8));
    unsigned LogA = Log2(BoundAlignment);
    return (3 - LogA) * Cost * NumLoads;
  }

  return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                CostKind, I);
}
200 
/// Cost of a masked load/store; no Hexagon-specific adjustment, defer to
/// the default implementation.
unsigned HexagonTTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
                                               Align Alignment,
                                               unsigned AddressSpace,
                                               TTI::TargetCostKind CostKind) {
  return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                      CostKind);
}
208 
/// All shuffles are reported with a flat cost of 1, regardless of kind,
/// type, or index.
unsigned HexagonTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp,
      int Index, Type *SubTp) {
  return 1;
}
213 
/// Cost of a gather/scatter; no Hexagon-specific adjustment, defer to the
/// default implementation.
unsigned HexagonTTIImpl::getGatherScatterOpCost(
    unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
    Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) {
  return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                       Alignment, CostKind, I);
}
220 
221 unsigned HexagonTTIImpl::getInterleavedMemoryOpCost(
222     unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
223     Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
224     bool UseMaskForCond, bool UseMaskForGaps) {
225   if (Indices.size() != Factor || UseMaskForCond || UseMaskForGaps)
226     return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
227                                              Alignment, AddressSpace,
228                                              CostKind,
229                                              UseMaskForCond, UseMaskForGaps);
230   return getMemoryOpCost(Opcode, VecTy, MaybeAlign(Alignment), AddressSpace,
231                          CostKind);
232 }
233 
234 unsigned HexagonTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
235                                             Type *CondTy,
236                                             CmpInst::Predicate VecPred,
237                                             TTI::TargetCostKind CostKind,
238                                             const Instruction *I) {
239   if (ValTy->isVectorTy() && CostKind == TTI::TCK_RecipThroughput) {
240     std::pair<int, MVT> LT = TLI.getTypeLegalizationCost(DL, ValTy);
241     if (Opcode == Instruction::FCmp)
242       return LT.first + FloatFactor * getTypeNumElements(ValTy);
243   }
244   return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
245 }
246 
247 unsigned HexagonTTIImpl::getArithmeticInstrCost(
248     unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
249     TTI::OperandValueKind Opd1Info,
250     TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
251     TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
252     const Instruction *CxtI) {
253   // TODO: Handle more cost kinds.
254   if (CostKind != TTI::TCK_RecipThroughput)
255     return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
256                                          Opd2Info, Opd1PropInfo,
257                                          Opd2PropInfo, Args, CxtI);
258 
259   if (Ty->isVectorTy()) {
260     std::pair<int, MVT> LT = TLI.getTypeLegalizationCost(DL, Ty);
261     if (LT.second.isFloatingPoint())
262       return LT.first + FloatFactor * getTypeNumElements(Ty);
263   }
264   return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info, Opd2Info,
265                                        Opd1PropInfo, Opd2PropInfo, Args, CxtI);
266 }
267 
268 unsigned HexagonTTIImpl::getCastInstrCost(unsigned Opcode, Type *DstTy,
269                                           Type *SrcTy, TTI::CastContextHint CCH,
270                                           TTI::TargetCostKind CostKind,
271                                           const Instruction *I) {
272   if (SrcTy->isFPOrFPVectorTy() || DstTy->isFPOrFPVectorTy()) {
273     unsigned SrcN = SrcTy->isFPOrFPVectorTy() ? getTypeNumElements(SrcTy) : 0;
274     unsigned DstN = DstTy->isFPOrFPVectorTy() ? getTypeNumElements(DstTy) : 0;
275 
276     std::pair<int, MVT> SrcLT = TLI.getTypeLegalizationCost(DL, SrcTy);
277     std::pair<int, MVT> DstLT = TLI.getTypeLegalizationCost(DL, DstTy);
278     unsigned Cost = std::max(SrcLT.first, DstLT.first) + FloatFactor * (SrcN + DstN);
279     // TODO: Allow non-throughput costs that aren't binary.
280     if (CostKind != TTI::TCK_RecipThroughput)
281       return Cost == 0 ? 0 : 1;
282     return Cost;
283   }
284   return 1;
285 }
286 
287 unsigned HexagonTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
288       unsigned Index) {
289   Type *ElemTy = Val->isVectorTy() ? cast<VectorType>(Val)->getElementType()
290                                    : Val;
291   if (Opcode == Instruction::InsertElement) {
292     // Need two rotations for non-zero index.
293     unsigned Cost = (Index != 0) ? 2 : 0;
294     if (ElemTy->isIntegerTy(32))
295       return Cost;
296     // If it's not a 32-bit value, there will need to be an extract.
297     return Cost + getVectorInstrCost(Instruction::ExtractElement, Val, Index);
298   }
299 
300   if (Opcode == Instruction::ExtractElement)
301     return 2;
302 
303   return 1;
304 }
305 
306 bool HexagonTTIImpl::isLegalMaskedStore(Type *DataType, Align /*Alignment*/) {
307   return HexagonMaskedVMem && ST.isTypeForHVX(DataType);
308 }
309 
310 bool HexagonTTIImpl::isLegalMaskedLoad(Type *DataType, Align /*Alignment*/) {
311   return HexagonMaskedVMem && ST.isTypeForHVX(DataType);
312 }
313 
314 /// --- Vector TTI end ---
315 
/// Prefetch distance comes straight from the subtarget's L1 parameters.
unsigned HexagonTTIImpl::getPrefetchDistance() const {
  return ST.getL1PrefetchDistance();
}
319 
/// Cache line size comes straight from the subtarget's L1 parameters.
unsigned HexagonTTIImpl::getCacheLineSize() const {
  return ST.getL1CacheLineSize();
}
323 
324 int
325 HexagonTTIImpl::getUserCost(const User *U,
326                             ArrayRef<const Value *> Operands,
327                             TTI::TargetCostKind CostKind) {
328   auto isCastFoldedIntoLoad = [this](const CastInst *CI) -> bool {
329     if (!CI->isIntegerCast())
330       return false;
331     // Only extensions from an integer type shorter than 32-bit to i32
332     // can be folded into the load.
333     const DataLayout &DL = getDataLayout();
334     unsigned SBW = DL.getTypeSizeInBits(CI->getSrcTy());
335     unsigned DBW = DL.getTypeSizeInBits(CI->getDestTy());
336     if (DBW != 32 || SBW >= DBW)
337       return false;
338 
339     const LoadInst *LI = dyn_cast<const LoadInst>(CI->getOperand(0));
340     // Technically, this code could allow multiple uses of the load, and
341     // check if all the uses are the same extension operation, but this
342     // should be sufficient for most cases.
343     return LI && LI->hasOneUse();
344   };
345 
346   if (const CastInst *CI = dyn_cast<const CastInst>(U))
347     if (isCastFoldedIntoLoad(CI))
348       return TargetTransformInfo::TCC_Free;
349   return BaseT::getUserCost(U, Operands, CostKind);
350 }
351 
/// Lookup-table emission for switches is controlled by the
/// -hexagon-emit-lookup-tables flag (enabled by default).
bool HexagonTTIImpl::shouldBuildLookupTables() const {
  return EmitLookupTables;
}
355