//===-- RISCVTargetTransformInfo.cpp - RISC-V specific TTI ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "RISCVTargetTransformInfo.h"
#include "MCTargetDesc/RISCVMatInt.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/TargetLowering.h"
using namespace llvm;

#define DEBUG_TYPE "riscvtti"

static cl::opt<unsigned> RVVRegisterWidthLMUL(
    "riscv-v-register-bit-width-lmul",
    cl::desc(
        "The LMUL to use for getRegisterBitWidth queries. Affects LMUL used "
        "by autovectorized code. Fractional LMULs are not supported."),
    cl::init(1), cl::Hidden);

InstructionCost RISCVTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
                                            TTI::TargetCostKind CostKind) {
  assert(Ty->isIntegerTy() &&
         "getIntImmCost can only estimate cost of materialising integers");

  // We have a Zero register, so 0 is always free.
  if (Imm == 0)
    return TTI::TCC_Free;

  // Otherwise, we check how many instructions it will take to materialise.
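  // For example, a 32-bit constant may lower to an LUI+ADDIW pair, while an
  // arbitrary 64-bit constant may need a longer sequence also involving SLLI.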
  const DataLayout &DL = getDataLayout();
  return RISCVMatInt::getIntMatCost(Imm, DL.getTypeSizeInBits(Ty),
                                    getST()->getFeatureBits());
}

InstructionCost RISCVTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                                const APInt &Imm, Type *Ty,
                                                TTI::TargetCostKind CostKind,
                                                Instruction *Inst) {
  assert(Ty->isIntegerTy() &&
         "getIntImmCost can only estimate cost of materialising integers");

  // We have a Zero register, so 0 is always free.
  if (Imm == 0)
    return TTI::TCC_Free;

  // Some instructions in RISC-V can take a 12-bit immediate. Some of these are
  // commutative, in others the immediate comes from a specific argument index.
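  // The immediate in question is a sign-extended 12-bit value (simm12), i.e.
  // in the range [-2048, 2047].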
  bool Takes12BitImm = false;
  unsigned ImmArgIdx = ~0U;

  switch (Opcode) {
  case Instruction::GetElementPtr:
    // Never hoist any arguments to a GetElementPtr. CodeGenPrepare will
    // split up large offsets in GEP into better parts than ConstantHoisting
    // can.
    return TTI::TCC_Free;
  case Instruction::And:
    // zext.h
    if (Imm == UINT64_C(0xffff) && ST->hasStdExtZbb())
      return TTI::TCC_Free;
    // zext.w (an alias of add.uw with a zero register, provided by Zba)
    if (Imm == UINT64_C(0xffffffff) && ST->hasStdExtZba())
      return TTI::TCC_Free;
    LLVM_FALLTHROUGH;
  case Instruction::Add:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Mul:
    Takes12BitImm = true;
    break;
  case Instruction::Sub:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    Takes12BitImm = true;
    ImmArgIdx = 1;
    break;
  default:
    break;
  }

  if (Takes12BitImm) {
    // Check immediate is the correct argument...
    if (Instruction::isCommutative(Opcode) || Idx == ImmArgIdx) {
      // ... and fits into the 12-bit immediate.
      if (Imm.getMinSignedBits() <= 64 &&
          getTLI()->isLegalAddImmediate(Imm.getSExtValue())) {
        return TTI::TCC_Free;
      }
    }

    // Otherwise, use the full materialisation cost.
    return getIntImmCost(Imm, Ty, CostKind);
  }

  // By default, prevent hoisting.
  return TTI::TCC_Free;
}

InstructionCost
RISCVTTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                  const APInt &Imm, Type *Ty,
                                  TTI::TargetCostKind CostKind) {
  // Prevent hoisting in unknown cases.
  return TTI::TCC_Free;
}

TargetTransformInfo::PopcntSupportKind
RISCVTTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
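  // Zbb provides a native cpop instruction, making popcount cheap in hardware.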
  return ST->hasStdExtZbb() ? TTI::PSK_FastHardware : TTI::PSK_Software;
}

bool RISCVTTIImpl::shouldExpandReduction(const IntrinsicInst *II) const {
  // Currently, the ExpandReductions pass can't expand scalable-vector
  // reductions, but we still request expansion as RVV doesn't support certain
  // reductions and the SelectionDAG can't legalize them either.
  switch (II->getIntrinsicID()) {
  default:
    return false;
  // These reductions have no equivalent in RVV.
  case Intrinsic::vector_reduce_mul:
  case Intrinsic::vector_reduce_fmul:
    return true;
  }
}
Optional<unsigned> RISCVTTIImpl::getMaxVScale() const {
  // The V specification makes no assumption about the maximum vector length,
  // so we use the value specified by the user, if any, as the assumed maximum
  // vector length, and derive from it the maximum vscale for the
  // LoopVectorizer. If the user does not specify a maximum vector length,
  // we have no way of knowing whether vectorizing is safe.
  // We only consider using a single vector register (LMUL = 1) to vectorize.
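  // For example, with a user-specified maximum of 256 bits (e.g. via
  // -riscv-v-vector-bits-max=256), the maximum vscale is
  // 256 / RVVBitsPerBlock (64) = 4.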
  unsigned MaxVectorSizeInBits = ST->getMaxRVVVectorSizeInBits();
  if (ST->hasVInstructions() && MaxVectorSizeInBits != 0)
    return MaxVectorSizeInBits / RISCV::RVVBitsPerBlock;
  return BaseT::getMaxVScale();
}

TypeSize
RISCVTTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
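  // Clamp the requested LMUL to the legal range [1, 8] and round it down to a
  // power of two, since LMUL register groupings are power-of-two sized.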
  unsigned LMUL = PowerOf2Floor(
      std::max<unsigned>(std::min<unsigned>(RVVRegisterWidthLMUL, 8), 1));
  switch (K) {
  case TargetTransformInfo::RGK_Scalar:
    return TypeSize::getFixed(ST->getXLen());
  case TargetTransformInfo::RGK_FixedWidthVector:
    return TypeSize::getFixed(
        ST->hasVInstructions() ? LMUL * ST->getMinRVVVectorSizeInBits() : 0);
  case TargetTransformInfo::RGK_ScalableVector:
    return TypeSize::getScalable(
        ST->hasVInstructions() ? LMUL * RISCV::RVVBitsPerBlock : 0);
  }

  llvm_unreachable("Unsupported register kind");
}

InstructionCost RISCVTTIImpl::getSpliceCost(VectorType *Tp, int Index) {
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

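  // A splice extracts a window of elements spanning the boundary between two
  // concatenated vectors; RVV realizes this with a vslidedown on the first
  // operand followed by a vslideup merging in the second.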
  unsigned Cost = 2; // vslidedown+vslideup.
  // TODO: LMUL should increase cost.
  // TODO: Multiplying by LT.first implies this legalizes into multiple copies
  // of similar code, but I think we expand through memory.
  return Cost * LT.first;
}

InstructionCost RISCVTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
                                             VectorType *Tp, ArrayRef<int> Mask,
                                             int Index, VectorType *SubTp) {
  if (Kind == TTI::SK_Splice && isa<ScalableVectorType>(Tp))
    return getSpliceCost(Tp, Index);
  return BaseT::getShuffleCost(Kind, Tp, Mask, Index, SubTp);
}

InstructionCost
RISCVTTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
                                    unsigned AddressSpace,
                                    TTI::TargetCostKind CostKind) {
  if (!isa<ScalableVectorType>(Src))
    return BaseT::getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
                                        CostKind);

  return getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, CostKind);
}

InstructionCost RISCVTTIImpl::getGatherScatterOpCost(
    unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
    Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) {
  if (CostKind != TTI::TCK_RecipThroughput)
    return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                         Alignment, CostKind, I);

  if ((Opcode == Instruction::Load &&
       !isLegalMaskedGather(DataTy, Align(Alignment))) ||
      (Opcode == Instruction::Store &&
       !isLegalMaskedScatter(DataTy, Align(Alignment))))
    return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                         Alignment, CostKind, I);

  // FIXME: Only supporting fixed vectors for now.
  if (!isa<FixedVectorType>(DataTy))
    return BaseT::getGatherScatterOpCost(Opcode, DataTy, Ptr, VariableMask,
                                         Alignment, CostKind, I);

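  // Model the gather/scatter as one scalar memory operation per element: the
  // indexed access touches each element's address independently.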
  auto *VTy = cast<FixedVectorType>(DataTy);
  unsigned NumLoads = VTy->getNumElements();
  InstructionCost MemOpCost =
      getMemoryOpCost(Opcode, VTy->getElementType(), Alignment, 0, CostKind, I);
  return NumLoads * MemOpCost;
}

InstructionCost
RISCVTTIImpl::getMinMaxReductionCost(VectorType *Ty, VectorType *CondTy,
                                     bool IsUnsigned,
                                     TTI::TargetCostKind CostKind) {
  // FIXME: Only supporting fixed vectors for now.
  if (!isa<FixedVectorType>(Ty))
    return BaseT::getMinMaxReductionCost(Ty, CondTy, IsUnsigned, CostKind);

  if (!ST->useRVVForFixedLengthVectors())
    return BaseT::getMinMaxReductionCost(Ty, CondTy, IsUnsigned, CostKind);

  // Skip if scalar size of Ty is bigger than ELEN.
  if (Ty->getScalarSizeInBits() > ST->getMaxELENForFixedLengthVectors())
    return BaseT::getMinMaxReductionCost(Ty, CondTy, IsUnsigned, CostKind);

  // An IR reduction is composed of two vmv instructions and one RVV reduction
  // instruction.
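  // In the formula below, (LT.first - 1) accounts for extra copies when the
  // type must be split during legalization, and Log2_32_Ceil(VL) approximates
  // how the reduction cost grows with the element count.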
  InstructionCost BaseCost = 2;
  unsigned VL = cast<FixedVectorType>(Ty)->getNumElements();
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
  return (LT.first - 1) + BaseCost + Log2_32_Ceil(VL);
}

InstructionCost
RISCVTTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *VTy,
                                         Optional<FastMathFlags> FMF,
                                         TTI::TargetCostKind CostKind) {
  // FIXME: Only supporting fixed vectors for now.
  if (!isa<FixedVectorType>(VTy))
    return BaseT::getArithmeticReductionCost(Opcode, VTy, FMF, CostKind);

  // FIXME: i1 and/or reductions are not supported yet.
  if (VTy->getElementType()->isIntegerTy(1))
    return BaseT::getArithmeticReductionCost(Opcode, VTy, FMF, CostKind);

  if (!ST->useRVVForFixedLengthVectors())
    return BaseT::getArithmeticReductionCost(Opcode, VTy, FMF, CostKind);

  // Skip if scalar size of VTy is bigger than ELEN.
  if (VTy->getScalarSizeInBits() > ST->getMaxELENForFixedLengthVectors())
    return BaseT::getArithmeticReductionCost(Opcode, VTy, FMF, CostKind);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  if (ISD != ISD::ADD && ISD != ISD::OR && ISD != ISD::XOR && ISD != ISD::AND &&
      ISD != ISD::FADD)
    return BaseT::getArithmeticReductionCost(Opcode, VTy, FMF, CostKind);

  // An IR reduction is composed of two vmv instructions and one RVV reduction
  // instruction.
  InstructionCost BaseCost = 2;
  unsigned VL = cast<FixedVectorType>(VTy)->getNumElements();
  std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, VTy);

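  // An ordered (strict FP) reduction must combine elements sequentially, so
  // its cost scales linearly with VL rather than logarithmically.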
  if (TTI::requiresOrderedReduction(FMF))
    return (LT.first - 1) + BaseCost + VL;
  return (LT.first - 1) + BaseCost + Log2_32_Ceil(VL);
}

void RISCVTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                           TTI::UnrollingPreferences &UP,
                                           OptimizationRemarkEmitter *ORE) {
  // TODO: Further tuning on benchmarks and metrics is needed before the
  //       settings below can be enabled more broadly.

  // Only use the custom unrolling preferences below for explicitly supported
  // SiFive cores; all other targets keep the default preferences.
  bool UseDefaultPreferences = true;
  if (ST->getProcFamily() == RISCVSubtarget::SiFive7)
    UseDefaultPreferences = false;

  if (UseDefaultPreferences)
    return BasicTTIImplBase::getUnrollingPreferences(L, SE, UP, ORE);

  // Enable upper-bound unrolling universally, independent of the conditions
  // below.
  UP.UpperBound = true;

  // Disable loop unrolling for Oz and Os.
  UP.OptSizeThreshold = 0;
  UP.PartialOptSizeThreshold = 0;
  if (L->getHeader()->getParent()->hasOptSize())
    return;

  SmallVector<BasicBlock *, 4> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);
  LLVM_DEBUG(dbgs() << "Loop has:\n"
                    << "Blocks: " << L->getNumBlocks() << "\n"
                    << "Exit blocks: " << ExitingBlocks.size() << "\n");

  // Allow at most one exiting block other than the latch. This acts as an
  // early exit and mirrors the profitability calculation of the runtime
  // unroller.
  if (ExitingBlocks.size() > 2)
    return;

  // Limit the CFG of the loop body for targets with a branch predictor.
  // Allowing 4 blocks permits if-then-else diamonds in the body.
  if (L->getNumBlocks() > 4)
    return;

  // Don't unroll vectorized loops, including the remainder loop.
  if (getBooleanLoopAttribute(L, "llvm.loop.isvectorized"))
    return;

  // Scan the loop: don't unroll loops with calls as this could prevent
  // inlining.
  InstructionCost Cost = 0;
  for (auto *BB : L->getBlocks()) {
    for (auto &I : *BB) {
      // Initial setting - Don't unroll loops containing vectorized
      // instructions.
      if (I.getType()->isVectorTy())
        return;

      if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
        if (const Function *F = cast<CallBase>(I).getCalledFunction()) {
          if (!isLoweredToCall(F))
            continue;
        }
        return;
      }

      SmallVector<const Value *> Operands(I.operand_values());
      Cost +=
          getUserCost(&I, Operands, TargetTransformInfo::TCK_SizeAndLatency);
    }
  }

  LLVM_DEBUG(dbgs() << "Cost of loop: " << Cost << "\n");

  UP.Partial = true;
  UP.Runtime = true;
  UP.UnrollRemainder = true;
  UP.UnrollAndJam = true;
  UP.UnrollAndJamInnerLoopThreshold = 60;

  // Forcing the unrolling of small loops can be very useful because of the
  // branch-taken cost of the backedge.
  if (Cost < 12)
    UP.Force = true;
}

void RISCVTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                         TTI::PeelingPreferences &PP) {
  BaseT::getPeelingPreferences(L, SE, PP);
}

InstructionCost RISCVTTIImpl::getRegUsageForType(Type *Ty) {
  TypeSize Size = Ty->getPrimitiveSizeInBits();
  if (Ty->isVectorTy()) {
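    // For example, <vscale x 4 x i32> has a known minimum size of 128 bits
    // and so occupies divideCeil(128, 64) = 2 vector register blocks
    // (i.e. LMUL = 2).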
    if (Size.isScalable() && ST->hasVInstructions())
      return divideCeil(Size.getKnownMinValue(), RISCV::RVVBitsPerBlock);

    if (ST->useRVVForFixedLengthVectors())
      return divideCeil(Size, ST->getMinRVVVectorSizeInBits());
  }

  return BaseT::getRegUsageForType(Ty);
}
378